/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_mutex		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_mutex		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_mutex
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_file_buffered_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_mutex
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_mutex
 *   ->tasklist_lock            (memory_failure, collect_procs_ao)
 */
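
/*
 * Editor's illustration (not from the original file): a minimal sketch of
 * honouring the i_mutex -> mmap_sem branch of the ordering above, assuming
 * a hypothetical filesystem write path that must fault in a user buffer:
 *
 *	mutex_lock(&inode->i_mutex);
 *	fault_in_pages_readable(buf, count);	(may take mmap_sem)
 *	...
 *	mutex_unlock(&inode->i_mutex);
 *
 * Acquiring i_mutex while already holding mmap_sem would invert the
 * documented order and risk deadlock.
 */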

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 *
	 * Fix it up by doing a final dirty accounting check after
	 * having removed the page entirely.
	 */
	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	}
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;
	spin_lock_irq(&mapping->tree_lock);
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);
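
/*
 * Editor's note (not from the original file): a minimal caller sketch.
 * The caller must hold the page lock and its own reference:
 *
 *	lock_page(page);
 *	if (page->mapping)			(not already truncated)
 *		delete_from_page_cache(page);
 *	unlock_page(page);
 *	page_cache_release(page);		(drop the caller's reference)
 */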

static int sleep_on_page(void *word)
{
	io_schedule();
	return 0;
}

static int sleep_on_page_killable(void *word)
{
	sleep_on_page(word);
	return fatal_signal_pending(current) ? -EINTR : 0;
}

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret2, ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned an error, the pages may have
		 * been written partially (e.g. -ENOSPC), so we wait for it.
		 * But -EIO is a special case: it may indicate that the worst
		 * thing (e.g. a bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
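
/*
 * Editor's illustration (not from the original file): the classic consumer
 * of the helper above is a filesystem's ->fsync() method, sketched here
 * under that assumption:
 *
 *	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
 *	if (err)
 *		return err;
 *	... then flush the inode's metadata to storage ...
 */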

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(old));
	VM_BUG_ON(!PageLocked(new));
	VM_BUG_ON(new->mapping);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		__delete_from_page_cache(old);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;
		__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
		/* mem_cgroup code must not be called under tree_lock */
		mem_cgroup_replace_page_cache(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
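
/*
 * Editor's note (not from the original file): a sketch of how a caller is
 * expected to drive the helper above, assuming both pages are already
 * allocated and the new page should end up on the file LRU:
 *
 *	lock_page(old);
 *	lock_page(new);
 *	err = replace_page_cache_page(old, new, GFP_KERNEL);
 *	if (!err)
 *		lru_cache_add_file(new);	(caller adds @new to the LRU)
 *	unlock_page(new);
 *	unlock_page(old);
 */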

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache.  The page must be
 * locked.  This function does not add the page to the LRU; the caller must
 * do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapBacked(page));

	error = mem_cgroup_cache_charge(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		goto out;

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error == 0) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (likely(!error)) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			spin_unlock_irq(&mapping->tree_lock);
			trace_mm_filemap_add_to_page_cache(page);
		} else {
			page->mapping = NULL;
			/* Leave page->index set: truncation relies upon it */
			spin_unlock_irq(&mapping->tree_lock);
			mem_cgroup_uncharge_cache_page(page);
			page_cache_release(page);
		}
		radix_tree_preload_end();
	} else
		mem_cgroup_uncharge_cache_page(page);
out:
	return error;
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	int ret;

	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add_file(page);
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
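
/*
 * Editor's illustration (not from the original file): the usual way a new
 * page enters the cache on the read path, assuming GFP_KERNEL context
 * (page_cache_read() later in this file is the canonical in-tree example):
 *
 *	page = page_cache_alloc_cold(mapping);
 *	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (!err)
 *		err = mapping->a_ops->readpage(file, page);
 *	page_cache_release(page);
 */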

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = get_mems_allowed();
			n = cpuset_mem_spread_node();
			page = alloc_pages_exact_node(n, gfp, 0);
		} while (!put_mems_allowed(cpuset_mems_cookie) && !page);

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages.  Rather than one waitqueue per page,
 * a hash table of waitqueues is used: all waiters for pages that hash to
 * the same bucket share one queue, and all of them are woken when any of
 * those pages becomes available; each woken context then re-checks that
 * the page it cares about really did become available.  This saves space
 * at the cost of "thundering herd" phenomena during rare hash collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     sleep_on_page_killable, TASK_KILLABLE);
}

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	if (TestClearPageReclaim(page))
		rotate_reclaimable_page(page);

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
					sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though we return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}
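
/*
 * Editor's illustration (not from the original file): how a fault handler
 * consumes the return value above, via the lock_page_or_retry() wrapper
 * (filemap_fault() below does exactly this):
 *
 *	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
 *		page_cache_release(page);
 *		return ret | VM_FAULT_RETRY;	(mmap_sem may have been dropped)
 *	}
 */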

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);
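
/*
 * Editor's illustration (not from the original file): a hedged usage
 * sketch.  A caller can use this to find where cached pages stop, e.g.
 * to avoid issuing reads for ranges that are already resident:
 *
 *	hole = page_cache_next_hole(mapping, index, max);
 *	if (hole - index >= max)
 *		... every index in [index, index + max) is populated ...
 */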

/**
 * find_get_page_flags - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: FGP_xxx behaviour flags (e.g. FGP_ACCESSED)
 *
 * Is there a pagecache struct page at the given (mapping, offset) tuple?
 * If yes, increment its refcount and return it; if no, return NULL.
 */
struct page *find_get_page_flags(struct address_space *mapping, pgoff_t offset,
				 int fgp_flags)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so return it without
			 * attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_page_flags);

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns %NULL if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_page(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON(page->index != offset);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_page);

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or %NULL on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		pgoff_t index, gfp_t gfp_mask)
{
	struct page *page;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;
		/*
		 * We want a regular kernel memory (not highmem or DMA etc)
		 * allocation for the radix tree nodes, but we need to honour
		 * the context-specific requirements the caller has asked for.
		 * GFP_RECLAIM_MASK collects those requirements.
		 */
		err = add_to_page_cache_lru(page, mapping, index,
			(gfp_mask & GFP_RECLAIM_MASK));
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}
	return page;
}
EXPORT_SYMBOL(find_or_create_page);
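
/*
 * Editor's illustration (not from the original file): the typical caller
 * pattern for the helper above, e.g. a filesystem preparing to fill a
 * block-sized region (GFP_KERNEL assumed):
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	... fill or examine the locked page ...
 *	unlock_page(page);
 *	page_cache_release(page);
 */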

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(iter.index);
				goto restart;
			}
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so skip over it -
			 * we only reach this from invalidate_mapping_pages().
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}
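
/*
 * Editor's illustration (not from the original file): the usual consumer
 * loop for a gang lookup, dropping the references the lookup took:
 *
 *	struct page *pages[PAGEVEC_SIZE];
 *	unsigned i, nr;
 *
 *	while ((nr = find_get_pages(mapping, index, PAGEVEC_SIZE, pages))) {
 *		index = pages[nr - 1]->index + 1;
 *		for (i = 0; i < nr; i++) {
 *			... use pages[i] ...
 *			page_cache_release(pages[i]);
 *		}
 *	}
 */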

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* A hole: there is no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so stop looking for
			 * contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != iter.index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.   We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * This function is never used on a shmem/tmpfs
			 * mapping, so a swap entry won't be found here.
			 */
			BUG();
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (page) {
		if (trylock_page(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @desc:	read_descriptor
 * @actor:	read method
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static void do_generic_file_read(struct file *filp, loff_t *ppos,
		read_descriptor_t *desc, read_actor_t actor)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error;

	trace_mm_filemap_do_generic_file_read(filp, *ppos, desc->count, 1);

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
								desc, offset))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			desc->error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping,
						index, GFP_KERNEL);
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

/*
 * Performs necessary checks before doing a read or write
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @count:	number of bytes to write
 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
 *
 * Adjust number of segments and amount of bytes to write (nr_segs should be
 * properly initialized first). Returns appropriate error code that caller
 * should return or zero in case that write should be allowed.
 */
int generic_segment_checks(const struct iovec *iov,
			unsigned long *nr_segs, size_t *count, int access_flags)
{
	unsigned long   seg;
	size_t cnt = 0;
	for (seg = 0; seg < *nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		cnt += iv->iov_len;
		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		*nr_segs = seg;
		cnt -= iv->iov_len;	/* This segment is no good */
		break;
	}
	*count = cnt;
	return 0;
}
EXPORT_SYMBOL(generic_segment_checks);

/**
 * generic_file_aio_read - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @pos:	current file position
 *
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg = 0;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	count = 0;
	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		if (pos < size) {
			retval = filemap_write_and_wait_range(mapping, pos,
					pos + iov_length(iov, nr_segs) - 1);
			if (!retval) {
				retval = mapping->a_ops->direct_IO(READ, iocb,
							iov, pos, nr_segs);
			}
			if (retval > 0) {
				*ppos = pos + retval;
				count -= retval;
			}

			/*
			 * Btrfs can have a short DIO read if we encounter
			 * compressed extents, so if there was an error, or if
			 * we've already read everything we wanted to, or if
			 * there was a short read because we hit EOF, go ahead
			 * and return.  Otherwise fallthrough to buffered io for
			 * the rest of the read.
			 */
			if (retval < 0 || !count || *ppos >= size) {
				file_accessed(filp);
				goto out;
			}
		}
	}

	count = retval;
	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;
		loff_t offset = 0;

		/*
		 * If we did a short DIO read we need to skip the section of the
		 * iov that we've already read data into.
		 */
		if (count) {
			if (count > iov[seg].iov_len) {
				count -= iov[seg].iov_len;
				continue;
			}
			offset = count;
			count = 0;
		}

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base + offset;
		desc.count = iov[seg].iov_len - offset;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_generic_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_aio_read);

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	unsigned long ra_pages;
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(vma))
		return;
	if (!ra->ra_pages)
		return;

	if (VM_SequentialReadHint(vma)) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra_pages = max_sane_readahead(ra->ra_pages);
	ra->start = max_t(long, 0, offset - ra_pages / 2);
	ra->size = ra_pages;
	ra->async_size = ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further..
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(vma))
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}

/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	pgoff_t size;
	int ret = 0;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (offset >= size)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON(page->index != offset);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (unlikely(offset >= size)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;
1768 
1769 	/* Things didn't work out. Tell the mm layer so by returning VM_FAULT_SIGBUS. */
1770 	shrink_readahead_size_eio(file, ra);
1771 	return VM_FAULT_SIGBUS;
1772 }
1773 EXPORT_SYMBOL(filemap_fault);
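/*
 * Illustrative sketch (not part of this file): a filesystem can use
 * generic_file_vm_ops below as-is, or wrap filemap_fault in its own
 * vm_operations_struct when it needs a private ->page_mkwrite.  The
 * "myfs_" names are hypothetical:
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *		.remap_pages	= generic_file_remap_pages,
 *	};
 */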
1774 
1775 int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1776 {
1777 	struct page *page = vmf->page;
1778 	struct inode *inode = file_inode(vma->vm_file);
1779 	int ret = VM_FAULT_LOCKED;
1780 
1781 	sb_start_pagefault(inode->i_sb);
1782 	file_update_time(vma->vm_file);
1783 	lock_page(page);
1784 	if (page->mapping != inode->i_mapping) {
1785 		unlock_page(page);
1786 		ret = VM_FAULT_NOPAGE;
1787 		goto out;
1788 	}
1789 	/*
1790 	 * We mark the page dirty already here so that when freeze is in
1791 	 * progress, we are guaranteed that writeback during freezing will
1792 	 * see the dirty page and writeprotect it again.
1793 	 */
1794 	set_page_dirty(page);
1795 	wait_for_stable_page(page);
1796 out:
1797 	sb_end_pagefault(inode->i_sb);
1798 	return ret;
1799 }
1800 EXPORT_SYMBOL(filemap_page_mkwrite);
1801 
1802 const struct vm_operations_struct generic_file_vm_ops = {
1803 	.fault		= filemap_fault,
1804 	.page_mkwrite	= filemap_page_mkwrite,
1805 	.remap_pages	= generic_file_remap_pages,
1806 };
1807 
1808 /* This is used for a general mmap of a disk file */
1809 
1810 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1811 {
1812 	struct address_space *mapping = file->f_mapping;
1813 
1814 	if (!mapping->a_ops->readpage)
1815 		return -ENOEXEC;
1816 	file_accessed(file);
1817 	vma->vm_ops = &generic_file_vm_ops;
1818 	return 0;
1819 }
1820 
1821 /*
1822  * This is for filesystems which do not implement ->writepage.
1823  */
1824 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1825 {
1826 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1827 		return -EINVAL;
1828 	return generic_file_mmap(file, vma);
1829 }
1830 #else
1831 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1832 {
1833 	return -ENOSYS;
1834 }
1835 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
1836 {
1837 	return -ENOSYS;
1838 }
1839 #endif /* CONFIG_MMU */
1840 
1841 EXPORT_SYMBOL(generic_file_mmap);
1842 EXPORT_SYMBOL(generic_file_readonly_mmap);
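/*
 * Illustrative sketch (not part of this file): a filesystem that does
 * not implement ->writepage should expose the readonly variant in its
 * file_operations, e.g.:
 *
 *	.mmap		= generic_file_readonly_mmap,
 */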
1843 
1844 static struct page *__read_cache_page(struct address_space *mapping,
1845 				pgoff_t index,
1846 				int (*filler)(void *, struct page *),
1847 				void *data,
1848 				gfp_t gfp)
1849 {
1850 	struct page *page;
1851 	int err;
1852 repeat:
1853 	page = find_get_page(mapping, index);
1854 	if (!page) {
1855 		page = __page_cache_alloc(gfp | __GFP_COLD);
1856 		if (!page)
1857 			return ERR_PTR(-ENOMEM);
1858 		err = add_to_page_cache_lru(page, mapping, index, gfp);
1859 		if (unlikely(err)) {
1860 			page_cache_release(page);
1861 			if (err == -EEXIST)
1862 				goto repeat;
1863 			/* Presumably ENOMEM for radix tree node */
1864 			return ERR_PTR(err);
1865 		}
1866 		err = filler(data, page);
1867 		if (err < 0) {
1868 			page_cache_release(page);
1869 			page = ERR_PTR(err);
1870 		}
1871 	}
1872 	return page;
1873 }
1874 
1875 static struct page *do_read_cache_page(struct address_space *mapping,
1876 				pgoff_t index,
1877 				int (*filler)(void *, struct page *),
1878 				void *data,
1879 				gfp_t gfp)
1880 
1881 {
1882 	struct page *page;
1883 	int err;
1884 
1885 retry:
1886 	page = __read_cache_page(mapping, index, filler, data, gfp);
1887 	if (IS_ERR(page))
1888 		return page;
1889 	if (PageUptodate(page))
1890 		goto out;
1891 
1892 	lock_page(page);
1893 	if (!page->mapping) {
1894 		unlock_page(page);
1895 		page_cache_release(page);
1896 		goto retry;
1897 	}
1898 	if (PageUptodate(page)) {
1899 		unlock_page(page);
1900 		goto out;
1901 	}
1902 	err = filler(data, page);
1903 	if (err < 0) {
1904 		page_cache_release(page);
1905 		return ERR_PTR(err);
1906 	}
1907 out:
1908 	mark_page_accessed(page);
1909 	return page;
1910 }
1911 
1912 /**
1913  * read_cache_page_async - read into page cache, fill it if needed
1914  * @mapping:	the page's address_space
1915  * @index:	the page index
1916  * @filler:	function to perform the read
1917  * @data:	first arg to filler(data, page) function, often left as NULL
1918  *
1919  * Same as read_cache_page, but don't wait for page to become unlocked
1920  * after submitting it to the filler.
1921  *
1922  * Read into the page cache. If a page already exists, and PageUptodate() is
1923  * not set, try to fill the page but don't wait for it to become unlocked.
1924  *
1925  * If the page does not get brought uptodate, return -EIO.
1926  */
1927 struct page *read_cache_page_async(struct address_space *mapping,
1928 				pgoff_t index,
1929 				int (*filler)(void *, struct page *),
1930 				void *data)
1931 {
1932 	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
1933 }
1934 EXPORT_SYMBOL(read_cache_page_async);
1935 
1936 static struct page *wait_on_page_read(struct page *page)
1937 {
1938 	if (!IS_ERR(page)) {
1939 		wait_on_page_locked(page);
1940 		if (!PageUptodate(page)) {
1941 			page_cache_release(page);
1942 			page = ERR_PTR(-EIO);
1943 		}
1944 	}
1945 	return page;
1946 }
1947 
1948 /**
1949  * read_cache_page_gfp - read into page cache, using specified page allocation flags.
1950  * @mapping:	the page's address_space
1951  * @index:	the page index
1952  * @gfp:	the page allocator flags to use if allocating
1953  *
1954  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
1955  * any new page allocations done using the specified allocation flags.
1956  *
1957  * If the page does not get brought uptodate, return -EIO.
1958  */
1959 struct page *read_cache_page_gfp(struct address_space *mapping,
1960 				pgoff_t index,
1961 				gfp_t gfp)
1962 {
1963 	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
1964 
1965 	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
1966 }
1967 EXPORT_SYMBOL(read_cache_page_gfp);
1968 
1969 /**
1970  * read_cache_page - read into page cache, fill it if needed
1971  * @mapping:	the page's address_space
1972  * @index:	the page index
1973  * @filler:	function to perform the read
1974  * @data:	first arg to filler(data, page) function, often left as NULL
1975  *
1976  * Read into the page cache. If a page already exists, and PageUptodate() is
1977  * not set, try to fill the page then wait for it to become unlocked.
1978  *
1979  * If the page does not get brought uptodate, return -EIO.
1980  */
1981 struct page *read_cache_page(struct address_space *mapping,
1982 				pgoff_t index,
1983 				int (*filler)(void *, struct page *),
1984 				void *data)
1985 {
1986 	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
1987 }
1988 EXPORT_SYMBOL(read_cache_page);
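/*
 * Illustrative sketch (not part of this file): with ->readpage as the
 * filler -- which is what read_mapping_page() in pagemap.h does -- a
 * caller might read a single page like so:
 *
 *	struct page *page;
 *
 *	page = read_cache_page(mapping, index,
 *			(filler_t *)mapping->a_ops->readpage, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...use the page contents...
 *	page_cache_release(page);
 */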
1989 
1990 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
1991 			const struct iovec *iov, size_t base, size_t bytes)
1992 {
1993 	size_t copied = 0, left = 0;
1994 
1995 	while (bytes) {
1996 		char __user *buf = iov->iov_base + base;
1997 		int copy = min(bytes, iov->iov_len - base);
1998 
1999 		base = 0;
2000 		left = __copy_from_user_inatomic(vaddr, buf, copy);
2001 		copied += copy;
2002 		bytes -= copy;
2003 		vaddr += copy;
2004 		iov++;
2005 
2006 		if (unlikely(left))
2007 			break;
2008 	}
2009 	return copied - left;
2010 }
2011 
2012 /*
2013  * Copy as much as we can into the page and return the number of bytes which
2014  * were successfully copied.  If a fault is encountered part-way through,
2015  * return the number of bytes which were copied before the fault.
2016  */
2017 size_t iov_iter_copy_from_user_atomic(struct page *page,
2018 		struct iov_iter *i, unsigned long offset, size_t bytes)
2019 {
2020 	char *kaddr;
2021 	size_t copied;
2022 
2023 	BUG_ON(!in_atomic());
2024 	kaddr = kmap_atomic(page);
2025 	if (likely(i->nr_segs == 1)) {
2026 		int left;
2027 		char __user *buf = i->iov->iov_base + i->iov_offset;
2028 		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
2029 		copied = bytes - left;
2030 	} else {
2031 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2032 						i->iov, i->iov_offset, bytes);
2033 	}
2034 	kunmap_atomic(kaddr);
2035 
2036 	return copied;
2037 }
2038 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
2039 
2040 /*
2041  * This has the same side effects and return value as
2042  * iov_iter_copy_from_user_atomic().
2043  * The difference is that it attempts to resolve faults.
2044  * Page must not be locked.
2045  */
2046 size_t iov_iter_copy_from_user(struct page *page,
2047 		struct iov_iter *i, unsigned long offset, size_t bytes)
2048 {
2049 	char *kaddr;
2050 	size_t copied;
2051 
2052 	kaddr = kmap(page);
2053 	if (likely(i->nr_segs == 1)) {
2054 		int left;
2055 		char __user *buf = i->iov->iov_base + i->iov_offset;
2056 		left = __copy_from_user(kaddr + offset, buf, bytes);
2057 		copied = bytes - left;
2058 	} else {
2059 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2060 						i->iov, i->iov_offset, bytes);
2061 	}
2062 	kunmap(page);
2063 	return copied;
2064 }
2065 EXPORT_SYMBOL(iov_iter_copy_from_user);
2066 
2067 void iov_iter_advance(struct iov_iter *i, size_t bytes)
2068 {
2069 	BUG_ON(i->count < bytes);
2070 
2071 	if (likely(i->nr_segs == 1)) {
2072 		i->iov_offset += bytes;
2073 		i->count -= bytes;
2074 	} else {
2075 		const struct iovec *iov = i->iov;
2076 		size_t base = i->iov_offset;
2077 		unsigned long nr_segs = i->nr_segs;
2078 
2079 		/*
2080 		 * The !iov->iov_len check ensures we skip over unlikely
2081 		 * zero-length segments (without overrunning the iovec).
2082 		 */
2083 		while (bytes || unlikely(i->count && !iov->iov_len)) {
2084 			int copy;
2085 
2086 			copy = min(bytes, iov->iov_len - base);
2087 			BUG_ON(!i->count || i->count < copy);
2088 			i->count -= copy;
2089 			bytes -= copy;
2090 			base += copy;
2091 			if (iov->iov_len == base) {
2092 				iov++;
2093 				nr_segs--;
2094 				base = 0;
2095 			}
2096 		}
2097 		i->iov = iov;
2098 		i->iov_offset = base;
2099 		i->nr_segs = nr_segs;
2100 	}
2101 }
2102 EXPORT_SYMBOL(iov_iter_advance);
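/*
 * Illustrative sketch (not part of this file): the canonical consumer
 * pattern -- see generic_perform_write() below -- copies and then
 * advances by however many bytes actually made it:
 *
 *	iov_iter_init(&i, iov, nr_segs, count, 0);
 *	while (iov_iter_count(&i)) {
 *		copied = iov_iter_copy_from_user_atomic(page, &i,
 *							offset, bytes);
 *		iov_iter_advance(&i, copied);
 *		...
 *	}
 */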
2103 
2104 /*
2105  * Fault in the first iovec of the given iov_iter, to a maximum length
2106  * of bytes. Returns 0 on success, or non-zero if the memory could not be
2107  * accessed (i.e. because it is an invalid address).
2108  *
2109  * writev-intensive code may want this to prefault several iovecs -- that
2110  * would be possible (callers must not rely on the fact that _only_ the
2111  * first iovec will be faulted with the current implementation).
2112  */
2113 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
2114 {
2115 	char __user *buf = i->iov->iov_base + i->iov_offset;
2116 	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
2117 	return fault_in_pages_readable(buf, bytes);
2118 }
2119 EXPORT_SYMBOL(iov_iter_fault_in_readable);
2120 
2121 /*
2122  * Return the count of just the current iov_iter segment.
2123  */
2124 size_t iov_iter_single_seg_count(const struct iov_iter *i)
2125 {
2126 	const struct iovec *iov = i->iov;
2127 	if (i->nr_segs == 1)
2128 		return i->count;
2129 	else
2130 		return min(i->count, iov->iov_len - i->iov_offset);
2131 }
2132 EXPORT_SYMBOL(iov_iter_single_seg_count);
2133 
2134 /*
2135  * Performs the necessary checks before doing a write.
2136  *
2137  * Can adjust the write position or the number of bytes to write.
2138  * Returns the appropriate error code that the caller should return,
2139  * or zero if the write should be allowed.
2140  */
2141 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
2142 {
2143 	struct inode *inode = file->f_mapping->host;
2144 	unsigned long limit = rlimit(RLIMIT_FSIZE);
2145 
2146 	if (unlikely(*pos < 0))
2147 		return -EINVAL;
2148 
2149 	if (!isblk) {
2150 		/* FIXME: this is for backwards compatibility with 2.4 */
2151 		if (file->f_flags & O_APPEND)
2152 			*pos = i_size_read(inode);
2153 
2154 		if (limit != RLIM_INFINITY) {
2155 			if (*pos >= limit) {
2156 				send_sig(SIGXFSZ, current, 0);
2157 				return -EFBIG;
2158 			}
2159 			if (*count > limit - (typeof(limit))*pos) {
2160 				*count = limit - (typeof(limit))*pos;
2161 			}
2162 		}
2163 	}
2164 
2165 	/*
2166 	 * LFS rule
2167 	 */
2168 	if (unlikely(*pos + *count > MAX_NON_LFS &&
2169 				!(file->f_flags & O_LARGEFILE))) {
2170 		if (*pos >= MAX_NON_LFS) {
2171 			return -EFBIG;
2172 		}
2173 		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
2174 			*count = MAX_NON_LFS - (unsigned long)*pos;
2175 		}
2176 	}
2177 
2178 	/*
2179 	 * Are we about to exceed the fs block limit?
2180 	 *
2181 	 * If we have written data it becomes a short write.  If we have
2182 	 * exceeded without writing data we send a signal and return EFBIG.
2183 	 * Linus's frestrict idea will clean these up nicely.
2184 	 */
2185 	if (likely(!isblk)) {
2186 		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
2187 			if (*count || *pos > inode->i_sb->s_maxbytes) {
2188 				return -EFBIG;
2189 			}
2190 			/* zero-length writes at ->s_maxbytes are OK */
2191 		}
2192 
2193 		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
2194 			*count = inode->i_sb->s_maxbytes - *pos;
2195 	} else {
2196 #ifdef CONFIG_BLOCK
2197 		loff_t isize;
2198 		if (bdev_read_only(I_BDEV(inode)))
2199 			return -EPERM;
2200 		isize = i_size_read(inode);
2201 		if (*pos >= isize) {
2202 			if (*count || *pos > isize)
2203 				return -ENOSPC;
2204 		}
2205 
2206 		if (*pos + *count > isize)
2207 			*count = isize - *pos;
2208 #else
2209 		return -EPERM;
2210 #endif
2211 	}
2212 	return 0;
2213 }
2214 EXPORT_SYMBOL(generic_write_checks);
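/*
 * Illustrative sketch (not part of this file): callers pass pos and
 * count by reference and must honour any trimming, as
 * __generic_file_aio_write() below does:
 *
 *	err = generic_write_checks(file, &pos, &count,
 *				   S_ISBLK(inode->i_mode));
 *	if (err)
 *		return err;
 *	if (count == 0)
 *		return 0;
 */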
2215 
2216 int pagecache_write_begin(struct file *file, struct address_space *mapping,
2217 				loff_t pos, unsigned len, unsigned flags,
2218 				struct page **pagep, void **fsdata)
2219 {
2220 	const struct address_space_operations *aops = mapping->a_ops;
2221 
2222 	return aops->write_begin(file, mapping, pos, len, flags,
2223 							pagep, fsdata);
2224 }
2225 EXPORT_SYMBOL(pagecache_write_begin);
2226 
2227 int pagecache_write_end(struct file *file, struct address_space *mapping,
2228 				loff_t pos, unsigned len, unsigned copied,
2229 				struct page *page, void *fsdata)
2230 {
2231 	const struct address_space_operations *aops = mapping->a_ops;
2232 
2233 	mark_page_accessed(page);
2234 	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
2235 }
2236 EXPORT_SYMBOL(pagecache_write_end);
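/*
 * Illustrative sketch (not part of this file): a buffered write is a
 * begin/copy/end cycle per page; generic_perform_write() below is the
 * real loop:
 *
 *	status = pagecache_write_begin(file, mapping, pos, len, flags,
 *				       &page, &fsdata);
 *	if (status)
 *		return status;
 *	copied = ...copy len bytes into the page at offset...;
 *	status = pagecache_write_end(file, mapping, pos, len, copied,
 *				     page, fsdata);
 */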
2237 
2238 ssize_t
2239 generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2240 		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
2241 		size_t count, size_t ocount)
2242 {
2243 	struct file	*file = iocb->ki_filp;
2244 	struct address_space *mapping = file->f_mapping;
2245 	struct inode	*inode = mapping->host;
2246 	ssize_t		written;
2247 	size_t		write_len;
2248 	pgoff_t		end;
2249 
2250 	if (count != ocount)
2251 		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
2252 
2253 	write_len = iov_length(iov, *nr_segs);
2254 	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
2255 
2256 	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
2257 	if (written)
2258 		goto out;
2259 
2260 	/*
2261 	 * After a write we want buffered reads to be sure to go to disk to get
2262 	 * the new data.  We invalidate clean cached pages from the region we're
2263 	 * about to write.  We do this *before* the write so that we can return
2264 	 * without clobbering -EIOCBQUEUED from ->direct_IO().
2265 	 */
2266 	if (mapping->nrpages) {
2267 		written = invalidate_inode_pages2_range(mapping,
2268 					pos >> PAGE_CACHE_SHIFT, end);
2269 		/*
2270 		 * If a page can not be invalidated, return 0 to fall back
2271 		 * to buffered write.
2272 		 */
2273 		if (written) {
2274 			if (written == -EBUSY)
2275 				return 0;
2276 			goto out;
2277 		}
2278 	}
2279 
2280 	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
2281 
2282 	/*
2283 	 * Finally, try again to invalidate clean pages which might have been
2284 	 * cached by non-direct readahead, or faulted in by get_user_pages()
2285 	 * if the source of the write was an mmap'ed region of the file
2286 	 * we're writing.  Either one is a pretty crazy thing to do,
2287 	 * so we don't support it 100%.  If this invalidation
2288 	 * fails, tough, the write still worked...
2289 	 */
2290 	if (mapping->nrpages) {
2291 		invalidate_inode_pages2_range(mapping,
2292 					      pos >> PAGE_CACHE_SHIFT, end);
2293 	}
2294 
2295 	if (written > 0) {
2296 		pos += written;
2297 		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
2298 			i_size_write(inode, pos);
2299 			mark_inode_dirty(inode);
2300 		}
2301 		*ppos = pos;
2302 	}
2303 out:
2304 	return written;
2305 }
2306 EXPORT_SYMBOL(generic_file_direct_write);
2307 
2308 /*
2309  * Find or create a page at the given pagecache position. Return the locked
2310  * page. This function is specifically for buffered writes.
2311  */
2312 struct page *grab_cache_page_write_begin(struct address_space *mapping,
2313 					pgoff_t index, unsigned flags)
2314 {
2315 	int status;
2316 	gfp_t gfp_mask;
2317 	struct page *page;
2318 	gfp_t gfp_notmask = 0;
2319 
2320 	gfp_mask = mapping_gfp_mask(mapping);
2321 	if (mapping_cap_account_dirty(mapping))
2322 		gfp_mask |= __GFP_WRITE;
2323 	if (flags & AOP_FLAG_NOFS)
2324 		gfp_notmask = __GFP_FS;
2325 repeat:
2326 	page = find_lock_page(mapping, index);
2327 	if (page)
2328 		goto found;
2329 
2330 	page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
2331 	if (!page)
2332 		return NULL;
2333 	status = add_to_page_cache_lru(page, mapping, index,
2334 						GFP_KERNEL & ~gfp_notmask);
2335 	if (unlikely(status)) {
2336 		page_cache_release(page);
2337 		if (status == -EEXIST)
2338 			goto repeat;
2339 		return NULL;
2340 	}
2341 found:
2342 	wait_for_stable_page(page);
2343 	return page;
2344 }
2345 EXPORT_SYMBOL(grab_cache_page_write_begin);
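/*
 * Illustrative sketch (not part of this file): a trivial ->write_begin
 * built on this helper, assuming no filesystem-private setup is needed
 * (compare simple_write_begin() in fs/libfs.c); "myfs_" is hypothetical:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 *
 *		*pagep = grab_cache_page_write_begin(mapping, index, flags);
 *		return *pagep ? 0 : -ENOMEM;
 *	}
 */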
2346 
2347 static ssize_t generic_perform_write(struct file *file,
2348 				struct iov_iter *i, loff_t pos)
2349 {
2350 	struct address_space *mapping = file->f_mapping;
2351 	const struct address_space_operations *a_ops = mapping->a_ops;
2352 	long status = 0;
2353 	ssize_t written = 0;
2354 	unsigned int flags = 0;
2355 
2356 	trace_mm_filemap_generic_perform_write(file, pos, iov_iter_count(i), 0);
2357 
2358 	/*
2359 	 * Copies from kernel address space cannot fail (NFSD is a big user).
2360 	 */
2361 	if (segment_eq(get_fs(), KERNEL_DS))
2362 		flags |= AOP_FLAG_UNINTERRUPTIBLE;
2363 
2364 	do {
2365 		struct page *page;
2366 		unsigned long offset;	/* Offset into pagecache page */
2367 		unsigned long bytes;	/* Bytes to write to page */
2368 		size_t copied;		/* Bytes copied from user */
2369 		void *fsdata;
2370 
2371 		offset = (pos & (PAGE_CACHE_SIZE - 1));
2372 		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2373 						iov_iter_count(i));
2374 
2375 again:
2376 		/*
2377 		 * Bring in the user page that we will copy from _first_.
2378 		 * Otherwise there's a nasty deadlock on copying from the
2379 		 * same page as we're writing to, without it being marked
2380 		 * up-to-date.
2381 		 *
2382 		 * Not only is this an optimisation, but it is also required
2383 		 * to check that the address is actually valid, when atomic
2384 		 * usercopies are used, below.
2385 		 */
2386 		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2387 			status = -EFAULT;
2388 			break;
2389 		}
2390 
2391 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2392 						&page, &fsdata);
2393 		if (unlikely(status))
2394 			break;
2395 
2396 		if (mapping_writably_mapped(mapping))
2397 			flush_dcache_page(page);
2398 
2399 		pagefault_disable();
2400 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2401 		pagefault_enable();
2402 		flush_dcache_page(page);
2403 
2404 		mark_page_accessed(page);
2405 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
2406 						page, fsdata);
2407 		if (unlikely(status < 0))
2408 			break;
2409 		copied = status;
2410 
2411 		cond_resched();
2412 
2413 		iov_iter_advance(i, copied);
2414 		if (unlikely(copied == 0)) {
2415 			/*
2416 			 * If we were unable to copy any data at all, we must
2417 			 * fall back to a single segment length write.
2418 			 *
2419 			 * If we didn't fall back here, we could livelock
2420 			 * because not all segments in the iov can be copied at
2421 			 * once without a pagefault.
2422 			 */
2423 			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2424 						iov_iter_single_seg_count(i));
2425 			goto again;
2426 		}
2427 		pos += copied;
2428 		written += copied;
2429 
2430 		balance_dirty_pages_ratelimited(mapping);
2431 		if (fatal_signal_pending(current)) {
2432 			status = -EINTR;
2433 			break;
2434 		}
2435 	} while (iov_iter_count(i));
2436 
2437 	return written ? written : status;
2438 }
2439 
2440 ssize_t
2441 generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2442 		unsigned long nr_segs, loff_t pos, loff_t *ppos,
2443 		size_t count, ssize_t written)
2444 {
2445 	struct file *file = iocb->ki_filp;
2446 	ssize_t status;
2447 	struct iov_iter i;
2448 
2449 	iov_iter_init(&i, iov, nr_segs, count, written);
2450 	status = generic_perform_write(file, &i, pos);
2451 
2452 	if (likely(status >= 0)) {
2453 		written += status;
2454 		*ppos = pos + status;
2455 	}
2456 
2457 	return written ? written : status;
2458 }
2459 EXPORT_SYMBOL(generic_file_buffered_write);
2460 
2461 /**
2462  * __generic_file_aio_write - write data to a file
2463  * @iocb:	IO state structure (file, offset, etc.)
2464  * @iov:	vector with data to write
2465  * @nr_segs:	number of segments in the vector
2466  * @ppos:	position where to write
2467  *
2468  * This function does all the work needed for actually writing data to a
2469  * file. It does all basic checks, removes SUID from the file, updates
2470  * modification times and calls proper subroutines depending on whether we
2471  * do direct IO or a standard buffered write.
2472  *
2473  * It expects i_mutex to be grabbed unless we work on a block device or similar
2474  * object which does not need locking at all.
2475  *
2476  * This function does *not* take care of syncing data in case of O_SYNC write.
2477  * A caller has to handle it. This is mainly due to the fact that we want to
2478  * avoid syncing under i_mutex.
2479  */
2480 ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2481 				 unsigned long nr_segs, loff_t *ppos)
2482 {
2483 	struct file *file = iocb->ki_filp;
2484 	struct address_space * mapping = file->f_mapping;
2485 	size_t ocount;		/* original count */
2486 	size_t count;		/* after file limit checks */
2487 	struct inode 	*inode = mapping->host;
2488 	loff_t		pos;
2489 	ssize_t		written;
2490 	ssize_t		err;
2491 
2492 	ocount = 0;
2493 	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
2494 	if (err)
2495 		return err;
2496 
2497 	count = ocount;
2498 	pos = *ppos;
2499 
2500 	/* We can write back this queue in page reclaim */
2501 	current->backing_dev_info = mapping->backing_dev_info;
2502 	written = 0;
2503 
2504 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2505 	if (err)
2506 		goto out;
2507 
2508 	if (count == 0)
2509 		goto out;
2510 
2511 	err = file_remove_suid(file);
2512 	if (err)
2513 		goto out;
2514 
2515 	err = file_update_time(file);
2516 	if (err)
2517 		goto out;
2518 
2519 	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2520 	if (unlikely(file->f_flags & O_DIRECT)) {
2521 		loff_t endbyte;
2522 		ssize_t written_buffered;
2523 
2524 		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
2525 							ppos, count, ocount);
2526 		if (written < 0 || written == count)
2527 			goto out;
2528 		/*
2529 		 * direct-io write to a hole: fall through to buffered I/O
2530 		 * for completing the rest of the request.
2531 		 */
2532 		pos += written;
2533 		count -= written;
2534 		written_buffered = generic_file_buffered_write(iocb, iov,
2535 						nr_segs, pos, ppos, count,
2536 						written);
2537 		/*
2538 		 * If generic_file_buffered_write() returned a synchronous error
2539 		 * then we want to return the number of bytes which were
2540 		 * direct-written, or the error code if that was zero.  Note
2541 		 * that this differs from normal direct-io semantics, which
2542 		 * will return -EFOO even if some bytes were written.
2543 		 */
2544 		if (written_buffered < 0) {
2545 			err = written_buffered;
2546 			goto out;
2547 		}
2548 
2549 		/*
2550 		 * We need to ensure that the page cache pages are written to
2551 		 * disk and invalidated to preserve the expected O_DIRECT
2552 		 * semantics.
2553 		 */
2554 		endbyte = pos + written_buffered - written - 1;
2555 		err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
2556 		if (err == 0) {
2557 			written = written_buffered;
2558 			invalidate_mapping_pages(mapping,
2559 						 pos >> PAGE_CACHE_SHIFT,
2560 						 endbyte >> PAGE_CACHE_SHIFT);
2561 		} else {
2562 			/*
2563 			 * We don't know how much we wrote, so just return
2564 			 * the number of bytes which were direct-written
2565 			 * the number of bytes which were direct-written.
2566 		}
2567 	} else {
2568 		written = generic_file_buffered_write(iocb, iov, nr_segs,
2569 				pos, ppos, count, written);
2570 	}
2571 out:
2572 	current->backing_dev_info = NULL;
2573 	return written ? written : err;
2574 }
2575 EXPORT_SYMBOL(__generic_file_aio_write);
2576 
2577 /**
2578  * generic_file_aio_write - write data to a file
2579  * @iocb:	IO state structure
2580  * @iov:	vector with data to write
2581  * @nr_segs:	number of segments in the vector
2582  * @pos:	position in file where to write
2583  *
2584  * This is a wrapper around __generic_file_aio_write() to be used by most
2585  * filesystems. It takes care of syncing the file in case of O_SYNC file
2586  * and acquires i_mutex as needed.
2587  */
2588 ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2589 		unsigned long nr_segs, loff_t pos)
2590 {
2591 	struct file *file = iocb->ki_filp;
2592 	struct inode *inode = file->f_mapping->host;
2593 	ssize_t ret;
2594 
2595 	BUG_ON(iocb->ki_pos != pos);
2596 
2597 	mutex_lock(&inode->i_mutex);
2598 	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
2599 	mutex_unlock(&inode->i_mutex);
2600 
2601 	if (ret > 0 || ret == -EIOCBQUEUED) {
2602 		ssize_t err;
2603 
2604 		err = generic_write_sync(file, pos, ret);
2605 		if (err < 0 && ret > 0)
2606 			ret = err;
2607 	}
2608 	return ret;
2609 }
2610 EXPORT_SYMBOL(generic_file_aio_write);
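/*
 * Illustrative sketch (not part of this file): most disk filesystems
 * plug this straight into their file_operations; "myfs_" is
 * hypothetical, the helpers named are the usual generic ones:
 *
 *	const struct file_operations myfs_file_operations = {
 *		.read		= do_sync_read,
 *		.aio_read	= generic_file_aio_read,
 *		.write		= do_sync_write,
 *		.aio_write	= generic_file_aio_write,
 *		.mmap		= generic_file_mmap,
 *	};
 */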
2611 
2612 /**
2613  * try_to_release_page() - release old fs-specific metadata on a page
2614  *
2615  * @page: the page which the kernel is trying to free
2616  * @gfp_mask: memory allocation flags (and I/O mode)
2617  *
2618  * The address_space is asked to try to release any data held against the page
2619  * (presumably at page->private).  If the release was successful, return `1'.
2620  * Otherwise return zero.
2621  *
2622  * This may also be called if PG_fscache is set on a page, indicating that the
2623  * page is known to the local caching routines.
2624  *
2625  * The @gfp_mask argument specifies whether I/O may be performed to release
2626  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
2627  *
2628  */
2629 int try_to_release_page(struct page *page, gfp_t gfp_mask)
2630 {
2631 	struct address_space * const mapping = page->mapping;
2632 
2633 	BUG_ON(!PageLocked(page));
2634 	if (PageWriteback(page))
2635 		return 0;
2636 
2637 	if (mapping && mapping->a_ops->releasepage)
2638 		return mapping->a_ops->releasepage(page, gfp_mask);
2639 	return try_to_free_buffers(page);
2640 }
2641 
2642 EXPORT_SYMBOL(try_to_release_page);
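/*
 * Illustrative sketch (not part of this file): reclaim-style callers
 * hold the page lock and must tolerate failure (compare
 * shrink_page_list() in mm/vmscan.c); the label is hypothetical:
 *
 *	if (page_has_private(page) &&
 *	    !try_to_release_page(page, sc->gfp_mask))
 *		goto keep_locked;
 */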
2643