#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= __GFP_BITS_SHIFT + 4, /* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
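
/*
 * Illustrative sketch (not part of the original header): a writeback
 * completion path records failures with mapping_set_error(), and a later
 * fsync-style check harvests the sticky bits with test_and_clear_bit().
 * example_check_mapping_error() is hypothetical.
 */
static inline int example_check_mapping_error(struct address_space *mapping)
{
	int ret = 0;

	/* the bits stay set ("sticky") until someone consumes them */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}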

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
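
/*
 * Illustrative sketch (not part of the original header): a filesystem
 * typically narrows the allocation mode once, while the inode's mapping is
 * being set up and before any pages are inserted (per the comment above,
 * the update is not atomic).  example_setup_mapping() is hypothetical.
 */
static inline void example_setup_mapping(struct address_space *mapping)
{
	/* page cache allocations for this mapping must not recurse into the fs */
	mapping_set_gfp_mask(mapping, mapping_gfp_mask(mapping) & ~__GFP_FS);
}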

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
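
/*
 * Illustrative sketch (not part of the original header): splitting a byte
 * position in a file into a page cache index and an offset within that
 * page using the macros above.  The example_* helpers are hypothetical.
 */
static inline pgoff_t example_pos_to_index(loff_t pos)
{
	return pos >> PAGE_CACHE_SHIFT;		/* which page of the file */
}

static inline unsigned int example_pos_in_page(loff_t pos)
{
	return pos & ~PAGE_CACHE_MASK;		/* offset inside that page */
}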

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
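
/*
 * Illustrative sketch (not part of the original header) of the lookup-side
 * pattern described above: after finding @page under rcu_read_lock(), take
 * a speculative reference, then re-check that it is still the entry at
 * @offset in @mapping; otherwise drop the reference and have the caller
 * retry the lookup.  example_confirm_page() is hypothetical; the real
 * lookup loops live in find_get_entry() and friends in mm/filemap.c.
 */
static inline struct page *example_confirm_page(struct page *page,
						struct address_space *mapping,
						pgoff_t offset)
{
	if (!page_cache_get_speculative(page))
		return NULL;			/* page was freed: retry lookup */

	/* step 3: was the page truncated or reused since we found it? */
	if (unlikely(page->mapping != mapping || page->index != offset)) {
		page_cache_release(page);
		return NULL;			/* lost the race: retry lookup */
	}
	return page;				/* our reference is now stable */
}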

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
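
/*
 * Illustrative sketch (not part of the original header) of the remove-side
 * sequence described further up: with the mapping's tree_lock held for
 * write, freeze the refcount at the expected value before unhooking the
 * page, and back out if anyone else holds a reference.  The expected count
 * of 2 (page cache + our caller) is an assumption of this sketch; see
 * __remove_mapping() in mm/vmscan.c for the real code.
 */
static inline int example_try_freeze_for_removal(struct page *page)
{
	if (!page_freeze_refs(page, 2))
		return 0;	/* somebody holds a (speculative) reference */

	if (unlikely(PageDirty(page))) {
		/* must not toss a dirty page; give the references back */
		page_unfreeze_refs(page, 2);
		return 0;
	}

	/* refcount frozen at 0: safe to remove from the page cache and free */
	return 1;
}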

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
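
/*
 * Illustrative sketch (not part of the original header): reading a single
 * page through the mapping's ->readpage and peeking at its contents.
 * read_mapping_page() returns an ERR_PTR() on failure rather than NULL, so
 * this sketch assumes IS_ERR()/PTR_ERR() from <linux/err.h> are visible.
 * example_read_first_byte() is hypothetical.
 */
static inline int example_read_first_byte(struct address_space *mapping,
					  pgoff_t index, u8 *out)
{
	struct page *page = read_mapping_page(mapping, index, NULL);
	void *kaddr;

	if (IS_ERR(page))
		return PTR_ERR(page);	/* allocation or read error */

	kaddr = kmap_atomic(page);	/* <linux/highmem.h> is included above */
	*out = *(u8 *)kaddr;
	kunmap_atomic(kaddr);

	page_cache_release(page);	/* drop the reference from the lookup */
	return 0;
}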

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);
	else
		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
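
/*
 * Illustrative sketch (not part of the original header): for an ordinary
 * (non-hugetlb) file-backed VMA, the byte in the backing file behind a user
 * address is the page index from linear_page_index() scaled up, plus the
 * offset within the page.  example_address_to_file_pos() is hypothetical.
 */
static inline loff_t example_address_to_file_pos(struct vm_area_struct *vma,
						 unsigned long address)
{
	pgoff_t pgoff = linear_page_index(vma, address);

	return ((loff_t)pgoff << PAGE_CACHE_SHIFT) + (address & ~PAGE_MASK);
}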

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
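
/*
 * Illustrative sketch (not part of the original header): taking the page
 * lock while remaining killable, then re-checking that the page was not
 * truncated while we slept, as read paths commonly do.  The caller is
 * assumed to already hold a reference on @page; example_lock_and_check()
 * and its -ENOENT return are hypothetical.
 */
static inline int example_lock_and_check(struct page *page)
{
	int error = lock_page_killable(page);

	if (error)
		return error;		/* -EINTR: fatal signal while waiting */

	if (!page->mapping) {
		unlock_page(page);	/* truncated while we waited */
		return -ENOENT;
	}

	/* ... page is locked and still in the page cache here ... */
	unlock_page(page);
	return 0;
}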

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
 * and for filesystems which need to wait on PG_private.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
					     int bit_nr, unsigned long timeout);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
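
/*
 * Illustrative sketch (not part of the original header): the usual
 * "lock the page, then wait out writeback" sequence used before modifying
 * page contents, so data currently under I/O is never scribbled on.
 * example_modify_page() is hypothetical.
 */
static inline void example_modify_page(struct page *page)
{
	lock_page(page);
	wait_on_page_writeback(page);	/* never touch a page under I/O */

	/* ... the caller's modification of the page data would go here ... */

	set_page_dirty(page);		/* declared in <linux/mm.h> */
	unlock_page(page);
}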

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, int rw, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
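
/*
 * Illustrative sketch (not part of the original header) of the classic
 * write(2) prefault pattern: touch the user buffer while no page lock is
 * held, so a later copy performed under the page lock is unlikely to fault
 * on (and deadlock against) the very page being written.
 * example_prefault_then_copy() is hypothetical and simplified; real write
 * paths retry and disable pagefaults around the inner copy.
 */
static inline int example_prefault_then_copy(void *dst,
					     const char __user *buf,
					     int bytes)
{
	if (fault_in_pages_readable(buf, bytes))
		return -EFAULT;		/* user buffer is not accessible */

	/* in filemap.c this copy happens with the destination page locked */
	if (copy_from_user(dst, buf, bytes))
		return -EFAULT;

	return 0;
}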

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	return 0;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
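
/*
 * Illustrative sketch (not part of the original header): allocating a fresh
 * page with the mapping's allocation mode and inserting it at @index via
 * add_to_page_cache_lru(); on failure the only reference is ours, so a
 * plain page_cache_release() is enough.  example_new_cache_page() is
 * hypothetical.
 */
static inline struct page *example_new_cache_page(struct address_space *mapping,
						  pgoff_t index)
{
	struct page *page = page_cache_alloc(mapping);
	int error;

	if (!page)
		return NULL;

	error = add_to_page_cache_lru(page, mapping, index,
				      mapping_gfp_mask(mapping));
	if (error) {
		page_cache_release(page);
		return NULL;
	}

	return page;		/* new page: locked, in the cache, on the LRU */
}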

#endif /* _LINUX_PAGEMAP_H */