/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};
31 
32 /**
33  * mapping_set_error - record a writeback error in the address_space
34  * @mapping - the mapping in which an error should be set
35  * @error - the error to set in the mapping
36  *
37  * When writeback fails in some way, we must record that error so that
38  * userspace can be informed when fsync and the like are called.  We endeavor
39  * to report errors on any file that was open at the time of the error.  Some
40  * internal callers also need to know when writeback errors have occurred.
41  *
42  * When a writeback error occurs, most filesystems will want to call
43  * mapping_set_error to record the error in the mapping so that it can be
44  * reported when the application calls fsync(2).
45  */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
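
/*
 * Illustrative sketch (not part of this header's API): a typical async
 * writeback completion path records failures with mapping_set_error() so
 * that a later fsync(2) or close(2) on the file can report them.  The helper
 * name and its arguments below are hypothetical.
 */
static inline void example_end_async_write(struct address_space *mapping,
					   int error)
{
	/* remembered in both mapping->wb_err and the AS_EIO/AS_ENOSPC bits */
	if (error)
		mapping_set_error(mapping, error);
}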

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}
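
/*
 * Illustrative sketch: a filesystem that must not recurse into itself while
 * allocating page cache memory can mask off __GFP_FS once, while the mapping
 * is being set up (the setter is non-atomic, as noted above).  The helper
 * name is hypothetical.
 */
static inline void example_init_mapping_gfp(struct address_space *mapping)
{
	mapping_set_gfp_mask(mapping,
			     mapping_gfp_constraint(mapping, ~__GFP_FS));
}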

void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
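
/*
 * Illustrative sketch of the lookup-side pattern described above (compare
 * find_get_entry() in mm/filemap.c).  The radix-tree walk itself is stubbed
 * out as a hypothetical callback, @lookup, because only the speculative
 * get / re-check / retry structure is of interest here.
 */
static inline struct page *
example_lockless_lookup(struct address_space *mapping, pgoff_t offset,
			struct page *(*lookup)(struct address_space *, pgoff_t))
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = lookup(mapping, offset);			/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))	/* step 2 */
			goto repeat;
		/* step 3: is the page still at this index?  If not, retry. */
		if (unlikely(page != lookup(mapping, offset))) {
			put_page(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}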

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(struct file *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
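
/*
 * Illustrative sketch: a read-only peek at the page cache.  The reference
 * taken by find_get_page() must be dropped with put_page(); the page is
 * neither locked nor guaranteed up to date here.  The helper name is
 * hypothetical.
 */
static inline bool example_page_is_cached(struct address_space *mapping,
					  pgoff_t offset)
{
	struct page *page = find_get_page(mapping, offset);

	if (!page)
		return false;
	put_page(page);
	return true;
}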

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
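
/*
 * Illustrative sketch: grab (or create) the page backing byte offset @pos of
 * an inode's data, honouring the mapping's allocation mask.  The page comes
 * back locked with an extra reference, exactly as documented above; the
 * caller unlocks and puts it when done.  The helper name is hypothetical.
 */
static inline struct page *example_get_data_page(struct address_space *mapping,
						 loff_t pos)
{
	return find_or_create_page(mapping, pos >> PAGE_SHIFT,
				   mapping_gfp_mask(mapping));
}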

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, int tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, int tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
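
/*
 * Illustrative sketch of a tagged gang lookup, the pattern writeback uses
 * (compare write_cache_pages() in mm/page-writeback.c): repeatedly pull
 * batches of dirty pages starting at *index, which the lookup advances past
 * the last page it returned.  The batch size and helper name are arbitrary.
 */
static inline void example_walk_dirty_pages(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned nr, i;

	while ((nr = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
					16, pages)) != 0) {
		for (i = 0; i < nr; i++) {
			/* ... e.g. queue pages[i] for writeback ... */
			put_page(pages[i]);
		}
	}
}
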
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
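
/*
 * Illustrative sketch: read one page of an inode through its ->readpage
 * method, the pattern simple filesystems use for directory blocks.  On
 * failure an ERR_PTR() is propagated; on success the page is uptodate and
 * carries a reference the caller must drop.  The helper name is hypothetical.
 */
static inline struct page *example_read_dir_page(struct inode *dir, pgoff_t n)
{
	struct page *page = read_mapping_page(dir->i_mapping, n, NULL);

	if (IS_ERR(page))
		return page;	/* e.g. ERR_PTR(-EIO) */
	/* ... use the page, then put_page() it ... */
	return page;
}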

/*
 * Get the page's index within the radix tree.
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 *  We don't initialize ->index for tail pages: calculate based on
	 *  head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the page's offset into its mapping, in units of PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
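
/*
 * Illustrative sketch: translate a faulting user address into the byte
 * offset of the backing file, as a fault handler might.  The helper name is
 * hypothetical.
 */
static inline loff_t example_fault_file_offset(struct vm_area_struct *vma,
					       unsigned long address)
{
	return (loff_t)linear_page_index(vma, address) << PAGE_SHIFT;
}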

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
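
/*
 * Illustrative sketch: lock a page on behalf of a user-triggered operation,
 * backing out cleanly if the task is fatally signalled while it waits.  The
 * helper name is hypothetical.
 */
static inline int example_lock_user_page(struct page *page)
{
	int err = lock_page_killable(page);

	if (err)
		return err;	/* -EINTR: caller unwinds back to userspace */
	/* ... page is locked; do the work ... */
	unlock_page(page);
	return 0;
}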

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an elevated "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in all pages of the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
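
/*
 * Illustrative sketch of the usual caller pattern (compare
 * generic_perform_write() in mm/filemap.c): pre-fault the user buffer while
 * no page locks are held, so a later atomic copy under the page lock cannot
 * deadlock on a page fault.  Everything except fault_in_pages_readable()
 * is hypothetical here.
 */
static inline int example_prefault_user_buffer(const char __user *buf,
					       int bytes)
{
	if (unlikely(fault_in_pages_readable(buf, bytes)))
		return -EFAULT;
	/* ... now lock the pagecache page and copy with pagefaults disabled ... */
	return 0;
}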

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
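
/*
 * Illustrative sketch: allocate a fresh page and insert it at @offset,
 * putting it on the LRU as most callers should (compare pagecache_get_page()
 * with FGP_CREAT).  On success the page is returned locked and referenced.
 * The helper name is hypothetical.
 */
static inline struct page *example_add_new_page(struct address_space *mapping,
						pgoff_t offset, gfp_t gfp)
{
	struct page *page = __page_cache_alloc(gfp);

	if (!page)
		return NULL;
	if (add_to_page_cache_lru(page, mapping, offset, gfp)) {
		put_page(page);
		return NULL;
	}
	return page;
}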

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */