/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};
/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
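
/*
 * Illustrative sketch (not part of this header): a filesystem's writeback
 * completion path might record an I/O error roughly like this, using a
 * hypothetical helper my_fs_end_writeback():
 *
 *	static void my_fs_end_writeback(struct page *page, int err)
 *	{
 *		if (err)
 *			mapping_set_error(page->mapping, err);
 *		end_page_writeback(page);
 *	}
 *
 * A later fsync(2) on any file open against that mapping would then see the
 * error via the errseq_t mechanism.
 */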

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
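
/*
 * Illustrative sketch (not part of this header): a caller that would like a
 * GFP_KERNEL allocation but must honour per-mapping restrictions (e.g. a
 * mapping limited to GFP_NOFS) can combine the masks like this:
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 *	struct page *page = __page_cache_alloc(gfp);
 */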

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}
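
/*
 * Illustrative sketch (not part of this header) of the lookup protocol
 * described above, loosely modelled on what the pagecache lookup helpers do
 * internally; the XArray details are simplified and only shown to connect
 * steps 1-3 of the comment to real calls:
 *
 *	rcu_read_lock();
 * repeat:
 *	page = xas_load(&xas);				// 1. find page
 *	if (page && !xa_is_value(page)) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;			// 2. page was being freed
 *		if (unlikely(page != xas_reload(&xas))) {
 *			put_page(page);			// 3. raced with removal
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */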

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
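
/*
 * Illustrative sketch (not part of this header): a typical caller looks the
 * page up, uses it, and drops the reference it was handed; mapping and index
 * are placeholders:
 *
 *	struct page *page = find_get_page(mapping, index);
 *
 *	if (page) {
 *		... read data from the page ...
 *		put_page(page);
 *	}
 */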

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
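
/*
 * Illustrative sketch (not part of this header): a filesystem preparing to
 * write a block might grab (or create) the page and unlock it when done;
 * mapping, index and the gfp value are placeholders:
 *
 *	struct page *page;
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	... copy new data into the page and mark it dirty ...
 *	unlock_page(page);
 *	put_page(page);
 */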

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

static inline struct page *find_subpage(struct page *page, pgoff_t offset)
{
	if (PageHuge(page))
		return page;

	VM_BUG_ON_PAGE(PageTail(page), page);

	return page + (offset & (compound_nr(page) - 1));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
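
/*
 * Illustrative sketch (not part of this header): walking part of a mapping in
 * batches with find_get_pages_range(); the batch size and variable names are
 * only for illustration. Each returned page carries a reference that the
 * caller must drop:
 *
 *	struct page *pages[16];
 *	pgoff_t index = start;
 *	unsigned int i, nr;
 *
 *	while ((nr = find_get_pages_range(mapping, &index, end,
 *					  ARRAY_SIZE(pages), pages))) {
 *		for (i = 0; i < nr; i++) {
 *			... inspect pages[i] ...
 *			put_page(pages[i]);
 *		}
 *	}
 */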

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
				pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}
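
/*
 * Illustrative sketch (not part of this header): read_mapping_page() returns
 * an uptodate page or an ERR_PTR() on failure, so callers typically check it
 * with IS_ERR(); mapping and index are placeholders:
 *
 *	struct page *page = read_mapping_page(mapping, index, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page contents ...
 *	put_page(page);
 */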

/*
 * Get index of the page within radix-tree (but not for hugetlb pages).
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHuge(page)))
		return hugetlb_basepage_index(page);
	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
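
/*
 * Illustrative worked example (not part of this header): for an address in a
 * file-backed VMA, the file page index is the page offset within the VMA plus
 * the VMA's starting file offset:
 *
 *	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 *
 * e.g. an address 3 pages into a VMA whose mapping starts at file page 10
 * resolves to page index 13, i.e. byte offset 13 << PAGE_SHIFT in the file.
 */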

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals. It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
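
/*
 * Illustrative sketch (not part of this header): callers of
 * lock_page_killable() must be prepared for the lock attempt to fail with
 * -EINTR when a fatal signal arrives; one common shape is:
 *
 *	if (lock_page_killable(page)) {
 *		put_page(page);
 *		return -EINTR;
 *	}
 *	... page is now locked ...
 *	unlock_page(page);
 */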

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern void put_and_wait_on_page_locked(struct page *page);

void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in all pages of the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
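
/*
 * Illustrative sketch (not part of this header): a write path that must not
 * take a page fault while holding a page lock can pre-fault the user buffer
 * and only then take the lock; buf and len are placeholders:
 *
 *	if (fault_in_pages_readable(buf, len))
 *		return -EFAULT;
 *	... lock the pagecache page and copy from buf with the lock held ...
 */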

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
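
/*
 * Illustrative sketch (not part of this header): readahead-style code
 * allocates a fresh page and inserts it into both the page cache and the LRU;
 * the gfp and index values are placeholders, and a failure simply means the
 * caller drops its reference (e.g. someone else inserted a page first):
 *
 *	struct page *page = __page_cache_alloc(gfp);
 *	int ret;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	ret = add_to_page_cache_lru(page, mapping, index, gfp);
 *	if (ret)
 *		put_page(page);
 */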

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */