#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_BALLOON_MAP	= __GFP_BITS_SHIFT + 4,	/* balloon page special map */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_balloon(struct address_space *mapping)
{
	set_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_clear_balloon(struct address_space *mapping)
{
	clear_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline int mapping_balloon(struct address_space *mapping)
{
	return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
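
/*
 * Illustrative example, not part of the kernel API: one pattern filesystems
 * use is to narrow the mapping's allocation mask so pagecache allocations
 * cannot recurse back into the filesystem.  The function name below is made
 * up for illustration.
 */
static inline void example_mapping_forbid_fs_reclaim(struct address_space *mapping)
{
	/* Drop __GFP_FS from the per-mapping allocation mask. */
	mapping_set_gfp_mask(mapping, mapping_gfp_mask(mapping) & ~__GFP_FS);
}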

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
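
/*
 * Example: with 4KiB pages (PAGE_CACHE_SIZE == 0x1000),
 * PAGE_CACHE_ALIGN(0x1001) == 0x2000, while PAGE_CACHE_ALIGN(0x1000)
 * stays 0x1000 - already-aligned addresses are unchanged.
 */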

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}
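
/*
 * A minimal sketch of the lookup-side pattern (steps 1-3) described above,
 * using page_cache_get_speculative().  Illustrative only and not part of the
 * kernel API; the real find_get_page() in mm/filemap.c additionally handles
 * radix-tree slot retries and exceptional entries.  The function name is
 * made up.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
						   pgoff_t index)
{
	struct page *page;

	rcu_read_lock();
repeat:
	/* 1. find page in radix tree */
	page = radix_tree_lookup(&mapping->page_tree, index);
	if (page) {
		/* 2. conditionally increment refcount */
		if (!page_cache_get_speculative(page))
			goto repeat;
		/* 3. check the page is still in pagecache (if not, retry) */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree, index))) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}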

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
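
/*
 * A sketch of the remove-side steps A-C from the comment above, in the style
 * of reclaim's __remove_mapping(): freeze the refcount under tree_lock and
 * back out if the page is busy or dirty.  Illustrative only; assumes the
 * caller holds the page lock plus one reference, and the function name is
 * made up.
 */
static inline int example_try_freeze_for_removal(struct address_space *mapping,
						 struct page *page)
{
	spin_lock_irq(&mapping->tree_lock);
	/* A. expect exactly two references: the caller's and the pagecache's */
	if (!page_freeze_refs(page, 2)) {
		/* a concurrent lookup (step 2 above) holds a reference; bail */
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	if (unlikely(PageDirty(page))) {
		/* cannot free after all: restore the frozen references */
		page_unfreeze_refs(page, 2);
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	/* B and C (remove from the radix tree, then free) would follow here */
	spin_unlock_irq(&mapping->tree_lock);
	return 1;
}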

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

extern struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t index, int fgp_flags);

#define FGP_ACCESSED	0x00000001

static inline struct page *find_get_page(struct address_space *mapping,
					 pgoff_t index)
{
	return find_get_page_flags(mapping, index, 0);
}

extern struct page *find_lock_page(struct address_space *mapping,
				   pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
					 pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
					   pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
					  pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page(struct address_space *mapping,
				    pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			    struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
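
/*
 * Example: with 4KiB pages, a VMA with vm_start == 0x400000 and vm_pgoff == 4
 * (the mapping starts at byte 4 * PAGE_SIZE of the file) maps user address
 * 0x403000 to pgoff = ((0x403000 - 0x400000) >> 12) + 4 = 7.
 */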

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
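
/*
 * Illustrative sketch, not part of the kernel API: a common way to use
 * lock_page_killable() on a page found in the pagecache.  The caller is
 * assumed to already hold a reference on the page; the function name is
 * made up.
 */
static inline int example_lock_and_check(struct page *page,
					 struct address_space *mapping)
{
	int err;

	err = lock_page_killable(page);
	if (err)
		return err;		/* -EINTR: a fatal signal arrived */
	/* Re-check that the page was not truncated while we slept. */
	if (page->mapping != mapping) {
		unlock_page(page);
		return -EAGAIN;		/* caller should redo the lookup */
	}
	/* ... work on the locked page here ... */
	unlock_page(page);
	return 0;
}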

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
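
/*
 * A sketch of how these prefault helpers are typically paired with an atomic
 * user copy, as in the buffered write path (cf. generic_perform_write() in
 * mm/filemap.c): fault the user buffer in up front, then copy while the page
 * is kmapped atomically, where taking a page fault is not allowed.
 * Illustrative only; real callers retry a short copy rather than failing,
 * the function name is made up, and offset + bytes is assumed to fit within
 * the page.
 */
static inline int example_prefault_then_copy(struct page *page,
					     const char __user *buf,
					     unsigned int offset,
					     unsigned int bytes)
{
	char *kaddr;
	unsigned long left;

	/* May sleep; in real callers this happens before the page is locked. */
	if (fault_in_pages_readable(buf, bytes))
		return -EFAULT;

	kaddr = kmap_atomic(page);
	/* Atomic context: must not fault; the prefault makes success likely. */
	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr);

	return left ? -EFAULT : 0;
}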

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
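
/*
 * Illustrative sketch, not part of the kernel API: the usual way a freshly
 * allocated page enters the pagecache - allocate, insert locked and onto the
 * LRU, and drop it again if another thread inserted a page first (roughly
 * what find_or_create_page() does).  The function name is made up.
 */
static inline struct page *example_create_cache_page(struct address_space *mapping,
						     pgoff_t index)
{
	struct page *page;
	int err;

	page = __page_cache_alloc(mapping_gfp_mask(mapping));
	if (!page)
		return NULL;

	err = add_to_page_cache_lru(page, mapping, index,
				    mapping_gfp_mask(mapping));
	if (err) {
		/* -EEXIST means someone else inserted a page at this index. */
		page_cache_release(page);
		return NULL;
	}
	return page;	/* returned locked; the allocation ref now belongs to the caller */
}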

#endif /* _LINUX_PAGEMAP_H */