#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */
#define AS_MM_ALL_LOCKS	(__GFP_BITS_SHIFT + 2)	/* under mm_take_all_locks() */

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
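
/*
 * Illustrative sketch (assumed, not part of this header): writeback
 * completion paths record failures with mapping_set_error(), and callers
 * that later wait for writeback (e.g. the fsync path) typically retrieve
 * the error by testing and clearing the bits, roughly:
 *
 *	int report_mapping_error(struct address_space *mapping)
 *	{
 *		int ret = 0;
 *
 *		if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 *			ret = -ENOSPC;
 *		if (test_and_clear_bit(AS_EIO, &mapping->flags))
 *			ret = -EIO;
 *		return ret;
 *	}
 *
 * report_mapping_error() is a hypothetical helper name used only for
 * illustration; the real checks live in the filemap wait paths.
 */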

#ifdef CONFIG_UNEVICTABLE_LRU
#define AS_UNEVICTABLE	(__GFP_BITS_SHIFT + 3)	/* e.g., ramdisk, SHM_LOCK; needs its own bit, distinct from AS_MM_ALL_LOCKS */

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (likely(mapping))
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}
#else
static inline void mapping_set_unevictable(struct address_space *mapping) { }
static inline void mapping_clear_unevictable(struct address_space *mapping) { }
static inline int mapping_unevictable(struct address_space *mapping)
{
	return 0;
}
#endif

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
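
/*
 * Illustrative usage (assumed, not defined here): filesystems that must not
 * recurse into themselves during reclaim typically restrict the mapping's
 * allocation mask when the inode is set up, e.g.
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 *
 * after which page_cache_alloc() and friends below allocate with GFP_NOFS
 * for that mapping.
 */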

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to look up the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on the order in which locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
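
/*
 * Illustrative lookup-side sketch (assumed, simplified; the real code lives
 * in mm/filemap.c): a find_get_page()-style lookup combines the radix-tree
 * search with page_cache_get_speculative() and the re-check described in
 * the comment above, roughly:
 *
 *	struct page *lookup_sketch(struct address_space *mapping, pgoff_t index)
 *	{
 *		struct page *page;
 *
 *		rcu_read_lock();
 *	repeat:
 *		page = radix_tree_lookup(&mapping->page_tree, index);
 *		if (page) {
 *			if (!page_cache_get_speculative(page))
 *				goto repeat;	// step 2 failed, retry from step 1
 *			// step 3: recheck the page is still at this index
 *			if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
 *							       index))) {
 *				page_cache_release(page);
 *				goto repeat;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return page;
 *	}
 *
 * lookup_sketch() is a hypothetical name for illustration only;
 * mapping->page_tree and radix_tree_lookup() are the radix-tree interfaces
 * of this era.
 */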

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
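
/*
 * Illustrative remove-side sketch (assumed, simplified; the real sequence is
 * in the reclaim path in mm/vmscan.c): with mapping->tree_lock held for
 * write, steps A-C from the big comment above map onto page_freeze_refs()
 * roughly like:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	// A: freeze the refcount, expecting only the pagecache ref + ours
 *	if (!page_freeze_refs(page, 2))
 *		goto cannot_free;	// someone else holds a reference
 *	// B: remove the page from the radix tree
 *	__remove_from_page_cache(page);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	// C: the caller may now free the page
 *
 * The expected count of 2 (one pagecache reference plus the caller's) is an
 * assumption for illustration; if the page must be kept after freezing,
 * page_unfreeze_refs() restores the count.
 */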

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
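
/*
 * Illustrative usage (assumed): callers that need page 'n' of an inode's
 * data read in and up to date typically do something like
 *
 *	struct page *page = read_mapping_page(mapping, n, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// ... use the unlocked, uptodate page ...
 *	page_cache_release(page);
 *
 * The NULL 'data' argument is passed through to ->readpage() as the file
 * pointer, which most filesystems ignore.
 */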

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an elevated "page->_count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}
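
/*
 * Illustrative usage (assumed, simplified from the generic write path in
 * mm/filemap.c): the fault-in helpers are called *before* taking the page
 * lock, so a page fault on the user buffer cannot deadlock against a page
 * we already hold locked, roughly:
 *
 *	// pre-fault the source buffer while no page lock is held
 *	fault_in_pages_readable(buf, bytes);
 *
 *	page = grab_cache_page_write_begin(mapping, index, flags);
 *	// copy with pagefaults disabled; a short copy simply retries
 *	pagefault_disable();
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	pagefault_enable();
 *
 * The iov_iter call and the retry logic are sketched from memory and are
 * assumptions here, not definitions provided by this header.
 */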

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
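
/*
 * Illustrative population sketch (assumed, simplified from read_cache_page()
 * and the readahead code): adding a freshly allocated page to the cache and
 * reading it typically looks like
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (err) {
 *		page_cache_release(page);
 *		return err;	// -EEXIST means someone else won the race
 *	}
 *	err = mapping->a_ops->readpage(file, page);	// unlocks the page on completion
 *
 * The error handling and the GFP_KERNEL mask are assumptions for
 * illustration; real callers derive the mask from mapping_gfp_mask().
 */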

#endif /* _LINUX_PAGEMAP_H */