/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions, but header inclusion dependencies make that infeasible
 * here. Instead we have to open-code the static key check.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) static_key_false(&(t).key)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

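/*
 * Return the reference count of this exact page. For a tail page of a
 * compound page this is the tail's own count, not the compound head's;
 * use page_count() for the count that controls the page's lifetime.
 */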
static inline int page_ref_count(struct page *page)
{
	return atomic_read(&page->_refcount);
}

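/*
 * Return the reference count of the compound head; for compound pages
 * this is the count that actually pins the page in memory.
 */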
static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_refcount);
}

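/*
 * Unconditionally set the reference count. Only safe when no other
 * context can concurrently take or drop references, e.g. while the
 * allocator still owns the page exclusively.
 */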
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
		__page_ref_set(page, v);
}

/*
 * Set up the page count before the page is freed into the page
 * allocator for the first time (boot or memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

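/*
 * The following helpers modify the refcount without checking the
 * result; the caller must already hold a reference (or otherwise
 * guarantee the page cannot be freed or reused concurrently).
 */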
static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -1);
}

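/*
 * The *_and_test() variants return true when the refcount reaches
 * zero, i.e. the caller held the last reference; the *_return()
 * variants return the new refcount value.
 */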
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

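/*
 * Add @nr to the refcount unless it currently equals @u; returns
 * non-zero iff the addition happened. With @u == 0 this is the
 * building block for taking a speculative reference on a page that
 * may concurrently be freed (see get_page_unless_zero()).
 */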
static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
	int ret = atomic_add_unless(&page->_refcount, nr, u);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}

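/*
 * Atomically replace an expected refcount of @count with 0. Returns 1
 * on success and 0 if the count did not match. While frozen the page
 * has no references, so speculative page_ref_add_unless(page, n, 0)
 * callers cannot take a new one. A typical caller pattern (a sketch,
 * not code from this file; expected_refs is a placeholder):
 *
 *	if (page_ref_freeze(page, expected_refs)) {
 *		... exclusive access, no new references possible ...
 *		page_ref_unfreeze(page, expected_refs);
 *	}
 */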
static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

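/*
 * Thaw a page frozen by page_ref_freeze(), restoring its refcount to
 * @count. atomic_set_release() ensures all stores made while the page
 * was frozen are visible before the new refcount is.
 */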
static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set_release(&page->_refcount, count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

#endif