// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_size_compat.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

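/*
 * Every entry exported by the files below is a u64, so userspace indexes
 * them by PFN: the entry for PFN N starts at byte offset N * KPMSIZE.
 */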
#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}

/* /proc/kpagecount - an array exposing page mapcounts
 *
 * Each entry is a u64 representing the corresponding
 * physical page mapcount.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
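	/* Only whole, u64-aligned reads are allowed, clamped to the dumpable PFN range. */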
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		struct page *page;
		u64 mapcount = 0;

		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		page = pfn_to_online_page(pfn);
		if (page)
			mapcount = folio_precise_page_mapcount(page_folio(page),
							       page);

		if (put_user(mapcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecount_proc_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_lseek = mem_lseek,
	.proc_read = kpagecount_read,
};
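
/*
 * Illustrative userspace read (not part of this file): the mapcount of
 * PFN N lives at byte offset N * sizeof(u64), e.g.
 *
 *	int fd = open("/proc/kpagecount", O_RDONLY);
 *	uint64_t cnt;
 *	pread(fd, &cnt, sizeof(cnt), (off_t)pfn * sizeof(uint64_t));
 */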

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

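/*
 * Extract kernel page flag bit @kbit from @kflags and place it at
 * userspace bit position @ubit.
 */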
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(const struct page *page)
{
	const struct folio *folio;
	unsigned long k;
	unsigned long mapping;
	bool is_anon;
	u64 u = 0;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;
	folio = page_folio(page);

	k = folio->flags;
	mapping = (unsigned long)folio->mapping;
	is_anon = mapping & PAGE_MAPPING_ANON;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 */
	if (page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (is_anon) {
		u |= 1 << KPF_ANON;
		if (mapping & PAGE_MAPPING_KSM)
			u |= 1 << KPF_KSM;
	}

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (page == &folio->page)
		u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head);
	else
		u |= 1 << KPF_COMPOUND_TAIL;
	if (folio_test_hugetlb(folio))
		u |= 1 << KPF_HUGE;
	else if (folio_test_large(folio) &&
		 folio_test_large_rmappable(folio)) {
		/* Note: we indicate any THPs here, not just PMD-sized ones */
		u |= 1 << KPF_THP;
	} else if (is_huge_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
		u |= 1 << KPF_THP;
	} else if (is_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
	}

	/*
	 * Caveats on high order pages: PG_buddy and PG_slab will only be set
	 * on the head page.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;
	if (folio_test_slab(folio))
		u |= 1 << KPF_SLAB;

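	/*
	 * On 64-bit kernels the idle bit lives directly in folio->flags
	 * (already snapshotted in k); otherwise page_idle tracks it out of
	 * line, so ask the helper.
	 */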
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	u |= kpf_copy_bit(k, KPF_IDLE, PG_idle);
#else
	if (folio_test_idle(folio))
		u |= 1 << KPF_IDLE;
#endif

	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
	u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);

#define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache))
	if ((k & SWAPCACHE) == SWAPCACHE)
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);

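	/*
	 * hugetlb folios track poisoning on the head page (folio flags,
	 * i.e. k); everywhere else PG_hwpoison is a per-page bit, so read
	 * it from this page rather than the folio.
	 */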
#ifdef CONFIG_MEMORY_FAILURE
	if (u & (1 << KPF_HUGE))
		u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
	else
		u |= kpf_copy_bit(page->flags, KPF_HWPOISON, PG_hwpoison);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
	u |= kpf_copy_bit(k, KPF_OWNER_2, PG_owner_2);
	u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
	u |= kpf_copy_bit(k, KPF_ARCH_2, PG_arch_2);
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
	u |= kpf_copy_bit(k, KPF_ARCH_3, PG_arch_3);
#endif

	return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		struct page *page = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(page), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_lseek = mem_lseek,
	.proc_read = kpageflags_read,
};
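
/*
 * Illustrative userspace check (not part of this file): bit KPF_THP in
 * the entry for a PFN tells whether it belongs to a transparent huge page:
 *
 *	uint64_t flags;
 *	pread(fd, &flags, sizeof(flags), (off_t)pfn * sizeof(uint64_t));
 *	bool is_thp = flags & (1ULL << KPF_THP);
 */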

#ifdef CONFIG_MEMCG
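/* /proc/kpagecgroup - an array exposing page cgroup inodes
 *
 * Each entry is a u64 representing the inode number of the memory
 * cgroup the corresponding physical page is charged to, or 0 if the
 * page is not charged or its memmap is not online.
 */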
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_lseek = mem_lseek,
	.proc_read = kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */

static int __init proc_page_init(void)
{
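	/*
	 * Under page size emulation (page_size_compat), the page size
	 * advertised to userspace (__PAGE_SIZE) can differ from the
	 * kernel's PAGE_SIZE; the per-PFN granularity of these files
	 * would then not match userspace's view, so don't create them.
	 */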
	if (__PAGE_SIZE != PAGE_SIZE)
		return 0;

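	/* These files expose physical-memory details, so they are root-only (0400). */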
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);