// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/page_size_compat.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#include "internal.h"

#define BITMAP_CHUNK_SIZE	sizeof(u64)
#define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)
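
/*
 * Each u64 chunk of the bitmap covers BITMAP_CHUNK_BITS (64) pfns, so a
 * byte offset @pos into the bitmap file maps to pfn = pos * BITS_PER_BYTE,
 * and bit i of a chunk corresponds to the i-th pfn covered by that chunk.
 */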

/*
 * Idle page tracking only considers user memory pages; for other types of
 * pages the idle flag is always unset and an attempt to set it is silently
 * ignored.
 *
 * We treat a page as a user memory page if it is on an LRU list, because it is
 * always safe to pass such a page to rmap_walk(), which is essential for idle
 * page tracking. With such an indicator of user pages we can skip isolated
 * pages, but since there are not usually many of them, it will hardly affect
 * the overall result.
 *
 * This function tries to get a user memory page by pfn as described above.
 */
static struct folio *page_idle_get_folio(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page || PageTail(page))
		return NULL;

	folio = page_folio(page);
	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;
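	/*
	 * Re-check under the reference: the page could have been freed and
	 * reused, or the folio split, between the lockless page_folio()
	 * lookup above and folio_try_get(), so bail out if the page no
	 * longer belongs to this folio or the folio left the LRU.
	 */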
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		folio = NULL;
	}
	return folio;
}

static bool page_idle_clear_pte_refs_one(struct folio *folio,
					struct vm_area_struct *vma,
					unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
	bool referenced = false;

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			/*
			 * For a PTE-mapped THP, if any one subpage is
			 * referenced, the whole THP counts as referenced.
			 */
			if (ptep_clear_young_notify(vma, addr, pvmw.pte))
				referenced = true;
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
				referenced = true;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}
	}

	if (referenced) {
		folio_clear_idle(folio);
		/*
		 * We cleared the referenced bit in a mapping to this page. To
		 * avoid interference with page reclaim, mark it young so that
		 * folio_referenced() will return > 0.
		 */
		folio_set_young(folio);
	}
	return true;
}

static void page_idle_clear_pte_refs(struct folio *folio)
{
	/*
	 * Since rwc.try_lock is unused, rwc is effectively immutable, so we
	 * can make it static to save some cycles and stack.
	 */
	static struct rmap_walk_control rwc = {
		.rmap_one = page_idle_clear_pte_refs_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
		return;

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t pos, size_t count)
{
	u64 *out = (u64 *)buf;
	struct folio *folio;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return 0;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if (!bit)
			*out = 0ULL;
		folio = page_idle_get_folio(pfn);
		if (folio) {
			if (folio_test_idle(folio)) {
				/*
				 * The page might have been referenced via a
				 * pte, in which case it is not idle. Clear
				 * refs and recheck.
				 */
				page_idle_clear_pte_refs(folio);
				if (folio_test_idle(folio))
					*out |= 1ULL << bit;
			}
			folio_put(folio);
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			out++;
		cond_resched();
	}
	return (char *)out - buf;
}
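
/*
 * For illustration, a minimal userspace sketch of checking whether a pfn
 * is idle via this bitmap (not part of the kernel; includes and error
 * handling omitted). Reads must be 8-byte sized and aligned, and the
 * chunk covering @pfn sits at byte offset (pfn / 64) * 8:
 *
 *	uint64_t chunk;
 *	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);
 *	pread(fd, &chunk, sizeof(chunk), (pfn / 64) * sizeof(chunk));
 *	int idle = (chunk >> (pfn % 64)) & 1;
 *	close(fd);
 */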

static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
				      struct bin_attribute *attr, char *buf,
				      loff_t pos, size_t count)
{
	const u64 *in = (u64 *)buf;
	struct folio *folio;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return -ENXIO;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if ((*in >> bit) & 1) {
			folio = page_idle_get_folio(pfn);
			if (folio) {
				page_idle_clear_pte_refs(folio);
				folio_set_idle(folio);
				folio_put(folio);
			}
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			in++;
		cond_resched();
	}
	return (char *)in - buf;
}
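
/*
 * The matching userspace sketch for marking a pfn idle (again purely
 * illustrative, error handling omitted): write back a chunk with the
 * pfn's bit set, at the same 8-byte-aligned offset used for reading:
 *
 *	uint64_t chunk = 1ULL << (pfn % 64);
 *	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_WRONLY);
 *	pwrite(fd, &chunk, sizeof(chunk), (pfn / 64) * sizeof(chunk));
 *	close(fd);
 */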

static struct bin_attribute page_idle_bitmap_attr =
		__BIN_ATTR(bitmap, 0600,
			   page_idle_bitmap_read, page_idle_bitmap_write, 0);

static struct bin_attribute *page_idle_bin_attrs[] = {
	&page_idle_bitmap_attr,
	NULL,
};

static const struct attribute_group page_idle_attr_group = {
	.bin_attrs = page_idle_bin_attrs,
	.name = "page_idle",
};
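
/*
 * Registering this group on mm_kobj exposes the bitmap as
 * /sys/kernel/mm/page_idle/bitmap, readable and writable by root (0600).
 */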

static int __init page_idle_init(void)
{
	int err;

	/*
	 * With page size emulation (page_size_compat), the emulated
	 * __PAGE_SIZE may differ from the kernel's native PAGE_SIZE;
	 * skip registering the interface in that case.
	 */
	if (__PAGE_SIZE != PAGE_SIZE)
		return 0;

	err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
	if (err) {
		pr_err("page_idle: register sysfs failed\n");
		return err;
	}
	return 0;
}
subsys_initcall(page_idle_init);