// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/page_pinner.h>
#include <linux/migrate.h>
#include "internal.h"

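/*
 * Defining CREATE_TRACE_POINTS before including the trace header below
 * instantiates the tracepoints declared there in this translation unit.
 */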
#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone = page_zone(page);
	struct page *unmovable;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.
	 */
	if (is_migrate_isolate_page(page)) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return -EBUSY;
	}

	/*
	 * FIXME: memory hotplug does not call shrink_slab() by itself yet,
	 * so we only check for movable pages here.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
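		/*
		 * Move every free page in this pageblock to the
		 * MIGRATE_ISOLATE freelist. Pages on the isolate list are
		 * no longer counted as free, so drop them from the old
		 * migratetype's freepage counters as well.
		 */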
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		spin_unlock_irqrestore(&zone->lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (isol_flags & REPORT_FAILURE) {
		/*
		 * printk() with zone->lock held will likely trigger a
		 * lockdep splat, so defer it here.
		 */
		dump_page(unmovable, "unmovable page");
	}

	return -EBUSY;
}

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page of order >= pageblock_order on an isolated
	 * pageblock is prevented from merging (to keep the freepage
	 * counting correct), a mergeable free buddy page may exist.
	 * move_freepages_block() does not handle merging, so we need
	 * another approach: isolating the page and freeing it again
	 * lets the buddy allocator merge them.
	 */
	if (PageBuddy(page)) {
		order = buddy_order(page);
		if (order >= pageblock_order && order < MAX_ORDER - 1) {
			pfn = page_to_pfn(page);
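			/* The order-N buddy of pfn lives at pfn ^ (1 << N). */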
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (!is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page of order >= pageblock_order, there
	 * should be no other free pages in the range, so we can skip the
	 * costly pageblock scan that moving the free pages would require.
	 *
	 * We didn't actually touch any of the isolated pages, so place them
	 * at the tail of the freelist. This is an optimization for memory
	 * onlining: just-onlined memory won't immediately be considered for
	 * allocation.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation-type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *					 and PageOffline() pages.
 *			REPORT_FAILURE - report details about the failure to
 *					 isolate the range
 *
 * Setting the page-allocation-type to MIGRATE_ISOLATE means that free pages
 * in the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again. If the specified range includes migrate
 * types other than MOVABLE or CMA, this will fail with -EBUSY. To finally
 * isolate all pages in the range, the caller has to free all pages in the
 * range; test_pages_isolated() can be used to check whether that has happened.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate(), which then returns an error.
 * We then clean up by restoring the migration type on pageblocks we may have
 * modified and return -EBUSY to the caller. This prevents two threads from
 * simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * A call to drain_all_pages() after isolation can flush most of them. However,
 * in some cases pages might still end up on pcp lists, and that would allow
 * their allocation even though they are in fact isolated already. Depending
 * on how strong a guarantee the caller needs, zone_pcp_disable/enable()
 * might be used to flush and disable the pcplists before isolation and enable
 * them again after unisolation.
 *
 * Return: 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
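
/*
 * Example usage (an illustrative sketch, not part of this file): a caller
 * such as memory offlining would typically combine the functions in this
 * file as below. The helper name example_isolate_range() and its error
 * handling are hypothetical; see the comment on start_isolate_page_range()
 * for the pcplist caveats that drain_all_pages() addresses.
 */
#if 0	/* illustrative only, not compiled */
static int example_isolate_range(unsigned long start_pfn,
				 unsigned long end_pfn)
{
	int ret;

	/* Mark all pageblocks in the range MIGRATE_ISOLATE. */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, MEMORY_OFFLINE);
	if (ret)
		return ret;

	/* Flush per-cpu lists so stray freed pages reach the buddy lists. */
	drain_all_pages(page_zone(pfn_to_page(start_pfn)));

	/* After migrating/freeing pages, verify the whole range is isolated. */
	ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);

	/* On failure, make the range allocatable again. */
	if (ret)
		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return ret;
}
#endif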

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone, and
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << buddy_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
			 !page_count(page))
			/*
			 * The responsible driver agreed to skip PageOffline()
			 * pages when offlining memory by dropping its
			 * reference in MEM_GOING_OFFLINE.
			 */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages. Therefore,
	 * check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
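	/*
	 * Find any online page in the range so the zone can be derived
	 * below; a range with no online pages cannot be tested.
	 */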
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page) {
		ret = -EBUSY;
		goto out;
	}

	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	ret = pfn < end_pfn ? -EBUSY : 0;

out:
	trace_test_pages_isolated(start_pfn, end_pfn, pfn);
	if (pfn < end_pfn)
		page_pinner_failure_detect(pfn_to_page(pfn));

	return ret;
}