// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>
#include <linux/rcupdate.h>
/*
 * struct page extension
 *
 * This is the feature to manage memory for extended data per page.
 *
 * Until now, we had to modify struct page itself to store extra data per
 * page, which requires rebuilding the kernel and is a really time-consuming
 * process. Sometimes a rebuild is impossible due to third party module
 * dependencies. And, finally, enlarging struct page could cause unwanted
 * changes in system behaviour.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended data per page in a certain place rather
 * than in struct page itself. This memory can be accessed by the accessor
 * functions provided by this code. During the boot process, it checks
 * whether allocation of a huge chunk of memory is needed or not. If not,
 * it avoids allocating memory at all. With this advantage, we can include
 * this feature in the kernel by default, avoiding rebuilds and the related
 * problems.
 *
 * To make this work well, there are two callbacks for clients. One is the
 * need callback, which is mandatory if the user wants to avoid useless
 * memory allocation at boot time. The other is the optional init callback,
 * which is used to do proper initialization after memory is allocated.
 *
 * The need callback is used to decide whether extended memory allocation
 * is needed or not. Sometimes users want to deactivate some features in a
 * given boot, making the extra memory unnecessary. In this case, to avoid
 * allocating a huge chunk of memory, each client expresses its need for
 * extra memory through the need callback. If one of the need callbacks
 * returns true, it means that someone needs extra memory, so the page
 * extension core should allocate memory for page extension. If none of
 * them return true, memory isn't needed at all in this boot and the page
 * extension core can skip the allocation. As a result, no memory is
 * wasted.
 *
 * When a need callback returns true, page_ext checks if there is a request
 * for extra memory through the size field in struct page_ext_operations.
 * If it is non-zero, extra space is allocated for each page_ext entry and
 * the placement of that space is returned to the client through the offset
 * field in struct page_ext_operations.
 *
 * The init callback is used to do proper initialization after page
 * extension is completely initialized. On sparse memory systems, the extra
 * memory is allocated some time later than the memmap, so the lifetime of
 * the memory for page extension isn't the same as that of the memmap for
 * struct page. Therefore, clients can't store extra data until page
 * extension is initialized, even if pages are already allocated and in
 * use. This could leave the extra data per page in an inadequate state,
 * so, to prevent that, clients can use this callback to initialize it
 * correctly.
 */
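
/*
 * As a rough usage sketch (not a client of this file; the "foo" names are
 * hypothetical, made up for illustration), a client wires itself up like
 * this and adds &foo_ops to the page_ext_ops[] array below:
 *
 *	static bool need_foo(void)
 *	{
 *		return foo_enabled;	// e.g. set by a boot parameter
 *	}
 *
 *	static void init_foo(void)
 *	{
 *		// set up per-page state now that page_ext memory exists
 *	}
 *
 *	struct page_ext_operations foo_ops = {
 *		.size = sizeof(struct foo_page_data),	// extra bytes per page
 *		.need = need_foo,
 *		.init = init_foo,
 *	};
 *
 * After invoke_need_callbacks() runs, foo's data for a page lives at
 * (void *)page_ext_get(page) + foo_ops.offset.
 */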

#ifdef CONFIG_SPARSEMEM
#define PAGE_EXT_INVALID	(0x1)
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
	return true;
}
struct page_ext_operations page_idle_ops = {
	.need = need_page_idle,
};
#endif

static struct page_ext_operations *page_ext_ops[] = {
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
#ifdef CONFIG_PAGE_PINNER
	&page_pinner_ops,
#endif
};

unsigned long page_ext_size = sizeof(struct page_ext);

static unsigned long total_usage;

static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = page_ext_size;
			page_ext_size += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + page_ext_size * index;
}

/**
 * page_ext_get() - Get the extended information for a page.
 * @page: The page we're interested in.
 *
 * Ensures that the page_ext will remain valid until page_ext_put()
 * is called.
 *
 * Return: NULL if no page_ext exists for this page.
 * Context: Any context.  Caller may not sleep until they have called
 * page_ext_put().
 */
struct page_ext *page_ext_get(struct page *page)
{
	struct page_ext *page_ext;

	rcu_read_lock();
	page_ext = lookup_page_ext(page);
	if (!page_ext) {
		rcu_read_unlock();
		return NULL;
	}

	return page_ext;
}

/**
 * page_ext_put() - Finish working with a page's extended information.
 * @page_ext: Page extended information received from page_ext_get().
 *
 * The page extended information of the page may not be valid after this
 * function is called.
 *
 * Return: None.
 * Context: Any context in which the corresponding page_ext_get() was
 * called.
 */
void page_ext_put(struct page_ext *page_ext)
{
	if (unlikely(!page_ext))
		return;

	rcu_read_unlock();
}
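
/*
 * Illustrative caller pattern (hypothetical; not taken from this file):
 *
 *	struct page_ext *page_ext = page_ext_get(page);
 *
 *	if (page_ext) {
 *		// Read or update a client's data here. The RCU read-side
 *		// critical section entered by page_ext_get() keeps the
 *		// backing storage from being freed by memory offlining
 *		// until page_ext_put() is called.
 *		page_ext_put(page_ext);
 *	}
 */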

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	WARN_ON_ONCE(!rcu_read_lock_held());
	base = NODE_DATA(page_to_nid(page))->node_page_ext;
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}
EXPORT_SYMBOL_GPL(lookup_page_ext);

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if node range is not aligned with
	 * MAX_ORDER_NR_PAGES. When page allocator's buddy algorithm
	 * checks buddy's status, range could be out of exact node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = page_ext_size * nr_pages;

	base = memblock_alloc_try_nid(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}
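
/*
 * Worked example of the alignment slack above (hypothetical numbers):
 * with MAX_ORDER_NR_PAGES == 1024 and a node spanning pfns [3584, 10240),
 * lookup_page_ext() above computes
 *
 *	index = pfn - round_down(3584, 1024) = pfn - 3072
 *
 * so the table must also cover the 512 pfns of slack below the node's
 * first pfn (and any above its last); the extra MAX_ORDER_NR_PAGES
 * allocated above provides that headroom.
 */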

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */
static bool page_ext_invalid(struct page_ext *page_ext)
{
	return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
}

struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_ext *page_ext = READ_ONCE(section->page_ext);

	WARN_ON_ONCE(!rcu_read_lock_held());
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (page_ext_invalid(page_ext))
		return NULL;
	return get_entry(page_ext, pfn);
}
EXPORT_SYMBOL_GPL(lookup_page_ext);

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	addr = vzalloc_node(size, nid);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = page_ext_size * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION.  For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - page_ext_size * pfn;
	total_usage += table_size;
	return 0;
}
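
/*
 * Worked example of the encoding above (hypothetical numbers): with
 * page_ext_size == 64 and a section whose first pfn is 0x8000, the value
 * stored is base - 64 * 0x8000. lookup_page_ext() can then hand any pfn
 * in the section straight to get_entry():
 *
 *	get_entry(stored, pfn) = stored + 64 * pfn
 *	                       = base + 64 * (pfn - 0x8000)
 *
 * i.e. the pfn indexes the section's table directly, with no per-lookup
 * alignment step.
 */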
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = page_ext_size * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;

	base = READ_ONCE(ms->page_ext);
	/*
	 * page_ext here can be valid while doing the roll back
	 * operation in online_page_ext().
	 */
	if (page_ext_invalid(base))
		base = (void *)base - PAGE_EXT_INVALID;
	WRITE_ONCE(ms->page_ext, NULL);

	base = get_entry(base, pfn);
	free_page_ext(base);
}

static void __invalidate_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	void *val;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	val = (void *)ms->page_ext + PAGE_EXT_INVALID;
	WRITE_ONCE(ms->page_ext, val);
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == NUMA_NO_NODE) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
		fail = init_section_page_ext(pfn, nid);
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static int __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	/*
	 * Freeing of page_ext is done in 3 steps to avoid
	 * use-after-free of it:
	 * 1) Traverse all the sections and mark their page_ext
	 *    as invalid.
	 * 2) Wait for all the existing users of page_ext who
	 *    started before invalidation to finish.
	 * 3) Free the page_ext.
	 */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__invalidate_page_ext(pfn);

	synchronize_rcu();

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
	return 0;
}

static int __meminit page_ext_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out of node pages are not initialized.  So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfn ranges can overlap.
			 * We know some archs can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
			cond_resched();
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif