// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage.
 */
#define PAGE_OWNER_STACK_DEPTH (16)

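/*
 * Per-page tracking record, stored in each page's page_ext area:
 * @order:               order of the tracked allocation
 * @last_migrate_reason: MR_* reason of the last migration, or -1 if the
 *                       page was never migrated
 * @gfp_mask:            gfp flags the page was allocated with
 * @handle:              stack depot handle of the allocation stack trace
 * @free_handle:         stack depot handle of the last free stack trace
 * @ts_nsec:             allocation timestamp, from local_clock()
 * @free_ts_nsec:        last free timestamp, from local_clock()
 * @pid:                 pid of the allocating task
 */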
struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	depot_stack_handle_t free_handle;
	u64 ts_nsec;
	u64 free_ts_nsec;
	pid_t pid;
};

static bool page_owner_enabled = false;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);
EXPORT_SYMBOL_GPL(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

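/*
 * Tracking is off by default and is switched on with the "page_owner=on"
 * kernel boot parameter (see Documentation/vm/page_owner.rst).
 */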
static int __init early_page_owner_param(char *buf)
{
	return kstrtobool(buf, &page_owner_enabled);
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
	return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

static void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

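/*
 * Registered with the page extension core: .need tells it whether to
 * reserve sizeof(struct page_owner) bytes per page, and .init runs once
 * the page_ext storage is ready. The core also fills in
 * page_owner_ops.offset, which get_page_owner() below relies on.
 */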
struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return (void *)page_ext + page_owner_ops.offset;
}

depot_stack_handle_t get_page_owner_handle(struct page_ext *page_ext, unsigned long pfn)
{
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!page_owner_enabled)
		return 0;

	page_owner = get_page_owner(page_ext);

	/* skip handle for tail pages of higher order allocations */
	if (!IS_ALIGNED(pfn, 1 << page_owner->order))
		return 0;

	handle = READ_ONCE(page_owner->handle);
	return handle;
}
EXPORT_SYMBOL_NS_GPL(get_page_owner_handle, MINIDUMP);

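/*
 * Capture the caller's stack trace and deduplicate it in the stack depot.
 * If the depot cannot store it (e.g. allocation failure), the shared
 * failure_handle is returned instead so the record is never left empty.
 */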
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	/*
	 * Avoid recursion.
	 *
	 * Sometimes page metadata allocation tracking requires more
	 * memory to be allocated:
	 * - when a new stack trace is saved to the stack depot
	 * - when the backtrace itself is calculated (ia64)
	 */
	if (current->in_page_owner)
		return dummy_handle;
	current->in_page_owner = 1;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;

	current->in_page_owner = 0;
	return handle;
}

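/*
 * Called from the page allocator's free path to stamp every subpage of
 * the freed block with the freeing stack trace and timestamp, and to
 * clear its "allocated" state.
 */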
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;
	depot_stack_handle_t handle;
	struct page_owner *page_owner;
	u64 free_ts_nsec = local_clock();

	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
	page_ext = page_ext_get(page);

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < (1 << order); i++) {
		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_owner = get_page_owner(page_ext);
		page_owner->free_handle = handle;
		page_owner->free_ts_nsec = free_ts_nsec;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

static inline void __set_page_owner_handle(struct page_ext *page_ext,
					depot_stack_handle_t handle,
					unsigned int order, gfp_t gfp_mask)
{
	struct page_owner *page_owner;
	int i;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = -1;
		page_owner->pid = current->pid;
		page_owner->ts_nsec = local_clock();
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

		page_ext = page_ext_next(page_ext);
	}
}

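/*
 * Called from the page allocator's allocation path (see post_alloc_hook())
 * to record who allocated the page and with which stack trace.
 */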
noinline void __set_page_owner(struct page *page, unsigned int order,
					gfp_t gfp_mask)
{
	struct page_ext *page_ext;
	depot_stack_handle_t handle;

	handle = save_stack(gfp_mask);

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;

	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
	page_ext_put(page_ext);
}
EXPORT_SYMBOL_GPL(__set_page_owner);

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
	page_ext_put(page_ext);
}

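/*
 * When a high-order page is split into order-0 pages, rewrite each
 * subpage's record as an order-0 allocation of its own.
 */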
void __split_page_owner(struct page *page, unsigned int nr)
{
	int i;
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < nr; i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = 0;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext;
	struct page_ext *new_ext;
	struct page_owner *old_page_owner, *new_page_owner;

	old_ext = page_ext_get(oldpage);
	if (unlikely(!old_ext))
		return;

	new_ext = page_ext_get(newpage);
	if (unlikely(!new_ext)) {
		page_ext_put(old_ext);
		return;
	}

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	new_page_owner->order = old_page_owner->order;
	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
	new_page_owner->last_migrate_reason =
		old_page_owner->last_migrate_reason;
	new_page_owner->handle = old_page_owner->handle;
	new_page_owner->pid = old_page_owner->pid;
	new_page_owner->ts_nsec = old_page_owner->ts_nsec;
	new_page_owner->free_ts_nsec = old_page_owner->free_ts_nsec;

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the oldpage to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
	page_ext_put(new_ext);
	page_ext_put(old_ext);
}

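/*
 * Backs the "mixed" pageblock accounting in /proc/pagetypeinfo: for each
 * migratetype, count the pageblocks that contain at least one page whose
 * allocation migratetype differs from the pageblock's current migratetype.
 */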
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn, block_end_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = buddy_order_unsafe(page);
				if (freepage_order < MAX_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				goto ext_put_continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfp_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				page_ext_put(page_ext);
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
			page_ext_put(page_ext);
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

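/*
 * Format one page_owner record into a kernel buffer and copy it to
 * userspace. Returns the number of bytes written, -ENOMEM when the record
 * does not fit in the (at most page-sized) buffer, or -EFAULT if the copy
 * to userspace fails.
 */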
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	unsigned long *entries;
	unsigned int nr_entries;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg), pid %d, ts %llu ns, free_ts %llu ns\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask, page_owner->pid,
			page_owner->ts_nsec, page_owner->free_ts_nsec);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfp_migratetype(page_owner->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	nr_entries = stack_depot_fetch(handle, &entries);
	ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

void __dump_page_owner(const struct page *page)
{
	struct page_ext *page_ext = page_ext_get((void *)page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	unsigned long *entries;
	unsigned int nr_entries;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfp_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		page_ext_put(page_ext);
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, ts %llu, free_ts %llu\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
		 page_owner->pid, page_owner->ts_nsec, page_owner->free_ts_nsec);

	handle = READ_ONCE(page_owner->handle);
	if (!handle) {
		pr_alert("page_owner allocation stack trace missing\n");
	} else {
		nr_entries = stack_depot_fetch(handle, &entries);
		stack_trace_print(entries, nr_entries, 0);
	}

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		nr_entries = stack_depot_fetch(handle, &entries);
		pr_alert("page last free stack trace:\n");
		stack_trace_print(entries, nr_entries, 0);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	page_ext_put(page_ext);
}

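/*
 * debugfs read handler. Scans the physical address space from where the
 * previous read stopped (*ppos encodes the pfn offset past min_low_pfn)
 * and emits exactly one allocated page's record per read() call.
 */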
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * Take a temporary copy of the page_owner data so that we
		 * don't block (in copy_to_user() or a GFP_KERNEL
		 * allocation) while still holding the RCU read lock taken
		 * by page_ext_get().
		 */
		struct page_owner page_owner_tmp;

		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = page_ext_get(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			goto ext_put_continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			goto ext_put_continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			goto ext_put_continue;

		/*
		 * Access to page_owner->handle isn't synchronized, so be
		 * careful when reading it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			goto ext_put_continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		page_owner_tmp = *page_owner;
		page_ext_put(page_ext);
		return print_page_owner(buf, count, pfn, page,
				&page_owner_tmp, handle);
ext_put_continue:
		page_ext_put(page_ext);
	}

	return 0;
}

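/*
 * Pages allocated before page_owner was initialized carry no record. Walk
 * the zone and stamp every such page with the shared early_handle, so a
 * dump can at least distinguish "allocated early" from "never tracked".
 */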
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page = pfn_to_page(pfn);
			struct page_ext *page_ext;

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = buddy_order_unsafe(page);

				if (order > 0 && order < MAX_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				goto ext_put_continue;

			/* Found early allocated page */
			__set_page_owner_handle(page_ext, early_handle,
						0, 0);
			count++;
ext_put_continue:
			page_ext_put(page_ext);
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

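/*
 * Userspace interface. With tracking enabled, a typical session looks
 * like (see Documentation/vm/page_owner.rst):
 *
 *   # boot with "page_owner=on", then:
 *   cat /sys/kernel/debug/page_owner > page_owner_full.txt
 */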
static const struct file_operations proc_page_owner_operations = {
	.read		= read_page_owner,
};

static int __init pageowner_init(void)
{
	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);

	return 0;
}
late_initcall(pageowner_init)