// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	depot_stack_handle_t free_handle;
	u64 ts_nsec;
	u64 free_ts_nsec;
	char comm[TASK_COMM_LEN];
	pid_t pid;
	pid_t tgid;
	pid_t free_pid;
	pid_t free_tgid;
};

struct stack {
	struct stack_record *stack_record;
	struct stack *next;
};
static struct stack dummy_stack;
static struct stack failure_stack;
static struct stack *stack_list;
static DEFINE_SPINLOCK(stack_list_lock);

static bool page_owner_enabled __initdata;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

static inline void set_current_in_page_owner(void)
{
	/*
	 * Avoid recursion.
	 *
	 * We might need to allocate more memory from page_owner code, so make
	 * sure to signal it in order to avoid recursion.
	 */
	current->in_page_owner = 1;
}

static inline void unset_current_in_page_owner(void)
{
	current->in_page_owner = 0;
}

static int __init early_page_owner_param(char *buf)
{
	int ret = kstrtobool(buf, &page_owner_enabled);

	if (page_owner_enabled)
		stack_depot_request_early_init();

	return ret;
}
early_param("page_owner", early_page_owner_param);

static __init bool need_page_owner(void)
{
	return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

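/*
 * page_owner_ops.init callback: runs once the page_ext machinery has been
 * set up for all pages.
 */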
static __init void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	init_early_allocated_pages();
	/* Initialize dummy and failure stacks and link them to stack_list */
	dummy_stack.stack_record = __stack_depot_get_stack_record(dummy_handle);
	failure_stack.stack_record = __stack_depot_get_stack_record(failure_handle);
	if (dummy_stack.stack_record)
		refcount_set(&dummy_stack.stack_record->count, 1);
	if (failure_stack.stack_record)
		refcount_set(&failure_stack.stack_record->count, 1);
	dummy_stack.next = &failure_stack;
	stack_list = &dummy_stack;
	static_branch_enable(&page_owner_inited);
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
	.need_shared_flags = true,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return page_ext_data(page_ext, &page_owner_ops);
}

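/*
 * Fetch the allocation stack handle recorded for the page at @pfn.
 * Returns 0 when page_owner is not initialized, or when @pfn is a tail
 * page of a higher-order allocation, so that each allocation is reported
 * only once.
 */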
depot_stack_handle_t get_page_owner_handle(struct page_ext *page_ext, unsigned long pfn)
{
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return 0;

	page_owner = get_page_owner(page_ext);

	/* skip handle for tail pages of higher order allocations */
	if (!IS_ALIGNED(pfn, 1 << page_owner->order))
		return 0;

	handle = READ_ONCE(page_owner->handle);
	return handle;
}
EXPORT_SYMBOL_NS_GPL(get_page_owner_handle, MINIDUMP);

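/*
 * Save the current stack trace in the stack depot. Returns dummy_handle
 * when called recursively from within page_owner itself, and
 * failure_handle when the depot cannot record the trace.
 */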
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	if (current->in_page_owner)
		return dummy_handle;

	set_current_in_page_owner();
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;
	unset_current_in_page_owner();

	return handle;
}

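/*
 * Link a newly seen stack_record into the global stack_list so it can be
 * reported through the page_owner_stacks debugfs interface.
 */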
static void add_stack_record_to_list(struct stack_record *stack_record,
				     gfp_t gfp_mask)
{
	unsigned long flags;
	struct stack *stack;

	set_current_in_page_owner();
	stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
	if (!stack) {
		unset_current_in_page_owner();
		return;
	}
	unset_current_in_page_owner();

	stack->stack_record = stack_record;
	stack->next = NULL;

	spin_lock_irqsave(&stack_list_lock, flags);
	stack->next = stack_list;
	/*
	 * This pairs with smp_load_acquire() from function
	 * stack_start(). This guarantees that stack_start()
	 * will see an updated stack_list before starting to
	 * traverse the list.
	 */
	smp_store_release(&stack_list, stack);
	spin_unlock_irqrestore(&stack_list_lock, flags);
}

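/*
 * Account @nr_base_pages newly allocated base pages to the stack behind
 * @handle, adding the stack to stack_list the first time it is seen.
 */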
static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask,
				   int nr_base_pages)
{
	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);

	if (!stack_record)
		return;

	/*
	 * New stack_records that do not use STACK_DEPOT_FLAG_GET start
	 * with REFCOUNT_SATURATED to catch spurious increments of their
	 * refcount.
	 * Since we do not use the STACK_DEPOT_FLAG_GET API, let us
	 * set a refcount of 1 ourselves.
	 */
	if (refcount_read(&stack_record->count) == REFCOUNT_SATURATED) {
		int old = REFCOUNT_SATURATED;

		if (atomic_try_cmpxchg_relaxed(&stack_record->count.refs, &old, 1))
			/* Add the new stack_record to our list */
			add_stack_record_to_list(stack_record, gfp_mask);
	}
	refcount_add(nr_base_pages, &stack_record->count);
}

static void dec_stack_record_count(depot_stack_handle_t handle,
				   int nr_base_pages)
{
	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);

	if (!stack_record)
		return;

	if (refcount_sub_and_test(nr_base_pages, &stack_record->count))
		pr_warn("%s: refcount went to 0 for handle %u\n", __func__,
			handle);
}

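/*
 * Record allocation ownership in the page_ext of every base page of a
 * 1 << @order allocation.
 */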
static inline void __update_page_owner_handle(struct page_ext *page_ext,
					      depot_stack_handle_t handle,
					      unsigned short order,
					      gfp_t gfp_mask,
					      short last_migrate_reason, u64 ts_nsec,
					      pid_t pid, pid_t tgid, char *comm)
{
	int i;
	struct page_owner *page_owner;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = last_migrate_reason;
		page_owner->pid = pid;
		page_owner->tgid = tgid;
		page_owner->ts_nsec = ts_nsec;
		strscpy(page_owner->comm, comm,
			sizeof(page_owner->comm));
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_ext = page_ext_next(page_ext);
	}
}

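/*
 * Record free information in the page_ext of every base page. A zero
 * @handle leaves the allocated bit and free_handle untouched; only
 * __reset_page_owner() passes a real free handle.
 */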
static inline void __update_page_owner_free_handle(struct page_ext *page_ext,
						   depot_stack_handle_t handle,
						   unsigned short order,
						   pid_t pid, pid_t tgid,
						   u64 free_ts_nsec)
{
	int i;
	struct page_owner *page_owner;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		/* Only __reset_page_owner() wants to clear the bit */
		if (handle) {
			__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
			page_owner->free_handle = handle;
		}
		page_owner->free_ts_nsec = free_ts_nsec;
		page_owner->free_pid = current->pid;
		page_owner->free_tgid = current->tgid;
		page_ext = page_ext_next(page_ext);
	}
}

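/*
 * Called when pages are freed: record the freeing stack trace and drop
 * the per-stack accounting taken in __set_page_owner().
 */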
void __reset_page_owner(struct page *page, unsigned short order)
{
	struct page_ext *page_ext;
	depot_stack_handle_t handle;
	depot_stack_handle_t alloc_handle;
	struct page_owner *page_owner;
	u64 free_ts_nsec = local_clock();

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	alloc_handle = page_owner->handle;

	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
	__update_page_owner_free_handle(page_ext, handle, order, current->pid,
					current->tgid, free_ts_nsec);
	page_ext_put(page_ext);

	if (alloc_handle != early_handle)
		/*
		 * early_handle is being set as a handle for all those
		 * early allocated pages. See init_pages_in_zone().
		 * Since their refcount is not being incremented because
		 * the machinery is not ready yet, we cannot decrement
		 * their refcount either.
		 */
		dec_stack_record_count(alloc_handle, 1 << order);
}

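/*
 * Called when pages are allocated: record the allocation stack, owning
 * task and timestamp, and account the base pages to that stack.
 */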
noinline void __set_page_owner(struct page *page, unsigned short order,
			       gfp_t gfp_mask)
{
	struct page_ext *page_ext;
	u64 ts_nsec = local_clock();
	depot_stack_handle_t handle;

	handle = save_stack(gfp_mask);

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;
	__update_page_owner_handle(page_ext, handle, order, gfp_mask, -1,
				   ts_nsec, current->pid, current->tgid,
				   current->comm);
	page_ext_put(page_ext);
	inc_stack_record_count(handle, gfp_mask, 1 << order);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
	page_ext_put(page_ext);
}

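/*
 * A high-order page has been split: update the recorded order of every
 * base page to the new, smaller order.
 */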
void __split_page_owner(struct page *page, int old_order, int new_order)
{
	int i;
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < (1 << old_order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = new_order;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

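/*
 * On folio migration, copy the allocation ownership of @old to @newfolio
 * and point the old folio at the migration stack, so the per-stack page
 * accounting stays balanced when both folios are eventually freed.
 */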
void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
	int i;
	struct page_ext *old_ext;
	struct page_ext *new_ext;
	struct page_owner *old_page_owner;
	struct page_owner *new_page_owner;
	depot_stack_handle_t migrate_handle;

	old_ext = page_ext_get(&old->page);
	if (unlikely(!old_ext))
		return;

	new_ext = page_ext_get(&newfolio->page);
	if (unlikely(!new_ext)) {
		page_ext_put(old_ext);
		return;
	}

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	migrate_handle = new_page_owner->handle;
	__update_page_owner_handle(new_ext, old_page_owner->handle,
				   old_page_owner->order, old_page_owner->gfp_mask,
				   old_page_owner->last_migrate_reason,
				   old_page_owner->ts_nsec, old_page_owner->pid,
				   old_page_owner->tgid, old_page_owner->comm);
	/*
	 * Do not proactively clear PAGE_EXT_OWNER{_ALLOCATED} bits as the folio
	 * will be freed after migration. Keep them until then as they may be
	 * useful.
	 */
	__update_page_owner_free_handle(new_ext, 0, old_page_owner->order,
					old_page_owner->free_pid,
					old_page_owner->free_tgid,
					old_page_owner->free_ts_nsec);
	/*
	 * We linked the original stack to the new folio, so we need to do
	 * the same for the new one and the old folio; otherwise there will
	 * be an imbalance when subtracting those pages from the stack.
	 */
	for (i = 0; i < (1 << new_page_owner->order); i++) {
		old_page_owner->handle = migrate_handle;
		old_ext = page_ext_next(old_ext);
		old_page_owner = get_page_owner(old_ext);
	}

	page_ext_put(new_ext);
	page_ext_put(old_ext);
}

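/*
 * Walk @zone pageblock by pageblock and count pageblocks holding at
 * least one page whose allocation migratetype differs from the
 * pageblock's migratetype, for /proc/pagetypeinfo.
 */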
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn, block_end_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = buddy_order_unsafe(page);
				if (freepage_order <= MAX_PAGE_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				goto ext_put_continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfp_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				page_ext_put(page_ext);
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
			page_ext_put(page_ext);
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

/*
 * Look up memcg information and print it out
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
					 struct page *page)
{
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
	struct mem_cgroup *memcg;
	bool online;
	char name[80];

	rcu_read_lock();
	memcg_data = READ_ONCE(page->memcg_data);
	if (!memcg_data)
		goto out_unlock;

	if (memcg_data & MEMCG_DATA_OBJEXTS)
		ret += scnprintf(kbuf + ret, count - ret,
				"Slab cache page\n");

	memcg = page_memcg_check(page);
	if (!memcg)
		goto out_unlock;

	online = (memcg->css.flags & CSS_ONLINE);
	cgroup_name(memcg->css.cgroup, name, sizeof(name));
	ret += scnprintf(kbuf + ret, count - ret,
			"Charged %sto %smemcg %s\n",
			PageMemcgKmem(page) ? "(via objcg) " : "",
			online ? "" : "offline ",
			name);
out_unlock:
	rcu_read_unlock();
#endif /* CONFIG_MEMCG */

	return ret;
}

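/*
 * Format one page_owner record into a kernel buffer and copy it to
 * userspace. Returns the number of bytes written, -ENOMEM when the record
 * does not fit into @count bytes, or -EFAULT if the copy to @buf fails.
 */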
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = scnprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask, page_owner->pid,
			page_owner->tgid, page_owner->comm,
			page_owner->ts_nsec);

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfp_migratetype(page_owner->gfp_mask);
	ret += scnprintf(kbuf + ret, count - ret,
			"PFN 0x%lx type %s Block %lu type %s Flags %pGp\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			&page->flags);

	ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += scnprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	}

	ret = print_page_owner_memcg(kbuf, count, ret, page);

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

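/*
 * Dump page_owner information for @page to the kernel log; called from
 * dump_page() when debugging page state.
 */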
void __dump_page_owner(const struct page *page)
{
	struct page_ext *page_ext = page_ext_get((void *)page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfp_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		page_ext_put(page_ext);
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
		 page_owner->pid, page_owner->tgid, page_owner->comm,
		 page_owner->ts_nsec, page_owner->free_ts_nsec);

	handle = READ_ONCE(page_owner->handle);
	if (!handle)
		pr_alert("page_owner allocation stack trace missing\n");
	else
		stack_depot_print(handle);

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		pr_alert("page last free pid %d tgid %d stack trace:\n",
			 page_owner->free_pid, page_owner->free_tgid);
		stack_depot_print(handle);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	page_ext_put(page_ext);
}

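/*
 * debugfs read: scan physical memory starting from the PFN encoded in
 * *ppos, report the first currently allocated page that has an owner
 * record, and advance *ppos past it so successive reads walk all memory.
 */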
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	if (*ppos == 0)
		pfn = min_low_pfn;
	else
		pfn = *ppos;
	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * This temporary page_owner is required so
		 * that we can avoid context switches while holding
		 * the RCU lock and copying the page owner information to
		 * userspace through copy_to_user() or GFP_KERNEL allocations.
		 */
		struct page_owner page_owner_tmp;

		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			if (freepage_order <= MAX_PAGE_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = page_ext_get(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			goto ext_put_continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			goto ext_put_continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			goto ext_put_continue;

		/*
		 * Access to page_ext->handle isn't synchronous, so be
		 * careful when accessing it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			goto ext_put_continue;

		/* Record the next PFN to read in the file offset */
		*ppos = pfn + 1;

		page_owner_tmp = *page_owner;
		page_ext_put(page_ext);
		return print_page_owner(buf, count, pfn, page,
				&page_owner_tmp, handle);
ext_put_continue:
		page_ext_put(page_ext);
	}

	return 0;
}

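/*
 * llseek for the page_owner file: the file offset is interpreted as a
 * PFN, so only SEEK_SET and SEEK_CUR are supported.
 */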
static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
{
	switch (orig) {
	case SEEK_SET:
		file->f_pos = offset;
		break;
	case SEEK_CUR:
		file->f_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	return file->f_pos;
}

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page = pfn_to_page(pfn);
			struct page_ext *page_ext;

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = buddy_order_unsafe(page);

				if (order > 0 && order <= MAX_PAGE_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				goto ext_put_continue;

			/* Found early allocated page */
			__update_page_owner_handle(page_ext, early_handle, 0, 0,
						   -1, local_clock(), current->pid,
						   current->tgid, current->comm);
			count++;
ext_put_continue:
			page_ext_put(page_ext);
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
	.llseek = lseek_page_owner,
};

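/*
 * seq_file iterator over stack_list for the page_owner_stacks debugfs
 * file. The list only ever grows at the head and nodes are never removed,
 * so it can be traversed without stack_list_lock once the head has been
 * read with acquire semantics.
 */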
static void *stack_start(struct seq_file *m, loff_t *ppos)
{
	struct stack *stack;

	if (*ppos == -1UL)
		return NULL;

	if (!*ppos) {
		/*
		 * This pairs with smp_store_release() from function
		 * add_stack_record_to_list(), so we get a consistent
		 * value of stack_list.
		 */
		stack = smp_load_acquire(&stack_list);
		m->private = stack;
	} else {
		stack = m->private;
	}

	return stack;
}

static void *stack_next(struct seq_file *m, void *v, loff_t *ppos)
{
	struct stack *stack = v;

	stack = stack->next;
	*ppos = stack ? *ppos + 1 : -1UL;
	m->private = stack;

	return stack;
}

static unsigned long page_owner_pages_threshold;

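/*
 * Print one stack plus the number of base pages currently accounted to
 * it, skipping stacks whose count is below the user-set count_threshold.
 */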
static int stack_print(struct seq_file *m, void *v)
{
	int i, nr_base_pages;
	struct stack *stack = v;
	unsigned long *entries;
	unsigned long nr_entries;
	struct stack_record *stack_record = stack->stack_record;

	if (!stack->stack_record)
		return 0;

	nr_entries = stack_record->size;
	entries = stack_record->entries;
	nr_base_pages = refcount_read(&stack_record->count) - 1;

	if (nr_base_pages < 1 || nr_base_pages < page_owner_pages_threshold)
		return 0;

	for (i = 0; i < nr_entries; i++)
		seq_printf(m, " %pS\n", (void *)entries[i]);
	seq_printf(m, "nr_base_pages: %d\n\n", nr_base_pages);

	return 0;
}

static void stack_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations page_owner_stack_op = {
	.start = stack_start,
	.next = stack_next,
	.stop = stack_stop,
	.show = stack_print
};

static int page_owner_stack_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &page_owner_stack_op, 0);
}

static const struct file_operations page_owner_stack_operations = {
	.open = page_owner_stack_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int page_owner_threshold_get(void *data, u64 *val)
{
	*val = READ_ONCE(page_owner_pages_threshold);
	return 0;
}

static int page_owner_threshold_set(void *data, u64 val)
{
	WRITE_ONCE(page_owner_pages_threshold, val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(proc_page_owner_threshold, &page_owner_threshold_get,
			&page_owner_threshold_set, "%llu");

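/*
 * Create the debugfs interface: /sys/kernel/debug/page_owner plus the
 * page_owner_stacks directory with its show_stacks and count_threshold
 * files.
 */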
static int __init pageowner_init(void)
{
	struct dentry *dir;

	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);
	dir = debugfs_create_dir("page_owner_stacks", NULL);
	debugfs_create_file("show_stacks", 0400, dir, NULL,
			    &page_owner_stack_operations);
	debugfs_create_file("count_threshold", 0600, dir, NULL,
			    &proc_page_owner_threshold);

	return 0;
}
late_initcall(pageowner_init)