Lines matching "4 kB page" in fs/proc/task_mmu.c
(each entry is one matched source line, prefixed with its file line number and suffixed with the enclosing function)

1 // SPDX-License-Identifier: GPL-2.0
34 seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
56 hiwater_vm = total_vm = mm->total_vm; in task_mem()
57 if (hiwater_vm < mm->hiwater_vm) in task_mem()
58 hiwater_vm = mm->hiwater_vm; in task_mem()
60 if (hiwater_rss < mm->hiwater_rss) in task_mem()
61 hiwater_rss = mm->hiwater_rss; in task_mem()
64 text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK); in task_mem()
65 text = min(text, mm->exec_vm << PAGE_SHIFT); in task_mem()
66 lib = (mm->exec_vm << PAGE_SHIFT) - text; in task_mem()
70 SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm); in task_mem()
71 SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm); in task_mem()
72 SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm)); in task_mem()
73 SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss); in task_mem()
74 SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss); in task_mem()
75 SEQ_PUT_DEC(" kB\nRssAnon:\t", anon); in task_mem()
76 SEQ_PUT_DEC(" kB\nRssFile:\t", file); in task_mem()
77 SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem); in task_mem()
78 SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm); in task_mem()
79 SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm); in task_mem()
81 " kB\nVmExe:\t", text >> 10, 8); in task_mem()
83 " kB\nVmLib:\t", lib >> 10, 8); in task_mem()
85 " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8); in task_mem()
86 SEQ_PUT_DEC(" kB\nVmSwap:\t", swap); in task_mem()
88 SEQ_PUT_DEC(" kB\nPurgSum:\t", nr_purg_sum); in task_mem()
89 SEQ_PUT_DEC(" kB\nPurgPin:\t", nr_purg_pin); in task_mem()
91 seq_puts(m, " kB\n"); in task_mem()
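
The task_mem() lines above print page counts in kilobytes via the SEQ_PUT_DEC() macro (source line 34): shifting left by PAGE_SHIFT - 10 multiplies by the page size and divides by 1024 in one step. A minimal userspace sketch of the same arithmetic, with an illustrative counter value (nothing below is kernel API):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        int page_shift = 0;

        /* recover PAGE_SHIFT from the runtime page size */
        while ((1L << page_shift) < page_size)
            page_shift++;

        unsigned long long pages = 12345;  /* hypothetical page count */
        unsigned long long kb = pages << (page_shift - 10);

        printf("%llu pages = %llu kB (check: %llu kB)\n",
               pages, kb, pages * (unsigned long long)page_size / 1024);
        return 0;
    }
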
98 return PAGE_SIZE * mm->total_vm; in task_vsize()
107 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) in task_statm()
109 *data = mm->data_vm + mm->stack_vm; in task_statm()
111 return mm->total_vm; in task_statm()
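
task_statm() backs /proc/pid/statm: all seven fields are page counts, and the data field is data_vm + stack_vm as the line above shows. A hedged sketch reading those counters back from userspace:

    #include <stdio.h>

    int main(void)
    {
        unsigned long size, resident, shared, text, lib, data, dt;
        FILE *f = fopen("/proc/self/statm", "r");

        if (!f || fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
                         &size, &resident, &shared, &text,
                         &lib, &data, &dt) != 7)
            return 1;
        fclose(f);
        printf("total_vm=%lu pages rss=%lu pages data+stack=%lu pages\n",
               size, resident, data);
        return 0;
    }
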
120 struct task_struct *task = priv->task; in hold_task_mempolicy()
123 priv->task_mempolicy = get_task_policy(task); in hold_task_mempolicy()
124 mpol_get(priv->task_mempolicy); in hold_task_mempolicy()
129 mpol_put(priv->task_mempolicy); in release_task_mempolicy()
143 struct vm_area_struct *vma = vma_next(&priv->iter); in proc_get_vma()
146 *ppos = vma->vm_start; in proc_get_vma()
148 *ppos = -2UL; in proc_get_vma()
149 vma = get_gate_vma(priv->mm); in proc_get_vma()
157 struct proc_maps_private *priv = m->private; in m_start()
162 if (last_addr == -1UL) in m_start()
165 priv->task = get_proc_task(priv->inode); in m_start()
166 if (!priv->task) in m_start()
167 return ERR_PTR(-ESRCH); in m_start()
169 mm = priv->mm; in m_start()
171 put_task_struct(priv->task); in m_start()
172 priv->task = NULL; in m_start()
178 put_task_struct(priv->task); in m_start()
179 priv->task = NULL; in m_start()
180 return ERR_PTR(-EINTR); in m_start()
183 vma_iter_init(&priv->iter, mm, last_addr); in m_start()
185 if (last_addr == -2UL) in m_start()
193 if (*ppos == -2UL) { in m_next()
194 *ppos = -1UL; in m_next()
197 return proc_get_vma(m->private, ppos); in m_next()
202 struct proc_maps_private *priv = m->private; in m_stop()
203 struct mm_struct *mm = priv->mm; in m_stop()
205 if (!priv->task) in m_stop()
211 put_task_struct(priv->task); in m_stop()
212 priv->task = NULL; in m_stop()
221 return -ENOMEM; in proc_maps_open()
223 priv->inode = inode; in proc_maps_open()
224 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); in proc_maps_open()
225 if (IS_ERR(priv->mm)) { in proc_maps_open()
226 int err = PTR_ERR(priv->mm); in proc_maps_open()
237 struct seq_file *seq = file->private_data; in proc_map_release()
238 struct proc_maps_private *priv = seq->private; in proc_map_release()
240 if (priv->mm) in proc_map_release()
241 mmdrop(priv->mm); in proc_map_release()
258 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); in show_vma_header_prefix()
260 seq_put_hex_ll(m, "-", end, 8); in show_vma_header_prefix()
262 seq_putc(m, flags & VM_READ ? 'r' : '-'); in show_vma_header_prefix()
263 seq_putc(m, flags & VM_WRITE ? 'w' : '-'); in show_vma_header_prefix()
264 seq_putc(m, flags & VM_EXEC ? 'x' : '-'); in show_vma_header_prefix()
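
show_vma_header_prefix() renders the familiar four-character permission column of /proc/pid/maps; the fourth character (not among the matched lines) is 's' for shared mappings and 'p' for private ones, keyed off VM_MAYSHARE in the upstream source. A sketch of the same formatting, with illustrative flag values chosen to mirror VM_READ/VM_WRITE/VM_EXEC:

    #include <stdio.h>

    #define MY_VM_READ   0x1  /* illustrative stand-in for VM_READ */
    #define MY_VM_WRITE  0x2  /* stand-in for VM_WRITE */
    #define MY_VM_EXEC   0x4  /* stand-in for VM_EXEC */
    #define MY_VM_SHARED 0x8  /* stand-in for VM_MAYSHARE */

    static void format_perms(unsigned long flags, char out[5])
    {
        out[0] = (flags & MY_VM_READ)   ? 'r' : '-';
        out[1] = (flags & MY_VM_WRITE)  ? 'w' : '-';
        out[2] = (flags & MY_VM_EXEC)   ? 'x' : '-';
        out[3] = (flags & MY_VM_SHARED) ? 's' : 'p';
        out[4] = '\0';
    }

    int main(void)
    {
        char perms[5];

        format_perms(MY_VM_READ | MY_VM_EXEC, perms);
        printf("%s\n", perms);  /* prints "r-xp" */
        return 0;
    }
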
277 struct mm_struct *mm = vma->vm_mm; in show_map_vma()
278 struct file *file = vma->vm_file; in show_map_vma()
279 vm_flags_t flags = vma->vm_flags; in show_map_vma()
287 struct inode *inode = file_inode(vma->vm_file); in show_map_vma()
288 dev = inode->i_sb->s_dev; in show_map_vma()
289 ino = inode->i_ino; in show_map_vma()
290 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; in show_map_vma()
293 start = vma->vm_start; in show_map_vma()
294 end = vma->vm_end; in show_map_vma()
311 seq_printf(m, "[anon_shmem:%s]", anon_name->name); in show_map_vma()
317 if (vma->vm_ops && vma->vm_ops->name) { in show_map_vma()
318 name = vma->vm_ops->name(vma); in show_map_vma()
342 seq_printf(m, "[anon:%s]", anon_name->name); in show_map_vma()
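
show_map_vma() emits one record per VMA in the form "start-end perms offset dev inode path", with bracketed pseudo-names ([anon:...], [anon_shmem:...]) for anonymous mappings. A hedged userspace sketch parsing that format back out of /proc/self/maps:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        FILE *f = fopen("/proc/self/maps", "r");
        char line[4096];

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f)) {
            unsigned long start, end, offset, inode;
            unsigned int major, minor;
            char perms[8];
            int pos = 0;

            if (sscanf(line, "%lx-%lx %7s %lx %x:%x %lu %n",
                       &start, &end, perms, &offset,
                       &major, &minor, &inode, &pos) < 7)
                continue;
            line[strcspn(line, "\n")] = '\0';
            /* anything after the inode column is the mapping name */
            printf("%#lx-%#lx %s %s\n", start, end, perms,
                   pos < (int)strlen(line) ? line + pos : "[anonymous]");
        }
        fclose(f);
        return 0;
    }
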
383 * page is divided by the number of processes sharing it. So if a
388 * fixed-point pss counter to minimize division errors. So (pss >>
391 * A shift of 12 before division means (assuming 4K page size):
392 * - 1M 3-user-pages add up to 8KB errors;
393 * - supports mapcount up to 2^24, or 16M;
394 * - supports PSS up to 2^52 bytes, or 4PB.
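
The comment above defines the PSS fixed-point scheme: each page contributes PAGE_SIZE scaled up by PSS_SHIFT and divided by its mapcount, and the accumulated total is shifted back down only when reported. A self-contained sketch of that arithmetic with hypothetical mapcounts:

    #include <stdio.h>

    #define PSS_SHIFT 12       /* the shift the comment assumes */
    #define PAGE_SIZE 4096ULL  /* assuming a 4K page */

    int main(void)
    {
        unsigned long long pss = 0;
        int mapcounts[] = { 1, 2, 3, 3, 3 };  /* hypothetical pages */

        for (int i = 0; i < 5; i++)
            pss += (PAGE_SIZE << PSS_SHIFT) / mapcounts[i];

        /* shift back to bytes only at reporting time, as __show_smap() does */
        printf("PSS = %llu bytes\n", pss >> PSS_SHIFT);
        return 0;
    }
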
425 struct page *page, unsigned long size, unsigned long pss, in smaps_page_accumulate()
428 mss->pss += pss; in smaps_page_accumulate()
430 if (PageAnon(page)) in smaps_page_accumulate()
431 mss->pss_anon += pss; in smaps_page_accumulate()
432 else if (PageSwapBacked(page)) in smaps_page_accumulate()
433 mss->pss_shmem += pss; in smaps_page_accumulate()
435 mss->pss_file += pss; in smaps_page_accumulate()
438 mss->pss_locked += pss; in smaps_page_accumulate()
440 if (dirty || PageDirty(page)) { in smaps_page_accumulate()
441 mss->pss_dirty += pss; in smaps_page_accumulate()
443 mss->private_dirty += size; in smaps_page_accumulate()
445 mss->shared_dirty += size; in smaps_page_accumulate()
448 mss->private_clean += size; in smaps_page_accumulate()
450 mss->shared_clean += size; in smaps_page_accumulate()
454 static void smaps_account(struct mem_size_stats *mss, struct page *page, in smaps_account()
458 int i, nr = compound ? compound_nr(page) : 1; in smaps_account()
463 * of the compound page. in smaps_account()
465 if (PageAnon(page)) { in smaps_account()
466 mss->anonymous += size; in smaps_account()
467 if (!PageSwapBacked(page) && !dirty && !PageDirty(page)) in smaps_account()
468 mss->lazyfree += size; in smaps_account()
471 if (PageKsm(page)) in smaps_account()
472 mss->ksm += size; in smaps_account()
474 mss->resident += size; in smaps_account()
476 if (young || page_is_young(page) || PageReferenced(page)) in smaps_account()
477 mss->referenced += size; in smaps_account()
481 * differ page-by-page. in smaps_account()
483 * page_count(page) == 1 guarantees the page is mapped exactly once. in smaps_account()
484 * If any subpage of the compound page mapped with PTE it would elevate in smaps_account()
488 * Without holding the page lock this snapshot can be slightly wrong as in smaps_account()
490 * call page_mapcount() even with PTL held if the page is not mapped, in smaps_account()
494 if ((page_count(page) == 1) || migration) { in smaps_account()
495 smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty, in smaps_account()
499 for (i = 0; i < nr; i++, page++) { in smaps_account()
500 int mapcount = page_mapcount(page); in smaps_account()
504 smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked, in smaps_account()
513 struct mem_size_stats *mss = walk->private; in smaps_pte_hole()
514 struct vm_area_struct *vma = walk->vma; in smaps_pte_hole()
516 mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping, in smaps_pte_hole()
529 if (walk->ops->pte_hole) { in smaps_pte_hole_lookup()
539 struct mem_size_stats *mss = walk->private; in smaps_pte_entry()
540 struct vm_area_struct *vma = walk->vma; in smaps_pte_entry()
541 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pte_entry()
542 struct page *page = NULL; in smaps_pte_entry()
547 page = vm_normal_page(vma, addr, ptent); in smaps_pte_entry()
556 mss->swap += PAGE_SIZE; in smaps_pte_entry()
562 mss->swap_pss += pss_delta; in smaps_pte_entry()
564 mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT; in smaps_pte_entry()
569 page = pfn_swap_entry_to_page(swpent); in smaps_pte_entry()
576 if (!page) in smaps_pte_entry()
579 smaps_account(mss, page, false, young, dirty, locked, migration); in smaps_pte_entry()
586 struct mem_size_stats *mss = walk->private; in smaps_pmd_entry()
587 struct vm_area_struct *vma = walk->vma; in smaps_pmd_entry()
588 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pmd_entry()
589 struct page *page = NULL; in smaps_pmd_entry()
593 page = vm_normal_page_pmd(vma, addr, *pmd); in smaps_pmd_entry()
599 page = pfn_swap_entry_to_page(entry); in smaps_pmd_entry()
602 if (IS_ERR_OR_NULL(page)) in smaps_pmd_entry()
604 if (PageAnon(page)) in smaps_pmd_entry()
605 mss->anonymous_thp += HPAGE_PMD_SIZE; in smaps_pmd_entry()
606 else if (PageSwapBacked(page)) in smaps_pmd_entry()
607 mss->shmem_thp += HPAGE_PMD_SIZE; in smaps_pmd_entry()
608 else if (is_zone_device_page(page)) in smaps_pmd_entry()
611 mss->file_thp += HPAGE_PMD_SIZE; in smaps_pmd_entry()
613 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), in smaps_pmd_entry()
626 struct vm_area_struct *vma = walk->vma; in smaps_pte_range()
637 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
639 walk->action = ACTION_AGAIN; in smaps_pte_range()
644 pte_unmap_unlock(pte - 1, ptl); in smaps_pte_range()
659 [0 ... (BITS_PER_LONG-1)] = "??", in show_smap_vma_flags()
724 if (vma->vm_flags & (1UL << i)) { in show_smap_vma_flags()
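
show_smap_vma_flags() prints the VmFlags: line of smaps as two-letter mnemonics indexed by flag bit, with "??" for unnamed bits. A hedged sketch scanning those mnemonics from userspace, here looking for "lo" (VM_LOCKED); it assumes the upstream format where each mnemonic is followed by a space:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        FILE *f = fopen("/proc/self/smaps", "r");
        char line[1024];

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            if (!strncmp(line, "VmFlags:", 8) && strstr(line, " lo "))
                printf("locked mapping: %s", line);
        fclose(f);
        return 0;
    }
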
738 struct mem_size_stats *mss = walk->private; in smaps_hugetlb_range()
739 struct vm_area_struct *vma = walk->vma; in smaps_hugetlb_range()
740 struct page *page = NULL; in smaps_hugetlb_range()
744 page = vm_normal_page(vma, addr, ptent); in smaps_hugetlb_range()
749 page = pfn_swap_entry_to_page(swpent); in smaps_hugetlb_range()
751 if (page) { in smaps_hugetlb_range()
752 if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte)) in smaps_hugetlb_range()
753 mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
755 mss->private_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
788 if (start >= vma->vm_end) in smap_gather_stats()
791 if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) { in smap_gather_stats()
798 * object, so we have to distinguish them during the page walk. in smap_gather_stats()
804 if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) || in smap_gather_stats()
805 !(vma->vm_flags & VM_WRITE))) { in smap_gather_stats()
806 mss->swap += shmem_swapped; in smap_gather_stats()
816 walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss); in smap_gather_stats()
826 SEQ_PUT_DEC("Rss: ", mss->resident); in __show_smap()
827 SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT); in __show_smap()
828 SEQ_PUT_DEC(" kB\nPss_Dirty: ", mss->pss_dirty >> PSS_SHIFT); in __show_smap()
834 SEQ_PUT_DEC(" kB\nPss_Anon: ", in __show_smap()
835 mss->pss_anon >> PSS_SHIFT); in __show_smap()
836 SEQ_PUT_DEC(" kB\nPss_File: ", in __show_smap()
837 mss->pss_file >> PSS_SHIFT); in __show_smap()
838 SEQ_PUT_DEC(" kB\nPss_Shmem: ", in __show_smap()
839 mss->pss_shmem >> PSS_SHIFT); in __show_smap()
841 SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean); in __show_smap()
842 SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty); in __show_smap()
843 SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean); in __show_smap()
844 SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty); in __show_smap()
845 SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced); in __show_smap()
846 SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous); in __show_smap()
847 SEQ_PUT_DEC(" kB\nKSM: ", mss->ksm); in __show_smap()
848 SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree); in __show_smap()
849 SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp); in __show_smap()
850 SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp); in __show_smap()
851 SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp); in __show_smap()
852 SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb); in __show_smap()
853 seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ", in __show_smap()
854 mss->private_hugetlb >> 10, 7); in __show_smap()
855 SEQ_PUT_DEC(" kB\nSwap: ", mss->swap); in __show_smap()
856 SEQ_PUT_DEC(" kB\nSwapPss: ", in __show_smap()
857 mss->swap_pss >> PSS_SHIFT); in __show_smap()
858 SEQ_PUT_DEC(" kB\nLocked: ", in __show_smap()
859 mss->pss_locked >> PSS_SHIFT); in __show_smap()
860 seq_puts(m, " kB\n"); in __show_smap()
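
__show_smap() prints the per-VMA block of /proc/pid/smaps, shifting every PSS-family counter back down by PSS_SHIFT on output. Summing the per-VMA Pss: fields approximates what smaps_rollup reports in a single record; a hedged sketch:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/self/smaps", "r");
        char line[512];
        unsigned long long kb, total = 0;

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            if (sscanf(line, "Pss: %llu kB", &kb) == 1)
                total += kb;
        fclose(f);
        printf("total PSS: %llu kB\n", total);
        return 0;
    }
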
874 SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start); in show_smap()
875 SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma)); in show_smap()
876 SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma)); in show_smap()
877 seq_puts(m, " kB\n"); in show_smap()
882 hugepage_vma_check(vma, vma->vm_flags, true, false, true)); in show_smap()
893 struct proc_maps_private *priv = m->private; in show_smaps_rollup()
895 struct mm_struct *mm = priv->mm; in show_smaps_rollup()
901 priv->task = get_proc_task(priv->inode); in show_smaps_rollup()
902 if (!priv->task) in show_smaps_rollup()
903 return -ESRCH; in show_smaps_rollup()
906 ret = -ESRCH; in show_smaps_rollup()
922 vma_start = vma->vm_start; in show_smaps_rollup()
925 last_vma_end = vma->vm_end; in show_smaps_rollup()
944 * +------+------+-----------+ in show_smaps_rollup()
945 * | VMA1 | VMA2 | VMA3      | in show_smaps_rollup()
946 * +------+------+-----------+ in show_smaps_rollup()
947 * |      |      |           | in show_smaps_rollup()
948 * 4k     8k     16k         400k in show_smaps_rollup()
970 * 4) (last_vma_end - 1) is the middle of a vma (VMA'): in show_smaps_rollup()
982 if (vma->vm_start >= last_vma_end) { in show_smaps_rollup()
984 last_vma_end = vma->vm_end; in show_smaps_rollup()
988 /* Case 4 above */ in show_smaps_rollup()
989 if (vma->vm_end > last_vma_end) { in show_smaps_rollup()
991 last_vma_end = vma->vm_end; in show_smaps_rollup()
1009 put_task_struct(priv->task); in show_smaps_rollup()
1010 priv->task = NULL; in show_smaps_rollup()
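
show_smaps_rollup() walks every VMA (restarting carefully after the mmap lock is dropped, per the four cases diagrammed above) and emits one aggregated record. A sketch reading the two headline counters back from userspace:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/self/smaps_rollup", "r");
        char line[512];
        unsigned long long rss = 0, pss = 0;

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f)) {
            sscanf(line, "Rss: %llu kB", &rss);
            sscanf(line, "Pss: %llu kB", &pss);
        }
        fclose(f);
        printf("Rss=%llu kB Pss=%llu kB\n", rss, pss);
        return 0;
    }
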
1035 return -ENOMEM; in smaps_rollup_open()
1041 priv->inode = inode; in smaps_rollup_open()
1042 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); in smaps_rollup_open()
1043 if (IS_ERR(priv->mm)) { in smaps_rollup_open()
1044 ret = PTR_ERR(priv->mm); in smaps_rollup_open()
1059 struct seq_file *seq = file->private_data; in smaps_rollup_release()
1060 struct proc_maps_private *priv = seq->private; in smaps_rollup_release()
1062 if (priv->mm) in smaps_rollup_release()
1063 mmdrop(priv->mm); in smaps_rollup_release()
1100 struct page *page; in pte_is_pinned()
1104 if (!is_cow_mapping(vma->vm_flags)) in pte_is_pinned()
1106 if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))) in pte_is_pinned()
1108 page = vm_normal_page(vma, addr, pte); in pte_is_pinned()
1109 if (!page) in pte_is_pinned()
1111 return page_maybe_dma_pinned(page); in pte_is_pinned()
1118 * The soft-dirty tracker uses #PF-s to catch writes in clear_soft_dirty()
1119 * to pages, so write-protect the pte as well. See the in clear_soft_dirty()
1120 * Documentation/admin-guide/mm/soft-dirty.rst for full description in clear_soft_dirty()
1121 * of how soft-dirty works. in clear_soft_dirty()
1136 set_pte_at(vma->vm_mm, addr, pte, ptent); in clear_soft_dirty()
1163 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1166 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1179 struct clear_refs_private *cp = walk->private; in clear_refs_pte_range()
1180 struct vm_area_struct *vma = walk->vma; in clear_refs_pte_range()
1183 struct page *page; in clear_refs_pte_range()
1187 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { in clear_refs_pte_range()
1195 page = pmd_page(*pmd); in clear_refs_pte_range()
1199 test_and_clear_page_young(page); in clear_refs_pte_range()
1200 ClearPageReferenced(page); in clear_refs_pte_range()
1206 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
1208 walk->action = ACTION_AGAIN; in clear_refs_pte_range()
1214 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { in clear_refs_pte_range()
1222 page = vm_normal_page(vma, addr, ptent); in clear_refs_pte_range()
1223 if (!page) in clear_refs_pte_range()
1228 test_and_clear_page_young(page); in clear_refs_pte_range()
1229 ClearPageReferenced(page); in clear_refs_pte_range()
1231 pte_unmap_unlock(pte - 1, ptl); in clear_refs_pte_range()
1239 struct clear_refs_private *cp = walk->private; in clear_refs_test_walk()
1240 struct vm_area_struct *vma = walk->vma; in clear_refs_test_walk()
1242 if (vma->vm_flags & VM_PFNMAP) in clear_refs_test_walk()
1249 * Writing 4 to /proc/pid/clear_refs affects all pages. in clear_refs_test_walk()
1251 if (cp->type == CLEAR_REFS_ANON && vma->vm_file) in clear_refs_test_walk()
1253 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) in clear_refs_test_walk()
1276 if (count > sizeof(buffer) - 1) in clear_refs_write()
1277 count = sizeof(buffer) - 1; in clear_refs_write()
1279 return -EFAULT; in clear_refs_write()
1285 return -EINVAL; in clear_refs_write()
1289 return -ESRCH; in clear_refs_write()
1299 count = -EINTR; in clear_refs_write()
1313 if (!(vma->vm_flags & VM_SOFTDIRTY)) in clear_refs_write()
1321 0, mm, 0, -1UL); in clear_refs_write()
1324 walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp); in clear_refs_write()
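
clear_refs_write() parses a single digit: as the comment above notes, writing 4 selects soft-dirty clearing, write-protecting PTEs so that subsequent writes can be caught via pagemap bit 55. A hedged sketch of the userspace side:

    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>

    int main(void)
    {
        int fd = open("/proc/self/clear_refs", O_WRONLY);

        if (fd < 0 || write(fd, "4", 1) != 1) {
            perror("clear_refs");
            return 1;
        }
        close(fd);
        puts("soft-dirty cleared; pagemap bit 55 now flags new writes");
        return 0;
    }
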
1360 #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1377 pm->buffer[pm->pos++] = *pme; in add_to_pagemap()
1378 if (pm->pos >= pm->len) in add_to_pagemap()
1386 struct pagemapread *pm = walk->private; in pagemap_pte_hole()
1391 struct vm_area_struct *vma = find_vma(walk->mm, addr); in pagemap_pte_hole()
1393 /* End of address space hole, which we mark as non-present. */ in pagemap_pte_hole()
1397 hole_end = min(end, vma->vm_start); in pagemap_pte_hole()
1411 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pte_hole()
1413 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { in pagemap_pte_hole()
1427 struct page *page = NULL; in pte_to_pagemap_entry()
1430 if (pm->show_pfn) in pte_to_pagemap_entry()
1433 page = vm_normal_page(vma, addr, pte); in pte_to_pagemap_entry()
1445 if (pm->show_pfn) { in pte_to_pagemap_entry()
1460 page = pfn_swap_entry_to_page(entry); in pte_to_pagemap_entry()
1465 if (page && !PageAnon(page)) in pte_to_pagemap_entry()
1467 if (page && (flags & PM_PRESENT) && page_mapcount(page) == 1) in pte_to_pagemap_entry()
1469 if (vma->vm_flags & VM_SOFTDIRTY) in pte_to_pagemap_entry()
1478 struct vm_area_struct *vma = walk->vma; in pagemap_pmd_range()
1479 struct pagemapread *pm = walk->private; in pagemap_pmd_range()
1490 struct page *page = NULL; in pagemap_pmd_range()
1492 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pmd_range()
1496 page = pmd_page(pmd); in pagemap_pmd_range()
1503 if (pm->show_pfn) in pagemap_pmd_range()
1511 if (pm->show_pfn) { in pagemap_pmd_range()
1525 page = pfn_swap_entry_to_page(entry); in pagemap_pmd_range()
1529 if (page && !PageAnon(page)) in pagemap_pmd_range()
1536 if (page && (flags & PM_PRESENT) && in pagemap_pmd_range()
1537 page_mapcount(page + idx) == 1) in pagemap_pmd_range()
1544 if (pm->show_pfn) { in pagemap_pmd_range()
1558 * goes beyond vma->vm_end. in pagemap_pmd_range()
1560 orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); in pagemap_pmd_range()
1562 walk->action = ACTION_AGAIN; in pagemap_pmd_range()
1586 struct pagemapread *pm = walk->private; in pagemap_hugetlb_range()
1587 struct vm_area_struct *vma = walk->vma; in pagemap_hugetlb_range()
1592 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_hugetlb_range()
1597 struct page *page = pte_page(pte); in pagemap_hugetlb_range()
1599 if (!PageAnon(page)) in pagemap_hugetlb_range()
1602 if (page_mapcount(page) == 1) in pagemap_hugetlb_range()
1609 if (pm->show_pfn) in pagemap_hugetlb_range()
1622 if (pm->show_pfn && (flags & PM_PRESENT)) in pagemap_hugetlb_range()
1642 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1644 * For each page in the address space, this file contains one 64-bit entry
1647 * Bits 0-54 page frame number (PFN) if present
1648 * Bits 0-4 swap type if swapped
1649 * Bits 5-54 swap offset if swapped
1650 * Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1651 * Bit 56 page exclusively mapped
1652 * Bit 57 pte is uffd-wp write-protected
1653 * Bits 58-60 zero
1654 * Bit 61 page is file-page or shared-anon
1655 * Bit 62 page swapped
1656 * Bit 63 page present
1658 * If the page is not present but in swap, then the PFN contains an
1659 * encoding of the swap file number and the page's offset into the
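
The bit layout documented above can be decoded directly: each virtual page has one 64-bit entry at offset (vaddr / page_size) * 8. A hedged sketch reading the entry for one page of this process (the PFN field reads as zero without CAP_SYS_ADMIN):

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <fcntl.h>

    static char probe;  /* static data: a page we can touch and inspect */

    int main(void)
    {
        long psize = sysconf(_SC_PAGESIZE);
        int fd = open("/proc/self/pagemap", O_RDONLY);
        uint64_t entry;
        off_t off;

        probe = 1;  /* fault the page in so it is present */
        off = (off_t)((uintptr_t)&probe / psize) * sizeof(entry);
        if (fd < 0 || pread(fd, &entry, sizeof(entry), off) != sizeof(entry))
            return 1;
        close(fd);
        printf("present=%llu swapped=%llu soft-dirty=%llu exclusive=%llu pfn=%#llx\n",
               (unsigned long long)(entry >> 63 & 1),
               (unsigned long long)(entry >> 62 & 1),
               (unsigned long long)(entry >> 55 & 1),
               (unsigned long long)(entry >> 56 & 1),
               (unsigned long long)(entry & ((1ULL << 55) - 1)));
        return 0;
    }
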
1671 struct mm_struct *mm = file->private_data; in pagemap_read()
1682 ret = -EINVAL; in pagemap_read()
1696 ret = -ENOMEM; in pagemap_read()
1702 end_vaddr = mm->task_size; in pagemap_read()
1716 if (end >= start_vaddr && end < mm->task_size) in pagemap_read()
1721 if (start_vaddr > mm->task_size) in pagemap_read()
1743 ret = -EFAULT; in pagemap_read()
1748 count -= len; in pagemap_read()
1769 file->private_data = mm; in pagemap_open()
1775 struct mm_struct *mm = file->private_data; in pagemap_release()
1808 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, in gather_stats()
1811 int count = page_mapcount(page); in gather_stats()
1813 md->pages += nr_pages; in gather_stats()
1814 if (pte_dirty || PageDirty(page)) in gather_stats()
1815 md->dirty += nr_pages; in gather_stats()
1817 if (PageSwapCache(page)) in gather_stats()
1818 md->swapcache += nr_pages; in gather_stats()
1820 if (PageActive(page) || PageUnevictable(page)) in gather_stats()
1821 md->active += nr_pages; in gather_stats()
1823 if (PageWriteback(page)) in gather_stats()
1824 md->writeback += nr_pages; in gather_stats()
1826 if (PageAnon(page)) in gather_stats()
1827 md->anon += nr_pages; in gather_stats()
1829 if (count > md->mapcount_max) in gather_stats()
1830 md->mapcount_max = count; in gather_stats()
1832 md->node[page_to_nid(page)] += nr_pages; in gather_stats()
1835 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, in can_gather_numa_stats()
1838 struct page *page; in can_gather_numa_stats()
1844 page = vm_normal_page(vma, addr, pte); in can_gather_numa_stats()
1845 if (!page || is_zone_device_page(page)) in can_gather_numa_stats()
1848 if (PageReserved(page)) in can_gather_numa_stats()
1851 nid = page_to_nid(page); in can_gather_numa_stats()
1855 return page; in can_gather_numa_stats()
1859 static struct page *can_gather_numa_stats_pmd(pmd_t pmd, in can_gather_numa_stats_pmd()
1863 struct page *page; in can_gather_numa_stats_pmd()
1869 page = vm_normal_page_pmd(vma, addr, pmd); in can_gather_numa_stats_pmd()
1870 if (!page) in can_gather_numa_stats_pmd()
1873 if (PageReserved(page)) in can_gather_numa_stats_pmd()
1876 nid = page_to_nid(page); in can_gather_numa_stats_pmd()
1880 return page; in can_gather_numa_stats_pmd()
1887 struct numa_maps *md = walk->private; in gather_pte_stats()
1888 struct vm_area_struct *vma = walk->vma; in gather_pte_stats()
1896 struct page *page; in gather_pte_stats()
1898 page = can_gather_numa_stats_pmd(*pmd, vma, addr); in gather_pte_stats()
1899 if (page) in gather_pte_stats()
1900 gather_stats(page, md, pmd_dirty(*pmd), in gather_pte_stats()
1906 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in gather_pte_stats()
1908 walk->action = ACTION_AGAIN; in gather_pte_stats()
1913 struct page *page = can_gather_numa_stats(ptent, vma, addr); in gather_pte_stats()
1914 if (!page) in gather_pte_stats()
1916 gather_stats(page, md, pte_dirty(ptent), 1); in gather_pte_stats()
1929 struct page *page; in gather_hugetlb_stats()
1934 page = pte_page(huge_pte); in gather_hugetlb_stats()
1936 md = walk->private; in gather_hugetlb_stats()
1937 gather_stats(page, md, pte_dirty(huge_pte), 1); in gather_hugetlb_stats()
1960 struct numa_maps_private *numa_priv = m->private; in show_numa_map()
1961 struct proc_maps_private *proc_priv = &numa_priv->proc_maps; in show_numa_map()
1963 struct numa_maps *md = &numa_priv->md; in show_numa_map()
1964 struct file *file = vma->vm_file; in show_numa_map()
1965 struct mm_struct *mm = vma->vm_mm; in show_numa_map()
1976 pol = __get_vma_policy(vma, vma->vm_start); in show_numa_map()
1981 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy); in show_numa_map()
1984 seq_printf(m, "%08lx %s", vma->vm_start, buffer); in show_numa_map()
2001 if (!md->pages) in show_numa_map()
2004 if (md->anon) in show_numa_map()
2005 seq_printf(m, " anon=%lu", md->anon); in show_numa_map()
2007 if (md->dirty) in show_numa_map()
2008 seq_printf(m, " dirty=%lu", md->dirty); in show_numa_map()
2010 if (md->pages != md->anon && md->pages != md->dirty) in show_numa_map()
2011 seq_printf(m, " mapped=%lu", md->pages); in show_numa_map()
2013 if (md->mapcount_max > 1) in show_numa_map()
2014 seq_printf(m, " mapmax=%lu", md->mapcount_max); in show_numa_map()
2016 if (md->swapcache) in show_numa_map()
2017 seq_printf(m, " swapcache=%lu", md->swapcache); in show_numa_map()
2019 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) in show_numa_map()
2020 seq_printf(m, " active=%lu", md->active); in show_numa_map()
2022 if (md->writeback) in show_numa_map()
2023 seq_printf(m, " writeback=%lu", md->writeback); in show_numa_map()
2026 if (md->node[nid]) in show_numa_map()
2027 seq_printf(m, " N%d=%lu", nid, md->node[nid]); in show_numa_map()
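
show_numa_map() ends each record with per-node page counts as N<nid>=<pages> tokens. A hedged sketch totalling them across all mappings (the file exists only on CONFIG_NUMA kernels; the 64-node cap is an assumption of this example):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        FILE *f = fopen("/proc/self/numa_maps", "r");
        char line[4096];
        unsigned long node_pages[64] = { 0 };  /* assume <= 64 nodes */

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f)) {
            for (char *tok = strtok(line, " \n"); tok;
                 tok = strtok(NULL, " \n")) {
                int nid;
                unsigned long pages;

                if (sscanf(tok, "N%d=%lu", &nid, &pages) == 2 &&
                    nid >= 0 && nid < 64)
                    node_pages[nid] += pages;
            }
        }
        fclose(f);
        for (int nid = 0; nid < 64; nid++)
            if (node_pages[nid])
                printf("node %d: %lu pages\n", nid, node_pages[nid]);
        return 0;
    }
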