Lines Matching +full:4 +full:kb +full:- +full:page (fs/proc/task_mmu.c)
1 // SPDX-License-Identifier: GPL-2.0
30 seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
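The macro body shown above converts a page count to kilobytes with a shift rather than a multiply: a page is 1 << PAGE_SHIFT bytes and a kilobyte is 1 << 10 bytes, so pages << (PAGE_SHIFT - 10) equals pages * PAGE_SIZE / 1024. A standalone sketch of the same arithmetic, assuming the common 4 KB page size (PAGE_SHIFT == 12); pages_to_kb and MY_PAGE_SHIFT are illustrative names, not kernel helpers:

    #include <stdio.h>

    #define MY_PAGE_SHIFT 12                /* assumption: 4 KB pages */

    /* same conversion as (val) << (PAGE_SHIFT - 10) above */
    static unsigned long pages_to_kb(unsigned long pages)
    {
            return pages << (MY_PAGE_SHIFT - 10);
    }

    int main(void)
    {
            printf("%lu kB\n", pages_to_kb(300));   /* prints "1200 kB" */
            return 0;
    }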
47 hiwater_vm = total_vm = mm->total_vm; in task_mem()
48 if (hiwater_vm < mm->hiwater_vm) in task_mem()
49 hiwater_vm = mm->hiwater_vm; in task_mem()
51 if (hiwater_rss < mm->hiwater_rss) in task_mem()
52 hiwater_rss = mm->hiwater_rss; in task_mem()
55 text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK); in task_mem()
56 text = min(text, mm->exec_vm << PAGE_SHIFT); in task_mem()
57 lib = (mm->exec_vm << PAGE_SHIFT) - text; in task_mem()
61 SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm); in task_mem()
62 SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm); in task_mem()
63 SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm)); in task_mem()
64 SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss); in task_mem()
65 SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss); in task_mem()
66 SEQ_PUT_DEC(" kB\nRssAnon:\t", anon); in task_mem()
67 SEQ_PUT_DEC(" kB\nRssFile:\t", file); in task_mem()
68 SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem); in task_mem()
69 SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm); in task_mem()
70 SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm); in task_mem()
72 " kB\nVmExe:\t", text >> 10, 8); in task_mem()
74 " kB\nVmLib:\t", lib >> 10, 8); in task_mem()
76 " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8); in task_mem()
77 SEQ_PUT_DEC(" kB\nVmSwap:\t", swap); in task_mem()
78 seq_puts(m, " kB\n"); in task_mem()
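The SEQ_PUT_DEC calls above build the memory block of /proc/[pid]/status, one "name: value kB" line per counter. An illustrative excerpt with invented values (note VmRSS == RssAnon + RssFile + RssShmem, matching how task_mem() computes total_rss):

    VmPeak:     8264 kB
    VmSize:     8264 kB
    VmLck:         0 kB
    VmPin:         0 kB
    VmHWM:      1348 kB
    VmRSS:      1348 kB
    RssAnon:     388 kB
    RssFile:     960 kB
    RssShmem:      0 kB
    VmData:      484 kB
    VmStk:       132 kB
    VmExe:       512 kB
    VmLib:      1568 kB
    VmPTE:        44 kB
    VmSwap:        0 kB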
85 return PAGE_SIZE * mm->total_vm; in task_vsize()
94 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) in task_statm()
96 *data = mm->data_vm + mm->stack_vm; in task_statm()
98 return mm->total_vm; in task_statm()
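task_statm() backs /proc/[pid]/statm, which reports the same counters in pages rather than kB; per proc(5) the field order is size, resident, shared, text, lib, data, dt, with lib and dt reading as 0 on modern kernels. A small userspace reader, as a sketch:

    #include <stdio.h>

    int main(void)
    {
            unsigned long size, resident, shared, text, lib, data, dt;
            FILE *f = fopen("/proc/self/statm", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
                       &size, &resident, &shared, &text, &lib, &data, &dt) != 7)
                    return 1;
            fclose(f);
            /* size is mm->total_vm; data is mm->data_vm + mm->stack_vm */
            printf("size=%lu pages, data+stack=%lu pages\n", size, data);
            return 0;
    }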
107 struct task_struct *task = priv->task; in hold_task_mempolicy()
110 priv->task_mempolicy = get_task_policy(task); in hold_task_mempolicy()
111 mpol_get(priv->task_mempolicy); in hold_task_mempolicy()
116 mpol_put(priv->task_mempolicy); in release_task_mempolicy()
130 struct vm_area_struct *vma = vma_next(&priv->iter); in proc_get_vma()
133 *ppos = vma->vm_start; in proc_get_vma()
135 *ppos = -2UL; in proc_get_vma()
136 vma = get_gate_vma(priv->mm); in proc_get_vma()
144 struct proc_maps_private *priv = m->private; in m_start()
149 if (last_addr == -1UL) in m_start()
152 priv->task = get_proc_task(priv->inode); in m_start()
153 if (!priv->task) in m_start()
154 return ERR_PTR(-ESRCH); in m_start()
156 mm = priv->mm; in m_start()
158 put_task_struct(priv->task); in m_start()
159 priv->task = NULL; in m_start()
165 put_task_struct(priv->task); in m_start()
166 priv->task = NULL; in m_start()
167 return ERR_PTR(-EINTR); in m_start()
170 vma_iter_init(&priv->iter, mm, last_addr); in m_start()
172 if (last_addr == -2UL) in m_start()
180 if (*ppos == -2UL) { in m_next()
181 *ppos = -1UL; in m_next()
184 return proc_get_vma(m->private, ppos); in m_next()
189 struct proc_maps_private *priv = m->private; in m_stop()
190 struct mm_struct *mm = priv->mm; in m_stop()
192 if (!priv->task) in m_stop()
198 put_task_struct(priv->task); in m_stop()
199 priv->task = NULL; in m_stop()
208 return -ENOMEM; in proc_maps_open()
210 priv->inode = inode; in proc_maps_open()
211 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); in proc_maps_open()
212 if (IS_ERR(priv->mm)) { in proc_maps_open()
213 int err = PTR_ERR(priv->mm); in proc_maps_open()
224 struct seq_file *seq = file->private_data; in proc_map_release()
225 struct proc_maps_private *priv = seq->private; in proc_map_release()
227 if (priv->mm) in proc_map_release()
228 mmdrop(priv->mm); in proc_map_release()
245 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); in show_vma_header_prefix()
247 seq_put_hex_ll(m, "-", end, 8); in show_vma_header_prefix()
249 seq_putc(m, flags & VM_READ ? 'r' : '-'); in show_vma_header_prefix()
250 seq_putc(m, flags & VM_WRITE ? 'w' : '-'); in show_vma_header_prefix()
251 seq_putc(m, flags & VM_EXEC ? 'x' : '-'); in show_vma_header_prefix()
264 struct mm_struct *mm = vma->vm_mm; in show_map_vma()
265 struct file *file = vma->vm_file; in show_map_vma()
266 vm_flags_t flags = vma->vm_flags; in show_map_vma()
274 struct inode *inode = file_inode(vma->vm_file); in show_map_vma()
275 dev = inode->i_sb->s_dev; in show_map_vma()
276 ino = inode->i_ino; in show_map_vma()
277 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; in show_map_vma()
280 start = vma->vm_start; in show_map_vma()
281 end = vma->vm_end; in show_map_vma()
297 seq_printf(m, "[anon_shmem:%s]", anon_name->name); in show_map_vma()
303 if (vma->vm_ops && vma->vm_ops->name) { in show_map_vma()
304 name = vma->vm_ops->name(vma); in show_map_vma()
328 seq_printf(m, "[anon:%s]", anon_name->name); in show_map_vma()
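Together with show_vma_header_prefix(), this produces the familiar /proc/[pid]/maps record: start-end, permissions, file offset, device, inode, then the path or a bracketed pseudo-name. Two illustrative lines (all values invented):

    7f3c8a200000-7f3c8a3b5000 r-xp 00028000 fd:01 1835043    /usr/lib/x86_64-linux-gnu/libc.so.6
    7ffd2c4e1000-7ffd2c502000 rw-p 00000000 00:00 0          [stack]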
369 * page is divided by the number of processes sharing it. So if a
374 * fixed-point pss counter to minimize division errors. So (pss >>
377 * A shift of 12 before division means (assuming 4K page size):
378 * - 1M 3-user-pages add up to 8KB errors;
379 * - supports mapcount up to 2^24, or 16M;
380 * - supports PSS up to 2^52 bytes, or 4PB.
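Worked through with PSS_SHIFT == 12 and a 4 KB page shared by three processes: each sharer is charged (4096 << 12) / 3 = 5592405 fixed-point units, and the reported value 5592405 >> 12 = 1365 bytes is within a byte of the exact 4096 / 3. A self-contained sketch of this accounting (pss_charge is an illustrative name; the kernel performs the equivalent per-mapper division inside smaps_account() below):

    #include <stdio.h>

    #define PSS_SHIFT 12

    /* one sharer's PSS charge for a 4 KB page mapped `mapcount` times */
    static unsigned long long pss_charge(unsigned long mapcount)
    {
            unsigned long long pss = 4096ULL << PSS_SHIFT;

            if (mapcount >= 2)
                    pss /= mapcount;
            return pss;
    }

    int main(void)
    {
            /* report as the kernel does: accumulate, then shift back down */
            printf("%llu bytes\n", pss_charge(3) >> PSS_SHIFT);     /* 1365 */
            return 0;
    }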
411 struct page *page, unsigned long size, unsigned long pss, in smaps_page_accumulate() argument
414 mss->pss += pss; in smaps_page_accumulate()
416 if (PageAnon(page)) in smaps_page_accumulate()
417 mss->pss_anon += pss; in smaps_page_accumulate()
418 else if (PageSwapBacked(page)) in smaps_page_accumulate()
419 mss->pss_shmem += pss; in smaps_page_accumulate()
421 mss->pss_file += pss; in smaps_page_accumulate()
424 mss->pss_locked += pss; in smaps_page_accumulate()
426 if (dirty || PageDirty(page)) { in smaps_page_accumulate()
427 mss->pss_dirty += pss; in smaps_page_accumulate()
429 mss->private_dirty += size; in smaps_page_accumulate()
431 mss->shared_dirty += size; in smaps_page_accumulate()
434 mss->private_clean += size; in smaps_page_accumulate()
436 mss->shared_clean += size; in smaps_page_accumulate()
440 static void smaps_account(struct mem_size_stats *mss, struct page *page, in smaps_account() argument
444 int i, nr = compound ? compound_nr(page) : 1; in smaps_account()
449 * of the compound page. in smaps_account()
451 if (PageAnon(page)) { in smaps_account()
452 mss->anonymous += size; in smaps_account()
453 if (!PageSwapBacked(page) && !dirty && !PageDirty(page)) in smaps_account()
454 mss->lazyfree += size; in smaps_account()
457 if (PageKsm(page)) in smaps_account()
458 mss->ksm += size; in smaps_account()
460 mss->resident += size; in smaps_account()
462 if (young || page_is_young(page) || PageReferenced(page)) in smaps_account()
463 mss->referenced += size; in smaps_account()
467 * differ page-by-page. in smaps_account()
469 * page_count(page) == 1 guarantees the page is mapped exactly once. in smaps_account()
470 * If any subpage of the compound page is mapped with a PTE, it would elevate in smaps_account()
474 * Without holding the page lock this snapshot can be slightly wrong as in smaps_account()
476 * call page_mapcount() even with PTL held if the page is not mapped, in smaps_account()
480 if ((page_count(page) == 1) || migration) { in smaps_account()
481 smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty, in smaps_account()
485 for (i = 0; i < nr; i++, page++) { in smaps_account()
486 int mapcount = page_mapcount(page); in smaps_account()
490 smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked, in smaps_account()
499 struct mem_size_stats *mss = walk->private; in smaps_pte_hole()
500 struct vm_area_struct *vma = walk->vma; in smaps_pte_hole()
502 mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping, in smaps_pte_hole()
515 if (walk->ops->pte_hole) { in smaps_pte_hole_lookup()
525 struct mem_size_stats *mss = walk->private; in smaps_pte_entry()
526 struct vm_area_struct *vma = walk->vma; in smaps_pte_entry()
527 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pte_entry()
528 struct page *page = NULL; in smaps_pte_entry() local
533 page = vm_normal_page(vma, addr, ptent); in smaps_pte_entry()
542 mss->swap += PAGE_SIZE; in smaps_pte_entry()
548 mss->swap_pss += pss_delta; in smaps_pte_entry()
550 mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT; in smaps_pte_entry()
555 page = pfn_swap_entry_to_page(swpent); in smaps_pte_entry()
562 if (!page) in smaps_pte_entry()
565 smaps_account(mss, page, false, young, dirty, locked, migration); in smaps_pte_entry()
572 struct mem_size_stats *mss = walk->private; in smaps_pmd_entry()
573 struct vm_area_struct *vma = walk->vma; in smaps_pmd_entry()
574 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pmd_entry()
575 struct page *page = NULL; in smaps_pmd_entry() local
579 page = vm_normal_page_pmd(vma, addr, *pmd); in smaps_pmd_entry()
585 page = pfn_swap_entry_to_page(entry); in smaps_pmd_entry()
588 if (IS_ERR_OR_NULL(page)) in smaps_pmd_entry()
590 if (PageAnon(page)) in smaps_pmd_entry()
591 mss->anonymous_thp += HPAGE_PMD_SIZE; in smaps_pmd_entry()
592 else if (PageSwapBacked(page)) in smaps_pmd_entry()
593 mss->shmem_thp += HPAGE_PMD_SIZE; in smaps_pmd_entry()
594 else if (is_zone_device_page(page)) in smaps_pmd_entry()
597 mss->file_thp += HPAGE_PMD_SIZE; in smaps_pmd_entry()
599 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), in smaps_pmd_entry()
612 struct vm_area_struct *vma = walk->vma; in smaps_pte_range()
623 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
625 walk->action = ACTION_AGAIN; in smaps_pte_range()
630 pte_unmap_unlock(pte - 1, ptl); in smaps_pte_range()
645 [0 ... (BITS_PER_LONG-1)] = "??", in show_smap_vma_flags()
710 if (vma->vm_flags & (1UL << i)) { in show_smap_vma_flags()
724 struct mem_size_stats *mss = walk->private; in smaps_hugetlb_range()
725 struct vm_area_struct *vma = walk->vma; in smaps_hugetlb_range()
726 struct page *page = NULL; in smaps_hugetlb_range() local
730 page = vm_normal_page(vma, addr, ptent); in smaps_hugetlb_range()
735 page = pfn_swap_entry_to_page(swpent); in smaps_hugetlb_range()
737 if (page) { in smaps_hugetlb_range()
738 if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte)) in smaps_hugetlb_range()
739 mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
741 mss->private_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
774 if (start >= vma->vm_end) in smap_gather_stats()
777 if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) { in smap_gather_stats()
784 * object, so we have to distinguish them during the page walk. in smap_gather_stats()
790 if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) || in smap_gather_stats()
791 !(vma->vm_flags & VM_WRITE))) { in smap_gather_stats()
792 mss->swap += shmem_swapped; in smap_gather_stats()
802 walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss); in smap_gather_stats()
812 SEQ_PUT_DEC("Rss: ", mss->resident); in __show_smap()
813 SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT); in __show_smap()
814 SEQ_PUT_DEC(" kB\nPss_Dirty: ", mss->pss_dirty >> PSS_SHIFT); in __show_smap()
820 SEQ_PUT_DEC(" kB\nPss_Anon: ", in __show_smap()
821 mss->pss_anon >> PSS_SHIFT); in __show_smap()
822 SEQ_PUT_DEC(" kB\nPss_File: ", in __show_smap()
823 mss->pss_file >> PSS_SHIFT); in __show_smap()
824 SEQ_PUT_DEC(" kB\nPss_Shmem: ", in __show_smap()
825 mss->pss_shmem >> PSS_SHIFT); in __show_smap()
827 SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean); in __show_smap()
828 SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty); in __show_smap()
829 SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean); in __show_smap()
830 SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty); in __show_smap()
831 SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced); in __show_smap()
832 SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous); in __show_smap()
833 SEQ_PUT_DEC(" kB\nKSM: ", mss->ksm); in __show_smap()
834 SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree); in __show_smap()
835 SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp); in __show_smap()
836 SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp); in __show_smap()
837 SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp); in __show_smap()
838 SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb); in __show_smap()
839 seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ", in __show_smap()
840 mss->private_hugetlb >> 10, 7); in __show_smap()
841 SEQ_PUT_DEC(" kB\nSwap: ", mss->swap); in __show_smap()
842 SEQ_PUT_DEC(" kB\nSwapPss: ", in __show_smap()
843 mss->swap_pss >> PSS_SHIFT); in __show_smap()
844 SEQ_PUT_DEC(" kB\nLocked: ", in __show_smap()
845 mss->pss_locked >> PSS_SHIFT); in __show_smap()
846 seq_puts(m, " kB\n"); in __show_smap()
860 SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start); in show_smap()
861 SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma)); in show_smap()
862 SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma)); in show_smap()
863 seq_puts(m, " kB\n"); in show_smap()
868 hugepage_vma_check(vma, vma->vm_flags, true, false, true)); in show_smap()
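show_smap() prints the per-VMA header fields and __show_smap() appends the counters, so each mapping gets one stanza in /proc/[pid]/smaps. An illustrative stanza for a small private anonymous mapping (numbers invented; Pss_Anon/Pss_File/Pss_Shmem are absent here because, per the rollup_mode guard above, they appear only in smaps_rollup):

    Size:                132 kB
    KernelPageSize:        4 kB
    MMUPageSize:           4 kB
    Rss:                  12 kB
    Pss:                  12 kB
    Pss_Dirty:            12 kB
    Shared_Clean:          0 kB
    Shared_Dirty:          0 kB
    Private_Clean:         0 kB
    Private_Dirty:        12 kB
    Referenced:           12 kB
    Anonymous:            12 kB
    KSM:                   0 kB
    LazyFree:              0 kB
    AnonHugePages:         0 kB
    ShmemPmdMapped:        0 kB
    FilePmdMapped:         0 kB
    Shared_Hugetlb:        0 kB
    Private_Hugetlb:       0 kB
    Swap:                  0 kB
    SwapPss:               0 kB
    Locked:                0 kB
    THPeligible:           0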
879 struct proc_maps_private *priv = m->private; in show_smaps_rollup()
881 struct mm_struct *mm = priv->mm; in show_smaps_rollup()
887 priv->task = get_proc_task(priv->inode); in show_smaps_rollup()
888 if (!priv->task) in show_smaps_rollup()
889 return -ESRCH; in show_smaps_rollup()
892 ret = -ESRCH; in show_smaps_rollup()
908 vma_start = vma->vm_start; in show_smaps_rollup()
911 last_vma_end = vma->vm_end; in show_smaps_rollup()
930 *                     +------+------+-----------+ in show_smaps_rollup()
931 *                     | VMA1 | VMA2 | VMA3      | in show_smaps_rollup()
932 *                     +------+------+-----------+ in show_smaps_rollup()
933 *                     |      |      |           | in show_smaps_rollup()
934 *                    4k     8k     16k         400k in show_smaps_rollup()
956 * 4) (last_vma_end - 1) is the middle of a vma (VMA'): in show_smaps_rollup()
968 if (vma->vm_start >= last_vma_end) in show_smaps_rollup()
971 /* Case 4 above */ in show_smaps_rollup()
972 if (vma->vm_end > last_vma_end) in show_smaps_rollup()
990 put_task_struct(priv->task); in show_smaps_rollup()
991 priv->task = NULL; in show_smaps_rollup()
1016 return -ENOMEM; in smaps_rollup_open()
1022 priv->inode = inode; in smaps_rollup_open()
1023 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); in smaps_rollup_open()
1024 if (IS_ERR(priv->mm)) { in smaps_rollup_open()
1025 ret = PTR_ERR(priv->mm); in smaps_rollup_open()
1040 struct seq_file *seq = file->private_data; in smaps_rollup_release()
1041 struct proc_maps_private *priv = seq->private; in smaps_rollup_release()
1043 if (priv->mm) in smaps_rollup_release()
1044 mmdrop(priv->mm); in smaps_rollup_release()
1081 struct page *page; in pte_is_pinned() local
1085 if (!is_cow_mapping(vma->vm_flags)) in pte_is_pinned()
1087 if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))) in pte_is_pinned()
1089 page = vm_normal_page(vma, addr, pte); in pte_is_pinned()
1090 if (!page) in pte_is_pinned()
1092 return page_maybe_dma_pinned(page); in pte_is_pinned()
1099 * The soft-dirty tracker uses #PF-s to catch writes in clear_soft_dirty()
1100 * to pages, so write-protect the pte as well. See the in clear_soft_dirty()
1101 * Documentation/admin-guide/mm/soft-dirty.rst for full description in clear_soft_dirty()
1102 * of how soft-dirty works. in clear_soft_dirty()
1117 set_pte_at(vma->vm_mm, addr, pte, ptent); in clear_soft_dirty()
1144 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1147 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1160 struct clear_refs_private *cp = walk->private; in clear_refs_pte_range()
1161 struct vm_area_struct *vma = walk->vma; in clear_refs_pte_range()
1164 struct page *page; in clear_refs_pte_range() local
1168 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { in clear_refs_pte_range()
1176 page = pmd_page(*pmd); in clear_refs_pte_range()
1180 test_and_clear_page_young(page); in clear_refs_pte_range()
1181 ClearPageReferenced(page); in clear_refs_pte_range()
1187 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
1189 walk->action = ACTION_AGAIN; in clear_refs_pte_range()
1195 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { in clear_refs_pte_range()
1203 page = vm_normal_page(vma, addr, ptent); in clear_refs_pte_range()
1204 if (!page) in clear_refs_pte_range()
1209 test_and_clear_page_young(page); in clear_refs_pte_range()
1210 ClearPageReferenced(page); in clear_refs_pte_range()
1212 pte_unmap_unlock(pte - 1, ptl); in clear_refs_pte_range()
1220 struct clear_refs_private *cp = walk->private; in clear_refs_test_walk()
1221 struct vm_area_struct *vma = walk->vma; in clear_refs_test_walk()
1223 if (vma->vm_flags & VM_PFNMAP) in clear_refs_test_walk()
1230 * Writing 4 to /proc/pid/clear_refs affects all pages. in clear_refs_test_walk()
1232 if (cp->type == CLEAR_REFS_ANON && vma->vm_file) in clear_refs_test_walk()
1234 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) in clear_refs_test_walk()
1257 if (count > sizeof(buffer) - 1) in clear_refs_write()
1258 count = sizeof(buffer) - 1; in clear_refs_write()
1260 return -EFAULT; in clear_refs_write()
1266 return -EINVAL; in clear_refs_write()
1270 return -ESRCH; in clear_refs_write()
1280 count = -EINTR; in clear_refs_write()
1294 if (!(vma->vm_flags & VM_SOFTDIRTY)) in clear_refs_write()
1302 0, mm, 0, -1UL); in clear_refs_write()
1305 walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp); in clear_refs_write()
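Per the "Writing 4" comment above, the soft-dirty cycle is driven entirely from userspace: clear the bits through clear_refs, let the task run, then test bit 55 of its pagemap entries (see the pagemap sketch further below). A minimal sketch of the clearing step; clear_soft_dirty_self is an illustrative name:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* write "4" to /proc/self/clear_refs: clear all soft-dirty bits */
    static int clear_soft_dirty_self(void)
    {
            int fd = open("/proc/self/clear_refs", O_WRONLY);
            ssize_t n;

            if (fd < 0)
                    return -1;
            n = write(fd, "4", 1);
            close(fd);
            return n == 1 ? 0 : -1;
    }

    int main(void)
    {
            if (clear_soft_dirty_self())
                    perror("clear_refs");
            return 0;
    }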
1341 #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1359 pm->buffer[pm->pos++] = *pme; in add_to_pagemap()
1360 if (pm->pos >= pm->len) in add_to_pagemap()
1368 struct pagemapread *pm = walk->private; in pagemap_pte_hole()
1373 struct vm_area_struct *vma = find_vma(walk->mm, addr); in pagemap_pte_hole()
1375 /* End of address space hole, which we mark as non-present. */ in pagemap_pte_hole()
1379 hole_end = min(end, vma->vm_start); in pagemap_pte_hole()
1393 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pte_hole()
1395 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { in pagemap_pte_hole()
1409 struct page *page = NULL; in pte_to_pagemap_entry() local
1413 if (pm->show_pfn) in pte_to_pagemap_entry()
1416 page = vm_normal_page(vma, addr, pte); in pte_to_pagemap_entry()
1428 if (pm->show_pfn) { in pte_to_pagemap_entry()
1444 page = pfn_swap_entry_to_page(entry); in pte_to_pagemap_entry()
1449 if (page && !PageAnon(page)) in pte_to_pagemap_entry()
1451 if (page && !migration && page_mapcount(page) == 1) in pte_to_pagemap_entry()
1453 if (vma->vm_flags & VM_SOFTDIRTY) in pte_to_pagemap_entry()
1462 struct vm_area_struct *vma = walk->vma; in pagemap_pmd_range()
1463 struct pagemapread *pm = walk->private; in pagemap_pmd_range()
1474 struct page *page = NULL; in pagemap_pmd_range() local
1476 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pmd_range()
1480 page = pmd_page(pmd); in pagemap_pmd_range()
1487 if (pm->show_pfn) in pagemap_pmd_range()
1496 if (pm->show_pfn) { in pagemap_pmd_range()
1513 page = pfn_swap_entry_to_page(entry); in pagemap_pmd_range()
1517 if (page && !migration && page_mapcount(page) == 1) in pagemap_pmd_range()
1526 if (pm->show_pfn) { in pagemap_pmd_range()
1540 * goes beyond vma->vm_end. in pagemap_pmd_range()
1542 orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); in pagemap_pmd_range()
1544 walk->action = ACTION_AGAIN; in pagemap_pmd_range()
1568 struct pagemapread *pm = walk->private; in pagemap_hugetlb_range()
1569 struct vm_area_struct *vma = walk->vma; in pagemap_hugetlb_range()
1574 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_hugetlb_range()
1579 struct page *page = pte_page(pte); in pagemap_hugetlb_range() local
1581 if (!PageAnon(page)) in pagemap_hugetlb_range()
1584 if (page_mapcount(page) == 1) in pagemap_hugetlb_range()
1591 if (pm->show_pfn) in pagemap_hugetlb_range()
1604 if (pm->show_pfn && (flags & PM_PRESENT)) in pagemap_hugetlb_range()
1624 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1626 * For each page in the address space, this file contains one 64-bit entry
1629 * Bits 0-54 page frame number (PFN) if present
1630 * Bits 0-4 swap type if swapped
1631 * Bits 5-54 swap offset if swapped
1632 * Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1633 * Bit 56 page exclusively mapped
1634 * Bit 57 pte is uffd-wp write-protected
1635 * Bits 58-60 zero
1636 * Bit 61 page is file-page or shared-anon
1637 * Bit 62 page swapped
1638 * Bit 63 page present
1640 * If the page is not present but in swap, then the PFN contains an
1641 * encoding of the swap file number and the page's offset into the
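The layout above can be unpacked with one pread() per virtual page. A minimal decoder, as a sketch (since Linux 4.0 the PFN field reads as zero unless the caller has CAP_SYS_ADMIN; error handling trimmed):

    #include <fcntl.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int probe = 42;                 /* a page we know is mapped */
            uintptr_t vaddr = (uintptr_t)&probe;
            long psize = sysconf(_SC_PAGESIZE);
            uint64_t ent;
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* one 64-bit entry per virtual page */
            if (pread(fd, &ent, sizeof(ent),
                      (off_t)(vaddr / psize) * sizeof(ent)) != sizeof(ent))
                    return 1;
            close(fd);

            printf("present=%" PRIu64 " swapped=%" PRIu64
                   " soft-dirty=%" PRIu64 " pfn=0x%" PRIx64 "\n",
                   (ent >> 63) & 1,                     /* bit 63 */
                   (ent >> 62) & 1,                     /* bit 62 */
                   (ent >> 55) & 1,                     /* bit 55 */
                   ent & ((UINT64_C(1) << 55) - 1));    /* bits 0-54 */
            return 0;
    }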
1653 struct mm_struct *mm = file->private_data; in pagemap_read()
1664 ret = -EINVAL; in pagemap_read()
1678 ret = -ENOMEM; in pagemap_read()
1684 end_vaddr = mm->task_size; in pagemap_read()
1698 if (end >= start_vaddr && end < mm->task_size) in pagemap_read()
1703 if (start_vaddr > mm->task_size) in pagemap_read()
1725 ret = -EFAULT; in pagemap_read()
1730 count -= len; in pagemap_read()
1751 file->private_data = mm; in pagemap_open()
1757 struct mm_struct *mm = file->private_data; in pagemap_release()
1790 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, in gather_stats() argument
1793 int count = page_mapcount(page); in gather_stats()
1795 md->pages += nr_pages; in gather_stats()
1796 if (pte_dirty || PageDirty(page)) in gather_stats()
1797 md->dirty += nr_pages; in gather_stats()
1799 if (PageSwapCache(page)) in gather_stats()
1800 md->swapcache += nr_pages; in gather_stats()
1802 if (PageActive(page) || PageUnevictable(page)) in gather_stats()
1803 md->active += nr_pages; in gather_stats()
1805 if (PageWriteback(page)) in gather_stats()
1806 md->writeback += nr_pages; in gather_stats()
1808 if (PageAnon(page)) in gather_stats()
1809 md->anon += nr_pages; in gather_stats()
1811 if (count > md->mapcount_max) in gather_stats()
1812 md->mapcount_max = count; in gather_stats()
1814 md->node[page_to_nid(page)] += nr_pages; in gather_stats()
1817 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, in can_gather_numa_stats()
1820 struct page *page; in can_gather_numa_stats() local
1826 page = vm_normal_page(vma, addr, pte); in can_gather_numa_stats()
1827 if (!page || is_zone_device_page(page)) in can_gather_numa_stats()
1830 if (PageReserved(page)) in can_gather_numa_stats()
1833 nid = page_to_nid(page); in can_gather_numa_stats()
1837 return page; in can_gather_numa_stats()
1841 static struct page *can_gather_numa_stats_pmd(pmd_t pmd, in can_gather_numa_stats_pmd()
1845 struct page *page; in can_gather_numa_stats_pmd() local
1851 page = vm_normal_page_pmd(vma, addr, pmd); in can_gather_numa_stats_pmd()
1852 if (!page) in can_gather_numa_stats_pmd()
1855 if (PageReserved(page)) in can_gather_numa_stats_pmd()
1858 nid = page_to_nid(page); in can_gather_numa_stats_pmd()
1862 return page; in can_gather_numa_stats_pmd()
1869 struct numa_maps *md = walk->private; in gather_pte_stats()
1870 struct vm_area_struct *vma = walk->vma; in gather_pte_stats()
1878 struct page *page; in gather_pte_stats() local
1880 page = can_gather_numa_stats_pmd(*pmd, vma, addr); in gather_pte_stats()
1881 if (page) in gather_pte_stats()
1882 gather_stats(page, md, pmd_dirty(*pmd), in gather_pte_stats()
1888 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in gather_pte_stats()
1890 walk->action = ACTION_AGAIN; in gather_pte_stats()
1895 struct page *page = can_gather_numa_stats(ptent, vma, addr); in gather_pte_stats() local
1896 if (!page) in gather_pte_stats()
1898 gather_stats(page, md, pte_dirty(ptent), 1); in gather_pte_stats()
1911 struct page *page; in gather_hugetlb_stats() local
1916 page = pte_page(huge_pte); in gather_hugetlb_stats()
1918 md = walk->private; in gather_hugetlb_stats()
1919 gather_stats(page, md, pte_dirty(huge_pte), 1); in gather_hugetlb_stats()
1942 struct numa_maps_private *numa_priv = m->private; in show_numa_map()
1943 struct proc_maps_private *proc_priv = &numa_priv->proc_maps; in show_numa_map()
1945 struct numa_maps *md = &numa_priv->md; in show_numa_map()
1946 struct file *file = vma->vm_file; in show_numa_map()
1947 struct mm_struct *mm = vma->vm_mm; in show_numa_map()
1958 pol = __get_vma_policy(vma, vma->vm_start); in show_numa_map()
1963 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy); in show_numa_map()
1966 seq_printf(m, "%08lx %s", vma->vm_start, buffer); in show_numa_map()
1983 if (!md->pages) in show_numa_map()
1986 if (md->anon) in show_numa_map()
1987 seq_printf(m, " anon=%lu", md->anon); in show_numa_map()
1989 if (md->dirty) in show_numa_map()
1990 seq_printf(m, " dirty=%lu", md->dirty); in show_numa_map()
1992 if (md->pages != md->anon && md->pages != md->dirty) in show_numa_map()
1993 seq_printf(m, " mapped=%lu", md->pages); in show_numa_map()
1995 if (md->mapcount_max > 1) in show_numa_map()
1996 seq_printf(m, " mapmax=%lu", md->mapcount_max); in show_numa_map()
1998 if (md->swapcache) in show_numa_map()
1999 seq_printf(m, " swapcache=%lu", md->swapcache); in show_numa_map()
2001 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) in show_numa_map()
2002 seq_printf(m, " active=%lu", md->active); in show_numa_map()
2004 if (md->writeback) in show_numa_map()
2005 seq_printf(m, " writeback=%lu", md->writeback); in show_numa_map()
2008 if (md->node[nid]) in show_numa_map()
2009 seq_printf(m, " N%d=%lu", nid, md->node[nid]); in show_numa_map()
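Assembled, show_numa_map() yields one /proc/[pid]/numa_maps line per mapping: the start address and policy, then only the non-zero counters, ending with the per-node page counts. A representative line built from the fields above (values invented; note N0 + N1 equals the total page count, and mapped= is omitted because pages == anon):

    7f0e42b00000 default anon=216 dirty=216 mapmax=3 active=144 N0=200 N1=16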