Lines Matching +full:4 +full:kb +full:- +full:page (fs/proc/task_mmu.c)
1 // SPDX-License-Identifier: GPL-2.0
34 seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
56 hiwater_vm = total_vm = mm->total_vm; in task_mem()
57 if (hiwater_vm < mm->hiwater_vm) in task_mem()
58 hiwater_vm = mm->hiwater_vm; in task_mem()
60 if (hiwater_rss < mm->hiwater_rss) in task_mem()
61 hiwater_rss = mm->hiwater_rss; in task_mem()
64 text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK); in task_mem()
65 text = min(text, mm->exec_vm << PAGE_SHIFT); in task_mem()
66 lib = (mm->exec_vm << PAGE_SHIFT) - text; in task_mem()
70 SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm); in task_mem()
71 SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm); in task_mem()
72 SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm)); in task_mem()
73 SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss); in task_mem()
74 SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss); in task_mem()
75 SEQ_PUT_DEC(" kB\nRssAnon:\t", anon); in task_mem()
76 SEQ_PUT_DEC(" kB\nRssFile:\t", file); in task_mem()
77 SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem); in task_mem()
78 SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm); in task_mem()
79 SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm); in task_mem()
81 " kB\nVmExe:\t", text >> 10, 8); in task_mem()
83 " kB\nVmLib:\t", lib >> 10, 8); in task_mem()
85 " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8); in task_mem()
86 SEQ_PUT_DEC(" kB\nVmSwap:\t", swap); in task_mem()
88 SEQ_PUT_DEC(" kB\nPurgSum:\t", nr_purg_sum); in task_mem()
89 SEQ_PUT_DEC(" kB\nPurgPin:\t", nr_purg_pin); in task_mem()
91 seq_puts(m, " kB\n"); in task_mem()
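
The SEQ_PUT_DEC macro body shown near the top (its "#define SEQ_PUT_DEC(str, val)" opener falls outside the matched lines) converts a page count to kB in place: (val) << (PAGE_SHIFT - 10) multiplies by the page size in bytes and divides by 1024 in a single shift. A minimal userspace sketch of the same conversion, assuming 4 KiB pages (PAGE_SHIFT == 12):

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

	int main(void)
	{
		unsigned long pages = 300;			/* e.g. a total_vm page count */
		unsigned long kb = pages << (PAGE_SHIFT - 10);	/* == pages * 4096 / 1024 */

		printf("%lu pages = %lu kB\n", pages, kb);	/* prints: 300 pages = 1200 kB */
		return 0;
	}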
98 return PAGE_SIZE * mm->total_vm; in task_vsize()
107 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) in task_statm()
109 *data = mm->data_vm + mm->stack_vm; in task_statm()
111 return mm->total_vm; in task_statm()
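
task_statm() fills the fields behind /proc/PID/statm, all counted in pages. A hedged userspace sketch reading them back, assuming the field order documented in proc(5) (size, resident, shared, text, lib, data, dt; lib and dt are always 0 on modern kernels):

	#include <stdio.h>

	int main(void)
	{
		unsigned long size, resident, shared, text, lib, data, dt;
		FILE *f = fopen("/proc/self/statm", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
			   &size, &resident, &shared, &text, &lib, &data, &dt) == 7)
			printf("size=%lu resident=%lu data=%lu (pages)\n",
			       size, resident, data);
		fclose(f);
		return 0;
	}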
120 struct task_struct *task = priv->task; in hold_task_mempolicy()
123 priv->task_mempolicy = get_task_policy(task); in hold_task_mempolicy()
124 mpol_get(priv->task_mempolicy); in hold_task_mempolicy()
129 mpol_put(priv->task_mempolicy); in release_task_mempolicy()
142 struct proc_maps_private *priv = m->private; in m_start()
148 if (last_addr == -1UL) in m_start()
151 priv->task = get_proc_task(priv->inode); in m_start()
152 if (!priv->task) in m_start()
153 return ERR_PTR(-ESRCH); in m_start()
155 mm = priv->mm; in m_start()
157 put_task_struct(priv->task); in m_start()
158 priv->task = NULL; in m_start()
164 put_task_struct(priv->task); in m_start()
165 priv->task = NULL; in m_start()
166 return ERR_PTR(-EINTR); in m_start()
170 priv->tail_vma = get_gate_vma(mm); in m_start()
176 return priv->tail_vma; in m_start()
181 struct proc_maps_private *priv = m->private; in m_next()
184 if (vma == priv->tail_vma) in m_next()
186 else if (vma->vm_next) in m_next()
187 next = vma->vm_next; in m_next()
189 next = priv->tail_vma; in m_next()
191 *ppos = next ? next->vm_start : -1UL; in m_next()
198 struct proc_maps_private *priv = m->private; in m_stop()
199 struct mm_struct *mm = priv->mm; in m_stop()
201 if (!priv->task) in m_stop()
207 put_task_struct(priv->task); in m_stop()
208 priv->task = NULL; in m_stop()
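
m_start(), m_next() and m_stop() implement the kernel's seq_file iterator contract: start() resolves *ppos to the first object (taking any locks and references, possibly returning an ERR_PTR), next() advances and updates *ppos, and stop() releases whatever start() acquired, including on error paths. A minimal sketch of that contract; first_item() and next_item() are hypothetical helpers, not from this file:

	static void *example_start(struct seq_file *m, loff_t *ppos)
	{
		return first_item(*ppos);	/* hypothetical: may return ERR_PTR */
	}

	static void *example_next(struct seq_file *m, void *v, loff_t *ppos)
	{
		void *next = next_item(v);	/* hypothetical advance */

		++*ppos;
		return next;			/* NULL ends the sequence */
	}

	static void example_stop(struct seq_file *m, void *v)
	{
		/* release locks/references taken in example_start() */
	}

	static int example_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "%px\n", v);
		return 0;
	}

	static const struct seq_operations example_ops = {
		.start	= example_start,
		.next	= example_next,
		.stop	= example_stop,
		.show	= example_show,
	};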
217 return -ENOMEM; in proc_maps_open()
219 priv->inode = inode; in proc_maps_open()
220 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); in proc_maps_open()
221 if (IS_ERR(priv->mm)) { in proc_maps_open()
222 int err = PTR_ERR(priv->mm); in proc_maps_open()
233 struct seq_file *seq = file->private_data; in proc_map_release()
234 struct proc_maps_private *priv = seq->private; in proc_map_release()
236 if (priv->mm) in proc_map_release()
237 mmdrop(priv->mm); in proc_map_release()
257 * its "stack". It's not even well-defined for programs written in is_stack()
260 return vma->vm_start <= vma->vm_mm->start_stack && in is_stack()
261 vma->vm_end >= vma->vm_mm->start_stack; in is_stack()
269 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); in show_vma_header_prefix()
271 seq_put_hex_ll(m, "-", end, 8); in show_vma_header_prefix()
273 seq_putc(m, flags & VM_READ ? 'r' : '-'); in show_vma_header_prefix()
274 seq_putc(m, flags & VM_WRITE ? 'w' : '-'); in show_vma_header_prefix()
275 seq_putc(m, flags & VM_EXEC ? 'x' : '-'); in show_vma_header_prefix()
287 struct mm_struct *mm = vma->vm_mm; in show_map_vma()
288 struct file *file = vma->vm_file; in show_map_vma()
289 vm_flags_t flags = vma->vm_flags; in show_map_vma()
297 struct inode *inode = file_inode(vma->vm_file); in show_map_vma()
298 dev = inode->i_sb->s_dev; in show_map_vma()
299 ino = inode->i_ino; in show_map_vma()
300 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; in show_map_vma()
303 start = vma->vm_start; in show_map_vma()
304 end = vma->vm_end; in show_map_vma()
318 if (vma->vm_ops && vma->vm_ops->name) { in show_map_vma()
319 name = vma->vm_ops->name(vma); in show_map_vma()
333 if (vma->vm_start <= mm->brk && in show_map_vma()
334 vma->vm_end >= mm->start_brk) { in show_map_vma()
347 seq_printf(m, "[anon:%s]", anon_name->name); in show_map_vma()
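
show_vma_header_prefix() and show_map_vma() together produce the familiar /proc/PID/maps record: start-end range, rwx permission bits plus 'p'/'s' for private vs. shared (the latter printed outside the matched lines), file offset, device, inode, and a pathname or a bracketed pseudo-name such as [heap] or [anon:...]. An illustrative line, with all values invented for the example:

	7f1c2a000000-7f1c2a021000 r-xp 00000000 08:01 1234567    /usr/lib/libexample.so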
388 * page is divided by the number of processes sharing it. So if a
393 * fixed-point pss counter to minimize division errors. So (pss >>
396 * A shift of 12 before division means (assuming 4K page size):
397 * - 1M 3-user-pages add up to 8KB errors;
398 * - supports mapcount up to 2^24, or 16M;
399 * - supports PSS up to 2^52 bytes, or 4PB.
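
Concretely, the fixed-point scheme this comment describes works like this (worked example assuming 4 KiB pages and PSS_SHIFT == 12): one page shared by three processes contributes

	(4096 << 12) / 3 = 16777216 / 3 = 5592405 fixed-point units per sharer
	5592405 >> 12    = 1365 bytes reported  (~ 4096 / 3)

so the truncation kept per page is a fraction of one unit, far below a byte, and errors only become visible once accumulated over millions of pages.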
429 struct page *page, unsigned long size, unsigned long pss, in smaps_page_accumulate() argument
432 mss->pss += pss; in smaps_page_accumulate()
434 if (PageAnon(page)) in smaps_page_accumulate()
435 mss->pss_anon += pss; in smaps_page_accumulate()
436 else if (PageSwapBacked(page)) in smaps_page_accumulate()
437 mss->pss_shmem += pss; in smaps_page_accumulate()
439 mss->pss_file += pss; in smaps_page_accumulate()
442 mss->pss_locked += pss; in smaps_page_accumulate()
444 if (dirty || PageDirty(page)) { in smaps_page_accumulate()
446 mss->private_dirty += size; in smaps_page_accumulate()
448 mss->shared_dirty += size; in smaps_page_accumulate()
451 mss->private_clean += size; in smaps_page_accumulate()
453 mss->shared_clean += size; in smaps_page_accumulate()
457 static void smaps_account(struct mem_size_stats *mss, struct page *page, in smaps_account() argument
461 int i, nr = compound ? compound_nr(page) : 1; in smaps_account()
466 * of the compound page. in smaps_account()
468 if (PageAnon(page)) { in smaps_account()
469 mss->anonymous += size; in smaps_account()
470 if (!PageSwapBacked(page) && !dirty && !PageDirty(page)) in smaps_account()
471 mss->lazyfree += size; in smaps_account()
474 mss->resident += size; in smaps_account()
476 if (young || page_is_young(page) || PageReferenced(page)) in smaps_account()
477 mss->referenced += size; in smaps_account()
481 * differ page-by-page. in smaps_account()
483 * page_count(page) == 1 guarantees the page is mapped exactly once. in smaps_account()
484 * If any subpage of the compound page mapped with PTE it would elevate in smaps_account()
488 * Without holding the page lock this snapshot can be slightly wrong as in smaps_account()
490 * call page_mapcount() even with PTL held if the page is not mapped, in smaps_account()
494 if ((page_count(page) == 1) || migration) { in smaps_account()
495 smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty, in smaps_account()
499 for (i = 0; i < nr; i++, page++) { in smaps_account()
500 int mapcount = page_mapcount(page); in smaps_account()
504 smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked, in smaps_account()
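
The matched lines elide the actual division on the shared path. A condensed sketch of the accounting decision, reconstructed from the surrounding fragments (not a drop-in replacement for the kernel code): a page mapped exactly once is charged in full, otherwise each 4 KiB subpage is charged PAGE_SIZE/mapcount in fixed point.

	if (page_count(page) == 1 || migration) {
		/* exclusively mapped (or a migration snapshot): full cost */
		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT,
				      dirty, locked, true);
	} else {
		for (i = 0; i < nr; i++, page++) {
			int mapcount = page_mapcount(page);
			unsigned long long pss = PAGE_SIZE << PSS_SHIFT;

			if (mapcount >= 2)
				pss /= mapcount;	/* split the cost */
			smaps_page_accumulate(mss, page, PAGE_SIZE, pss,
					      dirty, locked, mapcount < 2);
		}
	}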
513 struct mem_size_stats *mss = walk->private; in smaps_pte_hole()
515 mss->swap += shmem_partial_swap_usage( in smaps_pte_hole()
516 walk->vma->vm_file->f_mapping, addr, end); in smaps_pte_hole()
527 struct mem_size_stats *mss = walk->private; in smaps_pte_entry()
528 struct vm_area_struct *vma = walk->vma; in smaps_pte_entry()
529 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pte_entry()
530 struct page *page = NULL; in smaps_pte_entry() local
534 page = vm_normal_page(vma, addr, *pte); in smaps_pte_entry()
543 mss->swap += PAGE_SIZE; in smaps_pte_entry()
549 mss->swap_pss += pss_delta; in smaps_pte_entry()
551 mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT; in smaps_pte_entry()
555 page = migration_entry_to_page(swpent); in smaps_pte_entry()
557 page = device_private_entry_to_page(swpent); in smaps_pte_entry()
558 } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap in smaps_pte_entry()
560 page = xa_load(&vma->vm_file->f_mapping->i_pages, in smaps_pte_entry()
562 if (xa_is_value(page)) in smaps_pte_entry()
563 mss->swap += PAGE_SIZE; in smaps_pte_entry()
567 if (!page) in smaps_pte_entry()
570 smaps_account(mss, page, false, young, dirty, locked, migration); in smaps_pte_entry()
577 struct mem_size_stats *mss = walk->private; in smaps_pmd_entry()
578 struct vm_area_struct *vma = walk->vma; in smaps_pmd_entry()
579 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pmd_entry()
580 struct page *page = NULL; in smaps_pmd_entry() local
584 /* FOLL_DUMP will return -EFAULT on huge zero page */ in smaps_pmd_entry()
585 page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); in smaps_pmd_entry()
591 page = migration_entry_to_page(entry); in smaps_pmd_entry()
594 if (IS_ERR_OR_NULL(page)) in smaps_pmd_entry()
596 if (PageAnon(page)) in smaps_pmd_entry()
597 mss->anonymous_thp += HPAGE_PMD_SIZE; in smaps_pmd_entry()
598 else if (PageSwapBacked(page)) in smaps_pmd_entry()
599 mss->shmem_thp += HPAGE_PMD_SIZE; in smaps_pmd_entry()
600 else if (is_zone_device_page(page)) in smaps_pmd_entry()
603 mss->file_thp += HPAGE_PMD_SIZE; in smaps_pmd_entry()
605 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), in smaps_pmd_entry()
618 struct vm_area_struct *vma = walk->vma; in smaps_pte_range()
636 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
639 pte_unmap_unlock(pte - 1, ptl); in smaps_pte_range()
654 [0 ... (BITS_PER_LONG-1)] = "??", in show_smap_vma_flags()
713 if (vma->vm_flags & (1UL << i)) { in show_smap_vma_flags()
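
The "[0 ... (BITS_PER_LONG-1)] = \"??\"" form above is a GCC/Clang range-designated initializer: every slot of the VmFlags mnemonics table defaults to "??", and each known VM_* flag then overrides its own slot. A standalone illustration of the pattern:

	static const char * const names[8] = {
		[0 ... 7] = "??",	/* default for every entry */
		[2] = "rd",		/* specific slots override the range */
		[5] = "wr",
	};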
727 struct mem_size_stats *mss = walk->private; in smaps_hugetlb_range()
728 struct vm_area_struct *vma = walk->vma; in smaps_hugetlb_range()
729 struct page *page = NULL; in smaps_hugetlb_range() local
732 page = vm_normal_page(vma, addr, *pte); in smaps_hugetlb_range()
737 page = migration_entry_to_page(swpent); in smaps_hugetlb_range()
739 page = device_private_entry_to_page(swpent); in smaps_hugetlb_range()
741 if (page) { in smaps_hugetlb_range()
742 if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte)) in smaps_hugetlb_range()
743 mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
745 mss->private_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
776 if (start >= vma->vm_end) in smap_gather_stats()
781 mss->check_shmem_swap = false; in smap_gather_stats()
782 if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) { in smap_gather_stats()
789 * object, so we have to distinguish them during the page walk. in smap_gather_stats()
795 if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) || in smap_gather_stats()
796 !(vma->vm_flags & VM_WRITE))) { in smap_gather_stats()
797 mss->swap += shmem_swapped; in smap_gather_stats()
799 mss->check_shmem_swap = true; in smap_gather_stats()
808 walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss); in smap_gather_stats()
818 SEQ_PUT_DEC("Rss: ", mss->resident); in __show_smap()
819 SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT); in __show_smap()
825 SEQ_PUT_DEC(" kB\nPss_Anon: ", in __show_smap()
826 mss->pss_anon >> PSS_SHIFT); in __show_smap()
827 SEQ_PUT_DEC(" kB\nPss_File: ", in __show_smap()
828 mss->pss_file >> PSS_SHIFT); in __show_smap()
829 SEQ_PUT_DEC(" kB\nPss_Shmem: ", in __show_smap()
830 mss->pss_shmem >> PSS_SHIFT); in __show_smap()
832 SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean); in __show_smap()
833 SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty); in __show_smap()
834 SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean); in __show_smap()
835 SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty); in __show_smap()
836 SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced); in __show_smap()
837 SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous); in __show_smap()
838 SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree); in __show_smap()
839 SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp); in __show_smap()
840 SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp); in __show_smap()
841 SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp); in __show_smap()
842 SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb); in __show_smap()
843 seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ", in __show_smap()
844 mss->private_hugetlb >> 10, 7); in __show_smap()
845 SEQ_PUT_DEC(" kB\nSwap: ", mss->swap); in __show_smap()
846 SEQ_PUT_DEC(" kB\nSwapPss: ", in __show_smap()
847 mss->swap_pss >> PSS_SHIFT); in __show_smap()
848 SEQ_PUT_DEC(" kB\nLocked: ", in __show_smap()
849 mss->pss_locked >> PSS_SHIFT); in __show_smap()
850 seq_puts(m, " kB\n"); in __show_smap()
864 SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start); in show_smap()
865 SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma)); in show_smap()
866 SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma)); in show_smap()
867 seq_puts(m, " kB\n"); in show_smap()
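
Together, show_smap() and __show_smap() render one /proc/PID/smaps entry per VMA, values right-aligned in an 8-character column. An abbreviated, illustrative entry (values invented for the example):

	Size:                132 kB
	KernelPageSize:        4 kB
	MMUPageSize:           4 kB
	Rss:                  12 kB
	Pss:                  12 kB
	...
	Swap:                  0 kB
	SwapPss:               0 kB
	Locked:                0 kB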
883 struct proc_maps_private *priv = m->private; in show_smaps_rollup()
890 priv->task = get_proc_task(priv->inode); in show_smaps_rollup()
891 if (!priv->task) in show_smaps_rollup()
892 return -ESRCH; in show_smaps_rollup()
894 mm = priv->mm; in show_smaps_rollup()
896 ret = -ESRCH; in show_smaps_rollup()
908 for (vma = priv->mm->mmap; vma;) { in show_smaps_rollup()
910 last_vma_end = vma->vm_end; in show_smaps_rollup()
928 * +------+------+-----------+ in show_smaps_rollup()
930 * +------+------+-----------+ in show_smaps_rollup()
932 * 4k 8k 16k 400k in show_smaps_rollup()
941 * find_vma(mm, 16k - 1) will return VMA3. in show_smaps_rollup()
946 * find_vma(mm, 16k - 1) will return VMA2. in show_smaps_rollup()
951 * find_vma(mm, 16k - 1) will return NULL. in show_smaps_rollup()
954 * 4) (last_vma_end - 1) is the middle of a vma (VMA'): in show_smaps_rollup()
956 * find_vma(mm, 16k - 1) will return VMA' whose range in show_smaps_rollup()
960 vma = find_vma(mm, last_vma_end - 1); in show_smaps_rollup()
966 if (vma->vm_start >= last_vma_end) in show_smaps_rollup()
969 /* Case 4 above */ in show_smaps_rollup()
970 if (vma->vm_end > last_vma_end) in show_smaps_rollup()
974 vma = vma->vm_next; in show_smaps_rollup()
977 show_vma_header_prefix(m, priv->mm->mmap ? priv->mm->mmap->vm_start : 0, in show_smaps_rollup()
990 put_task_struct(priv->task); in show_smaps_rollup()
991 priv->task = NULL; in show_smaps_rollup()
1016 return -ENOMEM; in smaps_rollup_open()
1022 priv->inode = inode; in smaps_rollup_open()
1023 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); in smaps_rollup_open()
1024 if (IS_ERR(priv->mm)) { in smaps_rollup_open()
1025 ret = PTR_ERR(priv->mm); in smaps_rollup_open()
1040 struct seq_file *seq = file->private_data; in smaps_rollup_release()
1041 struct proc_maps_private *priv = seq->private; in smaps_rollup_release()
1043 if (priv->mm) in smaps_rollup_release()
1044 mmdrop(priv->mm); in smaps_rollup_release()
1083 struct page *page; in pte_is_pinned() local
1087 if (!is_cow_mapping(vma->vm_flags)) in pte_is_pinned()
1089 if (likely(!atomic_read(&vma->vm_mm->has_pinned))) in pte_is_pinned()
1091 page = vm_normal_page(vma, addr, pte); in pte_is_pinned()
1092 if (!page) in pte_is_pinned()
1094 return page_maybe_dma_pinned(page); in pte_is_pinned()
1101 * The soft-dirty tracker uses #PF-s to catch writes in clear_soft_dirty()
1102 * to pages, so write-protect the pte as well. See the in clear_soft_dirty()
1103 * Documentation/admin-guide/mm/soft-dirty.rst for full description in clear_soft_dirty()
1104 * of how soft-dirty works. in clear_soft_dirty()
1119 set_pte_at(vma->vm_mm, addr, pte, ptent); in clear_soft_dirty()
1146 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1149 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1162 struct clear_refs_private *cp = walk->private; in clear_refs_pte_range()
1163 struct vm_area_struct *vma = walk->vma; in clear_refs_pte_range()
1166 struct page *page; in clear_refs_pte_range() local
1170 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { in clear_refs_pte_range()
1178 page = pmd_page(*pmd); in clear_refs_pte_range()
1182 test_and_clear_page_young(page); in clear_refs_pte_range()
1183 ClearPageReferenced(page); in clear_refs_pte_range()
1192 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
1196 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { in clear_refs_pte_range()
1204 page = vm_normal_page(vma, addr, ptent); in clear_refs_pte_range()
1205 if (!page) in clear_refs_pte_range()
1210 test_and_clear_page_young(page); in clear_refs_pte_range()
1211 ClearPageReferenced(page); in clear_refs_pte_range()
1213 pte_unmap_unlock(pte - 1, ptl); in clear_refs_pte_range()
1221 struct clear_refs_private *cp = walk->private; in clear_refs_test_walk()
1222 struct vm_area_struct *vma = walk->vma; in clear_refs_test_walk()
1224 if (vma->vm_flags & VM_PFNMAP) in clear_refs_test_walk()
1231 * Writing 4 to /proc/pid/clear_refs affects all pages. in clear_refs_test_walk()
1233 if (cp->type == CLEAR_REFS_ANON && vma->vm_file) in clear_refs_test_walk()
1235 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) in clear_refs_test_walk()
1257 if (count > sizeof(buffer) - 1) in clear_refs_write()
1258 count = sizeof(buffer) - 1; in clear_refs_write()
1260 return -EFAULT; in clear_refs_write()
1266 return -EINVAL; in clear_refs_write()
1270 return -ESRCH; in clear_refs_write()
1279 count = -EINTR; in clear_refs_write()
1292 for (vma = mm->mmap; vma; vma = vma->vm_next) { in clear_refs_write()
1293 if (!(vma->vm_flags & VM_SOFTDIRTY)) in clear_refs_write()
1295 vma->vm_flags &= ~VM_SOFTDIRTY; in clear_refs_write()
1301 0, NULL, mm, 0, -1UL); in clear_refs_write()
1304 walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops, in clear_refs_write()
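
clear_refs_write() is the kernel half of the soft-dirty protocol: writing 4 strips VM_SOFTDIRTY from every VMA, write-protects the PTEs so the next store faults, and flushes via the mmu_notifier range set up above. A hedged userspace sketch of driving it, paired with the pagemap soft-dirty bit (bit 55) documented further down:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/self/clear_refs", O_WRONLY);

		if (fd < 0)
			return 1;
		/* "4" == CLEAR_REFS_SOFT_DIRTY: clear and re-arm tracking */
		if (write(fd, "4", 1) != 1) {
			close(fd);
			return 1;
		}
		close(fd);

		/* ... run the workload, then scan /proc/self/pagemap: entries
		 * with bit 55 set cover pages written since the clear. */
		return 0;
	}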
1341 #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1358 pm->buffer[pm->pos++] = *pme; in add_to_pagemap()
1359 if (pm->pos >= pm->len) in add_to_pagemap()
1367 struct pagemapread *pm = walk->private; in pagemap_pte_hole()
1372 struct vm_area_struct *vma = find_vma(walk->mm, addr); in pagemap_pte_hole()
1374 /* End of address space hole, which we mark as non-present. */ in pagemap_pte_hole()
1378 hole_end = min(end, vma->vm_start); in pagemap_pte_hole()
1392 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pte_hole()
1394 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { in pagemap_pte_hole()
1408 struct page *page = NULL; in pte_to_pagemap_entry() local
1412 if (pm->show_pfn) in pte_to_pagemap_entry()
1415 page = vm_normal_page(vma, addr, pte); in pte_to_pagemap_entry()
1423 if (pm->show_pfn) in pte_to_pagemap_entry()
1429 page = migration_entry_to_page(entry); in pte_to_pagemap_entry()
1433 page = device_private_entry_to_page(entry); in pte_to_pagemap_entry()
1436 if (page && !PageAnon(page)) in pte_to_pagemap_entry()
1438 if (page && !migration && page_mapcount(page) == 1) in pte_to_pagemap_entry()
1440 if (vma->vm_flags & VM_SOFTDIRTY) in pte_to_pagemap_entry()
1449 struct vm_area_struct *vma = walk->vma; in pagemap_pmd_range()
1450 struct pagemapread *pm = walk->private; in pagemap_pmd_range()
1461 struct page *page = NULL; in pagemap_pmd_range() local
1463 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pmd_range()
1467 page = pmd_page(pmd); in pagemap_pmd_range()
1472 if (pm->show_pfn) in pagemap_pmd_range()
1481 if (pm->show_pfn) { in pagemap_pmd_range()
1492 page = migration_entry_to_page(entry); in pagemap_pmd_range()
1496 if (page && !migration && page_mapcount(page) == 1) in pagemap_pmd_range()
1505 if (pm->show_pfn) { in pagemap_pmd_range()
1522 * goes beyond vma->vm_end. in pagemap_pmd_range()
1524 orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); in pagemap_pmd_range()
1546 struct pagemapread *pm = walk->private; in pagemap_hugetlb_range()
1547 struct vm_area_struct *vma = walk->vma; in pagemap_hugetlb_range()
1552 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_hugetlb_range()
1557 struct page *page = pte_page(pte); in pagemap_hugetlb_range() local
1559 if (!PageAnon(page)) in pagemap_hugetlb_range()
1562 if (page_mapcount(page) == 1) in pagemap_hugetlb_range()
1566 if (pm->show_pfn) in pagemap_hugetlb_range()
1577 if (pm->show_pfn && (flags & PM_PRESENT)) in pagemap_hugetlb_range()
1596 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1598 * For each page in the address space, this file contains one 64-bit entry
1601 * Bits 0-54 page frame number (PFN) if present
1602 * Bits 0-4 swap type if swapped
1603 * Bits 5-54 swap offset if swapped
1604 * Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1605 * Bit 56 page exclusively mapped
1606 * Bits 57-60 zero
1607 * Bit 61 page is file-page or shared-anon
1608 * Bit 62 page swapped
1609 * Bit 63 page present
1611 * If the page is not present but in swap, then the PFN contains an
1612 * encoding of the swap file number and the page's offset into the
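
A hedged userspace sketch of consuming that layout: each 64-bit entry lives at file offset (vaddr / page_size) * 8, and the PFN field (bits 0-54) reads back as zero without CAP_SYS_ADMIN, matching the pm->show_pfn checks in the walkers above.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define PM_PFRAME_MASK	((1ULL << 55) - 1)	/* bits 0-54 */
	#define PM_SOFT_DIRTY	(1ULL << 55)
	#define PM_SWAP		(1ULL << 62)
	#define PM_PRESENT	(1ULL << 63)

	int main(void)
	{
		long psize = sysconf(_SC_PAGESIZE);
		uintptr_t vaddr = (uintptr_t)&psize;	/* any mapped address */
		uint64_t entry;
		int fd = open("/proc/self/pagemap", O_RDONLY);

		if (fd < 0)
			return 1;
		if (pread(fd, &entry, sizeof(entry),
			  (off_t)(vaddr / psize) * sizeof(entry)) == sizeof(entry))
			printf("present=%d swapped=%d soft-dirty=%d pfn=%llu\n",
			       !!(entry & PM_PRESENT), !!(entry & PM_SWAP),
			       !!(entry & PM_SOFT_DIRTY),
			       (unsigned long long)(entry & PM_PFRAME_MASK));
		close(fd);
		return 0;
	}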
1624 struct mm_struct *mm = file->private_data; in pagemap_read()
1635 ret = -EINVAL; in pagemap_read()
1649 ret = -ENOMEM; in pagemap_read()
1655 end_vaddr = mm->task_size; in pagemap_read()
1663 if (start_vaddr > mm->task_size) in pagemap_read()
1691 ret = -EFAULT; in pagemap_read()
1696 count -= len; in pagemap_read()
1717 file->private_data = mm; in pagemap_open()
1723 struct mm_struct *mm = file->private_data; in pagemap_release()
1756 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, in gather_stats() argument
1759 int count = page_mapcount(page); in gather_stats()
1761 md->pages += nr_pages; in gather_stats()
1762 if (pte_dirty || PageDirty(page)) in gather_stats()
1763 md->dirty += nr_pages; in gather_stats()
1765 if (PageSwapCache(page)) in gather_stats()
1766 md->swapcache += nr_pages; in gather_stats()
1768 if (PageActive(page) || PageUnevictable(page)) in gather_stats()
1769 md->active += nr_pages; in gather_stats()
1771 if (PageWriteback(page)) in gather_stats()
1772 md->writeback += nr_pages; in gather_stats()
1774 if (PageAnon(page)) in gather_stats()
1775 md->anon += nr_pages; in gather_stats()
1777 if (count > md->mapcount_max) in gather_stats()
1778 md->mapcount_max = count; in gather_stats()
1780 md->node[page_to_nid(page)] += nr_pages; in gather_stats()
1783 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, in can_gather_numa_stats()
1786 struct page *page; in can_gather_numa_stats() local
1792 page = vm_normal_page(vma, addr, pte); in can_gather_numa_stats()
1793 if (!page) in can_gather_numa_stats()
1796 if (PageReserved(page)) in can_gather_numa_stats()
1799 nid = page_to_nid(page); in can_gather_numa_stats()
1803 return page; in can_gather_numa_stats()
1807 static struct page *can_gather_numa_stats_pmd(pmd_t pmd, in can_gather_numa_stats_pmd()
1811 struct page *page; in can_gather_numa_stats_pmd() local
1817 page = vm_normal_page_pmd(vma, addr, pmd); in can_gather_numa_stats_pmd()
1818 if (!page) in can_gather_numa_stats_pmd()
1821 if (PageReserved(page)) in can_gather_numa_stats_pmd()
1824 nid = page_to_nid(page); in can_gather_numa_stats_pmd()
1828 return page; in can_gather_numa_stats_pmd()
1835 struct numa_maps *md = walk->private; in gather_pte_stats()
1836 struct vm_area_struct *vma = walk->vma; in gather_pte_stats()
1844 struct page *page; in gather_pte_stats() local
1846 page = can_gather_numa_stats_pmd(*pmd, vma, addr); in gather_pte_stats()
1847 if (page) in gather_pte_stats()
1848 gather_stats(page, md, pmd_dirty(*pmd), in gather_pte_stats()
1857 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in gather_pte_stats()
1859 struct page *page = can_gather_numa_stats(*pte, vma, addr); in gather_pte_stats() local
1860 if (!page) in gather_pte_stats()
1862 gather_stats(page, md, pte_dirty(*pte), 1); in gather_pte_stats()
1875 struct page *page; in gather_hugetlb_stats() local
1880 page = pte_page(huge_pte); in gather_hugetlb_stats()
1881 if (!page) in gather_hugetlb_stats()
1884 md = walk->private; in gather_hugetlb_stats()
1885 gather_stats(page, md, pte_dirty(huge_pte), 1); in gather_hugetlb_stats()
1907 struct numa_maps_private *numa_priv = m->private; in show_numa_map()
1908 struct proc_maps_private *proc_priv = &numa_priv->proc_maps; in show_numa_map()
1910 struct numa_maps *md = &numa_priv->md; in show_numa_map()
1911 struct file *file = vma->vm_file; in show_numa_map()
1912 struct mm_struct *mm = vma->vm_mm; in show_numa_map()
1923 pol = __get_vma_policy(vma, vma->vm_start); in show_numa_map()
1928 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy); in show_numa_map()
1931 seq_printf(m, "%08lx %s", vma->vm_start, buffer); in show_numa_map()
1936 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { in show_numa_map()
1948 if (!md->pages) in show_numa_map()
1951 if (md->anon) in show_numa_map()
1952 seq_printf(m, " anon=%lu", md->anon); in show_numa_map()
1954 if (md->dirty) in show_numa_map()
1955 seq_printf(m, " dirty=%lu", md->dirty); in show_numa_map()
1957 if (md->pages != md->anon && md->pages != md->dirty) in show_numa_map()
1958 seq_printf(m, " mapped=%lu", md->pages); in show_numa_map()
1960 if (md->mapcount_max > 1) in show_numa_map()
1961 seq_printf(m, " mapmax=%lu", md->mapcount_max); in show_numa_map()
1963 if (md->swapcache) in show_numa_map()
1964 seq_printf(m, " swapcache=%lu", md->swapcache); in show_numa_map()
1966 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) in show_numa_map()
1967 seq_printf(m, " active=%lu", md->active); in show_numa_map()
1969 if (md->writeback) in show_numa_map()
1970 seq_printf(m, " writeback=%lu", md->writeback); in show_numa_map()
1973 if (md->node[nid]) in show_numa_map()
1974 seq_printf(m, " N%d=%lu", nid, md->node[nid]); in show_numa_map()
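
The result is one /proc/PID/numa_maps record per VMA: start address, the policy string from mpol_to_str(), an identifying name where one exists, then only the counters that are non-zero, ending with per-node page counts (and, outside the matched lines, a kernelpagesize_kB field). An illustrative line, with all values invented for the example:

	7f1c2a000000 default file=/usr/lib/libexample.so mapped=23 mapmax=4 active=20 N0=23 kernelpagesize_kB=4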