/mm/ |  (identifier hits for "pte": source line number, matched text, enclosing function; "[all …]" marks a truncated hit list)
D | memory.c |
    645  pte_t pte, struct page *page)  in print_bad_pte() argument
    682  (long long)pte_val(pte), (long long)pmd_val(*pmd));  in print_bad_pte()
    749  pte_t pte)  in vm_normal_page() argument
    751  unsigned long pfn = pte_pfn(pte);  in vm_normal_page()
    754  if (likely(!pte_special(pte)))  in vm_normal_page()
    759  print_bad_pte(vma, addr, pte, NULL);  in vm_normal_page()
    784  print_bad_pte(vma, addr, pte, NULL);  in vm_normal_page()
    808  pte_t pte = *src_pte;  in copy_one_pte() local
    812  if (unlikely(!pte_present(pte))) {  in copy_one_pte()
    813  if (!pte_file(pte)) {  in copy_one_pte()
    [all …]
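The memory.c hits cluster around vm_normal_page(), which maps a (vma, address, pte) triple to its backing struct page, returning NULL for special PFN mappings and reporting corruption through print_bad_pte(). A minimal sketch of the usual caller pattern, using the locked PTE-walk idiom seen in the mm/ walkers below; the helper name is hypothetical and the code is not from the tree:

    /* Hypothetical helper: count normal pages in one PTE table's range. */
    static long count_normal_pages(struct vm_area_struct *vma, pmd_t *pmd,
                                   unsigned long addr, unsigned long end)
    {
            spinlock_t *ptl;
            pte_t *pte;
            long count = 0;

            /* Map and lock the PTE page, as the mm/ walkers do. */
            pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
            do {
                    /* vm_normal_page() returns NULL for pte_special()
                     * mappings and calls print_bad_pte() on bad PTEs. */
                    if (pte_present(*pte) && vm_normal_page(vma, addr, *pte))
                            count++;
            } while (pte++, addr += PAGE_SIZE, addr != end);
            pte_unmap_unlock(pte - 1, ptl);
            return count;
    }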
|
D | rmap.c |
    653  pte_t *pte;  in __page_check_address() local
    658  pte = huge_pte_offset(mm, address);  in __page_check_address()
    659  if (!pte)  in __page_check_address()
    662  ptl = huge_pte_lockptr(page_hstate(page), mm, pte);  in __page_check_address()
    670  pte = pte_offset_map(pmd, address);  in __page_check_address()
    672  if (!sync && !pte_present(*pte)) {  in __page_check_address()
    673  pte_unmap(pte);  in __page_check_address()
    680  if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {  in __page_check_address()
    682  return pte;  in __page_check_address()
    684  pte_unmap_unlock(pte, ptl);  in __page_check_address()
    [all …]
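__page_check_address() answers one question for the rmap code: is this page mapped by a present PTE at this address? Once the PTE has been located (via the hugetlb or normal path above) and its lock taken, the core test is a PFN comparison. Distilled into a hypothetical helper, with lock handling omitted:

    /* Sketch: with the page-table lock held, a PTE maps the page iff it
     * is present and its PFN matches the page's. */
    static bool pte_maps_page(pte_t *pte, struct page *page)
    {
            return pte_present(*pte) && pte_pfn(*pte) == page_to_pfn(page);
    }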
|
D | gup.c |
     39  static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)  in can_follow_write_pte() argument
     41  return pte_write(pte) ||  in can_follow_write_pte()
     42  ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));  in can_follow_write_pte()
     51  pte_t *ptep, pte;  in follow_page_pte() local
     58  pte = *ptep;  in follow_page_pte()
     59  if (!pte_present(pte)) {  in follow_page_pte()
     68  if (pte_none(pte) || pte_file(pte))  in follow_page_pte()
     70  entry = pte_to_swp_entry(pte);  in follow_page_pte()
     77  if ((flags & FOLL_NUMA) && pte_numa(pte))  in follow_page_pte()
     79  if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {  in follow_page_pte()
    [all …]
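can_follow_write_pte() is the Dirty-COW hardening: FOLL_FORCE alone no longer lets a write go through a read-only PTE; the PTE must also be dirty and the caller must have completed a COW cycle (FOLL_COW). Callers reach this path through get_user_pages(). A usage sketch, assuming the older eight-argument get_user_pages() signature of this era; not code from the tree:

    /* Usage sketch: pin one page of the current process for writing.
     * The caller must put_page() the result when done. */
    static struct page *pin_one_page_for_write(unsigned long addr)
    {
            struct page *page;
            long ret;

            down_read(&current->mm->mmap_sem);
            ret = get_user_pages(current, current->mm, addr, 1,
                                 1 /* write */, 0 /* force */, &page, NULL);
            up_read(&current->mm->mmap_sem);

            return ret == 1 ? page : NULL;
    }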
|
D | fremap.c |
     34  pte_t pte = *ptep;  in zap_pte() local
     38  if (pte_present(pte)) {  in zap_pte()
     39  flush_cache_page(vma, addr, pte_pfn(pte));  in zap_pte()
     40  pte = ptep_clear_flush(vma, addr, ptep);  in zap_pte()
     41  page = vm_normal_page(vma, addr, pte);  in zap_pte()
     43  if (pte_dirty(pte))  in zap_pte()
     51  if (!pte_file(pte)) {  in zap_pte()
     53  entry = pte_to_swp_entry(pte);  in zap_pte()
     76  pte_t *pte, ptfile;  in install_file_pte() local
     79  pte = get_locked_pte(mm, addr, &ptl);  in install_file_pte()
    [all …]
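fremap.c implements the since-removed nonlinear mappings: install_file_pte() encodes a file offset directly into a not-present PTE, which pte_file() later recognizes, and zap_pte() undoes a present, file, or swap PTE. A distilled sketch of the install step, assuming pgoff_to_pte() as the per-arch encoder; the real install_file_pte() also zaps any old entry and handles soft-dirty:

    /* Sketch: encode pgoff into a not-present "file PTE" at addr. */
    static int set_nonlinear_pte(struct mm_struct *mm, unsigned long addr,
                                 unsigned long pgoff)
    {
            spinlock_t *ptl;
            pte_t *pte = get_locked_pte(mm, addr, &ptl);

            if (!pte)
                    return -ENOMEM;
            set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
            pte_unmap_unlock(pte, ptl);
            return 0;
    }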
|
D | mremap.c |
     73  static pte_t move_soft_dirty_pte(pte_t pte)  in move_soft_dirty_pte() argument
     80  if (pte_present(pte))  in move_soft_dirty_pte()
     81  pte = pte_mksoft_dirty(pte);  in move_soft_dirty_pte()
     82  else if (is_swap_pte(pte))  in move_soft_dirty_pte()
     83  pte = pte_swp_mksoft_dirty(pte);  in move_soft_dirty_pte()
     84  else if (pte_file(pte))  in move_soft_dirty_pte()
     85  pte = pte_file_mksoft_dirty(pte);  in move_soft_dirty_pte()
     87  return pte;  in move_soft_dirty_pte()
     98  pte_t *old_pte, *new_pte, pte;  in move_ptes() local
    145  pte = ptep_get_and_clear(mm, old_addr, old_pte);  in move_ptes()
    [all …]
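move_soft_dirty_pte() shows why soft-dirty needs three setters: a moved PTE may be present, a swap entry, or a file entry, and each flavor stores the bit differently. The surrounding move_ptes() loop, distilled per PTE (locks, TLB flushing, and rmap-lock handling omitted; a sketch, not the full function):

    /* Given mm, old_addr/new_addr, old_pte/new_pte from the loop:
     * clear the old slot, re-mark soft-dirty (a move counts as a write
     * for dirty tracking), install at the new slot. */
    pte = ptep_get_and_clear(mm, old_addr, old_pte);
    pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
    pte = move_soft_dirty_pte(pte);
    set_pte_at(mm, new_addr, new_pte, pte);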
|
D | sparse-vmemmap.c |
     90  void __meminit vmemmap_verify(pte_t *pte, int node,  in vmemmap_verify() argument
     93  unsigned long pfn = pte_pfn(*pte);  in vmemmap_verify()
    103  pte_t *pte = pte_offset_kernel(pmd, addr);  in vmemmap_pte_populate() local
    104  if (pte_none(*pte)) {  in vmemmap_pte_populate()
    110  set_pte_at(&init_mm, addr, pte, entry);  in vmemmap_pte_populate()
    112  return pte;  in vmemmap_pte_populate()
    158  pte_t *pte;  in vmemmap_populate_basepages() local
    170  pte = vmemmap_pte_populate(pmd, addr, node);  in vmemmap_populate_basepages()
    171  if (!pte)  in vmemmap_populate_basepages()
    173  vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);  in vmemmap_populate_basepages()
|
D | mprotect.c |
     42  pte_t *pte;  in lock_pte_protection() local
     55  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);  in lock_pte_protection()
     57  return pte;  in lock_pte_protection()
     65  pte_t *pte, oldpte;  in change_pte_range() local
     69  pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);  in change_pte_range()
     70  if (!pte)  in change_pte_range()
     75  oldpte = *pte;  in change_pte_range()
     81  ptent = ptep_modify_prot_start(mm, addr, pte);  in change_pte_range()
     93  ptep_modify_prot_commit(mm, addr, pte, ptent);  in change_pte_range()
    101  ptep_set_numa(mm, addr, pte);  in change_pte_range()
    [all …]
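change_pte_range() changes protections with a start/commit pair rather than a plain store: ptep_modify_prot_start() atomically fetches and clears the PTE so hardware accessed/dirty updates cannot race, and ptep_modify_prot_commit() installs the result. The per-PTE transaction, distilled:

    /* Given mm, addr, pte, newprot from the surrounding loop: */
    oldpte = ptep_modify_prot_start(mm, addr, pte);
    ptent  = pte_modify(oldpte, newprot);   /* apply the new protections */
    ptep_modify_prot_commit(mm, addr, pte, ptent);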
|
D | pagewalk.c |
      9  pte_t *pte;  in walk_pte_range() local
     12  pte = pte_offset_map(pmd, addr);  in walk_pte_range()
     14  err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);  in walk_pte_range()
     20  pte++;  in walk_pte_range()
     23  pte_unmap(pte);  in walk_pte_range()
    115  pte_t *pte;  in walk_hugetlb_range() local
    120  pte = huge_pte_offset(walk->mm, addr & hmask);  in walk_hugetlb_range()
    121  if (pte && walk->hugetlb_entry)  in walk_hugetlb_range()
    122  err = walk->hugetlb_entry(pte, hmask, addr, next, walk);  in walk_hugetlb_range()
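walk_pte_range() invokes the caller's pte_entry hook once per PTE, so users of the walker only supply callbacks. A usage sketch against the walk_page_range(start, end, walk) form of this era, with hypothetical helper names; the caller is expected to hold mmap_sem for read:

    /* Count present PTEs in [start, end) via the pte_entry callback. */
    static int count_present_pte(pte_t *pte, unsigned long addr,
                                 unsigned long next, struct mm_walk *walk)
    {
            long *count = walk->private;

            if (pte_present(*pte))
                    (*count)++;
            return 0;       /* non-zero would abort the walk */
    }

    static long count_present(struct mm_struct *mm, unsigned long start,
                              unsigned long end)
    {
            long count = 0;
            struct mm_walk walk = {
                    .pte_entry = count_present_pte,
                    .mm = mm,
                    .private = &count,
            };

            walk_page_range(start, end, &walk);
            return count;
    }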
|
D | hugetlb.c |
    2550  static int is_hugetlb_entry_migration(pte_t pte)  in is_hugetlb_entry_migration() argument
    2554  if (huge_pte_none(pte) || pte_present(pte))  in is_hugetlb_entry_migration()
    2556  swp = pte_to_swp_entry(pte);  in is_hugetlb_entry_migration()
    2563  static int is_hugetlb_entry_hwpoisoned(pte_t pte)  in is_hugetlb_entry_hwpoisoned() argument
    2567  if (huge_pte_none(pte) || pte_present(pte))  in is_hugetlb_entry_hwpoisoned()
    2569  swp = pte_to_swp_entry(pte);  in is_hugetlb_entry_hwpoisoned()
    2658  pte_t pte;  in __unmap_hugepage_range() local
    2682  pte = huge_ptep_get(ptep);  in __unmap_hugepage_range()
    2683  if (huge_pte_none(pte))  in __unmap_hugepage_range()
    2690  if (unlikely(!pte_present(pte))) {  in __unmap_hugepage_range()
    [all …]
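is_hugetlb_entry_migration() and is_hugetlb_entry_hwpoisoned() share one shape: a huge PTE that is neither none nor present must carry a swap-format entry, which is then tested for the migration or hwpoison marker. The shared logic, distilled:

    /* Classify a non-empty, non-present huge PTE. */
    if (!huge_pte_none(pte) && !pte_present(pte)) {
            swp_entry_t swp = pte_to_swp_entry(pte);

            if (non_swap_entry(swp) && is_migration_entry(swp))
                    ;       /* page is being migrated: wait and retry */
            else if (non_swap_entry(swp) && is_hwpoison_entry(swp))
                    ;       /* page is poisoned: fail the access */
    }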
|
D | migrate.c |
    112  pte_t *ptep, pte;  in remove_migration_pte() local
    136  pte = *ptep;  in remove_migration_pte()
    137  if (!is_swap_pte(pte))  in remove_migration_pte()
    140  entry = pte_to_swp_entry(pte);  in remove_migration_pte()
    147  pte = pte_mkold(mk_pte(new, vma->vm_page_prot));  in remove_migration_pte()
    149  pte = pte_mksoft_dirty(pte);  in remove_migration_pte()
    153  pte = maybe_mkwrite(pte, vma);  in remove_migration_pte()
    157  pte = pte_mkhuge(pte);  in remove_migration_pte()
    158  pte = arch_make_huge_pte(pte, vma, new, 0);  in remove_migration_pte()
    162  set_pte_at(mm, addr, ptep, pte);  in remove_migration_pte()
    [all …]
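remove_migration_pte() is the tail end of page migration: the migration swap entry parked in the PTE is replaced by a real mapping of the new page. The arming side lives in the unmap path. Both halves, distilled and simplified (soft-dirty and hugepage handling omitted; a sketch, not the full functions):

    /* Arm: park a migration entry in place of the present PTE. */
    entry = make_migration_entry(page, pte_write(pteval));
    set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));

    /* Disarm (remove_migration_pte): map the new page in its place. */
    pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
    if (is_write_migration_entry(entry))
            pte = maybe_mkwrite(pte, vma);
    set_pte_at(mm, addr, ptep, pte);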
|
D | huge_memory.c |
    1046  pte_t *pte, entry;  in do_huge_pmd_wp_page_fallback() local
    1054  pte = pte_offset_map(&_pmd, haddr);  in do_huge_pmd_wp_page_fallback()
    1055  VM_BUG_ON(!pte_none(*pte));  in do_huge_pmd_wp_page_fallback()
    1056  set_pte_at(mm, haddr, pte, entry);  in do_huge_pmd_wp_page_fallback()
    1057  pte_unmap(pte);  in do_huge_pmd_wp_page_fallback()
    1807  pte_t *pte, entry;  in __split_huge_page_map() local
    1820  pte = pte_offset_map(&_pmd, haddr);  in __split_huge_page_map()
    1821  BUG_ON(!pte_none(*pte));  in __split_huge_page_map()
    1822  set_pte_at(mm, haddr, pte, entry);  in __split_huge_page_map()
    1823  pte_unmap(pte);  in __split_huge_page_map()
    [all …]
|
D | mincore.c |
    126  pte_t pte = *ptep;  in mincore_pte_range() local
    130  if (pte_none(pte))  in mincore_pte_range()
    132  else if (pte_present(pte))  in mincore_pte_range()
    134  else if (pte_file(pte)) {  in mincore_pte_range()
    135  pgoff = pte_to_pgoff(pte);  in mincore_pte_range()
    138  swp_entry_t entry = pte_to_swp_entry(pte);  in mincore_pte_range()
|
D | pgtable-generic.c |
    114  pte_t pte;  in ptep_clear_flush() local
    115  pte = ptep_get_and_clear(mm, address, ptep);  in ptep_clear_flush()
    116  if (pte_accessible(mm, pte))  in ptep_clear_flush()
    118  return pte;  in ptep_clear_flush()
|
D | madvise.c |
    150  pte_t pte;  in swapin_walk_pmd_entry() local
    156  pte = *(orig_pte + ((index - start) / PAGE_SIZE));  in swapin_walk_pmd_entry()
    159  if (pte_present(pte) || pte_none(pte) || pte_file(pte))  in swapin_walk_pmd_entry()
    161  entry = pte_to_swp_entry(pte);  in swapin_walk_pmd_entry()
|
D | filemap_xip.c |
    171  pte_t *pte;  in __xip_unmap() local
    191  pte = page_check_address(page, mm, address, &ptl, 1);  in __xip_unmap()
    192  if (pte) {  in __xip_unmap()
    194  flush_cache_page(vma, address, pte_pfn(*pte));  in __xip_unmap()
    195  pteval = ptep_clear_flush(vma, address, pte);  in __xip_unmap()
    199  pte_unmap_unlock(pte, ptl);  in __xip_unmap()
|
D | vmalloc.c |
     60  pte_t *pte;  in vunmap_pte_range() local
     62  pte = pte_offset_kernel(pmd, addr);  in vunmap_pte_range()
     64  pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);  in vunmap_pte_range()
     66  } while (pte++, addr += PAGE_SIZE, addr != end);  in vunmap_pte_range()
    115  pte_t *pte;  in vmap_pte_range() local
    122  pte = pte_alloc_kernel(pmd, addr);  in vmap_pte_range()
    123  if (!pte)  in vmap_pte_range()
    128  if (WARN_ON(!pte_none(*pte)))  in vmap_pte_range()
    132  set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));  in vmap_pte_range()
    134  } while (pte++, addr += PAGE_SIZE, addr != end);  in vmap_pte_range()
    [all …]
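vmap_pte_range() is the leaf of the kernel-side mapping path: pte_alloc_kernel() ensures the PTE page exists, then one set_pte_at(&init_mm, ...) per page installs the mapping; vunmap_pte_range() tears it down with ptep_get_and_clear(). The public entry point is vmap(). A usage sketch with a hypothetical wrapper name:

    /* Map an array of (possibly scattered) pages at a virtually
     * contiguous kernel address; vmap() funnels down into
     * vmap_pte_range() above, vunmap() into vunmap_pte_range(). */
    #include <linux/vmalloc.h>

    static void *map_pages_virtually_contig(struct page **pages,
                                            unsigned int nr)
    {
            return vmap(pages, nr, VM_MAP, PAGE_KERNEL);
    }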
|
D | mlock.c |
    420  pte_t *pte;  in __munlock_pagevec_fill() local
    428  pte = get_locked_pte(vma->vm_mm, start, &ptl);  in __munlock_pagevec_fill()
    438  pte++;  in __munlock_pagevec_fill()
    439  if (pte_present(*pte))  in __munlock_pagevec_fill()
    440  page = vm_normal_page(vma, start, *pte);  in __munlock_pagevec_fill()
    457  pte_unmap_unlock(pte, ptl);  in __munlock_pagevec_fill()
|
D | swapfile.c |
    1114  static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)  in maybe_same_pte() argument
    1123  return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);  in maybe_same_pte()
    1125  return pte_same(pte, swp_pte);  in maybe_same_pte()
    1140  pte_t *pte;  in unuse_pte() local
    1153  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in unuse_pte()
    1154  if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {  in unuse_pte()
    1163  set_pte_at(vma->vm_mm, addr, pte,  in unuse_pte()
    1180  pte_unmap_unlock(pte, ptl);  in unuse_pte()
    1194  pte_t *pte;  in unuse_pte_range() local
    1206  pte = pte_offset_map(pmd, addr);  in unuse_pte_range()
    [all …]
|
D | mmu_notifier.c |
    146  pte_t pte)  in __mmu_notifier_change_pte() argument
    154  mn->ops->change_pte(mn, mm, address, pte);  in __mmu_notifier_change_pte()
|
D | filemap.c |
    1997  pte_t *pte;  in filemap_map_pages() local
    2037  pte = vmf->pte + page->index - vmf->pgoff;  in filemap_map_pages()
    2038  if (!pte_none(*pte))  in filemap_map_pages()
    2044  do_set_pte(vma, addr, page, pte, false, false);  in filemap_map_pages()
|
D | mempolicy.c |
    490  pte_t *pte;  in queue_pages_pte_range() local
    493  orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in queue_pages_pte_range()
    498  if (!pte_present(*pte))  in queue_pages_pte_range()
    500  page = vm_normal_page(vma, addr, *pte);  in queue_pages_pte_range()
    517  } while (pte++, addr += PAGE_SIZE, addr != end);  in queue_pages_pte_range()
|
D | memcontrol.c |
    5840  pte_t *pte;  in mem_cgroup_count_precharge_pte_range() local
    5852  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in mem_cgroup_count_precharge_pte_range()
    5853  for (; addr != end; pte++, addr += PAGE_SIZE)  in mem_cgroup_count_precharge_pte_range()
    5854  if (get_mctgt_type(vma, addr, *pte, NULL))  in mem_cgroup_count_precharge_pte_range()
    5856  pte_unmap_unlock(pte - 1, ptl);  in mem_cgroup_count_precharge_pte_range()
    6017  pte_t *pte;  in mem_cgroup_move_charge_pte_range() local
    6060  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in mem_cgroup_move_charge_pte_range()
    6062  pte_t ptent = *(pte++);  in mem_cgroup_move_charge_pte_range()
    6096  pte_unmap_unlock(pte - 1, ptl);  in mem_cgroup_move_charge_pte_range()
|
D | Kconfig |
    536  soft-dirty bit on pte-s. This bit is set when someone writes
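This Kconfig hit is the CONFIG_MEM_SOFT_DIRTY help text, the option behind the pte_mksoft_dirty()/pte_swp_mksoft_dirty() calls seen in mremap.c and swapfile.c above. Userspace consumes the bit through procfs: writing "4" to /proc/PID/clear_refs clears the soft-dirty bits, and bit 55 of each 64-bit /proc/PID/pagemap entry reports them (see Documentation/vm/soft-dirty.txt). A userspace sketch:

    /* Has the page at addr been written since the last clear_refs?
     * Reads bit 55 (soft-dirty) of the page's pagemap entry. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    static int page_soft_dirty(int pagemap_fd, uintptr_t addr)
    {
            uint64_t ent;
            off_t off = (addr / getpagesize()) * sizeof(ent);

            if (pread(pagemap_fd, &ent, sizeof(ent), off) != sizeof(ent))
                    return -1;
            return (ent >> 55) & 1;
    }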
|