/kernel/linux/linux-5.10/mm/
D | page_vma_mapped.c |
    50    pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);  in map_pte()
    51    spin_lock(pvmw->ptl);  in map_pte()
    173   pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);  in page_vma_mapped_walk()
    174   spin_lock(pvmw->ptl);  in page_vma_mapped_walk()
    218   pvmw->ptl = pmd_lock(mm, pvmw->pmd);  in page_vma_mapped_walk()
    240   spin_unlock(pvmw->ptl);  in page_vma_mapped_walk()
    241   pvmw->ptl = NULL;  in page_vma_mapped_walk()
    250   spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);  in page_vma_mapped_walk()  local
    252   spin_unlock(ptl);  in page_vma_mapped_walk()
    269   if (pvmw->ptl) {  in page_vma_mapped_walk()
    [all …]
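The map_pte() hits show the two-step form of taking a split PTE lock: look the lock up with pte_lockptr(), then take it explicitly, caching it in pvmw->ptl so the caller can drop it later. A minimal sketch of that shape against the 5.10 helpers; lock_pte_for() itself is a hypothetical wrapper, not a kernel function:

```c
#include <linux/mm.h>

/*
 * Hypothetical wrapper: map the PTE table covering @addr and take its
 * split lock the way map_pte() does -- look the lock up first, then lock.
 */
static pte_t *lock_pte_for(struct mm_struct *mm, pmd_t *pmd,
			   unsigned long addr, spinlock_t **ptlp)
{
	pte_t *pte = pte_offset_map(pmd, addr);	/* map the PTE page */
	spinlock_t *ptl = pte_lockptr(mm, pmd);	/* split lock, or mm's */

	spin_lock(ptl);
	*ptlp = ptl;	/* caller releases via pte_unmap_unlock() */
	return pte;
}
```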
D | huge_memory.c |
    613   vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in __do_huge_pmd_anonymous_page()
    627   spin_unlock(vmf->ptl);  in __do_huge_pmd_anonymous_page()
    643   spin_unlock(vmf->ptl);  in __do_huge_pmd_anonymous_page()
    650   spin_unlock(vmf->ptl);  in __do_huge_pmd_anonymous_page()
    739   vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in do_huge_pmd_anonymous_page()
    744   spin_unlock(vmf->ptl);  in do_huge_pmd_anonymous_page()
    747   spin_unlock(vmf->ptl);  in do_huge_pmd_anonymous_page()
    754   spin_unlock(vmf->ptl);  in do_huge_pmd_anonymous_page()
    757   spin_unlock(vmf->ptl);  in do_huge_pmd_anonymous_page()
    778   spinlock_t *ptl;  in insert_pfn_pmd()  local
    [all …]
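Both THP fault paths above take the PMD-level lock with pmd_lock(), which returns with the lock already held, so every one of the many exit paths owes a spin_unlock(vmf->ptl). A compressed sketch of that contract; pmd_is_present_locked() is an illustrative name, not a kernel function:

```c
#include <linux/mm.h>

/*
 * Illustrative only: pmd_lock() returns the PMD lock already held, so
 * *pmd may be examined safely until the matching spin_unlock().
 */
static bool pmd_is_present_locked(struct vm_area_struct *vma, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
	bool present = pmd_present(*pmd);

	spin_unlock(ptl);
	return present;
}
```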
D | memory.c |
    428   spinlock_t *ptl;  in __pte_alloc()  local
    448   ptl = pmd_lock(mm, pmd);  in __pte_alloc()
    454   spin_unlock(ptl);  in __pte_alloc()
    1216  spinlock_t *ptl;  in zap_pte_range()  local
    1224  start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  in zap_pte_range()
    1325  pte_unmap_unlock(start_pte, ptl);  in zap_pte_range()
    1366  spinlock_t *ptl = pmd_lock(tlb->mm, pmd);  in zap_pmd_range()  local
    1372  spin_unlock(ptl);  in zap_pmd_range()
    1633  spinlock_t **ptl)  in __get_locked_pte()  argument
    1639  return pte_alloc_map_lock(mm, pmd, addr, ptl);  in __get_locked_pte()
    [all …]
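zap_pte_range() is the canonical range walk: pte_offset_map_lock() maps the PTE page and takes its lock once, the loop runs entirely under that lock, and pte_unmap_unlock() undoes both. A sketch of the shape with the per-entry work elided; walk_pte_range() is a made-up name:

```c
#include <linux/mm.h>

/* Made-up walker showing the map-lock / iterate / unmap-unlock shape. */
static void walk_pte_range(struct mm_struct *mm, pmd_t *pmd,
			   unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *start_pte, *pte;

	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
		if (pte_none(*pte))
			continue;
		/* ... per-PTE work, done under the PTE lock ... */
	}
	pte_unmap_unlock(start_pte, ptl);
}
```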
D | mincore.c |
    102   spinlock_t *ptl;  in mincore_pte_range()  local
    108   ptl = pmd_trans_huge_lock(pmd, vma);  in mincore_pte_range()
    109   if (ptl) {  in mincore_pte_range()
    111   spin_unlock(ptl);  in mincore_pte_range()
    120   ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);  in mincore_pte_range()
    150   pte_unmap_unlock(ptep - 1, ptl);  in mincore_pte_range()
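mincore_pte_range() shows the standard THP dispatch: pmd_trans_huge_lock() returns the held PMD lock when the entry really is a huge PMD, and NULL otherwise, so the huge and per-PTE cases split cleanly. A sketch with the real work stubbed out; scan_pmd() is illustrative:

```c
#include <linux/huge_mm.h>
#include <linux/mm.h>

/* Illustrative dispatch: handle a huge PMD under the returned lock,
 * otherwise fall back to the per-PTE walk. */
static void scan_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		     unsigned long addr, unsigned long end)
{
	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

	if (ptl) {
		/* one huge entry covers all of [addr, end) */
		spin_unlock(ptl);
		return;
	}
	/* not huge: use pte_offset_map_lock()/pte_unmap_unlock() here */
}
```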
D | madvise.c |
    205   spinlock_t *ptl;  in swapin_walk_pmd_entry()  local
    207   orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);  in swapin_walk_pmd_entry()
    209   pte_unmap_unlock(orig_pte, ptl);  in swapin_walk_pmd_entry()
    322   spinlock_t *ptl;  in madvise_cold_or_pageout_pte_range()  local
    335   ptl = pmd_trans_huge_lock(pmd, vma);  in madvise_cold_or_pageout_pte_range()
    336   if (!ptl)  in madvise_cold_or_pageout_pte_range()
    359   spin_unlock(ptl);  in madvise_cold_or_pageout_pte_range()
    389   spin_unlock(ptl);  in madvise_cold_or_pageout_pte_range()
    400   orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in madvise_cold_or_pageout_pte_range()
    428   pte_unmap_unlock(orig_pte, ptl);  in madvise_cold_or_pageout_pte_range()
    [all …]
D | hmm.c |
    417   spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);  in hmm_vma_walk_pud()  local
    419   if (!ptl)  in hmm_vma_walk_pud()
    427   spin_unlock(ptl);  in hmm_vma_walk_pud()
    438   spin_unlock(ptl);  in hmm_vma_walk_pud()
    450   spin_unlock(ptl);  in hmm_vma_walk_pud()
    464   spin_unlock(ptl);  in hmm_vma_walk_pud()
    483   spinlock_t *ptl;  in hmm_vma_walk_hugetlb_entry()  local
    486   ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);  in hmm_vma_walk_hugetlb_entry()
    496   spin_unlock(ptl);  in hmm_vma_walk_hugetlb_entry()
    504   spin_unlock(ptl);  in hmm_vma_walk_hugetlb_entry()
D | migrate.c |
    313   spinlock_t *ptl)  in __migration_entry_wait()  argument
    319   spin_lock(ptl);  in __migration_entry_wait()
    338   pte_unmap_unlock(ptep, ptl);  in __migration_entry_wait()
    342   pte_unmap_unlock(ptep, ptl);  in __migration_entry_wait()
    348   spinlock_t *ptl = pte_lockptr(mm, pmd);  in migration_entry_wait()  local
    350   __migration_entry_wait(mm, ptep, ptl);  in migration_entry_wait()
    356   spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);  in migration_entry_wait_huge()  local
    357   __migration_entry_wait(mm, pte, ptl);  in migration_entry_wait_huge()
    363   spinlock_t *ptl;  in pmd_migration_entry_wait()  local
    366   ptl = pmd_lock(mm, pmd);  in pmd_migration_entry_wait()
    [all …]
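Note the division of labour: migration_entry_wait() only computes the lock pointer with pte_lockptr(), while __migration_entry_wait() takes it, revalidates the PTE under the lock, and drops everything with pte_unmap_unlock() before sleeping on the page. A compressed sketch of that validate-under-lock step; only the listed helpers are real, the wrapper is illustrative:

```c
#include <linux/mm.h>
#include <linux/swapops.h>

/*
 * Illustrative re-check: take the PTE lock that migration_entry_wait()
 * only looked up, and confirm the PTE still holds a migration entry.
 */
static bool pte_is_migration_entry(struct mm_struct *mm, pmd_t *pmd,
				   unsigned long addr)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, addr);
	bool ret = false;

	spin_lock(ptl);
	if (!pte_present(*ptep) && !pte_none(*ptep) &&
	    is_migration_entry(pte_to_swp_entry(*ptep)))
		ret = true;
	pte_unmap_unlock(ptep, ptl);
	return ret;
}
```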
D | hugetlb.c |
    3930  spinlock_t *ptl;  in __unmap_hugepage_range()  local
    3961  ptl = huge_pte_lock(h, mm, ptep);  in __unmap_hugepage_range()
    3963  spin_unlock(ptl);  in __unmap_hugepage_range()
    3971  spin_unlock(ptl);  in __unmap_hugepage_range()
    3981  spin_unlock(ptl);  in __unmap_hugepage_range()
    3993  spin_unlock(ptl);  in __unmap_hugepage_range()
    4012  spin_unlock(ptl);  in __unmap_hugepage_range()
    4147  struct page *pagecache_page, spinlock_t *ptl)  in hugetlb_cow()  argument
    4188  spin_unlock(ptl);  in hugetlb_cow()
    4224  spin_lock(ptl);  in hugetlb_cow()
    [all …]
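Hugetlb paths cannot use the ordinary PTE helpers, so __unmap_hugepage_range() takes the lock via huge_pte_lock(), which selects the lock appropriate to the hstate's page size and returns it held. A minimal sketch; huge_pte_is_none() is an illustrative name:

```c
#include <linux/hugetlb.h>

/*
 * Illustrative check: huge_pte_lock() picks the lock matching this
 * hstate's page size and returns it held.
 */
static bool huge_pte_is_none(struct vm_area_struct *vma,
			     struct mm_struct *mm, pte_t *ptep)
{
	struct hstate *h = hstate_vma(vma);
	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
	bool none = huge_pte_none(huge_ptep_get(ptep));

	spin_unlock(ptl);
	return none;
}
```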
D | userfaultfd.c |
    60    spinlock_t *ptl;  in mcopy_atomic_pte()  local
    110   dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);  in mcopy_atomic_pte()
    133   pte_unmap_unlock(dst_pte, ptl);  in mcopy_atomic_pte()
    138   pte_unmap_unlock(dst_pte, ptl);  in mcopy_atomic_pte()
    150   spinlock_t *ptl;  in mfill_zeropage_pte()  local
    157   dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);  in mfill_zeropage_pte()
    175   pte_unmap_unlock(dst_pte, ptl);  in mfill_zeropage_pte()
D | gup.c |
    400   spinlock_t *ptl;  in follow_page_pte()  local
    424   ptep = pte_offset_map_lock(mm, pmd, address, &ptl);  in follow_page_pte()
    440   pte_unmap_unlock(ptep, ptl);  in follow_page_pte()
    447   pte_unmap_unlock(ptep, ptl);  in follow_page_pte()
    481   pte_unmap_unlock(ptep, ptl);  in follow_page_pte()
    547   pte_unmap_unlock(ptep, ptl);  in follow_page_pte()
    550   pte_unmap_unlock(ptep, ptl);  in follow_page_pte()
    562   spinlock_t *ptl;  in follow_pmd_mask()  local
    606   ptl = pmd_lock(mm, pmd);  in follow_pmd_mask()
    608   spin_unlock(ptl);  in follow_pmd_mask()
    [all …]
D | khugepaged.c |
    739   spinlock_t *ptl,  in __collapse_huge_page_copy()  argument
    755   spin_lock(ptl);  in __collapse_huge_page_copy()
    761   spin_unlock(ptl);  in __collapse_huge_page_copy()
    773   spin_lock(ptl);  in __collapse_huge_page_copy()
    780   spin_unlock(ptl);  in __collapse_huge_page_copy()
    1234  spinlock_t *ptl;  in khugepaged_scan_pmd()  local
    1247  pte = pte_offset_map_lock(mm, pmd, address, &ptl);  in khugepaged_scan_pmd()
    1371  pte_unmap_unlock(pte, ptl);  in khugepaged_scan_pmd()
    1441  spinlock_t *ptl;  in collapse_pte_mapped_thp()  local
    1470  start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);  in collapse_pte_mapped_thp()
    [all …]
D | debug_vm_pgtable.c |
    985   spinlock_t *ptl = NULL;  in debug_vm_pgtable()  local
    1107  ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl);  in debug_vm_pgtable()
    1110  pte_unmap_unlock(ptep, ptl);  in debug_vm_pgtable()
    1112  ptl = pmd_lock(mm, pmdp);  in debug_vm_pgtable()
    1117  spin_unlock(ptl);  in debug_vm_pgtable()
    1119  ptl = pud_lock(mm, pudp);  in debug_vm_pgtable()
    1124  spin_unlock(ptl);  in debug_vm_pgtable()
/kernel/linux/linux-5.10/arch/arm/lib/ |
D | uaccess_with_memcpy.c |
    31    spinlock_t *ptl;  in pin_page_for_write()  local
    60    ptl = &current->mm->page_table_lock;  in pin_page_for_write()
    61    spin_lock(ptl);  in pin_page_for_write()
    64    spin_unlock(ptl);  in pin_page_for_write()
    69    *ptlp = ptl;  in pin_page_for_write()
    76    pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);  in pin_page_for_write()
    79    pte_unmap_unlock(pte, ptl);  in pin_page_for_write()
    84    *ptlp = ptl;  in pin_page_for_write()
    107   spinlock_t *ptl;  in __copy_to_user_memcpy()  local
    110   while (!pin_page_for_write(to, &pte, &ptl)) {  in __copy_to_user_memcpy()
    [all …]
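pin_page_for_write() reports back through *ptlp whichever lock it actually took: mm->page_table_lock for a section mapping (with pte left NULL), or the split PTE lock otherwise, and the caller unlocks accordingly. A sketch of that caller-side release; unpin_page() is an illustrative name:

```c
#include <linux/mm.h>

/* Illustrative release for the pin_page_for_write() contract:
 * a NULL pte means the section-mapping path took the mm-wide lock. */
static void unpin_page(pte_t *pte, spinlock_t *ptl)
{
	if (pte)
		pte_unmap_unlock(pte, ptl);	/* split PTE lock */
	else
		spin_unlock(ptl);		/* mm->page_table_lock */
}
```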
/kernel/linux/linux-5.10/arch/arm/mm/ |
D | fault-armv.c |
    70    static inline void do_pte_lock(spinlock_t *ptl)  in do_pte_lock()  argument
    76    spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);  in do_pte_lock()
    79    static inline void do_pte_unlock(spinlock_t *ptl)  in do_pte_unlock()  argument
    81    spin_unlock(ptl);  in do_pte_unlock()
    84    static inline void do_pte_lock(spinlock_t *ptl) {}  in do_pte_lock()  argument
    85    static inline void do_pte_unlock(spinlock_t *ptl) {}  in do_pte_unlock()  argument
    91    spinlock_t *ptl;  in adjust_pte()  local
    120   ptl = pte_lockptr(vma->vm_mm, pmd);  in adjust_pte()
    122   do_pte_lock(ptl);  in adjust_pte()
    126   do_pte_unlock(ptl);  in adjust_pte()
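adjust_pte() can hold two PTE locks of the same lock class at once (the faulting PTE's and an aliased mapping's), so with split PTE locks do_pte_lock() annotates the second acquisition with SINGLE_DEPTH_NESTING; without the annotation lockdep would report a recursive-lock deadlock. A sketch of the annotation in isolation; the actual ordering of the two locks must still be guaranteed by the caller:

```c
#include <linux/spinlock.h>

/* Illustrative: taking a second lock of the same lock class needs
 * the nested annotation to keep lockdep quiet. */
static void lock_both(spinlock_t *first, spinlock_t *second)
{
	spin_lock(first);
	spin_lock_nested(second, SINGLE_DEPTH_NESTING);
	/* ... update both page tables ... */
	spin_unlock(second);
	spin_unlock(first);
}
```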
/kernel/linux/linux-5.10/arch/powerpc/mm/ |
D | hugetlbpage.c |
    46    unsigned int pshift, spinlock_t *ptl)  in __hugepte_alloc()  argument
    81    spin_lock(ptl);  in __hugepte_alloc()
    101   spin_unlock(ptl);  in __hugepte_alloc()
    118   spinlock_t *ptl;  in huge_pte_alloc()  local
    132   ptl = &mm->page_table_lock;  in huge_pte_alloc()
    142   ptl = pud_lockptr(mm, pu);  in huge_pte_alloc()
    153   ptl = pmd_lockptr(mm, pm);  in huge_pte_alloc()
    160   ptl = &mm->page_table_lock;  in huge_pte_alloc()
    168   ptl = pud_lockptr(mm, pu);  in huge_pte_alloc()
    175   ptl = pmd_lockptr(mm, pm);  in huge_pte_alloc()
    [all …]
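The powerpc huge_pte_alloc() chooses which lock to pass down according to where in the tree the huge PTE actually lives: pmd_lockptr(), pud_lockptr(), or mm->page_table_lock for the top level. A condensed sketch of that selection; lock_for_level() is not a kernel helper, and the real code decides by hugepage size rather than by pointer:

```c
#include <linux/mm.h>

/* Illustrative selection: the lock guarding a huge PTE depends on
 * the page-table level the entry sits at. */
static spinlock_t *lock_for_level(struct mm_struct *mm, pud_t *pud,
				  pmd_t *pmd)
{
	if (pmd)
		return pmd_lockptr(mm, pmd);
	if (pud)
		return pud_lockptr(mm, pud);
	return &mm->page_table_lock;	/* entries above PUD level */
}
```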
/kernel/linux/linux-5.10/arch/s390/mm/ |
D | pgtable.c |
    776   spinlock_t *ptl;  in set_guest_storage_key()  local
    785   ptl = pmd_lock(mm, pmdp);  in set_guest_storage_key()
    787   spin_unlock(ptl);  in set_guest_storage_key()
    799   spin_unlock(ptl);  in set_guest_storage_key()
    802   spin_unlock(ptl);  in set_guest_storage_key()
    804   ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);  in set_guest_storage_key()
    832   pte_unmap_unlock(ptep, ptl);  in set_guest_storage_key()
    877   spinlock_t *ptl;  in reset_guest_reference_bit()  local
    888   ptl = pmd_lock(mm, pmdp);  in reset_guest_reference_bit()
    890   spin_unlock(ptl);  in reset_guest_reference_bit()
    [all …]
D | gmap.c |
    544   spinlock_t *ptl;  in __gmap_link()  local
    600   ptl = pmd_lock(mm, pmd);  in __gmap_link()
    622   spin_unlock(ptl);  in __gmap_link()
    677   spinlock_t *ptl;  in __gmap_zap()  local
    686   ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);  in __gmap_zap()
    689   pte_unmap_unlock(ptep, ptl);  in __gmap_zap()
    851   spinlock_t **ptl)  in gmap_pte_op_walk()  argument
    860   return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);  in gmap_pte_op_walk()
    896   static void gmap_pte_op_end(spinlock_t *ptl)  in gmap_pte_op_end()  argument
    898   if (ptl)  in gmap_pte_op_end()
    [all …]
/kernel/linux/linux-5.10/fs/proc/ |
D | task_mmu.c |
    617   spinlock_t *ptl;  in smaps_pte_range()  local
    619   ptl = pmd_trans_huge_lock(pmd, vma);  in smaps_pte_range()
    620   if (ptl) {  in smaps_pte_range()
    622   spin_unlock(ptl);  in smaps_pte_range()
    633   pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in smaps_pte_range()
    636   pte_unmap_unlock(pte - 1, ptl);  in smaps_pte_range()
    1164  spinlock_t *ptl;  in clear_refs_pte_range()  local
    1167  ptl = pmd_trans_huge_lock(pmd, vma);  in clear_refs_pte_range()
    1168  if (ptl) {  in clear_refs_pte_range()
    1184  spin_unlock(ptl);  in clear_refs_pte_range()
    [all …]
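Note the pte_unmap_unlock(pte - 1, ptl) in smaps_pte_range() (mincore.c and subpage_prot.c use the same trick): the loop's post-increment leaves pte one past the last entry, and the unmap must be given a pointer still inside the mapped PTE page. A sketch of the idiom; the walker name and loop body are illustrative:

```c
#include <linux/mm.h>

/* Illustrative walk: with a post-incremented cursor, unmap via
 * "pte - 1" so the address still lies within the mapped table. */
static void walk_then_unlock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

	for (; addr != end; pte++, addr += PAGE_SIZE) {
		/* ... read *pte under the lock ... */
	}
	pte_unmap_unlock(pte - 1, ptl);	/* pte now points one past the end */
}
```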
/kernel/linux/linux-5.10/Documentation/vm/ |
D | split_page_table_lock.rst |
    63    This field shares storage with page->ptl.
    80    page->ptl
    83    page->ptl is used to access split page table lock, where 'page' is struct
    92    - if size of spinlock_t is bigger than size of long, we use page->ptl as
    100   Please, never access page->ptl directly -- use appropriate helper.
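The documentation's rule is to reach page->ptl only through helpers, since the field is the spinlock_t itself when it fits in an unsigned long and a pointer to a dynamically allocated one otherwise. A sketch of the allocation side under that rule, where pgtable_pte_page_ctor() sets the lock up; alloc_pte_page() is an illustrative name:

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative PTE-page allocation: the ctor initializes the split
 * ptlock behind page->ptl; callers never touch the field directly. */
static struct page *alloc_pte_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {	/* may allocate a spinlock_t */
		__free_page(page);
		return NULL;
	}
	return page;
}
```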
/kernel/linux/linux-5.10/include/linux/ |
D | rmap.h |
    214   spinlock_t *ptl;  member
    223   if (pvmw->ptl)  in page_vma_mapped_walk_done()
    224   spin_unlock(pvmw->ptl);  in page_vma_mapped_walk_done()
D | mm.h |
    554    spinlock_t *ptl;  /* Page table lock.  member
    2051   spinlock_t **ptl);
    2053   spinlock_t **ptl)  in get_locked_pte()  argument
    2056   __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));  in get_locked_pte()
    2191   return page->ptl;  in ptlock_ptr()
    2209   return &page->ptl;  in ptlock_ptr()
    2227   VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);  in ptlock_init()
    2278   #define pte_unmap_unlock(pte, ptl) do { \  argument
    2279   spin_unlock(ptl); \
    2343   spinlock_t *ptl = pmd_lockptr(mm, pmd);  in pmd_lock()  local
    [all …]
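The mm.h fragments above assemble into two small pieces: pmd_lock() is pmd_lockptr() plus spin_lock(), returning the held lock, and pte_unmap_unlock() is the unlock-then-unmap pairing whose first line shows at 2278. Reconstructed, they have this shape in 5.10:

```c
#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);

	spin_lock(ptl);
	return ptl;
}
```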
/kernel/linux/linux-5.10/arch/x86/kernel/ |
D | ldt.c |
    292   spinlock_t *ptl;  in map_ldt_struct()  local
    326   ptep = get_locked_pte(mm, va, &ptl);  in map_ldt_struct()
    339   pte_unmap_unlock(ptep, ptl);  in map_ldt_struct()
    365   spinlock_t *ptl;  in unmap_ldt_struct()  local
    369   ptep = get_locked_pte(mm, va, &ptl);  in unmap_ldt_struct()
    371   pte_unmap_unlock(ptep, ptl);  in unmap_ldt_struct()
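map_ldt_struct() relies on get_locked_pte(), which walks down to the PTE for the given address (allocating intermediate tables as needed) and returns it mapped with its lock held, or NULL on allocation failure. A sketch of the usage pattern; install_pte_at() is an illustrative name:

```c
#include <linux/mm.h>

/* Illustrative use of get_locked_pte(): the PTE comes back mapped and
 * locked, or NULL if a page-table allocation failed. */
static int install_pte_at(struct mm_struct *mm, unsigned long va,
			  pte_t pteval)
{
	spinlock_t *ptl;
	pte_t *ptep = get_locked_pte(mm, va, &ptl);

	if (!ptep)
		return -ENOMEM;
	set_pte_at(mm, va, ptep, pteval);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
```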
/kernel/linux/linux-5.10/arch/m68k/kernel/ |
D | sys_m68k.c |
    474   spinlock_t *ptl;  in sys_atomic_cmpxchg_32()  local
    490   pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);  in sys_atomic_cmpxchg_32()
    493   pte_unmap_unlock(pte, ptl);  in sys_atomic_cmpxchg_32()
    505   pte_unmap_unlock(pte, ptl);  in sys_atomic_cmpxchg_32()
/kernel/linux/linux-5.10/arch/powerpc/mm/book3s64/ |
D | subpage_prot.c |
    61    spinlock_t *ptl;  in hpte_flush_range()  local
    73    pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  in hpte_flush_range()
    81    pte_unmap_unlock(pte - 1, ptl);  in hpte_flush_range()
/kernel/linux/linux-5.10/arch/x86/xen/ |
D | mmu_pv.c |
    647   spinlock_t *ptl = NULL;  in xen_pte_lock()  local
    650   ptl = ptlock_ptr(page);  in xen_pte_lock()
    651   spin_lock_nest_lock(ptl, &mm->page_table_lock);  in xen_pte_lock()
    654   return ptl;  in xen_pte_lock()
    659   spinlock_t *ptl = v;  in xen_pte_unlock()  local
    660   spin_unlock(ptl);  in xen_pte_unlock()
    682   spinlock_t *ptl;  in xen_pin_page()  local
    704   ptl = NULL;  in xen_pin_page()
    706   ptl = xen_pte_lock(page, mm);  in xen_pin_page()
    712   if (ptl) {  in xen_pin_page()
    [all …]
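When Xen pins a whole page-table tree it takes each PTE page's split lock while already holding mm->page_table_lock; spin_lock_nest_lock() records that the outer lock serializes those same-class acquisitions, so lockdep allows an unbounded number of them. A condensed sketch of the core of xen_pte_lock(), assuming USE_SPLIT_PTE_PTLOCKS (the real function compiles to a no-op without it):

```c
#include <linux/mm.h>

/* Condensed from the hits above: take one PTE page's split lock,
 * nested inside the already-held mm->page_table_lock. */
static spinlock_t *pte_lock_nested(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = ptlock_ptr(page);

	spin_lock_nest_lock(ptl, &mm->page_table_lock);
	return ptl;
}
```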