/mm/
D | pgtable-generic.c |
     65  unsigned long address, pte_t *ptep,  in ptep_set_access_flags() argument
     70  set_pte_at(vma->vm_mm, address, ptep, entry);  in ptep_set_access_flags()
     71  flush_tlb_fix_spurious_fault(vma, address);  in ptep_set_access_flags()
     79  unsigned long address, pte_t *ptep)  in ptep_clear_flush_young() argument
     82  young = ptep_test_and_clear_young(vma, address, ptep);  in ptep_clear_flush_young()
     84  flush_tlb_page(vma, address);  in ptep_clear_flush_young()
     90  pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,  in ptep_clear_flush() argument
     95  pte = ptep_get_and_clear(mm, address, ptep);  in ptep_clear_flush()
     97  flush_tlb_page(vma, address);  in ptep_clear_flush()
    106  unsigned long address, pmd_t *pmdp,  in pmdp_set_access_flags() argument
    [all …]

D | rmap.c |
    741  pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)  in mm_find_pmd() argument
    749  pgd = pgd_offset(mm, address);  in mm_find_pmd()
    753  p4d = p4d_offset(pgd, address);  in mm_find_pmd()
    757  pud = pud_offset(p4d, address);  in mm_find_pmd()
    761  pmd = pmd_offset(pud, address);  in mm_find_pmd()
    785  unsigned long address, void *arg)  in page_referenced_one() argument
    791  .address = address,  in page_referenced_one()
    796  address = pvmw.address;  in page_referenced_one()
    806  if (ptep_clear_flush_young_notify(vma, address,  in page_referenced_one()
    820  if (pmdp_clear_flush_young_notify(vma, address,  in page_referenced_one()
    [all …]

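The mm_find_pmd() hits above walk the page-table hierarchy one level at a time (pgd → p4d → pud → pmd), each *_offset() helper indexing the next-level table with a slice of the virtual address. Below is a minimal, userspace-runnable sketch of just that index arithmetic; the shift values and table size are an assumption taken from x86-64 with 4 KiB pages and 4-level paging (P4D folded), not something the listing itself shows.

```c
#include <stdio.h>

/* Assumed x86-64 layout (4 KiB pages, 4-level paging, P4D folded into PGD). */
#define PAGE_SHIFT     12
#define PMD_SHIFT      21          /* each PMD entry maps 2 MiB   */
#define PUD_SHIFT      30          /* each PUD entry maps 1 GiB   */
#define PGDIR_SHIFT    39          /* each PGD entry maps 512 GiB */
#define PTRS_PER_TABLE 512         /* 9 index bits per level      */

static unsigned long table_index(unsigned long address, int shift)
{
	return (address >> shift) & (PTRS_PER_TABLE - 1);
}

int main(void)
{
	unsigned long address = 0x00007f1234567000UL;

	/* Mirrors the pgd_offset()/pud_offset()/pmd_offset() chain in mm_find_pmd(). */
	printf("pgd index: %lu\n", table_index(address, PGDIR_SHIFT));
	printf("pud index: %lu\n", table_index(address, PUD_SHIFT));
	printf("pmd index: %lu\n", table_index(address, PMD_SHIFT));
	printf("pte index: %lu\n", table_index(address, PAGE_SHIFT));
	return 0;
}
```

The same four-step shape shows up again in mm_alloc_pmd() (userfaultfd.c) and dev_pagemap_mapping_shift() (memory-failure.c) further down.
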
D | page_vma_mapped.c |
     18  pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);  in map_pte()
    120  pvmw->address = (pvmw->address + size) & ~(size - 1);  in step_forward()
    121  if (!pvmw->address)  in step_forward()
    122  pvmw->address = ULONG_MAX;  in step_forward()
    169  pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));  in page_vma_mapped_walk()
    188  pvmw->address + PAGE_SIZE;  in page_vma_mapped_walk()
    193  pgd = pgd_offset(mm, pvmw->address);  in page_vma_mapped_walk()
    198  p4d = p4d_offset(pgd, pvmw->address);  in page_vma_mapped_walk()
    203  pud = pud_offset(p4d, pvmw->address);  in page_vma_mapped_walk()
    209  pvmw->pmd = pmd_offset(pud, pvmw->address);  in page_vma_mapped_walk()
    [all …]

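step_forward() above advances the walk to the start of the next size-aligned block, and the `if (!pvmw->address)` check catches the wrap to zero at the top of the address space. A small userspace reproduction of just that arithmetic is sketched below (the standalone function and test values are mine; size is assumed to be a power of two, as it is for page and huge-page sizes).

```c
#include <stdio.h>
#include <limits.h>

/* Advance addr to the start of the next 'size'-aligned block (size must be a
 * power of two), saturating at ULONG_MAX instead of wrapping past the top of
 * the address space -- the same arithmetic step_forward() applies to
 * pvmw->address. */
static unsigned long step_forward(unsigned long addr, unsigned long size)
{
	addr = (addr + size) & ~(size - 1);
	if (!addr)
		addr = ULONG_MAX;
	return addr;
}

int main(void)
{
	printf("%#lx\n", step_forward(0x1800, 0x1000));             /* 0x2000                  */
	printf("%#lx\n", step_forward(0x2000, 0x200000));           /* 0x200000 (next 2M block) */
	printf("%#lx\n", step_forward(ULONG_MAX - 0xfff, 0x1000));  /* saturates to ULONG_MAX   */
	return 0;
}
```
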
D | pgalloc-track.h |
      7  unsigned long address,  in p4d_alloc_track() argument
     11  if (__p4d_alloc(mm, pgd, address))  in p4d_alloc_track()
     16  return p4d_offset(pgd, address);  in p4d_alloc_track()
     20  unsigned long address,  in pud_alloc_track() argument
     24  if (__pud_alloc(mm, p4d, address))  in pud_alloc_track()
     29  return pud_offset(p4d, address);  in pud_alloc_track()
     33  unsigned long address,  in pmd_alloc_track() argument
     37  if (__pmd_alloc(mm, pud, address))  in pmd_alloc_track()
     42  return pmd_offset(pud, address);  in pmd_alloc_track()
     46  #define pte_alloc_kernel_track(pmd, address, mask) \  argument
    [all …]

D | khugepaged.c |
    115  unsigned long address;  member
    600  unsigned long address,  in __collapse_huge_page_isolate() argument
    610  _pte++, address += PAGE_SIZE) {  in __collapse_huge_page_isolate()
    630  page = vm_normal_page(vma, address, pteval);  in __collapse_huge_page_isolate()
    717  mmu_notifier_test_young(vma->vm_mm, address))  in __collapse_huge_page_isolate()
    743  unsigned long address,  in __collapse_huge_page_copy() argument
    750  _pte++, page++, address += PAGE_SIZE) {  in __collapse_huge_page_copy()
    754  clear_user_highpage(page, address);  in __collapse_huge_page_copy()
    765  pte_clear(vma->vm_mm, address, _pte);  in __collapse_huge_page_copy()
    770  copy_user_highpage(page, src_page, address, vma);  in __collapse_huge_page_copy()
    [all …]

D | memory.c |
   1641  static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,  in zap_page_range_single() argument
   1649  address, address + size);  in zap_page_range_single()
   1650  tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);  in zap_page_range_single()
   1653  unmap_single_vma(&tlb, vma, address, range.end, details);  in zap_page_range_single()
   1655  tlb_finish_mmu(&tlb, address, range.end);  in zap_page_range_single()
   1669  void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,  in zap_vma_ptes() argument
   1672  if (address < vma->vm_start || address + size > vma->vm_end ||  in zap_vma_ptes()
   1676  zap_page_range_single(vma, address, size, NULL);  in zap_vma_ptes()
   2651  trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);  in pte_spinlock()
   2663  trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address);  in pte_spinlock()
   [all …]

D | gup.c |
    391  static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,  in follow_pfn_pte() argument
    406  set_pte_at(vma->vm_mm, address, pte, entry);  in follow_pfn_pte()
    407  update_mmu_cache(vma, address, pte);  in follow_pfn_pte()
    426  unsigned long address, pmd_t *pmd, unsigned int flags,  in follow_page_pte() argument
    445  page = follow_huge_pmd_pte(vma, address, flags);  in follow_page_pte()
    455  ptep = pte_offset_map_lock(mm, pmd, address, &ptl);  in follow_page_pte()
    472  migration_entry_wait(mm, pmd, address);  in follow_page_pte()
    482  page = vm_normal_page(vma, address, pte);  in follow_page_pte()
    504  ret = follow_pfn_pte(vma, address, ptep, flags);  in follow_page_pte()
    588  unsigned long address, pud_t *pudp,  in follow_pmd_mask() argument
    [all …]

D | internal.h |
    137  extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
    410  unsigned long address;  in vma_address() local
    415  address = vma->vm_start +  in vma_address()
    418  if (address < vma->vm_start || address >= vma->vm_end)  in vma_address()
    419  address = -EFAULT;  in vma_address()
    423  address = vma->vm_start;  in vma_address()
    425  address = -EFAULT;  in vma_address()
    427  return address;  in vma_address()
    439  unsigned long address;  in vma_address_end() local
    443  address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);  in vma_address_end()
    [all …]

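vma_address() above translates a page's file offset (pgoff) into the user virtual address it occupies inside a VMA, anchoring it against the VMA's own vm_pgoff and returning -EFAULT when the page falls outside the mapping. Here is a self-contained sketch of that arithmetic, with a toy struct standing in for the kernel's struct vm_area_struct; the PAGE_SHIFT of 12 and the field values are assumptions for the demo.

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define EFAULT     14

/* Toy stand-in for the vm_area_struct fields that vma_address() reads. */
struct vm_area {
	unsigned long vm_start;  /* first address of the mapping          */
	unsigned long vm_end;    /* one past the last address             */
	unsigned long vm_pgoff;  /* file offset of vm_start, in pages     */
};

/* Same arithmetic as vma_address() in mm/internal.h: map a file page index to
 * the virtual address it occupies in this VMA, or -EFAULT if the page is not
 * covered by the mapping. */
static unsigned long vma_address(unsigned long pgoff, const struct vm_area *vma)
{
	unsigned long address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	if (address < vma->vm_start || address >= vma->vm_end)
		address = -EFAULT;
	return address;
}

int main(void)
{
	struct vm_area vma = { .vm_start = 0x400000, .vm_end = 0x500000, .vm_pgoff = 16 };

	printf("%#lx\n", vma_address(20, &vma)); /* 4 pages past vm_start: 0x404000 */
	printf("%#lx\n", vma_address(10, &vma)); /* before the mapping: -EFAULT     */
	return 0;
}
```
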
D | ksm.c |
    138  unsigned long address;  member
    204  unsigned long address; /* + low bits used for flags below */  member
    537  unsigned long addr = rmap_item->address;  in break_cow()
    556  unsigned long addr = rmap_item->address;  in get_mergeable_page()
    647  rmap_item->address &= PAGE_MASK;  in remove_node_from_stable_tree()
    776  if (rmap_item->address & STABLE_FLAG) {  in remove_rmap_item_from_tree()
    798  rmap_item->address &= PAGE_MASK;  in remove_rmap_item_from_tree()
    800  } else if (rmap_item->address & UNSTABLE_FLAG) {  in remove_rmap_item_from_tree()
    809  age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);  in remove_rmap_item_from_tree()
    815  rmap_item->address &= PAGE_MASK;  in remove_rmap_item_from_tree()
    [all …]

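As the comment on line 204 notes, rmap_item->address does double duty in ksm.c: the address itself is page aligned, so the low PAGE_SHIFT bits are free to carry state such as STABLE_FLAG and UNSTABLE_FLAG, and `address &= PAGE_MASK` strips them off again. A hedged userspace illustration of that packing trick follows; the flag values here are invented for the demo and are not KSM's actual encodings.

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

/* Demo flag values only -- KSM's real STABLE_FLAG/UNSTABLE_FLAG differ. The
 * point is just that a page-aligned value leaves PAGE_SHIFT low bits unused. */
#define DEMO_STABLE_FLAG   0x1UL
#define DEMO_UNSTABLE_FLAG 0x2UL

int main(void)
{
	unsigned long address = 0x7f0000001000UL;          /* page aligned */
	unsigned long packed  = address | DEMO_STABLE_FLAG;

	if (packed & DEMO_STABLE_FLAG)
		printf("item is on the stable tree\n");

	/* Strip the flag bits to recover the address, like rmap_item->address &= PAGE_MASK. */
	printf("address back: %#lx\n", packed & PAGE_MASK);
	return 0;
}
```
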
D | hugetlb.c |
    802  struct vm_area_struct *vma, unsigned long address)  in vma_hugecache_offset() argument
    804  return ((address - vma->vm_start) >> huge_page_shift(h)) +  in vma_hugecache_offset()
    809  unsigned long address)  in linear_hugepage_index() argument
    811  return vma_hugecache_offset(hstate_vma(vma), vma, address);  in linear_hugepage_index()
   1140  unsigned long address, int avoid_reserve,  in dequeue_huge_page_vma() argument
   1163  nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);  in dequeue_huge_page_vma()
   1991  unsigned long address)  in alloc_huge_page_vma() argument
   2000  node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);  in alloc_huge_page_vma()
   2288  struct vm_area_struct *vma, unsigned long address,  in restore_reserve_on_error() argument
   2292  long rc = vma_needs_reservation(h, vma, address);  in restore_reserve_on_error()
   [all …]

D | huge_memory.c |
    586  unsigned long haddr = vmf->address & HPAGE_PMD_MASK;  in __do_huge_pmd_anonymous_page()
    605  clear_huge_page(page, vmf->address, HPAGE_PMD_NR);  in __do_huge_pmd_anonymous_page()
    716  unsigned long haddr = vmf->address & HPAGE_PMD_MASK;  in do_huge_pmd_anonymous_page()
    835  unsigned long addr = vmf->address & PMD_MASK;  in vmf_insert_pfn_pmd_prot()
    926  unsigned long addr = vmf->address & PUD_MASK;  in vmf_insert_pfn_pud_prot()
   1241  haddr = vmf->address & HPAGE_PUD_MASK;  in huge_pud_set_accessed()
   1243  update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);  in huge_pud_set_accessed()
   1263  haddr = vmf->address & HPAGE_PMD_MASK;  in huge_pmd_set_accessed()
   1265  update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);  in huge_pmd_set_accessed()
   1275  unsigned long haddr = vmf->address & HPAGE_PMD_MASK;  in do_huge_pmd_wp_page()
   [all …]

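Nearly every huge_memory.c hit rounds the faulting address down to a huge-page boundary with `vmf->address & HPAGE_PMD_MASK` (or the PUD equivalent) before operating on the mapping as a whole. The sketch below shows that masking in runnable form; the 2 MiB PMD and 1 GiB PUD sizes are an assumption matching the common x86-64 configuration rather than anything the listing states.

```c
#include <stdio.h>

/* Assumed x86-64 huge-page geometry: 2 MiB PMD pages, 1 GiB PUD pages. */
#define HPAGE_PMD_SHIFT 21
#define HPAGE_PUD_SHIFT 30
#define HPAGE_PMD_MASK  (~((1UL << HPAGE_PMD_SHIFT) - 1))
#define HPAGE_PUD_MASK  (~((1UL << HPAGE_PUD_SHIFT) - 1))

int main(void)
{
	unsigned long address = 0x7f12345678abUL;  /* arbitrary faulting address */

	/* Same rounding as "haddr = vmf->address & HPAGE_PMD_MASK": drop the
	 * offset within the huge page so haddr names the whole mapping. */
	printf("fault address:       %#lx\n", address);
	printf("PMD huge page start: %#lx\n", address & HPAGE_PMD_MASK);
	printf("PUD huge page start: %#lx\n", address & HPAGE_PUD_MASK);
	return 0;
}
```
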
D | mmap.c |
   2520  int expand_upwards(struct vm_area_struct *vma, unsigned long address)  in expand_upwards() argument
   2531  address &= PAGE_MASK;  in expand_upwards()
   2532  if (address >= (TASK_SIZE & PAGE_MASK))  in expand_upwards()
   2534  address += PAGE_SIZE;  in expand_upwards()
   2537  gap_addr = address + stack_guard_gap;  in expand_upwards()
   2540  if (gap_addr < address || gap_addr > TASK_SIZE)  in expand_upwards()
   2562  if (address > vma->vm_end) {  in expand_upwards()
   2565  size = address - vma->vm_start;  in expand_upwards()
   2566  grow = (address - vma->vm_end) >> PAGE_SHIFT;  in expand_upwards()
   2588  vma->vm_end = address;  in expand_upwards()
   [all …]

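The guard-gap test in expand_upwards() above, `gap_addr = address + stack_guard_gap; if (gap_addr < address || gap_addr > TASK_SIZE)`, folds an overflow check into the comparison: if the addition wraps past the top of the address space, gap_addr ends up smaller than address. A small demo of that overflow-checked addition is below; the 256-page gap and the TASK_SIZE value are assumptions (typical defaults for x86-64), not taken from the listing.

```c
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define STACK_GUARD_GAP (256UL << PAGE_SHIFT)   /* assumed default: 256 pages       */
#define TASK_SIZE       0x00007ffffffff000UL    /* assumed x86-64 user-space limit  */

/* Mirrors the guard-gap check in expand_upwards(): reject growth if the gap
 * above the new top of stack would overflow or run past TASK_SIZE. */
static bool guard_gap_ok(unsigned long address)
{
	unsigned long gap_addr = address + STACK_GUARD_GAP;

	if (gap_addr < address || gap_addr > TASK_SIZE)   /* wrapped, or past the user limit */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", guard_gap_ok(0x7ffff0000000UL));       /* plenty of room: 1            */
	printf("%d\n", guard_gap_ok(TASK_SIZE - PAGE_SIZE));  /* gap runs past TASK_SIZE: 0   */
	return 0;
}
```
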
D | userfaultfd.c |
    253  static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)  in mm_alloc_pmd() argument
    259  pgd = pgd_offset(mm, address);  in mm_alloc_pmd()
    260  p4d = p4d_alloc(mm, pgd, address);  in mm_alloc_pmd()
    263  pud = pud_alloc(mm, p4d, address);  in mm_alloc_pmd()
    271  return pmd_alloc(mm, pud, address);  in mm_alloc_pmd()

D | migrate.c |
    189  .address = addr,  in remove_migration_pte()
    202  linear_page_index(vma, pvmw.address);  in remove_migration_pte()
    240  set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);  in remove_migration_pte()
    242  hugepage_add_anon_rmap(new, vma, pvmw.address);  in remove_migration_pte()
    248  set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);  in remove_migration_pte()
    251  page_add_anon_rmap(new, vma, pvmw.address, false);  in remove_migration_pte()
    262  update_mmu_cache(vma, pvmw.address, pvmw.pte);  in remove_migration_pte()
    325  unsigned long address)  in migration_entry_wait() argument
    328  pte_t *ptep = pte_offset_map(pmd, address);  in migration_entry_wait()
   2121  unsigned long address,  in migrate_misplaced_transhuge_page() argument
   [all …]

D | page_idle.c |
     58  .address = addr,  in page_idle_clear_pte_refs_one()
     63  addr = pvmw.address;  in page_idle_clear_pte_refs_one()

D | Kconfig |
     26  flat address space. The FLATMEM is the most efficient
     30  For systems that have holes in their physical address
     42  in their physical address spaces, and this option provides
     59  holes is their physical address space and allows memory
    107  # an extremely sparse physical address space.
    187  # page_table_lock, so that faults on different parts of the user address
    312  of an application's address space that an app has advised may be
    322  int "Low address space to protect from user allocation"
    330  For most ia64, ppc64 and x86 users with lots of address space
    334  this low address space will need CAP_SYS_RAWIO or disable this
    [all …]

D | memory-failure.c |
    295  unsigned long address = vma_address(page, vma);  in dev_pagemap_mapping_shift() local
    302  pgd = pgd_offset(vma->vm_mm, address);  in dev_pagemap_mapping_shift()
    305  p4d = p4d_offset(pgd, address);  in dev_pagemap_mapping_shift()
    308  pud = pud_offset(p4d, address);  in dev_pagemap_mapping_shift()
    313  pmd = pmd_offset(pud, address);  in dev_pagemap_mapping_shift()
    318  pte = pte_offset_map(pmd, address);  in dev_pagemap_mapping_shift()

D | mmu_notifier.c |
    412  unsigned long address)  in __mmu_notifier_test_young() argument
    423  address);  in __mmu_notifier_test_young()
    433  void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,  in __mmu_notifier_change_pte() argument
    444  subscription->ops->change_pte(subscription, mm, address,  in __mmu_notifier_change_pte()

D | swap_state.c |
    666  unsigned long addr = vmf->address;  in swap_cluster_readahead()
    776  faddr = vmf->address;  in swap_ra_info()
    865  vmf->address, &page_allocated);  in swap_vma_readahead()
    880  return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,  in swap_vma_readahead()

/mm/kfence/
D | report.c |
    146  static void print_diff_canary(unsigned long address, size_t bytes_to_show,  in print_diff_canary() argument
    149  const unsigned long show_until_addr = address + bytes_to_show;  in print_diff_canary()
    153  end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr)  in print_diff_canary()
    154  : min(show_until_addr, PAGE_ALIGN(address)));  in print_diff_canary()
    157  for (cur = (const u8 *)address; cur < end; cur++) {  in print_diff_canary()
    173  void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,  in kfence_report_error() argument
    209  const bool left_of_object = address < meta->addr;  in kfence_report_error()
    214  get_access_type(is_write), (void *)address,  in kfence_report_error()
    215  left_of_object ? meta->addr - address : address - meta->addr,  in kfence_report_error()
    223  get_access_type(is_write), (void *)address, object_index);  in kfence_report_error()
    [all …]

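print_diff_canary() above clamps how many canary bytes it dumps: when the faulting access lies left of the KFENCE object it stops at the object's start, otherwise it stops at the end of the page the access landed in, and kfence_report_error() uses the same left/right test to print the distance to the object. A hedged userspace rendition of that clamping logic is sketched below; the helper names and example addresses are mine.

```c
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Same clamping as print_diff_canary(): never dump past the object start when
 * the access is left of it, never dump past the page end when it is right of it. */
static unsigned long canary_dump_end(unsigned long address, size_t bytes_to_show,
				     unsigned long object_addr)
{
	unsigned long show_until_addr = address + bytes_to_show;

	return address < object_addr ? min_ul(show_until_addr, object_addr)
				     : min_ul(show_until_addr, PAGE_ALIGN(address));
}

int main(void)
{
	/* Hypothetical object at 0x100040, bad access 16 bytes to its left. */
	printf("%#lx\n", canary_dump_end(0x100030, 64, 0x100040)); /* clamped to 0x100040 */
	printf("%#lx\n", canary_dump_end(0x100ff0, 64, 0x100040)); /* clamped to 0x101000 */
	return 0;
}
```
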
D | kfence.h |
    101  void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,

/mm/damon/
D | Kconfig |
     28  bool "Data access monitoring primitives for virtual address spaces"
     33  that work for virtual address spaces.
     36  bool "Data access monitoring primitives for the physical address space"
     41  that works for the physical address space.

D | paddr.c |
     25  .address = addr,  in __damon_pa_mkold()
     29  addr = pvmw.address;  in __damon_pa_mkold()
     99  .address = addr,  in __damon_pa_young()
    105  addr = pvmw.address;  in __damon_pa_young()

/mm/kasan/
D | common.c |
     61  void __kasan_unpoison_range(const void *address, size_t size)  in __kasan_unpoison_range() argument
     63  kasan_unpoison(address, size, false);  in __kasan_unpoison_range()
    578  bool __kasan_check_byte(const void *address, unsigned long ip)  in __kasan_check_byte() argument
    580  if (!kasan_byte_accessible(address)) {  in __kasan_check_byte()
    581  kasan_report((unsigned long)address, 1, false, ip);  in __kasan_check_byte()

D | kasan.h |
    443  void kasan_poison_last_granule(const void *address, size_t size);
    447  static inline void kasan_poison_last_granule(const void *address, size_t size) { }  in kasan_poison_last_granule() argument
|