
Searched refs: address (Results 1 – 22 of 22) sorted by relevance

/mm/
pgtable-generic.c
55 unsigned long address, pte_t *ptep, in ptep_set_access_flags() argument
60 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
61 flush_tlb_fix_spurious_fault(vma, address); in ptep_set_access_flags()
69 unsigned long address, pte_t *ptep) in ptep_clear_flush_young() argument
72 young = ptep_test_and_clear_young(vma, address, ptep); in ptep_clear_flush_young()
74 flush_tlb_page(vma, address); in ptep_clear_flush_young()
80 pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, in ptep_clear_flush() argument
85 pte = ptep_get_and_clear(mm, address, ptep); in ptep_clear_flush()
87 flush_tlb_page(vma, address); in ptep_clear_flush()
96 unsigned long address, pmd_t *pmdp, in pmdp_set_access_flags() argument
[all …]
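The pgtable-generic.c hits share one pattern: clear or update a PTE bit for a given address, then flush the TLB entry for that address only when something actually changed. Below is a minimal userspace model of that test-and-clear-then-flush ordering; fake_pte_t, PTE_YOUNG and flush_tlb_page_stub() are hypothetical stand-ins, not kernel APIs.

```c
/*
 * Illustrative userspace model of the "test and clear, then flush only if
 * the bit was set" ordering seen in ptep_clear_flush_young(). fake_pte_t,
 * PTE_YOUNG and flush_tlb_page_stub() are hypothetical stand-ins, not
 * kernel APIs.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t fake_pte_t;
#define PTE_YOUNG (1ULL << 5)	/* stand-in for the hardware accessed bit */

static int tlb_flushes;		/* counts how often we would flush */

static void flush_tlb_page_stub(unsigned long address)
{
	(void)address;		/* a real flush targets just this address */
	tlb_flushes++;
}

/* Clear the accessed bit; flush the stale TLB entry only if it was set. */
static int clear_flush_young(fake_pte_t *ptep, unsigned long address)
{
	int young = !!(*ptep & PTE_YOUNG);

	*ptep &= ~PTE_YOUNG;
	if (young)
		flush_tlb_page_stub(address);
	return young;
}

int main(void)
{
	fake_pte_t pte = PTE_YOUNG;

	printf("young=%d\n", clear_flush_young(&pte, 0x1000));	/* 1, flushes */
	printf("young=%d\n", clear_flush_young(&pte, 0x1000));	/* 0, no flush */
	printf("flushes=%d\n", tlb_flushes);			/* 1 */
	return 0;
}
```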
rmap.c
689 unsigned long address; in page_address_in_vma() local
704 address = __vma_address(page, vma); in page_address_in_vma()
705 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in page_address_in_vma()
707 return address; in page_address_in_vma()
710 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) in mm_find_pmd() argument
718 pgd = pgd_offset(mm, address); in mm_find_pmd()
722 p4d = p4d_offset(pgd, address); in mm_find_pmd()
726 pud = pud_offset(p4d, address); in mm_find_pmd()
730 pmd = pmd_offset(pud, address); in mm_find_pmd()
754 unsigned long address, void *arg) in page_referenced_one() argument
[all …]
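The rmap.c matches revolve around mapping a page back to its user virtual address: __vma_address() derives the address from the page's file offset and the VMA's start and pgoff, and page_address_in_vma() rejects anything outside [vm_start, vm_end). A small userspace sketch of that arithmetic follows, with an illustrative struct and PAGE_SHIFT value rather than the kernel definitions.

```c
/*
 * Minimal userspace sketch of the linear-address computation behind
 * __vma_address()/page_address_in_vma(): map a file page offset back to
 * the user virtual address inside a VMA, with the same range check.
 * The struct and PAGE_SHIFT here are illustrative, not kernel definitions.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

struct fake_vma {
	unsigned long vm_start;	/* first address of the mapping */
	unsigned long vm_end;	/* one past the last address */
	unsigned long vm_pgoff;	/* file offset of vm_start, in pages */
};

/* Returns -1 (standing in for -EFAULT) if the page is outside the VMA. */
static long page_address_in_vma_sketch(const struct fake_vma *vma,
				       unsigned long pgoff)
{
	unsigned long address =
		vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	if (address < vma->vm_start || address >= vma->vm_end)
		return -1;
	return (long)address;
}

int main(void)
{
	struct fake_vma vma = { 0x400000, 0x600000, 0x10 };

	/* file page 0x12 sits two pages past vm_start -> 0x402000 */
	printf("0x%lx\n",
	       (unsigned long)page_address_in_vma_sketch(&vma, 0x12));
	/* file page 0x5 lies before the mapping -> -1 */
	printf("%ld\n", page_address_in_vma_sketch(&vma, 0x5));
	return 0;
}
```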
khugepaged.c
102 unsigned long address; member
502 unsigned long address, in __collapse_huge_page_isolate() argument
511 _pte++, address += PAGE_SIZE) { in __collapse_huge_page_isolate()
527 page = vm_normal_page(vma, address, pteval); in __collapse_huge_page_isolate()
594 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
617 unsigned long address, in __collapse_huge_page_copy() argument
622 _pte++, page++, address += PAGE_SIZE) { in __collapse_huge_page_copy()
627 clear_user_highpage(page, address); in __collapse_huge_page_copy()
638 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
643 copy_user_highpage(page, src_page, address, vma); in __collapse_huge_page_copy()
[all …]
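The khugepaged.c excerpts step one PTE and one PAGE_SIZE of address per loop iteration while copying or clearing the small pages that make up a prospective huge page. A trivial sketch of that stride, assuming the common x86-64 sizes (4 KiB pages, 512 of them per 2 MiB huge page):

```c
/*
 * Sketch of the stride used by __collapse_huge_page_copy(): one PTE and
 * one PAGE_SIZE of address per iteration, across all HPAGE_PMD_NR small
 * pages of the prospective huge page. Sizes are the common x86-64 values
 * and serve only as an illustration.
 */
#include <stdio.h>

#define PAGE_SIZE    4096UL
#define HPAGE_PMD_NR 512UL		/* 512 * 4 KiB = 2 MiB */

int main(void)
{
	unsigned long haddr = 0x200000;	/* hypothetical 2 MiB aligned start */
	unsigned long address = haddr;
	unsigned long i;

	for (i = 0; i < HPAGE_PMD_NR; i++, address += PAGE_SIZE) {
		/* the kernel copies or clears one small page here */
	}
	printf("walked %lu pages, stopped at 0x%lx\n", i, address);
	return 0;
}
```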
gup.c
39 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, in follow_pfn_pte() argument
54 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
55 update_mmu_cache(vma, address, pte); in follow_pfn_pte()
74 unsigned long address, pmd_t *pmd, unsigned int flags) in follow_page_pte() argument
86 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); in follow_page_pte()
103 migration_entry_wait(mm, pmd, address); in follow_page_pte()
113 page = vm_normal_page(vma, address, pte); in follow_page_pte()
136 ret = follow_pfn_pte(vma, address, ptep, flags); in follow_page_pte()
215 unsigned long address, pud_t *pudp, in follow_pmd_mask() argument
223 pmd = pmd_offset(pudp, address); in follow_pmd_mask()
[all …]
memory.c
653 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) in __pte_alloc() argument
656 pgtable_t new = pte_alloc_one(mm, address); in __pte_alloc()
687 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) in __pte_alloc_kernel() argument
689 pte_t *new = pte_alloc_one_kernel(&init_mm, address); in __pte_alloc_kernel()
1640 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1645 unsigned long end = address + size; in zap_page_range_single()
1648 tlb_gather_mmu(&tlb, mm, address, end); in zap_page_range_single()
1650 mmu_notifier_invalidate_range_start(mm, address, end); in zap_page_range_single()
1651 unmap_single_vma(&tlb, vma, address, end, details); in zap_page_range_single()
1652 mmu_notifier_invalidate_range_end(mm, address, end); in zap_page_range_single()
[all …]
hugetlb.c
622 struct vm_area_struct *vma, unsigned long address) in vma_hugecache_offset() argument
624 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
629 unsigned long address) in linear_hugepage_index() argument
631 return vma_hugecache_offset(hstate_vma(vma), vma, address); in linear_hugepage_index()
938 unsigned long address, int avoid_reserve, in dequeue_huge_page_vma() argument
961 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in dequeue_huge_page_vma()
1953 struct vm_area_struct *vma, unsigned long address, in restore_reserve_on_error() argument
1957 long rc = vma_needs_reservation(h, vma, address); in restore_reserve_on_error()
1973 rc = vma_add_reservation(h, vma, address); in restore_reserve_on_error()
1981 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
[all …]
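The hugetlb.c hit at line 622, vma_hugecache_offset(), converts a user address into a huge-page index in the backing file: the offset within the VMA in huge pages plus the VMA's file offset scaled from small to huge pages. A userspace sketch of that computation, assuming 2 MiB huge pages and taking the second (vm_pgoff) term as the usual kernel formula:

```c
/*
 * Userspace sketch of the index arithmetic in vma_hugecache_offset():
 * turn a user address into a huge-page index within the backing file.
 * The 2 MiB page size and the vm_pgoff term follow the usual kernel
 * formula but are assumptions of this sketch.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define HUGE_PAGE_SHIFT 21				/* 2 MiB pages */
#define HUGE_PAGE_ORDER (HUGE_PAGE_SHIFT - PAGE_SHIFT)	/* 9 */

struct fake_vma {
	unsigned long vm_start;
	unsigned long vm_pgoff;	/* offset of vm_start in small pages */
};

static unsigned long vma_hugecache_offset_sketch(const struct fake_vma *vma,
						 unsigned long address)
{
	return ((address - vma->vm_start) >> HUGE_PAGE_SHIFT) +
	       (vma->vm_pgoff >> HUGE_PAGE_ORDER);
}

int main(void)
{
	/* mapping starts 1024 small pages (= 2 huge pages) into the file */
	struct fake_vma vma = { 0x40000000, 1024 };
	unsigned long addr = 0x40000000 + 3 * (1UL << HUGE_PAGE_SHIFT);

	/* 3 huge pages into the VMA + 2 huge pages of file offset = 5 */
	printf("huge page index: %lu\n",
	       vma_hugecache_offset_sketch(&vma, addr));
	return 0;
}
```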
page_vma_mapped.c
18 pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); in map_pte()
155 pvmw->pte = huge_pte_offset(mm, pvmw->address, in page_vma_mapped_walk()
167 pgd = pgd_offset(mm, pvmw->address); in page_vma_mapped_walk()
170 p4d = p4d_offset(pgd, pvmw->address); in page_vma_mapped_walk()
173 pud = pud_offset(p4d, pvmw->address); in page_vma_mapped_walk()
176 pvmw->pmd = pmd_offset(pud, pvmw->address); in page_vma_mapped_walk()
222 pvmw->address += PAGE_SIZE; in page_vma_mapped_walk()
223 if (pvmw->address >= pvmw->vma->vm_end || in page_vma_mapped_walk()
224 pvmw->address >= in page_vma_mapped_walk()
229 if (pvmw->address % PMD_SIZE == 0) { in page_vma_mapped_walk()
[all …]
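page_vma_mapped_walk() advances pvmw->address by PAGE_SIZE and, whenever the address crosses a PMD_SIZE boundary (address % PMD_SIZE == 0), goes back and looks up a fresh pmd. A sketch of that stepping logic with illustrative x86-64 sizes:

```c
/*
 * Sketch of the stepping logic in page_vma_mapped_walk(): advance one
 * PAGE_SIZE at a time and note each time the address crosses a PMD_SIZE
 * boundary, the point where the walk must look up a new pmd. Sizes are
 * the common x86-64 values; the loop body is illustrative only.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PMD_SIZE  (512 * PAGE_SIZE)	/* 2 MiB */

int main(void)
{
	unsigned long address = 0x1ff000;	/* last small page of a PMD */
	unsigned long vm_end  = 0x400000;
	int pmd_reloads = 0;

	for (; address < vm_end; address += PAGE_SIZE) {
		if (address % PMD_SIZE == 0)
			pmd_reloads++;	/* kernel re-derives pvmw->pmd here */
	}
	printf("PMD boundaries crossed: %d\n", pmd_reloads);	/* 1 */
	return 0;
}
```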
ksm.c
122 unsigned long address; member
188 unsigned long address; /* + low bits used for flags below */ member
519 unsigned long addr = rmap_item->address; in break_cow()
538 unsigned long addr = rmap_item->address; in get_mergeable_page()
629 rmap_item->address &= PAGE_MASK; in remove_node_from_stable_tree()
750 if (rmap_item->address & STABLE_FLAG) { in remove_rmap_item_from_tree()
771 rmap_item->address &= PAGE_MASK; in remove_rmap_item_from_tree()
773 } else if (rmap_item->address & UNSTABLE_FLAG) { in remove_rmap_item_from_tree()
782 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); in remove_rmap_item_from_tree()
788 rmap_item->address &= PAGE_MASK; in remove_rmap_item_from_tree()
[all …]
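The ksm.c matches show rmap_item->address doing double duty: the stored address is page aligned, so the low bits are free to carry flags (and a sequence number), and "address & PAGE_MASK" recovers the plain address. A userspace illustration of that low-bit packing; the flag values below are stand-ins, not the kernel's definitions.

```c
/*
 * Illustration of the trick visible in ksm.c: the addresses stored in
 * rmap_item->address are page aligned, so the low bits are free to carry
 * flags, and "address & PAGE_MASK" recovers the plain address. The flag
 * values below are stand-ins, not the kernel's definitions.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

#define ITEM_STABLE_FLAG   0x100UL	/* hypothetical low-bit flag */
#define ITEM_UNSTABLE_FLAG 0x200UL	/* hypothetical low-bit flag */

int main(void)
{
	unsigned long addr = 0x7f0000401000UL;
	unsigned long packed = addr | ITEM_STABLE_FLAG;

	printf("stable flag set: %s\n",
	       (packed & ITEM_STABLE_FLAG) ? "yes" : "no");
	printf("recovered address: 0x%lx\n", packed & PAGE_MASK);
	return 0;
}
```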
huge_memory.c
556 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in __do_huge_pmd_anonymous_page()
574 clear_huge_page(page, vmf->address, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
678 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_pmd_anonymous_page()
1098 haddr = vmf->address & HPAGE_PUD_MASK; in huge_pud_set_accessed()
1100 update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); in huge_pud_set_accessed()
1120 haddr = vmf->address & HPAGE_PMD_MASK; in huge_pmd_set_accessed()
1122 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); in huge_pmd_set_accessed()
1132 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_pmd_wp_page_fallback()
1150 vmf->address, page_to_nid(page)); in do_huge_pmd_wp_page_fallback()
1239 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_pmd_wp_page()
[all …]
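Nearly every huge_memory.c hit starts with "haddr = vmf->address & HPAGE_PMD_MASK", rounding the fault address down to the base of its huge page. A one-liner worth seeing with concrete numbers, assuming 2 MiB PMD-sized pages:

```c
/*
 * The alignment step that recurs throughout huge_memory.c:
 * "haddr = vmf->address & HPAGE_PMD_MASK" rounds a fault address down to
 * the start of its huge page. The 2 MiB size below assumes x86-64.
 */
#include <stdio.h>

#define HPAGE_PMD_SHIFT 21
#define HPAGE_PMD_SIZE  (1UL << HPAGE_PMD_SHIFT)	/* 2 MiB */
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

int main(void)
{
	unsigned long address = 0x7f1234567abcUL;	/* arbitrary fault address */
	unsigned long haddr = address & HPAGE_PMD_MASK;

	printf("fault 0x%lx lies in the huge page at 0x%lx\n",
	       address, haddr);				/* ... 0x7f1234400000 */
	return 0;
}
```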
mmap.c
2259 int expand_upwards(struct vm_area_struct *vma, unsigned long address) in expand_upwards() argument
2270 address &= PAGE_MASK; in expand_upwards()
2271 if (address >= (TASK_SIZE & PAGE_MASK)) in expand_upwards()
2273 address += PAGE_SIZE; in expand_upwards()
2276 gap_addr = address + stack_guard_gap; in expand_upwards()
2279 if (gap_addr < address || gap_addr > TASK_SIZE) in expand_upwards()
2302 if (address > vma->vm_end) { in expand_upwards()
2305 size = address - vma->vm_start; in expand_upwards()
2306 grow = (address - vma->vm_end) >> PAGE_SHIFT; in expand_upwards()
2328 vma->vm_end = address; in expand_upwards()
[all …]
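expand_upwards() page-aligns the requested address, steps one page past it, and then insists that address plus stack_guard_gap neither wraps around zero nor runs past TASK_SIZE. A userspace sketch of that overflow-safe check, with an assumed 64-bit TASK_SIZE and the default 256-page guard gap:

```c
/*
 * Sketch of the overflow-safe guard-gap check in expand_upwards(): the
 * new top of stack plus the guard gap must neither wrap around zero nor
 * reach past the end of the user address space. TASK_SIZE and the
 * 256-page gap are assumed values for a 64-bit layout.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define TASK_SIZE       0x7ffffffff000UL	/* assumed user-space limit */
#define STACK_GUARD_GAP (256UL << PAGE_SHIFT)	/* assumed default: 1 MiB */

/* Returns 1 if growing the stack up to 'address' would be allowed. */
static int guard_gap_ok(unsigned long address)
{
	unsigned long gap_addr;

	address &= PAGE_MASK;
	if (address >= (TASK_SIZE & PAGE_MASK))
		return 0;
	address += PAGE_SIZE;

	gap_addr = address + STACK_GUARD_GAP;
	if (gap_addr < address || gap_addr > TASK_SIZE)	/* wrap or overrun */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", guard_gap_ok(0x7ffff0000000UL));	/* 1: room for the gap */
	printf("%d\n", guard_gap_ok(0x7fffffffe000UL));	/* 0: gap would overrun */
	printf("%d\n", guard_gap_ok(0xffffffffffffe000UL)); /* 0: beyond TASK_SIZE */
	return 0;
}
```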
mmu_notifier.c
145 unsigned long address) in __mmu_notifier_test_young() argument
153 young = mn->ops->test_young(mn, mm, address); in __mmu_notifier_test_young()
163 void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, in __mmu_notifier_change_pte() argument
172 mn->ops->change_pte(mn, mm, address, pte); in __mmu_notifier_change_pte()
userfaultfd.c
150 static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address) in mm_alloc_pmd() argument
156 pgd = pgd_offset(mm, address); in mm_alloc_pmd()
157 p4d = p4d_alloc(mm, pgd, address); in mm_alloc_pmd()
160 pud = pud_alloc(mm, p4d, address); in mm_alloc_pmd()
168 return pmd_alloc(mm, pud, address); in mm_alloc_pmd()
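mm_alloc_pmd() walks (and allocates) pgd -> p4d -> pud -> pmd for a given address; each level simply indexes its table with a 9-bit slice of the virtual address. A sketch of that index extraction, assuming x86-64 shift values with 4-level paging (p4d folded into the pgd):

```c
/*
 * What a pgd -> p4d -> pud -> pmd descent like mm_alloc_pmd() keys on:
 * each level's table index is a 9-bit slice of the virtual address.
 * The shift values assume x86-64 with 4-level paging (p4d folded into
 * the pgd) and are for illustration only.
 */
#include <stdio.h>

#define PAGE_SHIFT   12
#define PMD_SHIFT    21
#define PUD_SHIFT    30
#define PGDIR_SHIFT  39
#define PTRS_PER_TBL 512UL		/* 9 index bits per level */

static unsigned long tbl_index(unsigned long address, int shift)
{
	return (address >> shift) & (PTRS_PER_TBL - 1);
}

int main(void)
{
	unsigned long address = 0x7f1234567abcUL;

	printf("pgd %lu, pud %lu, pmd %lu, pte %lu\n",
	       tbl_index(address, PGDIR_SHIFT),
	       tbl_index(address, PUD_SHIFT),
	       tbl_index(address, PMD_SHIFT),
	       tbl_index(address, PAGE_SHIFT));
	return 0;
}
```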
migrate.c
208 .address = addr, in remove_migration_pte()
221 linear_page_index(vma, pvmw.address); in remove_migration_pte()
257 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
259 hugepage_add_anon_rmap(new, vma, pvmw.address); in remove_migration_pte()
265 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
268 page_add_anon_rmap(new, vma, pvmw.address, false); in remove_migration_pte()
279 update_mmu_cache(vma, pvmw.address, pvmw.pte); in remove_migration_pte()
343 unsigned long address) in migration_entry_wait() argument
346 pte_t *ptep = pte_offset_map(pmd, address); in migration_entry_wait()
2026 unsigned long address, in migrate_misplaced_transhuge_page() argument
[all …]
Kconfig
37 in their physical address spaces, and this option provides
39 majority of hardware has quite flat address spaces, and
109 # an extremely sparse physical address space.
193 # page_table_lock, so that faults on different parts of the user address
283 # a 32-bit address to OHCI. So we need to use a bounce pool instead.
310 of an application's address space that an app has advised may be
320 int "Low address space to protect from user allocation"
328 For most ia64, ppc64 and x86 users with lots of address space
332 this low address space will need CAP_SYS_RAWIO or disable this
637 address minus the given value, unless the RLIMIT_STACK hard limit is
page_idle.c
61 .address = addr, in page_idle_clear_pte_refs_one()
66 addr = pvmw.address; in page_idle_clear_pte_refs_one()
swap_state.c
672 faddr = vmf->address; in swap_readahead_detect()
745 vmf->address, &page_allocated); in do_swap_page_readahead()
761 return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address, in do_swap_page_readahead()
mempolicy.c
1139 unsigned long uninitialized_var(address); in new_page()
1143 address = page_address_in_vma(page, vma); in new_page()
1144 if (address != -EFAULT) in new_page()
1151 return alloc_huge_page_noerr(vma, address, 1); in new_page()
1155 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, in new_page()
1166 vma, address); in new_page()
nommu.c
211 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
217 *pfn = address >> PAGE_SHIFT; in follow_pfn()
852 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
1746 unsigned long address, unsigned int flags, in follow_page_mask() argument
internal.h
98 extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
slab.c
4281 static void show_symbol(struct seq_file *m, unsigned long address) in show_symbol() argument
4287 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) { in show_symbol()
4294 seq_printf(m, "%px", (void *)address); in show_symbol()
filemap.c
2671 vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT; in filemap_map_pages()
/mm/kasan/
kasan.c
57 static void kasan_poison_shadow(const void *address, size_t size, u8 value) in kasan_poison_shadow() argument
61 shadow_start = kasan_mem_to_shadow(address); in kasan_poison_shadow()
62 shadow_end = kasan_mem_to_shadow(address + size); in kasan_poison_shadow()
67 void kasan_unpoison_shadow(const void *address, size_t size) in kasan_unpoison_shadow() argument
69 kasan_poison_shadow(address, size, 0); in kasan_unpoison_shadow()
72 u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size); in kasan_unpoison_shadow()
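kasan_poison_shadow() converts the start and end of a memory range into shadow addresses (one shadow byte per 8 bytes of memory, i.e. a shift by KASAN_SHADOW_SCALE_SHIFT plus a fixed offset) and fills that shadow span with a value. A userspace model follows, using a tiny array as the stand-in shadow region.

```c
/*
 * Userspace model of the shadow arithmetic behind kasan_poison_shadow():
 * one shadow byte tracks 8 bytes of memory, so a shadow address is the
 * original address scaled down by KASAN_SHADOW_SCALE_SHIFT plus a fixed
 * offset. The tiny array-based "shadow" here is a stand-in for that
 * offsetted region.
 */
#include <stdio.h>
#include <string.h>

#define KASAN_SHADOW_SCALE_SHIFT 3	/* 8 bytes of memory per shadow byte */

static unsigned char shadow[64];	/* covers 512 bytes of fake memory */

static unsigned char *mem_to_shadow(unsigned long address)
{
	return &shadow[address >> KASAN_SHADOW_SCALE_SHIFT];
}

/* Mirror of the poison loop: fill [shadow(addr), shadow(addr + size)). */
static void poison_shadow(unsigned long address, size_t size,
			  unsigned char value)
{
	unsigned char *start = mem_to_shadow(address);
	unsigned char *end = mem_to_shadow(address + size);

	memset(start, value, (size_t)(end - start));
}

int main(void)
{
	poison_shadow(64, 128, 0xfa);	/* poison fake bytes 64..191 */

	/* 128 bytes of memory map onto 16 shadow bytes: indices 8..23 */
	printf("shadow[7]=%#x shadow[8]=%#x shadow[23]=%#x shadow[24]=%#x\n",
	       shadow[7], shadow[8], shadow[23], shadow[24]);
	return 0;
}
```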