
Searched refs:address (Results 1 – 25 of 25) sorted by relevance

/mm/
pgtable-generic.c
56 unsigned long address, pte_t *ptep, in ptep_set_access_flags() argument
61 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
62 flush_tlb_fix_spurious_fault(vma, address); in ptep_set_access_flags()
70 unsigned long address, pte_t *ptep) in ptep_clear_flush_young() argument
73 young = ptep_test_and_clear_young(vma, address, ptep); in ptep_clear_flush_young()
75 flush_tlb_page(vma, address); in ptep_clear_flush_young()
81 pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, in ptep_clear_flush() argument
86 pte = ptep_get_and_clear(mm, address, ptep); in ptep_clear_flush()
88 flush_tlb_page(vma, address); in ptep_clear_flush()
97 unsigned long address, pmd_t *pmdp, in pmdp_set_access_flags() argument
[all …]
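
The pgtable-generic.c hits are the architecture-independent fallbacks: update the PTE, then flush the TLB only when something actually changed. A minimal sketch of the ptep_clear_flush_young() pattern visible above, using the helpers named in the listing (the sketch_ function name is illustrative, not a kernel symbol):

/*
 * Clear the accessed bit and flush the TLB for this one page only if
 * the bit was actually set, sparing a flush for already-old PTEs.
 * Mirrors the generic ptep_clear_flush_young() shown in the listing.
 */
int sketch_clear_flush_young(struct vm_area_struct *vma,
			     unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young)
		flush_tlb_page(vma, address);

	return young;
}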
rmap.c
690 unsigned long address; in page_address_in_vma() local
705 address = __vma_address(page, vma); in page_address_in_vma()
706 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in page_address_in_vma()
708 return address; in page_address_in_vma()
711 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) in mm_find_pmd() argument
719 pgd = pgd_offset(mm, address); in mm_find_pmd()
723 p4d = p4d_offset(pgd, address); in mm_find_pmd()
727 pud = pud_offset(p4d, address); in mm_find_pmd()
731 pmd = pmd_offset(pud, address); in mm_find_pmd()
755 unsigned long address, void *arg) in page_referenced_one() argument
[all …]
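
mm_find_pmd() in the rmap.c block walks the page-table hierarchy one level at a time. A hedged sketch of that descent, using the *_offset() helpers visible in the listing; the pgd_none()/p4d_none()/pud_none() presence checks are filled in as an assumption since the listing elides them, and the sketch_ name is illustrative:

/*
 * Walk mm's page tables down to the PMD covering 'address', returning
 * NULL when an intermediate level is not populated.  Mirrors the
 * pgd -> p4d -> pud -> pmd descent of mm_find_pmd().
 */
static pmd_t *sketch_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;

	return pmd_offset(pud, address);
}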
khugepaged.c
109 unsigned long address; member
533 unsigned long address, in __collapse_huge_page_isolate() argument
542 _pte++, address += PAGE_SIZE) { in __collapse_huge_page_isolate()
558 page = vm_normal_page(vma, address, pteval); in __collapse_huge_page_isolate()
625 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
648 unsigned long address, in __collapse_huge_page_copy() argument
653 _pte++, page++, address += PAGE_SIZE) { in __collapse_huge_page_copy()
658 clear_user_highpage(page, address); in __collapse_huge_page_copy()
669 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
674 copy_user_highpage(page, src_page, address, vma); in __collapse_huge_page_copy()
[all …]
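
__collapse_huge_page_copy() in the khugepaged.c block steps through the PTEs backing a prospective huge page, advancing the destination sub-page and the user address by PAGE_SIZE on every iteration. A condensed sketch of that loop shape; locking, the zero-pfn case and the isolate/release bookkeeping are deliberately left out, and sketch_collapse_copy is an illustrative name:

/*
 * For each of the HPAGE_PMD_NR small pages, either zero-fill a hole
 * (pte_none) or copy the existing small page into the corresponding
 * sub-page of the new huge page.
 */
static void sketch_collapse_copy(pte_t *pte, struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	pte_t *_pte;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval)) {
			clear_user_highpage(page, address);
		} else {
			struct page *src_page = pte_page(pteval);

			copy_user_highpage(page, src_page, address, vma);
		}
	}
}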
gup.c
139 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, in follow_pfn_pte() argument
154 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
155 update_mmu_cache(vma, address, pte); in follow_pfn_pte()
174 unsigned long address, pmd_t *pmd, unsigned int flags, in follow_page_pte() argument
186 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); in follow_page_pte()
203 migration_entry_wait(mm, pmd, address); in follow_page_pte()
213 page = vm_normal_page(vma, address, pte); in follow_page_pte()
236 ret = follow_pfn_pte(vma, address, ptep, flags); in follow_page_pte()
309 unsigned long address, pud_t *pudp, in follow_pmd_mask() argument
318 pmd = pmd_offset(pudp, address); in follow_pmd_mask()
[all …]
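
follow_page_pte() in the gup.c block maps and locks the PTE for the requested address, waits out migration entries, and resolves a present PTE to its struct page with vm_normal_page(). A stripped-down sketch of that lookup under the same helpers; the FOLL_* flag handling and error paths are omitted and the function name is illustrative:

/*
 * Lock the PTE for 'address' under its page-table lock, bail out on
 * non-present entries, and translate a present PTE into its struct
 * page via vm_normal_page().
 */
static struct page *sketch_follow_page_pte(struct vm_area_struct *vma,
					   unsigned long address, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;

	if (!pte_present(pte)) {
		pte_unmap_unlock(ptep, ptl);
		/* the real code may wait here via migration_entry_wait() */
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	pte_unmap_unlock(ptep, ptl);
	return page;
}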
memory.c
1372 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1380 address, address + size); in zap_page_range_single()
1381 tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end); in zap_page_range_single()
1384 unmap_single_vma(&tlb, vma, address, range.end, details); in zap_page_range_single()
1386 tlb_finish_mmu(&tlb, address, range.end); in zap_page_range_single()
1400 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1403 if (address < vma->vm_start || address + size > vma->vm_end || in zap_vma_ptes()
1407 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
2316 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
2319 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
[all …]
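
zap_vma_ptes() in the memory.c block is a thin validation wrapper around zap_page_range_single(): reject ranges that fall outside the VMA, then tear down the PTEs. A sketch of that wrapper; the VM_PFNMAP part of the condition is reconstructed as an assumption because the listing truncates it:

/*
 * Refuse ranges outside the VMA (and, assumed here, non-PFN-mapped
 * VMAs), then let zap_page_range_single() do the PTE teardown and TLB
 * flush for exactly [address, address + size).
 */
void sketch_zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
			 unsigned long size)
{
	if (address < vma->vm_start || address + size > vma->vm_end ||
	    !(vma->vm_flags & VM_PFNMAP))
		return;

	zap_page_range_single(vma, address, size, NULL);
}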
page_vma_mapped.c
18 pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); in map_pte()
156 pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page)); in page_vma_mapped_walk()
167 pgd = pgd_offset(mm, pvmw->address); in page_vma_mapped_walk()
170 p4d = p4d_offset(pgd, pvmw->address); in page_vma_mapped_walk()
173 pud = pud_offset(p4d, pvmw->address); in page_vma_mapped_walk()
176 pvmw->pmd = pmd_offset(pud, pvmw->address); in page_vma_mapped_walk()
222 pvmw->address += PAGE_SIZE; in page_vma_mapped_walk()
223 if (pvmw->address >= pvmw->vma->vm_end || in page_vma_mapped_walk()
224 pvmw->address >= in page_vma_mapped_walk()
229 if (pvmw->address % PMD_SIZE == 0) { in page_vma_mapped_walk()
[all …]
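
page_vma_mapped_walk() advances pvmw->address one PAGE_SIZE at a time and has to notice when it crosses into a new PMD, because a cached PTE pointer is only valid within one page table. A small sketch of that stepping logic, assuming an end address computed by the caller; the helper name and the crossed_pmd out-parameter are illustrative:

/*
 * Step to the next page: stop at the end of the VMA or of the region
 * being walked, and flag a PMD crossing so the caller re-derives its
 * PTE pointer from the page tables.
 */
static bool sketch_step_address(struct page_vma_mapped_walk *pvmw,
				unsigned long end, bool *crossed_pmd)
{
	pvmw->address += PAGE_SIZE;
	if (pvmw->address >= pvmw->vma->vm_end || pvmw->address >= end)
		return false;	/* walk finished */

	*crossed_pmd = (pvmw->address % PMD_SIZE) == 0;
	return true;
}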
ksm.c
138 unsigned long address; member
204 unsigned long address; /* + low bits used for flags below */ member
536 unsigned long addr = rmap_item->address; in break_cow()
555 unsigned long addr = rmap_item->address; in get_mergeable_page()
646 rmap_item->address &= PAGE_MASK; in remove_node_from_stable_tree()
775 if (rmap_item->address & STABLE_FLAG) { in remove_rmap_item_from_tree()
796 rmap_item->address &= PAGE_MASK; in remove_rmap_item_from_tree()
798 } else if (rmap_item->address & UNSTABLE_FLAG) { in remove_rmap_item_from_tree()
807 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); in remove_rmap_item_from_tree()
813 rmap_item->address &= PAGE_MASK; in remove_rmap_item_from_tree()
[all …]
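
The ksm.c hits show rmap_item->address doing double duty: the address is page aligned, so the bits below PAGE_SHIFT are free to carry state such as STABLE_FLAG and UNSTABLE_FLAG, and masking with PAGE_MASK recovers the clean address. A sketch of that packing trick; the flag values and helper names here are purely illustrative, not KSM's definitions:

/* Illustrative flag values living in the low, always-zero address bits. */
#define SKETCH_UNSTABLE_FLAG	0x100UL
#define SKETCH_STABLE_FLAG	0x200UL

static inline unsigned long sketch_pack_item(unsigned long address,
					     unsigned long flags)
{
	return (address & PAGE_MASK) | flags;
}

static inline unsigned long sketch_item_address(unsigned long packed)
{
	return packed & PAGE_MASK;	/* strip the flag bits */
}

static inline bool sketch_item_is_stable(unsigned long packed)
{
	return packed & SKETCH_STABLE_FLAG;
}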
hugetlb.c
624 struct vm_area_struct *vma, unsigned long address) in vma_hugecache_offset() argument
626 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
631 unsigned long address) in linear_hugepage_index() argument
633 return vma_hugecache_offset(hstate_vma(vma), vma, address); in linear_hugepage_index()
941 unsigned long address, int avoid_reserve, in dequeue_huge_page_vma() argument
964 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in dequeue_huge_page_vma()
1796 unsigned long address) in alloc_huge_page_vma() argument
1805 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in alloc_huge_page_vma()
2082 struct vm_area_struct *vma, unsigned long address, in restore_reserve_on_error() argument
2086 long rc = vma_needs_reservation(h, vma, address); in restore_reserve_on_error()
[all …]
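
vma_hugecache_offset() in the hugetlb.c block turns a user address into an index in huge-page units within the backing mapping. The first term is visible in the listing; the vm_pgoff term is reconstructed here as an assumption (vm_pgoff is kept in small-page units, hence the extra shift), and the sketch_ name is illustrative:

/*
 * Index of 'address' within the mapping, counted in huge pages:
 * distance from vm_start in huge pages plus the mapping's starting
 * offset converted from small-page to huge-page units.
 */
static pgoff_t sketch_hugecache_offset(struct hstate *h,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
		(vma->vm_pgoff >> huge_page_order(h));
}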
huge_memory.c
589 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in __do_huge_pmd_anonymous_page()
606 clear_huge_page(page, vmf->address, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
720 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_pmd_anonymous_page()
826 unsigned long addr = vmf->address & PMD_MASK; in vmf_insert_pfn_pmd()
904 unsigned long addr = vmf->address & PUD_MASK; in vmf_insert_pfn_pud()
1176 haddr = vmf->address & HPAGE_PUD_MASK; in huge_pud_set_accessed()
1178 update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); in huge_pud_set_accessed()
1198 haddr = vmf->address & HPAGE_PMD_MASK; in huge_pmd_set_accessed()
1200 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); in huge_pmd_set_accessed()
1210 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_pmd_wp_page_fallback()
[all …]
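
Every fault path in the huge_memory.c block starts by rounding the fault address down to the boundary of the huge page that covers it, e.g. haddr = vmf->address & HPAGE_PMD_MASK. A worked illustration of that masking, assuming 4 KiB base pages and 2 MiB PMD huge pages (the concrete constants are an x86-64-style example, not taken from the listing):

/*
 * Round a fault address down to the start of its enclosing PMD-sized
 * huge page.  Worked example with 2 MiB huge pages:
 *   vmf->address   = 0x7f1234567abc
 *   HPAGE_PMD_MASK = 0xffffffffffe00000   (~(2 MiB - 1))
 *   result         = 0x7f1234400000       (start of the 2 MiB region)
 */
unsigned long sketch_huge_fault_base(struct vm_fault *vmf)
{
	return vmf->address & HPAGE_PMD_MASK;
}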
mmap.c
2352 int expand_upwards(struct vm_area_struct *vma, unsigned long address) in expand_upwards() argument
2363 address &= PAGE_MASK; in expand_upwards()
2364 if (address >= (TASK_SIZE & PAGE_MASK)) in expand_upwards()
2366 address += PAGE_SIZE; in expand_upwards()
2369 gap_addr = address + stack_guard_gap; in expand_upwards()
2372 if (gap_addr < address || gap_addr > TASK_SIZE) in expand_upwards()
2395 if (address > vma->vm_end) { in expand_upwards()
2398 size = address - vma->vm_start; in expand_upwards()
2399 grow = (address - vma->vm_end) >> PAGE_SHIFT; in expand_upwards()
2421 vma->vm_end = address; in expand_upwards()
[all …]
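
expand_upwards() in the mmap.c block page-aligns the requested address, refuses to grow at or beyond TASK_SIZE, and insists on stack_guard_gap bytes of headroom before the next mapping before it finally raises vm_end. A sketch of just that address arithmetic; the gap_addr clamping body is reconstructed as an assumption where the listing truncates it, and the locking, accounting and VMA updates are omitted:

/*
 * Address checks at the top of expand_upwards(): page-align, add the
 * extra page being faulted in, reject addresses at or above TASK_SIZE,
 * and compute the guard-gap end while watching for overflow.
 */
static int sketch_expand_upwards_checks(unsigned long address)
{
	unsigned long gap_addr;

	address &= PAGE_MASK;
	if (address >= (TASK_SIZE & PAGE_MASK))
		return -ENOMEM;
	address += PAGE_SIZE;

	gap_addr = address + stack_guard_gap;
	if (gap_addr < address || gap_addr > TASK_SIZE)
		gap_addr = TASK_SIZE;	/* assumed clamp on overflow */

	/* the real function then checks the next VMA against gap_addr */
	return 0;
}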
userfaultfd.c
147 static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address) in mm_alloc_pmd() argument
153 pgd = pgd_offset(mm, address); in mm_alloc_pmd()
154 p4d = p4d_alloc(mm, pgd, address); in mm_alloc_pmd()
157 pud = pud_alloc(mm, p4d, address); in mm_alloc_pmd()
165 return pmd_alloc(mm, pud, address); in mm_alloc_pmd()
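
mm_alloc_pmd() is the allocating counterpart of mm_find_pmd(): rather than bailing out at a missing level, it allocates it with the *_alloc() helpers on the way down. A sketch with the NULL checks the listing elides filled back in; that error handling is the standard pattern for these helpers but is stated here as an assumption, and the sketch_ name is illustrative:

/*
 * Allocate any missing intermediate levels and return the PMD for
 * 'address'; a failed allocation propagates up as NULL.
 */
static pmd_t *sketch_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;

	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;

	/* pmd_alloc() itself returns NULL on failure */
	return pmd_alloc(mm, pud, address);
}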
mmu_notifier.c
130 unsigned long address) in __mmu_notifier_test_young() argument
138 young = mn->ops->test_young(mn, mm, address); in __mmu_notifier_test_young()
148 void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, in __mmu_notifier_change_pte() argument
157 mn->ops->change_pte(mn, mm, address, pte); in __mmu_notifier_change_pte()
migrate.c
210 .address = addr, in remove_migration_pte()
223 linear_page_index(vma, pvmw.address); in remove_migration_pte()
257 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
259 hugepage_add_anon_rmap(new, vma, pvmw.address); in remove_migration_pte()
265 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
268 page_add_anon_rmap(new, vma, pvmw.address, false); in remove_migration_pte()
279 update_mmu_cache(vma, pvmw.address, pvmw.pte); in remove_migration_pte()
340 unsigned long address) in migration_entry_wait() argument
343 pte_t *ptep = pte_offset_map(pmd, address); in migration_entry_wait()
2006 unsigned long address, in migrate_misplaced_transhuge_page() argument
[all …]
Kconfig
26 flat address space. The FLATMEM is the most efficient
30 For systems that have holes in their physical address
42 in their physical address spaces, and this option provides
59 holes is their physical address space and allows memory
111 # an extremely sparse physical address space.
185 # page_table_lock, so that faults on different parts of the user address
294 of an application's address space that an app has advised may be
304 int "Low address space to protect from user allocation"
312 For most ia64, ppc64 and x86 users with lots of address space
316 this low address space will need CAP_SYS_RAWIO or disable this
[all …]
swap_state.c
551 unsigned long addr = vmf->address; in swap_cluster_readahead()
661 faddr = vmf->address; in swap_ra_info()
750 vmf->address, &page_allocated); in swap_vma_readahead()
765 return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address, in swap_vma_readahead()
page_idle.c
61 .address = addr, in page_idle_clear_pte_refs_one()
66 addr = pvmw.address; in page_idle_clear_pte_refs_one()
memory-failure.c
267 unsigned long address = vma_address(page, vma); in dev_pagemap_mapping_shift() local
274 pgd = pgd_offset(vma->vm_mm, address); in dev_pagemap_mapping_shift()
277 p4d = p4d_offset(pgd, address); in dev_pagemap_mapping_shift()
280 pud = pud_offset(p4d, address); in dev_pagemap_mapping_shift()
285 pmd = pmd_offset(pud, address); in dev_pagemap_mapping_shift()
290 pte = pte_offset_map(pmd, address); in dev_pagemap_mapping_shift()
nommu.c
124 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
130 *pfn = address >> PAGE_SHIFT; in follow_pfn()
739 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
1641 struct page *follow_page(struct vm_area_struct *vma, unsigned long address, in follow_page() argument
mempolicy.c
1169 unsigned long uninitialized_var(address); in new_page()
1173 address = page_address_in_vma(page, vma); in new_page()
1174 if (address != -EFAULT) in new_page()
1181 vma, address); in new_page()
1185 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, in new_page()
1196 vma, address); in new_page()
internal.h
94 extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
filemap.c
2650 vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT; in filemap_map_pages()
swapfile.c
1941 vmf.address = addr; in unuse_pte_range()
shmem.c
1459 vmf.address = 0; in shmem_swapin()
/mm/kasan/
common.c
132 void kasan_poison_shadow(const void *address, size_t size, u8 value) in kasan_poison_shadow() argument
141 address = reset_tag(address); in kasan_poison_shadow()
143 shadow_start = kasan_mem_to_shadow(address); in kasan_poison_shadow()
144 shadow_end = kasan_mem_to_shadow(address + size); in kasan_poison_shadow()
149 void kasan_unpoison_shadow(const void *address, size_t size) in kasan_unpoison_shadow() argument
151 u8 tag = get_tag(address); in kasan_unpoison_shadow()
158 address = reset_tag(address); in kasan_unpoison_shadow()
160 kasan_poison_shadow(address, size, tag); in kasan_unpoison_shadow()
163 u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size); in kasan_unpoison_shadow()
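
kasan_poison_shadow() translates the start and end of the object into shadow-memory addresses with kasan_mem_to_shadow() and then writes the poison value across that shadow range; kasan_unpoison_shadow() builds on it using the pointer's tag as the value. A sketch of the poison side; the memset over the shadow range is stated as an assumption because the listing stops at the address translation, and the function name is illustrative:

/*
 * Strip the software tag bits, translate the object's start and end
 * into shadow addresses, and fill that shadow range with 'value'.
 * Each shadow byte describes KASAN_SHADOW_SCALE_SIZE bytes of real
 * memory, so 'address' and 'size' are expected to be granule aligned.
 */
void sketch_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	address = reset_tag(address);

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	/* assumed body: the real function uses __memset() here */
	memset(shadow_start, value, shadow_end - shadow_start);
}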
kasan.h
139 void kasan_poison_shadow(const void *address, size_t size, u8 value);