Searched refs:address (Results 1 – 25 of 30) sorted by relevance

/mm/
pgtable-generic.c
65 unsigned long address, pte_t *ptep, in ptep_set_access_flags() argument
70 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
71 flush_tlb_fix_spurious_fault(vma, address); in ptep_set_access_flags()
79 unsigned long address, pte_t *ptep) in ptep_clear_flush_young() argument
82 young = ptep_test_and_clear_young(vma, address, ptep); in ptep_clear_flush_young()
84 flush_tlb_page(vma, address); in ptep_clear_flush_young()
90 pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, in ptep_clear_flush() argument
95 pte = ptep_get_and_clear(mm, address, ptep); in ptep_clear_flush()
97 flush_tlb_page(vma, address); in ptep_clear_flush()
106 unsigned long address, pmd_t *pmdp, in pmdp_set_access_flags() argument
[all …]
rmap.c
745 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) in mm_find_pmd() argument
753 pgd = pgd_offset(mm, address); in mm_find_pmd()
757 p4d = p4d_offset(pgd, address); in mm_find_pmd()
761 pud = pud_offset(p4d, address); in mm_find_pmd()
765 pmd = pmd_offset(pud, address); in mm_find_pmd()
789 unsigned long address, void *arg) in page_referenced_one() argument
795 .address = address, in page_referenced_one()
800 address = pvmw.address; in page_referenced_one()
814 if (ptep_clear_flush_young_notify(vma, address, in page_referenced_one()
818 if (pmdp_clear_flush_young_notify(vma, address, in page_referenced_one()
[all …]
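
Note: the mm_find_pmd() hits above follow the standard descending page-table lookup, pgd -> p4d -> pud -> pmd. A minimal sketch of that walk, assuming a kernel build context and the generic pgtable helpers; the function name and error handling are illustrative only, not the kernel's actual implementation:

    /* Sketch: find the pmd entry covering 'address', or NULL if a level is missing. */
    static pmd_t *find_pmd_sketch(struct mm_struct *mm, unsigned long address)
    {
            pgd_t *pgd = pgd_offset(mm, address);   /* per-mm top-level table */
            p4d_t *p4d;
            pud_t *pud;

            if (!pgd_present(*pgd))
                    return NULL;
            p4d = p4d_offset(pgd, address);         /* folded away on 4-level configs */
            if (!p4d_present(*p4d))
                    return NULL;
            pud = pud_offset(p4d, address);
            if (!pud_present(*pud))
                    return NULL;
            return pmd_offset(pud, address);        /* caller must still validate *pmd */
    }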
pgalloc-track.h
7 unsigned long address, in p4d_alloc_track() argument
11 if (__p4d_alloc(mm, pgd, address)) in p4d_alloc_track()
16 return p4d_offset(pgd, address); in p4d_alloc_track()
20 unsigned long address, in pud_alloc_track() argument
24 if (__pud_alloc(mm, p4d, address)) in pud_alloc_track()
29 return pud_offset(p4d, address); in pud_alloc_track()
33 unsigned long address, in pmd_alloc_track() argument
37 if (__pmd_alloc(mm, pud, address)) in pmd_alloc_track()
42 return pmd_offset(pud, address); in pmd_alloc_track()
46 #define pte_alloc_kernel_track(pmd, address, mask) \ argument
[all …]
page_vma_mapped.c
18 pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); in map_pte()
124 pvmw->address = (pvmw->address + size) & ~(size - 1); in step_forward()
125 if (!pvmw->address) in step_forward()
126 pvmw->address = ULONG_MAX; in step_forward()
173 pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page)); in page_vma_mapped_walk()
192 pvmw->address + PAGE_SIZE; in page_vma_mapped_walk()
197 pgd = pgd_offset(mm, pvmw->address); in page_vma_mapped_walk()
202 p4d = p4d_offset(pgd, pvmw->address); in page_vma_mapped_walk()
207 pud = pud_offset(p4d, pvmw->address); in page_vma_mapped_walk()
213 pvmw->pmd = pmd_offset(pud, pvmw->address); in page_vma_mapped_walk()
[all …]
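
Note: page_vma_mapped_walk() above is the engine behind several later hits (page_idle.c, migrate.c, damon/paddr.c), all of which use the same caller idiom: seed a struct page_vma_mapped_walk with the page, vma and start address, then loop while the walker reports mappings. A hedged sketch of that idiom using the pre-folio field names seen in these hits; the function name is illustrative:

    /* Sketch: test and clear the accessed bit on every mapping of 'page' in 'vma'. */
    static bool page_referenced_sketch(struct page *page, struct vm_area_struct *vma,
                                       unsigned long address)
    {
            struct page_vma_mapped_walk pvmw = {
                    .page = page,
                    .vma = vma,
                    .address = address,
            };
            bool referenced = false;

            while (page_vma_mapped_walk(&pvmw)) {
                    address = pvmw.address;         /* walker advances this per mapping */
                    if (pvmw.pte)                   /* mapped by a normal pte */
                            referenced |= ptep_clear_flush_young_notify(vma, address, pvmw.pte);
                    else                            /* mapped by a huge pmd */
                            referenced |= pmdp_clear_flush_young_notify(vma, address, pvmw.pmd);
            }
            return referenced;
    }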
khugepaged.c
117 unsigned long address; member
602 unsigned long address, in __collapse_huge_page_isolate() argument
612 _pte++, address += PAGE_SIZE) { in __collapse_huge_page_isolate()
632 page = vm_normal_page(vma, address, pteval); in __collapse_huge_page_isolate()
719 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
745 unsigned long address, in __collapse_huge_page_copy() argument
752 _pte++, page++, address += PAGE_SIZE) { in __collapse_huge_page_copy()
756 clear_user_highpage(page, address); in __collapse_huge_page_copy()
767 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
772 copy_user_highpage(page, src_page, address, vma); in __collapse_huge_page_copy()
[all …]
memory.c
764 struct page *page, unsigned long address, in restore_exclusive_pte() argument
780 set_pte_at(vma->vm_mm, address, ptep, pte); in restore_exclusive_pte()
787 page_add_anon_rmap(page, vma, address, false); in restore_exclusive_pte()
802 update_mmu_cache(vma, address, ptep); in restore_exclusive_pte()
1746 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1754 address, address + size); in zap_page_range_single()
1758 unmap_single_vma(&tlb, vma, address, range.end, details); in zap_page_range_single()
1774 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1777 if (address < vma->vm_start || address + size > vma->vm_end || in zap_vma_ptes()
1781 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
[all …]
gup.c
464 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, in follow_pfn_pte() argument
479 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
480 update_mmu_cache(vma, address, pte); in follow_pfn_pte()
499 unsigned long address, pmd_t *pmd, unsigned int flags, in follow_page_pte() argument
518 page = follow_huge_pmd_pte(vma, address, flags); in follow_page_pte()
528 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); in follow_page_pte()
545 migration_entry_wait(mm, pmd, address); in follow_page_pte()
555 page = vm_normal_page(vma, address, pte); in follow_page_pte()
577 ret = follow_pfn_pte(vma, address, ptep, flags); in follow_page_pte()
649 unsigned long address, pud_t *pudp, in follow_pmd_mask() argument
[all …]
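
Note: follow_page_pte() above pairs pte_offset_map_lock() with pte_unmap_unlock(); the pte is only stable while the page-table lock returned through the last argument is held. A minimal sketch of that map-lock/inspect/unlock pattern (hypothetical helper; assumes the pmd has already been validated):

    /* Sketch: snapshot the pte for 'address' under its page-table lock. */
    static pte_t read_pte_sketch(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
    {
            spinlock_t *ptl;
            pte_t *ptep, pte;

            ptep = pte_offset_map_lock(mm, pmd, address, &ptl); /* map the pte page, take ptl */
            pte = *ptep;                                        /* read while the lock is held */
            pte_unmap_unlock(ptep, ptl);                        /* drop the lock and unmap */
            return pte;
    }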
internal.h
117 extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
398 unsigned long address; in vma_address() local
403 address = vma->vm_start + in vma_address()
406 if (address < vma->vm_start || address >= vma->vm_end) in vma_address()
407 address = -EFAULT; in vma_address()
411 address = vma->vm_start; in vma_address()
413 address = -EFAULT; in vma_address()
415 return address; in vma_address()
427 unsigned long address; in vma_address_end() local
431 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_address_end()
[all …]
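
Note: vma_address() above converts a page's file offset back to a user virtual address as vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT), then returns -EFAULT if the result falls outside the VMA. A worked example with made-up numbers (4 KiB pages, PAGE_SHIFT = 12):

    vm_start = 0x7f1200000000, vm_pgoff = 0x10, page pgoff = 0x13
    address  = 0x7f1200000000 + ((0x13 - 0x10) << 12) = 0x7f1200003000
    (if 0x7f1200003000 were outside [vm_start, vm_end), the function would return -EFAULT)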
ksm.c
138 unsigned long address; member
204 unsigned long address; /* + low bits used for flags below */ member
533 unsigned long addr = rmap_item->address; in break_cow()
552 unsigned long addr = rmap_item->address; in get_mergeable_page()
643 rmap_item->address &= PAGE_MASK; in remove_node_from_stable_tree()
770 if (rmap_item->address & STABLE_FLAG) { in remove_rmap_item_from_tree()
792 rmap_item->address &= PAGE_MASK; in remove_rmap_item_from_tree()
794 } else if (rmap_item->address & UNSTABLE_FLAG) { in remove_rmap_item_from_tree()
803 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); in remove_rmap_item_from_tree()
809 rmap_item->address &= PAGE_MASK; in remove_rmap_item_from_tree()
[all …]
hugetlb.c
804 struct vm_area_struct *vma, unsigned long address) in vma_hugecache_offset() argument
806 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
811 unsigned long address) in linear_hugepage_index() argument
813 return vma_hugecache_offset(hstate_vma(vma), vma, address); in linear_hugepage_index()
1147 unsigned long address, int avoid_reserve, in dequeue_huge_page_vma() argument
1170 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in dequeue_huge_page_vma()
2203 unsigned long address) in alloc_huge_page_vma() argument
2212 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in alloc_huge_page_vma()
2527 unsigned long address, struct page *page) in restore_reserve_on_error() argument
2529 long rc = vma_needs_reservation(h, vma, address); in restore_reserve_on_error()
[all …]
huge_memory.c
601 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in __do_huge_pmd_anonymous_page()
620 clear_huge_page(page, vmf->address, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
654 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in __do_huge_pmd_anonymous_page()
729 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_pmd_anonymous_page()
767 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_anonymous_page()
849 unsigned long addr = vmf->address & PMD_MASK; in vmf_insert_pfn_pmd_prot()
940 unsigned long addr = vmf->address & PUD_MASK; in vmf_insert_pfn_pud_prot()
1252 haddr = vmf->address & HPAGE_PUD_MASK; in huge_pud_set_accessed()
1254 update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); in huge_pud_set_accessed()
1275 haddr = vmf->address & HPAGE_PMD_MASK; in huge_pmd_set_accessed()
[all …]
mmap.c
2399 int expand_upwards(struct vm_area_struct *vma, unsigned long address) in expand_upwards() argument
2410 address &= PAGE_MASK; in expand_upwards()
2411 if (address >= (TASK_SIZE & PAGE_MASK)) in expand_upwards()
2413 address += PAGE_SIZE; in expand_upwards()
2416 gap_addr = address + stack_guard_gap; in expand_upwards()
2419 if (gap_addr < address || gap_addr > TASK_SIZE) in expand_upwards()
2441 if (address > vma->vm_end) { in expand_upwards()
2444 size = address - vma->vm_start; in expand_upwards()
2445 grow = (address - vma->vm_end) >> PAGE_SHIFT; in expand_upwards()
2467 vma->vm_end = address; in expand_upwards()
[all …]
userfaultfd.c
261 static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address) in mm_alloc_pmd() argument
267 pgd = pgd_offset(mm, address); in mm_alloc_pmd()
268 p4d = p4d_alloc(mm, pgd, address); in mm_alloc_pmd()
271 pud = pud_alloc(mm, p4d, address); in mm_alloc_pmd()
279 return pmd_alloc(mm, pud, address); in mm_alloc_pmd()
page_idle.c
54 .address = addr, in page_idle_clear_pte_refs_one()
59 addr = pvmw.address; in page_idle_clear_pte_refs_one()
mmu_notifier.c
406 unsigned long address) in __mmu_notifier_test_young() argument
417 address); in __mmu_notifier_test_young()
427 void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, in __mmu_notifier_change_pte() argument
438 subscription->ops->change_pte(subscription, mm, address, in __mmu_notifier_change_pte()
swap_state.c
625 unsigned long addr = vmf->address; in swap_cluster_readahead()
732 faddr = vmf->address; in swap_ra_info()
818 vmf->address, &page_allocated); in swap_vma_readahead()
833 return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address, in swap_vma_readahead()
migrate.c
182 .address = addr, in remove_migration_pte()
195 linear_page_index(vma, pvmw.address); in remove_migration_pte()
240 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
242 hugepage_add_anon_rmap(new, vma, pvmw.address); in remove_migration_pte()
248 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
251 page_add_anon_rmap(new, vma, pvmw.address, false); in remove_migration_pte()
262 update_mmu_cache(vma, pvmw.address, pvmw.pte); in remove_migration_pte()
324 unsigned long address) in migration_entry_wait() argument
327 pte_t *ptep = pte_offset_map(pmd, address); in migration_entry_wait()
memory-failure.c
309 unsigned long address = vma_address(page, vma); in dev_pagemap_mapping_shift() local
317 pgd = pgd_offset(vma->vm_mm, address); in dev_pagemap_mapping_shift()
320 p4d = p4d_offset(pgd, address); in dev_pagemap_mapping_shift()
323 pud = pud_offset(p4d, address); in dev_pagemap_mapping_shift()
328 pmd = pmd_offset(pud, address); in dev_pagemap_mapping_shift()
333 pte = pte_offset_map(pmd, address); in dev_pagemap_mapping_shift()
Kconfig
25 flat address space. The FLATMEM is the most efficient
29 For systems that have holes in their physical address
43 holes is their physical address space and allows memory
74 # an extremely sparse physical address space.
165 # page_table_lock, so that faults on different parts of the user address
303 of an application's address space that an app has advised may be
313 int "Low address space to protect from user allocation"
321 For most ia64, ppc64 and x86 users with lots of address space
325 this low address space will need CAP_SYS_RAWIO or disable this
/mm/kfence/
report.c
153 static void print_diff_canary(unsigned long address, size_t bytes_to_show, in print_diff_canary() argument
156 const unsigned long show_until_addr = address + bytes_to_show; in print_diff_canary()
160 end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr) in print_diff_canary()
161 : min(show_until_addr, PAGE_ALIGN(address))); in print_diff_canary()
164 for (cur = (const u8 *)address; cur < end; cur++) { in print_diff_canary()
180 void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs, in kfence_report_error() argument
216 const bool left_of_object = address < meta->addr; in kfence_report_error()
221 get_access_type(is_write), (void *)address, in kfence_report_error()
222 left_of_object ? meta->addr - address : address - meta->addr, in kfence_report_error()
230 get_access_type(is_write), (void *)address, object_index); in kfence_report_error()
[all …]
kfence.h
129 void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
/mm/kasan/
common.c
63 void __kasan_unpoison_range(const void *address, size_t size) in __kasan_unpoison_range() argument
65 kasan_unpoison(address, size, false); in __kasan_unpoison_range()
437 bool __kasan_check_byte(const void *address, unsigned long ip) in __kasan_check_byte() argument
439 if (!kasan_byte_accessible(address)) { in __kasan_check_byte()
440 kasan_report((unsigned long)address, 1, false, ip); in __kasan_check_byte()
kasan.h
539 void kasan_poison_last_granule(const void *address, size_t size);
543 static inline void kasan_poison_last_granule(const void *address, size_t size) { } in kasan_poison_last_granule() argument
/mm/damon/
Kconfig
28 bool "Data access monitoring primitives for virtual address spaces"
33 that work for virtual address spaces.
36 bool "Data access monitoring primitives for the physical address space"
41 that works for the physical address space.
paddr.c
25 .address = addr, in __damon_pa_mkold()
29 addr = pvmw.address; in __damon_pa_mkold()
99 .address = addr, in __damon_pa_young()
105 addr = pvmw.address; in __damon_pa_young()
