/mm/kasan/
kasan.h
    208  static inline bool addr_has_metadata(const void *addr)
    210  return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
    221  bool kasan_check_range(unsigned long addr, size_t size, bool write,
    226  static inline bool addr_has_metadata(const void *addr)
    228  return (is_vmalloc_addr(addr) || virt_addr_valid(addr));
    234  void kasan_print_tags(u8 addr_tag, const void *addr);
    236  static inline void kasan_print_tags(u8 addr_tag, const void *addr) { }
    239  void *kasan_find_first_bad_addr(void *addr, size_t size);
    244  void kasan_print_address_stack_frame(const void *addr);
    246  static inline void kasan_print_address_stack_frame(const void *addr) { }
    [all …]
init.c
    92  static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
    95  pte_t *pte = pte_offset_kernel(pmd, addr);
    102  while (addr + PAGE_SIZE <= end) {
    103  set_pte_at(&init_mm, addr, pte, zero_pte);
    104  addr += PAGE_SIZE;
    105  pte = pte_offset_kernel(pmd, addr);
    109  static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
    112  pmd_t *pmd = pmd_offset(pud, addr);
    116  next = pmd_addr_end(addr, end);
    118  if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
    [all …]
generic.c
    43  static __always_inline bool memory_is_poisoned_1(unsigned long addr)
    45  s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
    48  s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
    55  static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
    58  u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);
    64  if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
    65  return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
    67  return memory_is_poisoned_1(addr + size - 1);
    70  static __always_inline bool memory_is_poisoned_16(unsigned long addr)
    72  u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
    [all …]
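The generic.c checks above read one signed shadow byte per 8-byte granule: 0 means the whole granule is addressable, a value 1..7 means only that many leading bytes are, and negative values mark poisoned memory. A minimal user-space sketch of the memory_is_poisoned_1() logic follows; the shadow array, the mem_to_shadow() mapping and the example addresses are assumptions made for illustration, not the kernel's real shadow layout.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GRANULE_SHIFT 3                 /* 8-byte KASAN granules */
    #define GRANULE_MASK  ((1UL << GRANULE_SHIFT) - 1)

    /* Toy shadow: one signed byte per granule of a 64-byte region.
     * 0 = fully accessible, 1..7 = only the first N bytes valid,
     * negative = poisoned. */
    static int8_t shadow[8] = { 0, 0, 5, -1, 0, 0, 0, 0 };

    static int8_t *mem_to_shadow(unsigned long addr)
    {
        return &shadow[addr >> GRANULE_SHIFT];
    }

    /* Same shape as memory_is_poisoned_1(): a 1-byte access at addr is bad
     * if the shadow byte is non-zero and the byte's offset inside its
     * granule is not below that shadow value. */
    static bool poisoned_1(unsigned long addr)
    {
        int8_t shadow_value = *mem_to_shadow(addr);

        if (shadow_value) {
            int8_t last_accessible_byte = addr & GRANULE_MASK;
            return last_accessible_byte >= shadow_value;
        }
        return false;
    }

    int main(void)
    {
        /* Granule 2 allows 5 bytes: offset 1 is fine, offset 5 is not;
         * granule 3 is fully poisoned. */
        printf("%d %d %d\n", poisoned_1(17), poisoned_1(21), poisoned_1(24));
        return 0;
    }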
report.c
    88  static void end_report(unsigned long *flags, unsigned long addr)
    91  trace_error_report_end(ERROR_DETECTOR_KASAN, addr);
    123  struct page *kasan_addr_to_page(const void *addr)
    125  if ((addr >= (void *)PAGE_OFFSET) &&
    126  (addr < high_memory))
    127  return virt_to_head_page(addr);
    132  const void *addr)   [in describe_object_addr()]
    134  unsigned long access_addr = (unsigned long)addr;
    143  if (!addr)
    164  const void *addr, u8 tag)   [in describe_object_stacks()]
    [all …]
shadow.c
    42  void *memset(void *addr, int c, size_t len)
    44  if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_))
    47  return __memset(addr, c, len);
    72  void kasan_poison(const void *addr, size_t size, u8 value, bool init)
    81  addr = kasan_reset_tag(addr);
    84  if (is_kfence_address(addr))
    87  if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
    92  shadow_start = kasan_mem_to_shadow(addr);
    93  shadow_end = kasan_mem_to_shadow(addr + size);
    100  void kasan_poison_last_granule(const void *addr, size_t size)
    [all …]
sw_tags.c
    70  bool kasan_check_range(unsigned long addr, size_t size, bool write,
    80  if (unlikely(addr + size < addr))
    81  return !kasan_report(addr, size, write, ret_ip);
    83  tag = get_tag((const void *)addr);
    105  untagged_addr = kasan_reset_tag((const void *)addr);
    108  return !kasan_report(addr, size, write, ret_ip);
    114  return !kasan_report(addr, size, write, ret_ip);
    121  bool kasan_byte_accessible(const void *addr)
    123  u8 tag = get_tag(addr);
    124  void *untagged_addr = kasan_reset_tag(addr);
    [all …]
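In the software tag-based mode shown above, the tag travels in the unused top byte of the pointer: get_tag() extracts it and kasan_reset_tag() restores the default 0xff tag before the raw address is used, and kasan_check_range() then compares the pointer tag against the tag stored in shadow memory for each granule of the range. Below is a small sketch of just the top-byte arithmetic, assuming the arm64 top-byte-ignore layout (bits 63..56); the helper names and example values are illustrative, not taken from this file.

    #include <stdint.h>
    #include <stdio.h>

    #define TAG_SHIFT  56
    #define TAG_KERNEL 0xffULL          /* default "match-all" tag */

    static uint8_t get_tag(uint64_t addr)
    {
        return (uint8_t)(addr >> TAG_SHIFT);
    }

    static uint64_t set_tag(uint64_t addr, uint64_t tag)
    {
        return (addr & ~(0xffULL << TAG_SHIFT)) | (tag << TAG_SHIFT);
    }

    static uint64_t reset_tag(uint64_t addr)
    {
        return set_tag(addr, TAG_KERNEL);   /* like kasan_reset_tag() */
    }

    int main(void)
    {
        uint64_t plain  = 0xffff000012345678ULL;   /* made-up kernel address */
        uint64_t tagged = set_tag(plain, 0x2a);

        printf("tag=0x%02x untagged=0x%016llx\n",
               get_tag(tagged), (unsigned long long)reset_tag(tagged));
        return 0;
    }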
report_generic.c
    33  void *kasan_find_first_bad_addr(void *addr, size_t size)
    35  void *p = addr;
    37  while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))
    214  static bool __must_check get_address_stack_frame_info(const void *addr,
    231  if (!object_is_on_stack(addr))
    234  aligned_addr = round_down((unsigned long)addr, sizeof(long));
    259  *offset = (unsigned long)addr - (unsigned long)frame;
    266  void kasan_print_address_stack_frame(const void *addr)
    272  if (!get_address_stack_frame_info(addr, &offset, &frame_descr,
    282  addr, current->comm, task_pid_nr(current), offset);
    [all …]
/mm/
ioremap.c
    64  static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
    72  pte = pte_alloc_kernel_track(pmd, addr, mask);
    77  set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
    79  } while (pte++, addr += PAGE_SIZE, addr != end);
    84  static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
    91  if ((end - addr) != PMD_SIZE)
    94  if (!IS_ALIGNED(addr, PMD_SIZE))
    100  if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
    106  static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
    113  pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
    [all …]
vmalloc.c
    48  unsigned long addr = (unsigned long)x;   [in is_vmalloc_addr()]
    50  return addr >= VMALLOC_START && addr < VMALLOC_END;
    73  static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
    78  pte = pte_offset_kernel(pmd, addr);
    80  pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
    82  } while (pte++, addr += PAGE_SIZE, addr != end);
    86  static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
    93  pmd = pmd_offset(pud, addr);
    95  next = pmd_addr_end(addr, end);
    105  vunmap_pte_range(pmd, addr, next, mask);
    [all …]
pagewalk.c
    23  static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
    30  err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
    33  if (addr >= end - PAGE_SIZE)
    35  addr += PAGE_SIZE;
    41  static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
    49  pte = pte_offset_map(pmd, addr);
    50  err = walk_pte_range_inner(pte, addr, end, walk);
    53  pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
    54  err = walk_pte_range_inner(pte, addr, end, walk);
    61  static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
    [all …]
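pagewalk.c and several other files in this listing (memory.c, vmalloc.c, ioremap.c, mprotect.c) repeat the same range-splitting idiom: compute next = *_addr_end(addr, end), process [addr, next) at the current table level, then advance until addr reaches end. The sketch below shows just that loop shape; the 2 MiB span and the simplified pmd_addr_end() are stand-ins for the real page-table geometry and macros (the kernel macro also guards against address wrap-around).

    #include <stdio.h>

    #define PMD_SIZE  (2UL * 1024 * 1024)    /* assumed span per entry */
    #define PMD_MASK  (~(PMD_SIZE - 1))

    /* Clamp the end of the current entry's span to the overall end,
     * the role pmd_addr_end(addr, end) plays in the kernel walkers. */
    static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
    {
        unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

        return boundary < end ? boundary : end;
    }

    int main(void)
    {
        unsigned long addr = 0x1ff000;       /* deliberately unaligned start */
        unsigned long end  = 0x600000;
        unsigned long next;

        do {
            next = pmd_addr_end(addr, end);
            printf("handle [%#lx, %#lx)\n", addr, next);
        } while (addr = next, addr != end);

        return 0;
    }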
memory.c
    246  unsigned long addr)   [in free_pte_range()]
    260  pte_free_tlb(tlb, token, addr);
    265  unsigned long addr, unsigned long end,   [in free_pmd_range()]
    272  start = addr;
    273  pmd = pmd_offset(pud, addr);
    275  next = pmd_addr_end(addr, end);
    278  free_pte_range(tlb, pmd, addr);
    279  } while (pmd++, addr = next, addr != end);
    299  unsigned long addr, unsigned long end,   [in free_pud_range()]
    306  start = addr;
    [all …]
hmm.c
    40  static int hmm_pfns_fill(unsigned long addr, unsigned long end,
    43  unsigned long i = (addr - range->start) >> PAGE_SHIFT;
    45  for (; addr < end; addr += PAGE_SIZE, i++)
    61  static int hmm_vma_fault(unsigned long addr, unsigned long end,
    69  hmm_vma_walk->last = addr;
    77  for (; addr < end; addr += PAGE_SIZE)
    78  if (handle_mm_fault(vma, addr, fault_flags, NULL) &
    145  static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
    154  i = (addr - range->start) >> PAGE_SHIFT;
    155  npages = (end - addr) >> PAGE_SHIFT;
    [all …]
mincore.c
    24  static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
    36  for (; addr != end; vec++, addr += PAGE_SIZE)
    71  static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
    74  unsigned long nr = (end - addr) >> PAGE_SHIFT;
    80  pgoff = linear_page_index(vma, addr);
    90  static int mincore_unmapped_range(unsigned long addr, unsigned long end,
    94  walk->private += __mincore_unmapped_range(addr, end,
    99  static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
    106  int nr = (end - addr) >> PAGE_SHIFT;
    116  __mincore_unmapped_range(addr, end, vma, vec);
    [all …]
mprotect.c
    39  unsigned long addr, unsigned long end, pgprot_t newprot,   [in change_pte_range()]
    65  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    91  page = vm_normal_page(vma, addr, oldpte);
    116  oldpte = ptep_modify_prot_start(vma, addr, pte);
    140  ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
    176  set_pte_at(vma->vm_mm, addr, pte, newpte);
    180  } while (pte++, addr += PAGE_SIZE, addr != end);
    213  pud_t *pud, unsigned long addr, unsigned long end,   [in change_pmd_range()]
    224  pmd = pmd_offset(pud, addr);
    228  next = pmd_addr_end(addr, end);
    [all …]
mmap.c
    63  #define arch_mmap_check(addr, len, flags) (0)
    207  static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
    564  static int find_vma_links(struct mm_struct *mm, unsigned long addr,
    579  if (vma_tmp->vm_end > addr) {
    643  unsigned long addr, unsigned long end)   [in count_vma_pages_range()]
    649  vma = find_vma_intersection(mm, addr, end);
    654  max(addr, vma->vm_start)) >> PAGE_SHIFT;
    1217  struct vm_area_struct *prev, unsigned long addr,   [in __vma_merge()]
    1224  pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
    1241  VM_WARN_ON(prev && addr <= prev->vm_start);
    [all …]
nommu.c
    137  void vfree(const void *addr)
    139  kfree(addr);
    191  struct page *vmalloc_to_page(const void *addr)
    193  return virt_to_page(addr);
    197  unsigned long vmalloc_to_pfn(const void *addr)
    199  return page_to_pfn(virt_to_page(addr));
    203  long vread(char *buf, char *addr, unsigned long count)
    209  memcpy(buf, addr, count);
    213  long vwrite(char *buf, char *addr, unsigned long count)
    216  if ((unsigned long) addr + count < count)
    [all …]
ptdump.c
    16  unsigned long addr)   [in note_kasan_page_table()]
    20  st->note_page(st, addr, 4, pte_val(kasan_early_shadow_pte[0]));
    28  static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
    37  return note_kasan_page_table(walk, addr);
    44  st->note_page(st, addr, 0, pgd_val(val));
    49  static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
    58  return note_kasan_page_table(walk, addr);
    65  st->note_page(st, addr, 1, p4d_val(val));
    70  static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
    79  return note_kasan_page_table(walk, addr);
    [all …]
gup_benchmark.c
    17  __u64 addr;   [struct member]
    71  unsigned long i, nr_pages, addr, next;   [in __gup_benchmark_ioctl()]
    94  for (addr = gup->addr; addr < gup->addr + gup->size; addr = next) {
    98  next = addr + nr * PAGE_SIZE;
    99  if (next > gup->addr + gup->size) {
    100  next = gup->addr + gup->size;
    101  nr = (next - addr) / PAGE_SIZE;
    109  nr = get_user_pages_fast(addr, nr, gup->flags,
    113  nr = get_user_pages(addr, nr, gup->flags, pages + i,
    117  nr = pin_user_pages_fast(addr, nr, gup->flags,
    [all …]
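The __gup_benchmark_ioctl() lines above walk the user-supplied range in fixed-size batches, clamping the last batch so the page count matches what is left. The arithmetic on its own looks like the following sketch; the 32-page batch size and the start/size values are assumptions made for the example, not the driver's actual parameters.

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define BATCH     32UL      /* assumed pages pinned per call */

    int main(void)
    {
        unsigned long start = 0x10000000UL;      /* made-up gup->addr */
        unsigned long size  = 70 * PAGE_SIZE;    /* made-up gup->size */
        unsigned long addr, next, nr;

        for (addr = start; addr < start + size; addr = next) {
            nr = BATCH;
            next = addr + nr * PAGE_SIZE;
            if (next > start + size) {
                /* Final partial batch: clamp and recompute the count,
                 * as the ioctl loop does with gup->addr and gup->size. */
                next = start + size;
                nr = (next - addr) / PAGE_SIZE;
            }
            printf("pin %lu pages at %#lx\n", nr, addr);
        }
        return 0;
    }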
sparse-vmemmap.c
    143  pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
    146  pte_t *pte = pte_offset_kernel(pmd, addr);
    155  set_pte_at(&init_mm, addr, pte, entry);
    171  pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
    173  pmd_t *pmd = pmd_offset(pud, addr);
    183  pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
    185  pud_t *pud = pud_offset(p4d, addr);
    195  p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
    197  p4d_t *p4d = p4d_offset(pgd, addr);
    207  pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
    [all …]
mremap.c
    33  static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
    39  pgd = pgd_offset(mm, addr);
    43  p4d = p4d_offset(pgd, addr);
    47  pud = pud_offset(p4d, addr);
    54  static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
    59  pud = get_old_pud(mm, addr);
    63  pmd = pmd_offset(pud, addr);
    71  unsigned long addr)   [in alloc_new_pud()]
    76  pgd = pgd_offset(mm, addr);
    77  p4d = p4d_alloc(mm, pgd, addr);
    [all …]
gup.c
    1597  struct page *get_dump_page(unsigned long addr)
    1606  ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
    2140  static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
    2148  ptem = ptep = pte_offset_map(&pmd, addr);
    2207  } while (ptep++, addr += PAGE_SIZE, addr != end);
    2228  static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
    2237  static int __gup_device_huge(unsigned long pfn, unsigned long addr,
    2260  } while (addr += PAGE_SIZE, addr != end);
    2267  static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
    2274  fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
    [all …]
madvise.c
    311  unsigned long addr, unsigned long end,   [in madvise_cold_or_pageout_pte_range()]
    336  unsigned long next = pmd_addr_end(addr, end);
    362  if (next - addr != HPAGE_PMD_SIZE) {
    377  pmdp_invalidate(vma, addr, pmd);
    380  set_pmd_at(mm, addr, pmd, orig_pmd);
    381  tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
    407  orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    410  for (; addr < end; pte++, addr += PAGE_SIZE) {
    419  page = vm_normal_page(vma, addr, ptent);
    445  pte_offset_map_lock(mm, pmd, addr, &ptl);
    [all …]
/mm/kfence/
core.c
    128  static bool kfence_protect(unsigned long addr)
    130  return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
    133  static bool kfence_unprotect(unsigned long addr)
    135  return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
    138  static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
    144  if (!is_kfence_address((void *)addr))
    152  index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
    175  if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))   [in metadata_to_pageaddr()]
    209  static inline bool set_canary_byte(u8 *addr)
    211  *addr = KFENCE_CANARY_PATTERN(addr);
    [all …]
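The index computation in addr_to_metadata() above encodes the KFENCE pool layout: object pages alternate with protected guard pages, so every two pool pages correspond to one metadata slot, and the guard pages at the start of the pool account for the trailing "- 1". A minimal sketch of that address-to-slot arithmetic follows; the pool base, page size and object count are made up for illustration (the kernel uses __kfence_pool, is_kfence_address() and CONFIG_KFENCE_NUM_OBJECTS).

    #include <stdio.h>

    #define PAGE_SIZE    4096L
    #define NUM_OBJECTS  4L     /* assumed number of KFENCE slots */

    /* Two pool pages (object page + guard page) per metadata slot;
     * the first two pages of the pool hold no object, hence the "- 1". */
    static long addr_to_index(unsigned long pool, unsigned long addr)
    {
        long index = (long)((addr - pool) / (PAGE_SIZE * 2)) - 1;

        if (index < 0 || index >= NUM_OBJECTS)
            return -1;          /* not covered by any slot */
        return index;
    }

    int main(void)
    {
        unsigned long pool = 0x100000UL;    /* made-up pool base */

        printf("%ld\n", addr_to_index(pool, pool + 1 * PAGE_SIZE)); /* -1 */
        printf("%ld\n", addr_to_index(pool, pool + 2 * PAGE_SIZE)); /*  0 */
        printf("%ld\n", addr_to_index(pool, pool + 5 * PAGE_SIZE)); /*  1 */
        return 0;
    }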
kfence_test.c
    73  char *addr; /* Address at which the bad access occurred. */   [struct member]
    149  cur += scnprintf(cur, end - cur, " 0x%p", (void *)r->addr);   [in report_matches()]
    322  expect.addr = buf - 1;   [in test_out_of_bounds_read()]
    323  READ_ONCE(*expect.addr);
    328  expect.addr = buf + size;
    329  READ_ONCE(*expect.addr);
    346  expect.addr = buf - 1;   [in test_out_of_bounds_write()]
    347  WRITE_ONCE(*expect.addr, 42);
    362  expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);   [in test_use_after_free_read()]
    363  test_free(expect.addr);
    [all …]
/mm/damon/
paddr.c
    20  unsigned long addr, void *arg)   [in __damon_pa_mkold()]
    25  .address = addr,
    29  addr = pvmw.address;
    31  damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
    33  damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
    93  unsigned long addr, void *arg)   [in __damon_pa_young()]
    99  .address = addr,
    105  addr = pvmw.address;
    109  mmu_notifier_test_young(vma->vm_mm, addr);
    114  mmu_notifier_test_young(vma->vm_mm, addr);
    [all …]