/mm/kmsan/

shadow.c
     57  unsigned long addr64 = (unsigned long)addr, off;  in vmalloc_meta() local
     61  off = addr64 - VMALLOC_START;  in vmalloc_meta()
     62  return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START :  in vmalloc_meta()
     66  off = addr64 - MODULES_VADDR;  in vmalloc_meta()
     67  return off + (is_origin ? KMSAN_MODULES_ORIGIN_START :  in vmalloc_meta()
    126  u64 addr = (u64)address, pad, off;  in kmsan_get_metadata() local
    148  off = offset_in_page(addr);  in kmsan_get_metadata()
    150  return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;  in kmsan_get_metadata()

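These shadow.c hits all compute one mapping: a vmalloc (or module) address is translated to its KMSAN metadata address by taking the offset from the region base and rebasing that offset onto the shadow or origin region. A minimal userspace sketch of the arithmetic; the region bases below are placeholders, not the kernel's real memory layout:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder bases; the real values come from the kernel memory map. */
    #define VMALLOC_START              0xffffc90000000000UL
    #define KMSAN_VMALLOC_SHADOW_START 0xfffff50000000000UL
    #define KMSAN_VMALLOC_ORIGIN_START 0xfffff90000000000UL

    static unsigned long vmalloc_meta(unsigned long addr, bool is_origin)
    {
        unsigned long off = addr - VMALLOC_START;   /* offset into vmalloc */

        /* Same offset, rebased onto the chosen metadata region. */
        return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START
                                : KMSAN_VMALLOC_SHADOW_START);
    }

    int main(void)
    {
        unsigned long p = VMALLOC_START + 0x1234;

        printf("shadow %#lx origin %#lx\n",
               vmalloc_meta(p, false), vmalloc_meta(p, true));
        return 0;
    }
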
hooks.c
    157  unsigned long off = 0;  in kmsan_ioremap_page_range() local
    165  for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {  in kmsan_ioremap_page_range()
    173  vmalloc_shadow(start + off),  in kmsan_ioremap_page_range()
    174  vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,  in kmsan_ioremap_page_range()
    182  vmalloc_origin(start + off),  in kmsan_ioremap_page_range()
    183  vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,  in kmsan_ioremap_page_range()
    187  vmalloc_shadow(start + off),  in kmsan_ioremap_page_range()
    188  vmalloc_shadow(start + off + PAGE_SIZE));  in kmsan_ioremap_page_range()

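kmsan_ioremap_page_range() walks the ioremapped range one page per iteration: off advances by PAGE_SIZE, so each pass handles exactly one page of the shadow range and one page of the origin range (the clean index in the snippet suggests how far to unwind on failure). A sketch of the loop shape; vmalloc_shadow() here is a stand-in, not the kernel helper:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Stand-in for the kernel's vmalloc_shadow() address translation. */
    static unsigned long vmalloc_shadow(unsigned long addr)
    {
        return addr + 0x100000000UL;    /* placeholder rebase */
    }

    int main(void)
    {
        unsigned long start = 0x1000000UL, nr = 3, off = 0;

        for (unsigned long i = 0; i < nr; i++, off += PAGE_SIZE)
            printf("page %lu: shadow [%#lx, %#lx)\n", i,
                   vmalloc_shadow(start + off),
                   vmalloc_shadow(start + off + PAGE_SIZE));
        return 0;
    }
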
/mm/

percpu.c
    292  static unsigned long pcpu_off_to_block_index(int off)  in pcpu_off_to_block_index() argument
    294  return off / PCPU_BITMAP_BLOCK_BITS;  in pcpu_off_to_block_index()
    297  static unsigned long pcpu_off_to_block_off(int off)  in pcpu_off_to_block_off() argument
    299  return off & (PCPU_BITMAP_BLOCK_BITS - 1);  in pcpu_off_to_block_off()
    302  static unsigned long pcpu_block_off_to_off(int index, int off)  in pcpu_block_off_to_off() argument
    304  return index * PCPU_BITMAP_BLOCK_BITS + off;  in pcpu_block_off_to_off()
   1276  static int pcpu_free_area(struct pcpu_chunk *chunk, int off)  in pcpu_free_area() argument
   1286  bit_off = off / PCPU_MIN_ALLOC_SIZE;  in pcpu_free_area()
   1645  struct pcpu_chunk *chunk, int off,  in pcpu_memcg_post_alloc_hook() argument
   1652  chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;  in pcpu_memcg_post_alloc_hook()
    [all …]

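The three helpers at lines 292-304 split a bitmap offset into a (block index, offset within block) pair and recombine it, while pcpu_free_area() first converts a byte offset into a bit offset, one bit per PCPU_MIN_ALLOC_SIZE bytes. A self-contained sketch; the constants mirror a common 4 KiB-page configuration but are assumptions here:

    #include <stdio.h>

    #define PCPU_MIN_ALLOC_SHIFT   2
    #define PCPU_MIN_ALLOC_SIZE    (1 << PCPU_MIN_ALLOC_SHIFT)  /* 4 bytes per bit */
    #define PCPU_BITMAP_BLOCK_BITS 1024                         /* bits per block */

    static unsigned long pcpu_off_to_block_index(int off)
    {
        return off / PCPU_BITMAP_BLOCK_BITS;         /* which block */
    }

    static unsigned long pcpu_off_to_block_off(int off)
    {
        return off & (PCPU_BITMAP_BLOCK_BITS - 1);   /* bit inside the block */
    }

    static unsigned long pcpu_block_off_to_off(int index, int off)
    {
        return index * PCPU_BITMAP_BLOCK_BITS + off; /* inverse mapping */
    }

    int main(void)
    {
        int byte_off = 6000;
        int bit_off = byte_off / PCPU_MIN_ALLOC_SIZE;   /* as in pcpu_free_area() */
        int blk = (int)pcpu_off_to_block_index(bit_off);
        int boff = (int)pcpu_off_to_block_off(bit_off);

        printf("bit %d -> block %d, offset %d -> bit %lu\n",
               bit_off, blk, boff, pcpu_block_off_to_off(blk, boff));
        return 0;
    }
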
zsmalloc.c
    904  unsigned long off = 0;  in init_zspage() local
    912  set_first_obj_offset(page, off);  in init_zspage()
    915  link = (struct link_free *)vaddr + off / sizeof(*link);  in init_zspage()
    917  while ((off += class->size) < PAGE_SIZE) {  in init_zspage()
    939  off %= PAGE_SIZE;  in init_zspage()
   1055  struct page *pages[2], int off, int size)  in __zs_map_object() argument
   1068  sizes[0] = PAGE_SIZE - off;  in __zs_map_object()
   1073  memcpy(buf, addr + off, sizes[0]);  in __zs_map_object()
   1083  struct page *pages[2], int off, int size)  in __zs_unmap_object() argument
   1096  off += ZS_HANDLE_SIZE;  in __zs_unmap_object()
    [all …]

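In init_zspage(), off strides through each page in class->size steps and wraps with off %= PAGE_SIZE at page boundaries, which is why an object can straddle two pages; __zs_map_object() then copies such an object in two pieces, split at PAGE_SIZE - off. A sketch of that split copy, using plain buffers in place of mapped pages:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Copy `size` bytes of an object that starts `off` bytes into
     * pages[0] and may continue at the start of pages[1]. */
    static void map_object(char *buf, char *pages[2], int off, int size)
    {
        if (off + size <= PAGE_SIZE) {
            memcpy(buf, pages[0] + off, size);       /* fits in one page */
            return;
        }
        int first = PAGE_SIZE - off;                 /* tail of first page */
        memcpy(buf, pages[0] + off, first);
        memcpy(buf + first, pages[1], size - first); /* head of second page */
    }

    int main(void)
    {
        static char p0[PAGE_SIZE], p1[PAGE_SIZE], buf[64];
        char *pages[2] = { p0, p1 };

        memset(p0 + PAGE_SIZE - 16, 'A', 16);        /* object spans the boundary */
        memset(p1, 'B', 48);
        map_object(buf, pages, PAGE_SIZE - 16, 64);
        printf("%.2s...%.2s\n", buf, buf + 62);      /* AA...BB */
        return 0;
    }
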
slab.h
    550  unsigned long off;  in memcg_slab_post_alloc_hook() local
    567  off = obj_to_index(s, slab, p[i]);  in memcg_slab_post_alloc_hook()
    569  slab_objcgs(slab)[off] = objcg;  in memcg_slab_post_alloc_hook()
    594  unsigned int off;  in memcg_slab_free_hook() local
    596  off = obj_to_index(s, slab, p[i]);  in memcg_slab_free_hook()
    597  objcg = objcgs[off];  in memcg_slab_free_hook()
    601  objcgs[off] = NULL;  in memcg_slab_free_hook()

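Both memcg hooks use obj_to_index() to turn an object pointer into its slot number, then use that slot as an index into the slab's objcg ownership array. Conceptually the index is the byte distance from the slab base divided by the object size (the kernel avoids the runtime division with a precomputed reciprocal); a sketch with illustrative names:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified obj_to_index(): slot number from pointer arithmetic. */
    static unsigned int obj_to_index(const void *base, size_t obj_size,
                                     const void *obj)
    {
        return (unsigned int)(((const char *)obj - (const char *)base) / obj_size);
    }

    int main(void)
    {
        char slab[4096];
        const void *objcgs[4096 / 64] = { 0 };  /* one owner slot per object */
        void *p = slab + 3 * 64;

        unsigned int off = obj_to_index(slab, 64, p);
        printf("object at index %u, owner %p\n", off, (void *)objcgs[off]);
        return 0;
    }
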
hugetlb_cgroup.c
    595  char *buf, size_t nbytes, loff_t off,  in hugetlb_cgroup_write() argument
    633  char *buf, size_t nbytes, loff_t off)  in hugetlb_cgroup_write_legacy() argument
    635  return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");  in hugetlb_cgroup_write_legacy()
    639  char *buf, size_t nbytes, loff_t off)  in hugetlb_cgroup_write_dfl() argument
    641  return hugetlb_cgroup_write(of, buf, nbytes, off, "max");  in hugetlb_cgroup_write_dfl()
    645  char *buf, size_t nbytes, loff_t off)  in hugetlb_cgroup_reset() argument

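The legacy and default-hierarchy write handlers above are thin wrappers: both forward to hugetlb_cgroup_write() and differ only in which sentinel spells "unlimited" ("-1" on cgroup v1, "max" on v2). A sketch of that dispatch pattern with simplified, illustrative signatures:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Shared writer: a buffer equal to max_str means "no limit". */
    static long cg_write(const char *buf, const char *max_str)
    {
        if (!strcmp(buf, max_str))
            return -1;              /* unlimited */
        return atol(buf);
    }

    static long cg_write_legacy(const char *buf) { return cg_write(buf, "-1"); }
    static long cg_write_dfl(const char *buf)    { return cg_write(buf, "max"); }

    int main(void)
    {
        printf("%ld %ld\n", cg_write_legacy("-1"), cg_write_dfl("4096"));
        return 0;
    }
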
Kconfig.debug
     46  can be overridden by debug_pagealloc=off|on.
     74  the runtime debug capabilities switched off. Enabling this is
     78  off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
    270  bool "Default kmemleak to off"

memcontrol.c
   2992  unsigned int off;  in mem_cgroup_from_obj_folio() local
   2999  off = obj_to_index(slab->slab_cache, slab, p);  in mem_cgroup_from_obj_folio()
   3000  if (objcgs[off])  in mem_cgroup_from_obj_folio()
   3001  return obj_cgroup_memcg(objcgs[off]);  in mem_cgroup_from_obj_folio()
   3742  loff_t off)  in mem_cgroup_force_empty_write() argument
   3952  char *buf, size_t nbytes, loff_t off)  in mem_cgroup_write() argument
   4001  size_t nbytes, loff_t off)  in mem_cgroup_reset() argument
   4963  char *buf, size_t nbytes, loff_t off)  in memcg_write_event_control() argument
   6587  char *buf, size_t nbytes, loff_t off)  in memory_min_write() argument
   6610  char *buf, size_t nbytes, loff_t off)  in memory_low_write() argument
    [all …]

slub.c
    965  unsigned int off; /* Offset of last byte */  in print_trailer() local
    987  off = get_info_end(s);  in print_trailer()
    990  off += 2 * sizeof(struct track);  in print_trailer()
    993  off += sizeof(unsigned int);  in print_trailer()
    995  off += kasan_metadata_size(s, false);  in print_trailer()
    997  if (off != size_from_object(s))  in print_trailer()
    999  print_section(KERN_ERR, "Padding ", p + off,  in print_trailer()
   1000  size_from_object(s) - off);  in print_trailer()
   1156  unsigned long off = get_info_end(s); /* The end of info */  in check_pad_bytes() local
   1160  off += 2 * sizeof(struct track);  in check_pad_bytes()
    [all …]

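print_trailer() and check_pad_bytes() locate the padding by accumulating the sizes of the optional metadata regions that follow an object: the two tracking structs, the original-request-size field, and the KASAN metadata; whatever remains up to the full per-object footprint is padding. A sketch of that accumulation (struct layout, sizes, and flags are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct track { unsigned long addr; unsigned long addrs[8]; };

    /* Offset at which padding starts, mirroring the off += ... chain. */
    static unsigned int pad_start(unsigned int info_end, bool store_user,
                                  bool orig_size, unsigned int kasan_meta)
    {
        unsigned int off = info_end;          /* end of fixed metadata */

        if (store_user)
            off += 2 * sizeof(struct track);  /* alloc + free tracking */
        if (orig_size)
            off += sizeof(unsigned int);      /* original request size */
        off += kasan_meta;                    /* KASAN alloc metadata */
        return off;
    }

    int main(void)
    {
        unsigned int off = pad_start(64, true, true, 16);
        unsigned int footprint = 256;         /* size_from_object() stand-in */

        if (off != footprint)
            printf("padding: %u bytes at offset %u\n", footprint - off, off);
        return 0;
    }
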
huge_memory.c
    837  loff_t off, unsigned long flags, unsigned long size)  in __thp_get_unmapped_area() argument
    839  loff_t off_end = off + len;  in __thp_get_unmapped_area()
    840  loff_t off_align = round_up(off, size);  in __thp_get_unmapped_area()
    850  if (len_pad < len || (off + len_pad) < off)  in __thp_get_unmapped_area()
    854  off >> PAGE_SHIFT, flags);  in __thp_get_unmapped_area()
    870  ret += (off - ret) & (size - 1);  in __thp_get_unmapped_area()
    878  loff_t off = (loff_t)pgoff << PAGE_SHIFT;  in thp_get_unmapped_area() local
    880  ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);  in thp_get_unmapped_area()

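__thp_get_unmapped_area() pads the requested length (checking at line 850 that the padding did not wrap), then shifts the address it got back with ret += (off - ret) & (size - 1), so that the mapping address and the file offset become congruent modulo size and huge pages in the file line up with huge pages in the mapping. A worked sketch of that fix-up, with illustrative values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long size = 0x200000UL;        /* PMD_SIZE on x86-64 */
        unsigned long off  = 0x3ff000UL;        /* file offset of the mapping */
        unsigned long ret  = 0x7f0000001000UL;  /* address found for padded len */

        /* Shift ret forward so (ret - off) is a multiple of size. */
        ret += (off - ret) & (size - 1);
        printf("aligned %#lx, (ret - off) %% size = %lu\n",
               ret, (ret - off) % size);        /* remainder is 0 */
        return 0;
    }
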
vmstat.c
   1846  unsigned long off = l - (unsigned long *)m->private;  in vmstat_show() local
   1848  seq_puts(m, vmstat_text[off]);  in vmstat_show()
   1852  if (off == NR_VMSTAT_ITEMS - 1) {  in vmstat_show()

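vmstat_show() recovers the row index from pointer arithmetic: the seq_file position l points into the counter array, so subtracting the array base yields the index into the parallel vmstat_text name table. A sketch (the names are placeholders, not the real vmstat_text entries):

    #include <stdio.h>

    int main(void)
    {
        unsigned long counters[4] = { 10, 20, 30, 40 };
        const char *names[4] = { "nr_a", "nr_b", "nr_c", "nr_d" };
        unsigned long *l = &counters[2];     /* current seq_file position */
        unsigned long off = l - counters;    /* element index, not bytes */

        printf("%s %lu\n", names[off], *l);  /* nr_c 30 */
        return 0;
    }
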
mempolicy.c
   1997  unsigned long off;  in interleave_nid() local
   2007  off = vma->vm_pgoff >> (shift - PAGE_SHIFT);  in interleave_nid()
   2008  off += (addr - vma->vm_start) >> shift;  in interleave_nid()
   2009  return offset_il_node(pol, off);  in interleave_nid()

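interleave_nid() builds a stable index for the fault address: the VMA's file offset scaled to the interleave granularity, plus the distance of the address into the VMA at that granularity; offset_il_node() then reduces it over the allowed nodes. A simplified sketch where the reduction is a plain modulo (the kernel walks the policy's nodemask instead):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long vm_start = 0x7f0000000000UL;  /* VMA base */
        unsigned long vm_pgoff = 0x100;             /* file offset in pages */
        unsigned long addr = vm_start + 0x5000;
        unsigned long shift = PAGE_SHIFT;           /* page-sized interleave */
        unsigned long nnodes = 4;                   /* allowed NUMA nodes */

        unsigned long off = vm_pgoff >> (shift - PAGE_SHIFT);
        off += (addr - vm_start) >> shift;
        printf("node %lu\n", off % nnodes);         /* (0x100 + 5) % 4 = 1 */
        return 0;
    }
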
vmalloc.c
   3883  unsigned long off;  in remap_vmalloc_range_partial() local
   3886  if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))  in remap_vmalloc_range_partial()
   3901  if (check_add_overflow(size, off, &end_index) ||  in remap_vmalloc_range_partial()
   3904  kaddr += off;  in remap_vmalloc_range_partial()

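remap_vmalloc_range_partial() refuses ranges whose arithmetic wraps: pgoff shifted into a byte offset is guarded by check_shl_overflow(), and size + off by check_add_overflow(), before kaddr is advanced. A sketch of the same guards, using the compiler's overflow intrinsics as stand-ins for the kernel helpers:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    static bool range_ok(unsigned long pgoff, unsigned long size,
                         unsigned long area_size)
    {
        unsigned long off, end_index;

        /* off = pgoff << PAGE_SHIFT, rejecting wraparound. */
        if (__builtin_mul_overflow(pgoff, 1UL << PAGE_SHIFT, &off))
            return false;

        /* end_index = size + off, again rejecting wraparound. */
        if (__builtin_add_overflow(size, off, &end_index) ||
            end_index > area_size)
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", range_ok(2, 4096, 1UL << 20));    /* 1: fits */
        printf("%d\n", range_ok(~0UL, 4096, 1UL << 20)); /* 0: shift wraps */
        return 0;
    }
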
memory.c
    628  unsigned long off;  in vm_normal_page() local
    629  off = (addr - vma->vm_start) >> PAGE_SHIFT;  in vm_normal_page()
    630  if (pfn == vma->vm_pgoff + off)  in vm_normal_page()
    682  unsigned long off;  in vm_normal_page_pmd() local
    683  off = (addr - vma->vm_start) >> PAGE_SHIFT;  in vm_normal_page_pmd()
    684  if (pfn == vma->vm_pgoff + off)  in vm_normal_page_pmd()

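In both vm_normal_page() and vm_normal_page_pmd() this test handles the CoW VM_PFNMAP case: if the pfn still equals vm_pgoff plus the page distance into the VMA, the page sits at its original linear remap_pfn_range() position and is not a "normal" page; only pfns that have diverged through CoW are. A sketch of the linearity test:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* True if pfn is exactly where a linear pfn mapping would have
     * put it for this address. */
    static bool pfn_is_linear(unsigned long addr, unsigned long vm_start,
                              unsigned long vm_pgoff, unsigned long pfn)
    {
        unsigned long off = (addr - vm_start) >> PAGE_SHIFT;
        return pfn == vm_pgoff + off;
    }

    int main(void)
    {
        unsigned long vm_start = 0x7f0000000000UL, vm_pgoff = 0x8000;

        printf("%d %d\n",
               pfn_is_linear(vm_start + 0x3000, vm_start, vm_pgoff, 0x8003),
               pfn_is_linear(vm_start + 0x3000, vm_start, vm_pgoff, 0x9999));
        return 0;   /* prints "1 0" */
    }
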
filemap.c
   4354  first_index = csr.off >> PAGE_SHIFT;  in SYSCALL_DEFINE4()
   4356  csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;  in SYSCALL_DEFINE4()

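The cachestat() syscall converts the caller's byte range (csr.off, csr.len) into an inclusive page-index range: len == 0 means "through the end of the file" (last index ULONG_MAX), otherwise the last index is the page holding the final byte. A worked sketch of the conversion:

    #include <limits.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long off = 5000, len = 10000;  /* byte range from the caller */

        unsigned long first_index = off >> PAGE_SHIFT;
        unsigned long last_index =
            len == 0 ? ULONG_MAX : (off + len - 1) >> PAGE_SHIFT;

        printf("pages [%lu, %lu]\n", first_index, last_index);  /* [1, 3] */
        return 0;
    }
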
Kconfig
    803  more than it requires. To deal with this, mmap() is able to trim off
    806  If trimming is enabled, the excess is trimmed off and returned to the