/mm/
percpu.c
    296  static unsigned long pcpu_off_to_block_index(int off)  in pcpu_off_to_block_index() argument
    298  return off / PCPU_BITMAP_BLOCK_BITS;  in pcpu_off_to_block_index()
    301  static unsigned long pcpu_off_to_block_off(int off)  in pcpu_off_to_block_off() argument
    303  return off & (PCPU_BITMAP_BLOCK_BITS - 1);  in pcpu_off_to_block_off()
    306  static unsigned long pcpu_block_off_to_off(int index, int off)  in pcpu_block_off_to_off() argument
    308  return index * PCPU_BITMAP_BLOCK_BITS + off;  in pcpu_block_off_to_off()
   1272  static int pcpu_free_area(struct pcpu_chunk *chunk, int off)  in pcpu_free_area() argument
   1282  bit_off = off / PCPU_MIN_ALLOC_SIZE;  in pcpu_free_area()
   1648  struct pcpu_chunk *chunk, int off,  in pcpu_memcg_post_alloc_hook() argument
   1655  chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;  in pcpu_memcg_post_alloc_hook()
   [all …]
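
The three percpu helpers above split a chunk offset into a bitmap-block index plus an offset within that block, and recombine them. A minimal standalone sketch of the same math follows; BITMAP_BLOCK_BITS is an illustrative power-of-two stand-in for PCPU_BITMAP_BLOCK_BITS (the mask form in pcpu_off_to_block_off() is only valid because the block size is a power of two).

#include <assert.h>
#include <stdio.h>

#define BITMAP_BLOCK_BITS 1024	/* illustrative; must be a power of two */

static unsigned long off_to_block_index(int off)
{
	return off / BITMAP_BLOCK_BITS;
}

static unsigned long off_to_block_off(int off)
{
	/* off & (2^n - 1) == off % 2^n, valid only for power-of-two sizes */
	return off & (BITMAP_BLOCK_BITS - 1);
}

static unsigned long block_off_to_off(int index, int off)
{
	return index * BITMAP_BLOCK_BITS + off;
}

int main(void)
{
	int off = 3000;
	unsigned long idx = off_to_block_index(off);
	unsigned long rem = off_to_block_off(off);

	/* splitting and recombining must round-trip */
	assert(block_off_to_off(idx, rem) == (unsigned long)off);
	printf("off=%d -> block %lu, offset-in-block %lu\n", off, idx, rem);
	return 0;
}
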
zsmalloc.c
    979  unsigned long off = 0;  in init_zspage() local
    987  set_first_obj_offset(page, off);  in init_zspage()
    990  link = (struct link_free *)vaddr + off / sizeof(*link);  in init_zspage()
    992  while ((off += class->size) < PAGE_SIZE) {  in init_zspage()
   1014  off %= PAGE_SIZE;  in init_zspage()
   1129  struct page *pages[2], int off, int size)  in __zs_map_object() argument
   1142  sizes[0] = PAGE_SIZE - off;  in __zs_map_object()
   1147  memcpy(buf, addr + off, sizes[0]);  in __zs_map_object()
   1157  struct page *pages[2], int off, int size)  in __zs_unmap_object() argument
   1170  off += ZS_HANDLE_SIZE;  in __zs_unmap_object()
   [all …]
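
init_zspage() strides an offset through the zspage in class->size steps and carries the remainder across page boundaries with off %= PAGE_SIZE, so an object split by a boundary continues at the top of the next page. A toy sketch of that striding, with invented PAGE_SZ and size-class values:

#include <stdio.h>

#define PAGE_SZ 4096UL

int main(void)
{
	unsigned long class_size = 720;	/* hypothetical size class */
	unsigned long off = 0;		/* offset of the next object */
	int page = 0;

	for (int obj = 0; obj < 12; obj++) {
		if (off >= PAGE_SZ) {
			off %= PAGE_SZ;	/* tail of a split object lands here */
			page++;
		}
		printf("object %2d starts on page %d at offset %4lu\n",
		       obj, page, off);
		off += class_size;
	}
	return 0;
}
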
slab.h
    333  unsigned long off;  in memcg_slab_post_alloc_hook() local
    350  off = obj_to_index(s, page, p[i]);  in memcg_slab_post_alloc_hook()
    352  page_objcgs(page)[off] = objcg;  in memcg_slab_post_alloc_hook()
    369  unsigned int off;  in memcg_slab_free_hook() local
    389  off = obj_to_index(s, page, p[i]);  in memcg_slab_free_hook()
    390  objcg = objcgs[off];  in memcg_slab_free_hook()
    394  objcgs[off] = NULL;  in memcg_slab_free_hook()
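
Both memcg slab hooks map an object pointer to a slot in the page's obj_cgroups array via obj_to_index(): the pointer's offset inside the slab divided by the object stride (the real helper avoids the runtime divide with a precomputed reciprocal). A plain-division sketch with invented sizes:

#include <stdint.h>
#include <stdio.h>

struct fake_cache {
	unsigned int size;	/* object stride within the slab */
};

static unsigned int obj_to_index(const struct fake_cache *s,
				 const void *slab_base, const void *obj)
{
	return (unsigned int)(((uintptr_t)obj - (uintptr_t)slab_base) / s->size);
}

int main(void)
{
	struct fake_cache s = { .size = 64 };
	static char slab[4096];
	static int dummy_cg;			/* stands in for an obj_cgroup */
	void *objcgs[4096 / 64] = { 0 };	/* one slot per object */

	void *obj = &slab[3 * 64];		/* fourth object in the slab */
	unsigned int off = obj_to_index(&s, slab, obj);

	objcgs[off] = &dummy_cg;		/* post_alloc: record the owner */
	printf("object maps to index %u (%p)\n", off, objcgs[off]);
	objcgs[off] = NULL;			/* free hook: clear the slot */
	return 0;
}
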
hugetlb_cgroup.c
    499  char *buf, size_t nbytes, loff_t off,  in hugetlb_cgroup_write() argument
    537  char *buf, size_t nbytes, loff_t off)  in hugetlb_cgroup_write_legacy() argument
    539  return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");  in hugetlb_cgroup_write_legacy()
    543  char *buf, size_t nbytes, loff_t off)  in hugetlb_cgroup_write_dfl() argument
    545  return hugetlb_cgroup_write(of, buf, nbytes, off, "max");  in hugetlb_cgroup_write_dfl()
    549  char *buf, size_t nbytes, loff_t off)  in hugetlb_cgroup_reset() argument
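
The two thin wrappers above pass different "unlimited" spellings ("-1" on the legacy hierarchy, "max" on the default one) into a common writer. A sketch of that sentinel-token pattern; parse_limit() is a hypothetical stand-in for the page_counter parsing the kernel actually does:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define LIMIT_MAX (~0UL)	/* "no limit" sentinel */

/* hypothetical stand-in for the kernel's limit parsing */
static unsigned long parse_limit(const char *buf, const char *nolimit_tok)
{
	if (!strcmp(buf, nolimit_tok))
		return LIMIT_MAX;
	return strtoul(buf, NULL, 10);
}

static unsigned long write_legacy(const char *buf)
{
	return parse_limit(buf, "-1");	/* cgroup v1 spelling */
}

static unsigned long write_dfl(const char *buf)
{
	return parse_limit(buf, "max");	/* cgroup v2 spelling */
}

int main(void)
{
	printf("v1 \"-1\"   -> %#lx\n", write_legacy("-1"));
	printf("v2 \"max\"  -> %#lx\n", write_dfl("max"));
	printf("v2 \"4096\" -> %#lx\n", write_dfl("4096"));
	return 0;
}
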
huge_memory.c
    545  loff_t off, unsigned long flags, unsigned long size)  in __thp_get_unmapped_area() argument
    547  loff_t off_end = off + len;  in __thp_get_unmapped_area()
    548  loff_t off_align = round_up(off, size);  in __thp_get_unmapped_area()
    555  if (len_pad < len || (off + len_pad) < off)  in __thp_get_unmapped_area()
    559  off >> PAGE_SHIFT, flags);  in __thp_get_unmapped_area()
    575  ret += (off - ret) & (size - 1);  in __thp_get_unmapped_area()
    583  loff_t off = (loff_t)pgoff << PAGE_SHIFT;  in thp_get_unmapped_area() local
    588  ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);  in thp_get_unmapped_area()
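
__thp_get_unmapped_area() over-allocates the search window and then advances the result by (off - ret) & (size - 1), making the returned address congruent to the file offset modulo size (PMD_SIZE in the kernel) so the mapping can use huge pages. A sketch of just that final alignment step, with an invented base address and a 2 MiB stand-in:

#include <stdio.h>

#define SIZE (2UL << 20)	/* PMD_SIZE stand-in: 2 MiB */

static unsigned long align_for_off(unsigned long ret, unsigned long off)
{
	/* advance by at most SIZE - 1 so that ret == off (mod SIZE) */
	ret += (off - ret) & (SIZE - 1);
	return ret;
}

int main(void)
{
	unsigned long off = 5UL << 20;		/* PMD-aligned file offset */
	unsigned long ret = 0x7f0000001000UL;	/* unaligned search result */
	unsigned long got = align_for_off(ret, off);

	printf("ret=%#lx off=%#lx -> %#lx (mod 2MiB = %lu)\n",
	       ret, off, got, got & (SIZE - 1));
	return 0;
}
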
slub.c
    841  unsigned int off;	/* Offset of last byte */  in print_trailer() local
    863  off = get_info_end(s);  in print_trailer()
    866  off += 2 * sizeof(struct track);  in print_trailer()
    868  off += kasan_metadata_size(s);  in print_trailer()
    870  if (off != size_from_object(s))  in print_trailer()
    872  print_section(KERN_ERR, "Padding ", p + off,  in print_trailer()
    873  size_from_object(s) - off);  in print_trailer()
   1003  unsigned long off = get_info_end(s);	/* The end of info */  in check_pad_bytes() local
   1007  off += 2 * sizeof(struct track);  in check_pad_bytes()
   1009  off += kasan_metadata_size(s);  in check_pad_bytes()
   [all …]
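
print_trailer() and check_pad_bytes() walk the same layout: start at the end of the object info, step over the two struct track records (when user tracking is on) and the KASAN metadata, and whatever remains up to size_from_object() is trailing padding. A sketch of that accumulation, with invented sizes standing in for get_info_end(), sizeof(struct track) and kasan_metadata_size():

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long info_end = 128;	/* end of object + debug info */
	unsigned long track_sz = 16;	/* one alloc/free track record */
	unsigned long kasan_sz = 32;	/* optional KASAN metadata */
	unsigned long total = 224;	/* size_from_object() stand-in */
	bool store_user = true;		/* SLAB_STORE_USER enabled? */

	unsigned long off = info_end;
	if (store_user)
		off += 2 * track_sz;	/* alloc track + free track */
	off += kasan_sz;

	if (off != total)
		printf("padding: %lu bytes at offset %lu\n", total - off, off);
	else
		printf("no trailing padding\n");
	return 0;
}
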
memcontrol.c
   2927  unsigned int off;  in mem_cgroup_from_obj() local
   2929  off = obj_to_index(page->slab_cache, page, p);  in mem_cgroup_from_obj()
   2930  objcg = page_objcgs(page)[off];  in mem_cgroup_from_obj()
   3579  loff_t off)  in mem_cgroup_force_empty_write() argument
   3820  char *buf, size_t nbytes, loff_t off)  in mem_cgroup_write() argument
   3864  size_t nbytes, loff_t off)  in mem_cgroup_reset() argument
   4824  char *buf, size_t nbytes, loff_t off)  in memcg_write_event_control() argument
   6307  char *buf, size_t nbytes, loff_t off)  in memory_min_write() argument
   6330  char *buf, size_t nbytes, loff_t off)  in memory_low_write() argument
   6353  char *buf, size_t nbytes, loff_t off)  in memory_high_write() argument
   [all …]
vmstat.c
   1834  unsigned long off = l - (unsigned long *)m->private;  in vmstat_show() local
   1836  seq_puts(m, vmstat_text[off]);  in vmstat_show()
   1840  if (off == NR_VMSTAT_ITEMS - 1) {  in vmstat_show()
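
vmstat_show() recovers the counter's index by pointer subtraction: the seq iterator hands back a pointer into the private counter array, and l - (unsigned long *)m->private counts elements, not bytes. A tiny sketch with invented counter names:

#include <stdio.h>

static const char *vmstat_text_[] = {
	"nr_free_pages", "nr_zone_inactive_anon", "nr_zone_active_anon",
};

int main(void)
{
	unsigned long counters[3] = { 12345, 678, 90 };

	for (unsigned long *l = counters; l < counters + 3; l++) {
		unsigned long off = l - counters;	/* elements, not bytes */
		printf("%s %lu\n", vmstat_text_[off], *l);
	}
	return 0;
}
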
memory.c
    693  unsigned long off;  in vm_normal_page() local
    694  off = (addr - vma->vm_start) >> PAGE_SHIFT;  in vm_normal_page()
    695  if (pfn == vma->vm_pgoff + off)  in vm_normal_page()
    736  unsigned long off;  in vm_normal_page_pmd() local
    737  off = (addr - vma->vm_start) >> PAGE_SHIFT;  in vm_normal_page_pmd()
    738  if (pfn == vma->vm_pgoff + off)  in vm_normal_page_pmd()
   4441  int off;  in do_fault_around() local
   4448  off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);  in do_fault_around()
   4449  start_pgoff -= off;  in do_fault_around()
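
In do_fault_around(), off is the distance in pages (masked so it stays within one PTE table) between the rounded-down window start and the faulting address; subtracting it from the faulting page's file offset yields the window's starting pgoff. A sketch of that index math with invented addresses and an x86-64-like table size:

#include <stdio.h>

#define PAGE_SHIFT_	12
#define PTRS_PER_PTE_	512	/* entries per PTE page */

int main(void)
{
	unsigned long fault_addr  = 0x7f0000017000UL;	/* faulting address */
	unsigned long address     = 0x7f0000010000UL;	/* window start */
	unsigned long fault_pgoff = 1047;		/* invented file page */

	int off = (int)(((fault_addr - address) >> PAGE_SHIFT_) &
			(PTRS_PER_PTE_ - 1));
	unsigned long start_pgoff = fault_pgoff - off;

	printf("window starts %d pages before the fault, at pgoff %lu\n",
	       off, start_pgoff);
	return 0;
}
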
mempolicy.c
   1904  unsigned long off;  in interleave_nid() local
   1914  off = vma->vm_pgoff >> (shift - PAGE_SHIFT);  in interleave_nid()
   1915  off += (addr - vma->vm_start) >> shift;  in interleave_nid()
   1916  return offset_il_node(pol, off);  in interleave_nid()
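
interleave_nid() expresses both the VMA's file offset and the address's distance into the VMA in units of 2^shift bytes, sums them, and hands the result to offset_il_node() to pick a node. A sketch that reduces the node selection to a plain modulo over an invented 4-node policy:

#include <stdio.h>

#define PAGE_SHIFT_ 12

int main(void)
{
	unsigned int nnodes = 4;	/* nodes in the (invented) policy */
	unsigned int shift = 21;	/* interleave unit: 2 MiB */
	unsigned long vm_start = 0x700000000000UL;
	unsigned long vm_pgoff = 0x200;	/* VMA's file offset, in pages */
	unsigned long addr = vm_start + (5UL << shift);

	/* express file offset and in-VMA distance in 2^shift units */
	unsigned long off = vm_pgoff >> (shift - PAGE_SHIFT_);
	off += (addr - vm_start) >> shift;

	printf("interleave unit %lu -> node %lu\n", off, off % nnodes);
	return 0;
}
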
vmalloc.c
   3480  unsigned long off;  in remap_vmalloc_range_partial() local
   3483  if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))  in remap_vmalloc_range_partial()
   3498  if (check_add_overflow(size, off, &end_index) ||  in remap_vmalloc_range_partial()
   3501  kaddr += off;  in remap_vmalloc_range_partial()
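
remap_vmalloc_range_partial() treats pgoff as untrusted: both the shift to a byte offset and the addition to the request size are checked for wraparound before use. A sketch modelling check_shl_overflow()/check_add_overflow() with the GCC/Clang __builtin_*_overflow primitives (the shift is expressed as a checked multiply by 1 << PAGE_SHIFT):

#include <stdio.h>

#define PAGE_SHIFT_ 12

static int remap_checks(unsigned long pgoff, unsigned long size,
			unsigned long area_size)
{
	unsigned long off, end_index;

	/* off = pgoff << PAGE_SHIFT, rejecting any shifted-out bits */
	if (__builtin_mul_overflow(pgoff, 1UL << PAGE_SHIFT_, &off))
		return -1;
	/* end_index = size + off, rejecting wraparound past ULONG_MAX */
	if (__builtin_add_overflow(size, off, &end_index) ||
	    end_index > area_size)
		return -1;
	return 0;
}

int main(void)
{
	printf("sane pgoff: %d\n", remap_checks(2, 4096, 1UL << 20));
	printf("huge pgoff: %d\n", remap_checks(~0UL, 4096, 1UL << 20));
	return 0;
}
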
Kconfig.debug
     46  can be overridden by debug_pagealloc=off|on.
Kconfig
    352  more than it requires. To deal with this, mmap() is able to trim off
    355  If trimming is enabled, the excess is trimmed off and returned to the