/mm/ |
D | memblock.c |
      166  static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)  in memblock_cap_size() argument
      168  return *size = min(*size, PHYS_ADDR_MAX - base);  in memblock_cap_size()
      181  phys_addr_t base, phys_addr_t size)  in memblock_overlaps_region() argument
      185  memblock_cap_size(base, &size);  in memblock_overlaps_region()
      188  if (memblock_addrs_overlap(base, size, type->regions[i].base,  in memblock_overlaps_region()
      350  type->regions[0].base = 0;  in memblock_remove_region()
      511  if (this->base + this->size != next->base ||  in memblock_merge_regions()
      515  BUG_ON(this->base + this->size > next->base);  in memblock_merge_regions()
      540  int idx, phys_addr_t base,  in memblock_insert_region() argument
      549  rgn->base = base;  in memblock_insert_region()
      [all …]
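The memblock hits above all manipulate [base, base + size) regions: memblock_cap_size() clamps the size so base + size cannot wrap past PHYS_ADDR_MAX, and memblock_overlaps_region() then compares the capped range against each existing region. A minimal user-space sketch of that clamp-then-overlap pattern (hypothetical helper names, uint64_t standing in for phys_addr_t), not the kernel implementation:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define ADDR_MAX UINT64_MAX            /* stand-in for PHYS_ADDR_MAX */

    /* Clamp *size so that base + *size cannot wrap past ADDR_MAX. */
    static uint64_t cap_size(uint64_t base, uint64_t *size)
    {
        uint64_t max = ADDR_MAX - base;

        if (*size > max)
            *size = max;
        return *size;
    }

    /* Half-open overlap test: [b1, b1 + s1) vs [b2, b2 + s2). */
    static bool addrs_overlap(uint64_t b1, uint64_t s1, uint64_t b2, uint64_t s2)
    {
        return b1 < b2 + s2 && b2 < b1 + s1;
    }

    int main(void)
    {
        uint64_t base = ADDR_MAX - 0x1000, size = 0x10000;

        cap_size(base, &size);             /* size is clamped to 0x1000 */
        printf("capped size: %#llx\n", (unsigned long long)size);
        printf("overlap: %d\n", addrs_overlap(base, size, base + 0x800, 0x100));
        return 0;
    }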
|
D | page_ext.c |
      127  static inline struct page_ext *get_entry(void *base, unsigned long index)  in get_entry() argument
      129  return base + page_ext_size * index;  in get_entry()
      186  struct page_ext *base;  in lookup_page_ext() local
      189  base = NODE_DATA(page_to_nid(page))->node_page_ext;  in lookup_page_ext()
      196  if (unlikely(!base))  in lookup_page_ext()
      200  return get_entry(base, index);  in lookup_page_ext()
      206  struct page_ext *base;  in alloc_node_page_ext() local
      225  base = memblock_alloc_try_nid(  in alloc_node_page_ext()
      228  if (!base)  in alloc_node_page_ext()
      230  NODE_DATA(nid)->node_page_ext = base;  in alloc_node_page_ext()
      [all …]
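get_entry() indexes a flat per-node array whose element size (page_ext_size) is only fixed at boot, so the lookup is byte arithmetic on the base pointer rather than ordinary array indexing. A small sketch of that runtime-sized indexing, with hypothetical names:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Element size decided at runtime, e.g. grown by optional debug fields. */
    static size_t entry_size = 24;

    /* Byte-offset indexing into a flat array of runtime-sized entries. */
    static void *get_entry(void *base, unsigned long index)
    {
        return (char *)base + entry_size * index;
    }

    int main(void)
    {
        unsigned long nr = 8;
        void *base = calloc(nr, entry_size);

        if (!base)
            return 1;
        memset(get_entry(base, 3), 0xff, entry_size);   /* touch entry 3 only */
        printf("entry 3 lives at byte offset %zu\n",
               (size_t)((char *)get_entry(base, 3) - (char *)base));
        free(base);
        return 0;
    }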
|
D | cma.c |
      175  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,  in cma_init_reserved_mem() argument
      189  if (!size || !memblock_is_region_reserved(base, size))  in cma_init_reserved_mem()
      200  if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)  in cma_init_reserved_mem()
      214  cma->base_pfn = PFN_DOWN(base);  in cma_init_reserved_mem()
      244  int __init cma_declare_contiguous_nid(phys_addr_t base,  in cma_declare_contiguous_nid() argument
      262  __func__, &size, &base, &limit, &alignment);  in cma_declare_contiguous_nid()
      283  if (fixed && base & (alignment - 1)) {  in cma_declare_contiguous_nid()
      286  &base, &alignment);  in cma_declare_contiguous_nid()
      289  base = ALIGN(base, alignment);  in cma_declare_contiguous_nid()
      293  if (!base)  in cma_declare_contiguous_nid()
      [all …]
|
D | mapping_dirty_helpers.c |
       65  struct wp_walk base;  member
       72  #define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)
      336  .base = { .total = 0 },  in clean_record_shared_mapping_range()
      345  &cwalk.base));  in clean_record_shared_mapping_range()
      351  return cwalk.base.total;  in clean_record_shared_mapping_range()
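Here base is an embedded member: struct clean_walk embeds a struct wp_walk as .base, and to_clean_walk() uses container_of() to go from a pointer to that member back to the enclosing structure. A self-contained sketch of the embed-plus-container_of pattern (fields trimmed and renamed for the example; not the kernel structs):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct wp_walk {
        unsigned long total;           /* pages write-protected so far */
    };

    struct clean_walk {
        struct wp_walk base;           /* embedded "base class" */
        unsigned long bitmap_start;    /* extra state only clean_walk has */
    };

    /* Recover the enclosing clean_walk from a pointer to its base member. */
    static struct clean_walk *to_clean_walk(struct wp_walk *wpwalk)
    {
        return container_of(wpwalk, struct clean_walk, base);
    }

    int main(void)
    {
        struct clean_walk cwalk = { .base = { .total = 0 }, .bitmap_start = 42 };
        struct wp_walk *wpwalk = &cwalk.base;   /* what a walk callback would see */

        printf("bitmap_start = %lu\n", to_clean_walk(wpwalk)->bitmap_start);
        return 0;
    }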
|
D | internal.h |
      472  static inline struct page *mem_map_offset(struct page *base, int offset)  in mem_map_offset() argument
      475  return nth_page(base, offset);  in mem_map_offset()
      476  return base + offset;  in mem_map_offset()
      484  struct page *base, int offset)  in mem_map_next() argument
      487  unsigned long pfn = page_to_pfn(base) + offset;  in mem_map_next()
|
D | slob.c |
      148  slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);  in set_slob() local
      149  slobidx_t offset = next - base;  in set_slob()
      173  slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);  in slob_next() local
      180  return base+next;  in slob_next()
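SLOB recovers the start of the page holding a block by masking the pointer with PAGE_MASK, and stores free-list links as small offsets from that base instead of full pointers. A sketch of the mask-to-page-base and offset round trip, assuming a 4 KiB page (names and types here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  ((uintptr_t)1 << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    typedef uint16_t unit_t;               /* stand-in for slob's slobidx_t */

    /* A page-aligned buffer standing in for one slab page. */
    static unit_t page[PAGE_SIZE / sizeof(unit_t)] __attribute__((aligned(4096)));

    /* Round a pointer down to the start of the page it lives in. */
    static unit_t *page_base(void *p)
    {
        return (unit_t *)((uintptr_t)p & PAGE_MASK);
    }

    int main(void)
    {
        unit_t *s = &page[100];            /* some block inside the page */
        unit_t *next = &page[250];         /* the block it should link to */
        unit_t *base = page_base(s);
        long offset = next - base;         /* stored as a small index, not a pointer */

        printf("offset = %ld, recovers next? %d\n",
               offset, base + offset == next);
        return 0;
    }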
|
D | percpu.c |
     2319  void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);  in __is_kernel_percpu_address() local
     2323  void *start = per_cpu_ptr(base, cpu);  in __is_kernel_percpu_address()
     2330  per_cpu_ptr(base, get_boot_cpu_id());  in __is_kernel_percpu_address()
     2381  void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);  in per_cpu_ptr_to_phys() local
     2403  void *start = per_cpu_ptr(base, cpu);  in per_cpu_ptr_to_phys()
     3037  void *base = (void *)ULONG_MAX;  in pcpu_embed_first_chunk() local
     3079  base = min(ptr, base);  in pcpu_embed_first_chunk()
     3083  max_distance = areas[highest_group] - base;  in pcpu_embed_first_chunk()
     3120  ai->groups[group].base_offset = areas[group] - base;  in pcpu_embed_first_chunk()
     3127  pcpu_setup_first_chunk(ai, base);  in pcpu_embed_first_chunk()
|
D | nommu.c |
      976  void *base;  in do_mmap_private() local
     1012  base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);  in do_mmap_private()
     1013  if (!base)  in do_mmap_private()
     1019  region->vm_start = (unsigned long) base;  in do_mmap_private()
     1033  ret = kernel_read(vma->vm_file, base, len, &fpos);  in do_mmap_private()
     1039  memset(base + ret, 0, len - ret);  in do_mmap_private()
|
D | vmalloc.c |
     3654  unsigned long base, start, size, end, last_end, orig_start, orig_end;  in pcpu_get_vm_areas() local
     3706  base = pvm_determine_end_from_reverse(&va, align) - end;  in pcpu_get_vm_areas()
     3713  if (base + last_end < vmalloc_start + last_end)  in pcpu_get_vm_areas()
     3726  if (base + end > va->va_end) {  in pcpu_get_vm_areas()
     3727  base = pvm_determine_end_from_reverse(&va, align) - end;  in pcpu_get_vm_areas()
     3735  if (base + start < va->va_start) {  in pcpu_get_vm_areas()
     3737  base = pvm_determine_end_from_reverse(&va, align) - end;  in pcpu_get_vm_areas()
     3752  va = pvm_find_va_enclose_addr(base + end);  in pcpu_get_vm_areas()
     3759  start = base + offsets[area];  in pcpu_get_vm_areas()
|
D | slub.c |
      631  void *base;  in check_valid_pointer() local
      636  base = page_address(page);  in check_valid_pointer()
      639  if (object < base || object >= base + page->objects * s->size ||  in check_valid_pointer()
      640  (object - base) % s->size) {  in check_valid_pointer()
     4331  void *base;  in __kmem_obj_info() local
     4342  base = page_address(page);  in __kmem_obj_info()
     4351  objp = base + s->size * objnr;  in __kmem_obj_info()
     4353  …if (WARN_ON_ONCE(objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size)…  in __kmem_obj_info()
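Both slub.c call sites apply the same sanity check: a pointer is only a valid object if it falls inside the slab page's object area and sits at an exact multiple of the object size from the page base. A user-space sketch of that bounds-and-stride check (struct and field names are made up for the example):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct slab_info {
        char *base;            /* first object in the slab page */
        unsigned int objects;  /* number of objects in the slab */
        unsigned int size;     /* size of one object, including metadata */
    };

    /* Valid pointers lie in range and are a whole number of objects from base. */
    static bool check_valid_pointer(const struct slab_info *s, const char *object)
    {
        if (object < s->base || object >= s->base + (size_t)s->objects * s->size)
            return false;
        return (object - s->base) % s->size == 0;
    }

    int main(void)
    {
        struct slab_info s = { .objects = 16, .size = 64 };

        s.base = malloc((size_t)s.objects * s.size);
        if (!s.base)
            return 1;
        printf("%d %d %d\n",
               check_valid_pointer(&s, s.base + 3 * s.size),   /* 1: object #3 */
               check_valid_pointer(&s, s.base + 10),           /* 0: mid-object */
               check_valid_pointer(&s, s.base + 16 * 64));     /* 0: past the end */
        free(s.base);
        return 0;
    }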
|
D | memory.c |
     5775  int i, n, base, l;  in process_huge_page() local
     5784  base = 0;  in process_huge_page()
     5793  base = pages_per_huge_page - 2 * (pages_per_huge_page - n);  in process_huge_page()
     5796  for (i = 0; i < base; i++) {  in process_huge_page()
     5806  int left_idx = base + i;  in process_huge_page()
     5807  int right_idx = base + 2 * l - 1 - i;  in process_huge_page()
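In process_huge_page(), base marks the start of a 2*l-subpage window around the faulting subpage n: everything outside the window is processed first, then the window is walked from both ends inward (left_idx/right_idx) so the target subpage is touched last and stays cache-hot. A standalone sketch that only reproduces the visiting order implied by those index computations (not the kernel function itself):

    #include <stdio.h>

    /* Print the order in which subpages 0..nr-1 would be processed when the
     * faulting subpage is n; the target is deliberately visited last. */
    static void show_order(int nr, int n)
    {
        int i, base, l;

        if (2 * n <= nr) {
            /* Target in the first half: clear the tail of the huge page first. */
            base = 0;
            l = n;
            for (i = nr - 1; i >= 2 * n; i--)
                printf("%d ", i);
        } else {
            /* Target in the second half: clear the head first. */
            base = nr - 2 * (nr - n);
            l = nr - n;
            for (i = 0; i < base; i++)
                printf("%d ", i);
        }
        /* Walk the remaining 2*l window from both ends toward subpage n. */
        for (i = 0; i < l; i++) {
            printf("%d ", base + i);              /* left_idx  */
            printf("%d ", base + 2 * l - 1 - i);  /* right_idx */
        }
        printf("\n");
    }

    int main(void)
    {
        show_order(8, 2);   /* prints: 7 6 5 4 0 3 1 2 */
        show_order(8, 6);   /* prints: 0 1 2 3 4 7 5 6 */
        return 0;
    }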
|
D | page_alloc.c |
      760  int base = order;  in order_to_pindex() local
      765  base = PAGE_ALLOC_COSTLY_ORDER + 1;  in order_to_pindex()
      771  return (MIGRATE_PCPTYPES * base) + migratetype;  in order_to_pindex()
     8156  usable_startpfn = PFN_DOWN(r->base);  in find_zone_movable_pfns_for_nodes()
|
D | hugetlb.c |
     5949  unsigned long base = addr & PUD_MASK;  in vma_shareable() local
     5950  unsigned long end = base + PUD_SIZE;  in vma_shareable()
     5955  if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))  in vma_shareable()
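vma_shareable() rounds the faulting address down to a PUD boundary and only permits sharing when the whole [base, base + PUD_SIZE) window lies inside the VMA. A sketch of that round-down-and-range check, assuming a 64-bit unsigned long and an illustrative 1 GiB PUD_SIZE (struct and function names are simplified stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    #define PUD_SIZE (1UL << 30)          /* 1 GiB window, illustrative value */
    #define PUD_MASK (~(PUD_SIZE - 1))

    struct vma { unsigned long vm_start, vm_end; };

    /* Does [start, end) lie entirely within the vma? */
    static bool range_in_vma(const struct vma *vma,
                             unsigned long start, unsigned long end)
    {
        return vma->vm_start <= start && end <= vma->vm_end;
    }

    static bool window_shareable(const struct vma *vma, unsigned long addr)
    {
        unsigned long base = addr & PUD_MASK;   /* round down to PUD boundary */
        unsigned long end = base + PUD_SIZE;

        return range_in_vma(vma, base, end);
    }

    int main(void)
    {
        struct vma vma = { .vm_start = (2UL << 30) + 0x1000, .vm_end = 6UL << 30 };

        printf("%d\n", window_shareable(&vma, (3UL << 30) + 0x1000)); /* 1 */
        printf("%d\n", window_shareable(&vma, (2UL << 30) + 0x2000)); /* 0: window starts before the vma */
        return 0;
    }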
|
/mm/kasan/ |
D | common.c |
       72  void *base = task_stack_page(task);  in kasan_unpoison_task_stack() local
       74  kasan_unpoison(base, THREAD_SIZE, false);  in kasan_unpoison_task_stack()
       85  void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));  in kasan_unpoison_task_stack_below() local
       87  kasan_unpoison(base, watermark - base, false);  in kasan_unpoison_task_stack_below()
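kasan_unpoison_task_stack_below() finds the base of the THREAD_SIZE-aligned stack by masking the watermark pointer, then unpoisons only the bytes from that base up to the watermark. A sketch of the mask-and-length arithmetic, with a stand-in unpoison() that just reports the range and an illustrative THREAD_SIZE:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE 16384             /* illustrative; must be a power of two */

    /* Stand-in for kasan_unpoison(): just report what would be unpoisoned. */
    static void unpoison(void *addr, size_t size)
    {
        printf("unpoison %zu bytes starting at %p\n", size, addr);
    }

    /* Unpoison the used part of the stack below the watermark pointer. */
    static void unpoison_stack_below(void *watermark)
    {
        /* Masking works because the stack base is THREAD_SIZE aligned. */
        char *base = (char *)((uintptr_t)watermark & ~((uintptr_t)THREAD_SIZE - 1));

        unpoison(base, (char *)watermark - base);
    }

    int main(void)
    {
        /* Simulate a THREAD_SIZE-aligned stack and a watermark inside it. */
        static char stack[THREAD_SIZE] __attribute__((aligned(16384)));

        unpoison_stack_below(stack + 0x900);   /* reports the 0x900 bytes below it */
        return 0;
    }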
|