/mm/ (search hits for "map")
sparse.c
  374  struct page *map;  in sparse_mem_map_populate() local
  377  map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);  in sparse_mem_map_populate()
  378  if (map)  in sparse_mem_map_populate()
  379  return map;  in sparse_mem_map_populate()
  382  map = memblock_virt_alloc_try_nid(size,  in sparse_mem_map_populate()
  385  return map;  in sparse_mem_map_populate()
  392  void *map;  in sparse_mem_maps_populate_node() local
  396  map = alloc_remap(nodeid, size * map_count);  in sparse_mem_maps_populate_node()
  397  if (map) {  in sparse_mem_maps_populate_node()
  401  map_map[pnum] = map;  in sparse_mem_maps_populate_node()
  [all …]
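The sparse.c hits trace a two-stage allocation: try the architecture's node-local remap area first (alloc_remap()), and only fall back to the generic early allocator (memblock_virt_alloc_try_nid()) if that fails. A compilable userspace sketch of that control flow; the fake_* stand-ins are ours, only the try-then-fallback shape is taken from the hits above:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGES_PER_SECTION 4096UL

    struct page { unsigned long flags; };

    /* stand-in for alloc_remap(): may return NULL when no remap area exists */
    static void *fake_alloc_remap(int nid, size_t size)
    {
        (void)nid; (void)size;
        return NULL;
    }

    /* stand-in for the memblock_virt_alloc_try_nid() fallback */
    static void *fake_memblock_alloc(size_t size, int nid)
    {
        (void)nid;
        return malloc(size);
    }

    static struct page *populate_mem_map(int nid)
    {
        size_t size = sizeof(struct page) * PAGES_PER_SECTION;
        struct page *map;

        map = fake_alloc_remap(nid, size);      /* preferred, node-local */
        if (map)
            return map;
        return fake_memblock_alloc(size, nid);  /* generic fallback */
    }

    int main(void)
    {
        struct page *map = populate_mem_map(0);
        printf("mem_map at %p\n", (void *)map);
        free(map);
        return 0;
    }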
percpu.c
  112  int *map;  /* allocation map */  member
  334  int off = chunk->map[i] & ~1;  in pcpu_count_occupied_pages()
  335  int end = chunk->map[i + 1] & ~1;  in pcpu_count_occupied_pages()
  338  int prev = chunk->map[i - 1];  in pcpu_count_occupied_pages()
  345  int next = chunk->map[i + 1];  in pcpu_count_occupied_pages()
  346  int nend = chunk->map[i + 2] & ~1;  in pcpu_count_occupied_pages()
  461  old_size = chunk->map_alloc * sizeof(chunk->map[0]);  in pcpu_extend_area_map()
  462  old = chunk->map;  in pcpu_extend_area_map()
  467  chunk->map = new;  in pcpu_extend_area_map()
  562  for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {  in pcpu_alloc_area()
  [all …]
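The percpu.c hits all revolve around one encoding: chunk->map[] holds region start offsets in which bit 0 appears to double as an in-use flag, so map[i] & ~1 recovers a region's start and map[i + 1] & ~1 its end. That reading of the snippets is our inference, sketched below with toy values:

    #include <stdio.h>

    int main(void)
    {
        /* three regions: [0,128) free, [128,512) in use, [512,1024) free;
         * bit 0 of an entry marks the region it starts as allocated */
        int map[] = { 0, 128 | 1, 512, 1024 };
        int map_used = 3;

        for (int i = 0; i < map_used; i++) {
            int off = map[i] & ~1;          /* strip the flag bit */
            int end = map[i + 1] & ~1;

            printf("[%4d, %4d) %s\n", off, end,
                   (map[i] & 1) ? "allocated" : "free");
        }
        return 0;
    }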
page_cgroup.c
  329  struct page **map;  member
  370  ctrl->map[idx] = page;  in swap_cgroup_prepare()
  379  __free_page(ctrl->map[idx]);  in swap_cgroup_prepare()
  396  mappage = ctrl->map[offset / SC_PER_PAGE];  in lookup_swap_cgroup()
  486  ctrl->map = array;  in swap_cgroup_swapon()
  490  ctrl->map = NULL;  in swap_cgroup_swapon()
  508  struct page **map;  in swap_cgroup_swapoff() local
  517  map = ctrl->map;  in swap_cgroup_swapoff()
  519  ctrl->map = NULL;  in swap_cgroup_swapoff()
  523  if (map) {  in swap_cgroup_swapoff()
  [all …]
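page_cgroup.c's ctrl->map is a two-level table: a flat array of page pointers indexed by offset / SC_PER_PAGE, then the record inside the page by the remainder. A userspace sketch of the lookup_swap_cgroup() lookup; the record type (unsigned short) and the SC_PER_PAGE derivation are assumptions, and malloc'd buffers stand in for pages:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE   4096UL
    #define SC_PER_PAGE (PAGE_SIZE / sizeof(unsigned short))

    struct ctrl {
        unsigned short **map;   /* top level: one entry per backing page */
        unsigned long length;   /* number of backing pages */
    };

    static unsigned short *lookup(struct ctrl *ctrl, unsigned long offset)
    {
        unsigned short *mappage = ctrl->map[offset / SC_PER_PAGE];
        return &mappage[offset % SC_PER_PAGE];
    }

    int main(void)
    {
        struct ctrl ctrl;
        ctrl.length = 2;
        ctrl.map = calloc(ctrl.length, sizeof(*ctrl.map));
        for (unsigned long i = 0; i < ctrl.length; i++)
            ctrl.map[i] = calloc(1, PAGE_SIZE);

        *lookup(&ctrl, 3000) = 42;      /* record for swap offset 3000 */
        printf("record at 3000 = %u\n", *lookup(&ctrl, 3000));

        for (unsigned long i = 0; i < ctrl.length; i++)
            free(ctrl.map[i]);
        free(ctrl.map);
        return 0;
    }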
swapfile.c
   887  unsigned char *map;  in swp_swapcount() local
   907  map = kmap_atomic(page);  in swp_swapcount()
   908  tmp_count = map[offset];  in swp_swapcount()
   909  kunmap_atomic(map);  in swp_swapcount()
  2847  unsigned char *map;  in add_swap_count_continuation() local
  2856  map = kmap_atomic(list_page) + offset;  in add_swap_count_continuation()
  2857  count = *map;  in add_swap_count_continuation()
  2858  kunmap_atomic(map);  in add_swap_count_continuation()
  2891  unsigned char *map;  in swap_count_continued() local
  2901  map = kmap_atomic(page) + offset;  in swap_count_continued()
  [all …]
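Every swapfile.c hit is the same idiom: kmap_atomic() a page of swap-count bytes, touch exactly one byte, kunmap_atomic() immediately. In the kernel the map/unmap pair also pins a mapping slot and disables preemption; the stubs below only keep the shape of the pattern compilable in userspace:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    static unsigned char fake_page[PAGE_SIZE];

    /* stand-ins: kmap_atomic() returns a kernel virtual address for the
     * page; here the "page" is just a static buffer */
    static void *kmap_atomic_stub(unsigned char *page)  { return page; }
    static void  kunmap_atomic_stub(void *addr)         { (void)addr; }

    static unsigned char read_count(unsigned char *page, unsigned long offset)
    {
        unsigned char *map, count;

        map = kmap_atomic_stub(page);   /* short-lived mapping... */
        count = map[offset];            /* ...touch exactly one byte... */
        kunmap_atomic_stub(map);        /* ...and drop it right away */
        return count;
    }

    int main(void)
    {
        fake_page[7] = 3;
        printf("count at offset 7 = %u\n", read_count(fake_page, 7));
        return 0;
    }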
sparse-vmemmap.c
  183  struct page *map;  in sparse_mem_map_populate() local
  185  map = pfn_to_page(pnum * PAGES_PER_SECTION);  in sparse_mem_map_populate()
  186  start = (unsigned long)map;  in sparse_mem_map_populate()
  187  end = (unsigned long)(map + PAGES_PER_SECTION);  in sparse_mem_map_populate()
  192  return map;  in sparse_mem_map_populate()
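With a virtually mapped memmap, this variant of sparse_mem_map_populate() needs no allocation logic at lookup time: pfn_to_page() reduces to base-plus-pfn pointer arithmetic. A sketch of just that arithmetic (the vmemmap base here is a local array, not an architecture's fixed address):

    #include <stdio.h>

    #define PAGES_PER_SECTION 4096UL

    struct page { unsigned long flags; };

    static struct page *vmemmap;   /* base of the virtually mapped page array */

    #define pfn_to_page(pfn) (vmemmap + (pfn))

    int main(void)
    {
        static struct page backing[2 * PAGES_PER_SECTION];
        unsigned long pnum = 1;    /* second section */

        vmemmap = backing;
        struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
        printf("section %lu memmap starts at index %ld\n",
               pnum, (long)(map - vmemmap));
        return 0;
    }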
slub.c
   441  static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)  in get_map() argument
   447  set_bit(slab_index(p, s, addr), map);  in get_map()
  3184  unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *  in list_slab_objects() local
  3186  if (!map)  in list_slab_objects()
  3191  get_map(s, page, map);  in list_slab_objects()
  3194  if (!test_bit(slab_index(p, s, addr), map)) {  in list_slab_objects()
  3200  kfree(map);  in list_slab_objects()
  3859  unsigned long *map)  in validate_slab() argument
  3869  bitmap_zero(map, page->objects);  in validate_slab()
  3871  get_map(s, page, map);  in validate_slab()
  [all …]
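get_map() fills a caller-supplied bitmap with one bit per object on the slab: callers size it with BITS_TO_LONGS(page->objects), zero it (kzalloc() or bitmap_zero()), and then use test_bit() to tell free objects from allocated ones. A self-contained sketch with the bit helpers re-implemented for userspace:

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    #define BITS_PER_LONG    (sizeof(unsigned long) * CHAR_BIT)
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static void set_bit(unsigned int nr, unsigned long *map)
    {
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    static int test_bit(unsigned int nr, const unsigned long *map)
    {
        return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
        unsigned int objects = 100;   /* page->objects */
        unsigned long *map = calloc(BITS_TO_LONGS(objects),
                                    sizeof(unsigned long));  /* kzalloc analogue */

        if (!map)
            return 1;
        set_bit(3, map);    /* objects 3 and 70 are on the free list */
        set_bit(70, map);

        /* a clear bit means the object is still allocated */
        printf("obj 3 free? %d  obj 4 free? %d\n",
               test_bit(3, map), test_bit(4, map));
        free(map);
        return 0;
    }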
frontswap.c
  164  void __frontswap_init(unsigned type, unsigned long *map)  in __frontswap_init() argument
  174  if (WARN_ON(!map))  in __frontswap_init()
  181  frontswap_map_set(sis, map);  in __frontswap_init()
bootmem.c
  175  unsigned long *map, start, end, pages, cur, count = 0;  in free_all_bootmem_core() local
  180  map = bdata->node_bootmem_map;  in free_all_bootmem_core()
  197  vec = ~map[idx / BITS_PER_LONG];  in free_all_bootmem_core()
  202  vec |= ~map[idx / BITS_PER_LONG + 1] <<  in free_all_bootmem_core()
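The free_all_bootmem_core() scan works a word at a time: inverting a bitmap word (vec = ~map[idx / BITS_PER_LONG]) turns set bits into free-page markers, so an all-ones vec lets a whole word's worth of pages be freed in one batch, and anything else is walked bit by bit. A sketch of that scan over a toy bitmap:

    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    int main(void)
    {
        /* bit set = page reserved; word 0 fully reserved, word 1 fully
         * free, word 2 half and half */
        unsigned long map[3] = { ~0UL, 0UL, 0xFFFFFFFFUL };
        unsigned long count = 0;

        for (unsigned long idx = 0; idx < 3 * BITS_PER_LONG; idx += BITS_PER_LONG) {
            unsigned long vec = ~map[idx / BITS_PER_LONG];  /* set bit = free */

            if (vec == ~0UL) {
                count += BITS_PER_LONG;    /* whole word free: batch it */
            } else {
                for (unsigned long cur = 0; cur < BITS_PER_LONG; cur++)
                    if (vec & (1UL << cur))
                        count++;           /* free pages one at a time */
            }
        }
        printf("%lu pages freed\n", count);   /* prints 96 on LP64 */
        return 0;
    }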
vmalloc.c
  1901  void *map = kmap_atomic(p);  in aligned_vread() local
  1902  memcpy(buf, map + offset, length);  in aligned_vread()
  1903  kunmap_atomic(map);  in aligned_vread()
  1940  void *map = kmap_atomic(p);  in aligned_vwrite() local
  1941  memcpy(map + offset, buf, length);  in aligned_vwrite()
  1942  kunmap_atomic(map);  in aligned_vwrite()
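aligned_vread()/aligned_vwrite() never copy across a page boundary in one memcpy(): each pass computes the offset within the current page, copies at most up to the page end, drops the mapping, and advances. The sketch below keeps only the chunking arithmetic, with a plain buffer standing in for the kmap_atomic()'d pages:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    static void chunked_read(char *dst, const char *src, unsigned long addr,
                             unsigned long count)
    {
        while (count) {
            unsigned long offset = addr & (PAGE_SIZE - 1);  /* offset in page */
            unsigned long length = PAGE_SIZE - offset;      /* room left in page */

            if (length > count)
                length = count;
            /* in the kernel: map = kmap_atomic(p);
             * memcpy(dst, map + offset, length); kunmap_atomic(map); */
            memcpy(dst, src + addr, length);
            addr  += length;
            dst   += length;
            count -= length;
        }
    }

    int main(void)
    {
        static char src[2 * PAGE_SIZE], dst[64];

        strcpy(src + PAGE_SIZE - 5, "crosses a page boundary");
        chunked_read(dst, src, PAGE_SIZE - 5, sizeof(dst) - 1);
        printf("%s\n", dst);
        return 0;
    }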
zpool.c
  319  return zpool->driver->map(zpool->pool, handle, mapmode);  in zpool_map_handle()
zbud.c
  196  .map = zbud_zpool_map,
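The zpool.c, zbud.c, and zsmalloc.c hits together show a driver ops table: each backend fills a struct of callbacks (zbud.c here and zsmalloc.c below both set a .map member), and zpool_map_handle() simply dispatches through zpool->driver->map. A minimal sketch of that pattern; the struct layout is invented, not the real struct zpool_driver:

    #include <stdio.h>

    struct zpool_driver_sketch {
        const char *name;
        void *(*map)(void *pool, unsigned long handle, int mapmode);
    };

    static void *toy_map(void *pool, unsigned long handle, int mapmode)
    {
        (void)pool; (void)mapmode;
        return (void *)handle;      /* toy handles are plain pointers */
    }

    static struct zpool_driver_sketch toy_driver = {
        .name = "toy",
        .map  = toy_map,            /* like .map = zbud_zpool_map above */
    };

    /* mirrors zpool_map_handle(): dispatch through the driver's ->map */
    static void *map_handle(struct zpool_driver_sketch *drv, void *pool,
                            unsigned long handle, int mapmode)
    {
        return drv->map(pool, handle, mapmode);
    }

    int main(void)
    {
        int obj = 42;
        int *p = map_handle(&toy_driver, NULL, (unsigned long)&obj, 0);

        printf("driver '%s' mapped handle to %d\n", toy_driver.name, *p);
        return 0;
    }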
page_alloc.c
  4968  struct page *map;  in alloc_node_mem_map() local
  4979  map = alloc_remap(pgdat->node_id, size);  in alloc_node_mem_map()
  4980  if (!map)  in alloc_node_mem_map()
  4981  map = memblock_virt_alloc_node_nopanic(size,  in alloc_node_mem_map()
  4983  pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);  in alloc_node_mem_map()
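In alloc_node_mem_map() the array is sized from a rounded-down start pfn, and node_mem_map is then pointed into the allocation by map + (node_start_pfn - start), so indexing by pfn stays consistent. A sketch of that offset arithmetic with toy pfn values (the rounding rule itself is our assumption):

    #include <stdio.h>
    #include <stdlib.h>

    struct page { unsigned long flags; };

    int main(void)
    {
        unsigned long node_start_pfn = 0x105;   /* toy values */
        unsigned long start = 0x100;            /* rounded down for alignment */
        unsigned long end   = 0x200;            /* rounded-up end pfn */
        unsigned long size  = (end - start) * sizeof(struct page);

        struct page *map = malloc(size);
        struct page *node_mem_map = map + (node_start_pfn - start);

        /* page for pfn p is node_mem_map[p - node_start_pfn] == map[p - start] */
        unsigned long pfn = 0x150;
        printf("same slot: %d\n",
               &node_mem_map[pfn - node_start_pfn] == &map[pfn - start]);

        free(map);
        return 0;
    }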
zsmalloc.c
  381  .map = zs_zpool_map,
hugetlb.c
  450  static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)  in set_vma_resv_map() argument
  456  HPAGE_RESV_MASK) | (unsigned long)map);  in set_vma_resv_map()
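set_vma_resv_map() packs a flags-and-pointer pair into a single unsigned long: the resv_map pointer is aligned, so its low bits are free to carry the HPAGE_RESV_* flags kept under HPAGE_RESV_MASK. A userspace sketch of the same pointer-tagging trick; the two-bit mask is illustrative, not the kernel's value:

    #include <stdio.h>
    #include <stdint.h>

    #define RESV_MASK 0x3UL             /* low two bits carry the flags */

    struct resv { int dummy; };

    static uintptr_t pack(struct resv *map, uintptr_t flags)
    {
        return (flags & RESV_MASK) | (uintptr_t)map;
    }

    static struct resv *unpack_ptr(uintptr_t word)
    {
        return (struct resv *)(word & ~RESV_MASK);
    }

    static uintptr_t unpack_flags(uintptr_t word)
    {
        return word & RESV_MASK;
    }

    int main(void)
    {
        static struct resv r;           /* statics are sufficiently aligned */
        uintptr_t word = pack(&r, 0x1);

        printf("ptr ok: %d, flags: %lu\n",
               unpack_ptr(word) == &r, (unsigned long)unpack_flags(word));
        return 0;
    }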
Kconfig
  355  Programs which use vm86 functionality or have some need to map