/mm/  (matches for "map")
swap_cgroup.c
    10:   struct page **map;  [member]
    51:   ctrl->map[idx] = page;  [in swap_cgroup_prepare()]
    60:   __free_page(ctrl->map[idx]);  [in swap_cgroup_prepare()]
    71:   mappage = ctrl->map[offset / SC_PER_PAGE];  [in __lookup_swap_cgroup()]
    187:  ctrl->map = array;  [in swap_cgroup_swapon()]
    191:  ctrl->map = NULL;  [in swap_cgroup_swapon()]
    208:  struct page **map;  [in swap_cgroup_swapoff(), local]
    217:  map = ctrl->map;  [in swap_cgroup_swapoff()]
    219:  ctrl->map = NULL;  [in swap_cgroup_swapoff()]
    223:  if (map) {  [in swap_cgroup_swapoff()]
    [all …]
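The swap_cgroup.c hits above show a two-level lookup: ctrl->map is a flat array of page pointers, and __lookup_swap_cgroup() picks the backing page with offset / SC_PER_PAGE before indexing into it. Below is a minimal userspace sketch of that pattern; PAGE_SZ, ENTRIES_PER_PAGE, struct sc_entry and sc_lookup() are illustrative stand-ins, not the kernel's definitions.

```c
/*
 * Userspace sketch of the page-indexed lookup pattern: a flat array of
 * separately allocated "pages", indexed by offset / ENTRIES_PER_PAGE,
 * then by offset % ENTRIES_PER_PAGE. Sizes and the entry layout are
 * assumptions made for the example.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096
struct sc_entry { unsigned short id; };          /* assumed payload */
#define ENTRIES_PER_PAGE (PAGE_SZ / sizeof(struct sc_entry))

struct sc_ctrl {
	struct sc_entry **map;   /* one pointer per backing page */
	size_t pages;
};

static struct sc_entry *sc_lookup(struct sc_ctrl *ctrl, size_t offset)
{
	struct sc_entry *page = ctrl->map[offset / ENTRIES_PER_PAGE];

	return page + offset % ENTRIES_PER_PAGE;
}

int main(void)
{
	struct sc_ctrl ctrl = { .pages = 4 };
	size_t i;

	ctrl.map = calloc(ctrl.pages, sizeof(*ctrl.map));
	for (i = 0; i < ctrl.pages; i++)
		ctrl.map[i] = calloc(1, PAGE_SZ);

	sc_lookup(&ctrl, 3000)->id = 7;
	printf("entry 3000 -> id %u\n", (unsigned)sc_lookup(&ctrl, 3000)->id);

	for (i = 0; i < ctrl.pages; i++)
		free(ctrl.map[i]);
	free(ctrl.map);
	return 0;
}
```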
sparse.c
    222:  static void subsection_mask_set(unsigned long *map, unsigned long pfn,  [in subsection_mask_set(), argument]
    228:  bitmap_set(map, idx, end - idx + 1);  [in subsection_mask_set()]
    455:  struct page *map = sparse_buffer_alloc(size);  [in __populate_section_memmap(), local]
    458:  if (map)  [in __populate_section_memmap()]
    459:  return map;  [in __populate_section_memmap()]
    461:  map = memblock_alloc_try_nid(size,  [in __populate_section_memmap()]
    464:  if (!map)  [in __populate_section_memmap()]
    468:  return map;  [in __populate_section_memmap()]
    533:  struct page *map;  [in sparse_init_nid(), local]
    548:  map = __populate_section_memmap(pfn, PAGES_PER_SECTION,  [in sparse_init_nid()]
    [all …]
swapfile.c
    934:   unsigned char *map;  [in swap_alloc_cluster(), local]
    954:   map = si->swap_map + offset;  [in swap_alloc_cluster()]
    956:   map[i] = SWAP_HAS_CACHE;  [in swap_alloc_cluster()]
    1331:  unsigned char *map;  [in put_swap_page(), local]
    1343:  map = si->swap_map + offset;  [in put_swap_page()]
    1345:  val = map[i];  [in put_swap_page()]
    1503:  unsigned char *map;  [in swp_swapcount(), local]
    1526:  map = kmap_atomic(page);  [in swp_swapcount()]
    1527:  tmp_count = map[offset];  [in swp_swapcount()]
    1528:  kunmap_atomic(map);  [in swp_swapcount()]
    [all …]
slub.c
    450:   static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)  [in get_map(), argument]
    456:   set_bit(slab_index(p, s, addr), map);  [in get_map()]
    3677:  unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);  [in list_slab_objects(), local]
    3678:  if (!map)  [in list_slab_objects()]
    3683:  get_map(s, page, map);  [in list_slab_objects()]
    3686:  if (!test_bit(slab_index(p, s, addr), map)) {  [in list_slab_objects()]
    3692:  bitmap_free(map);  [in list_slab_objects()]
    4387:  unsigned long *map)  [in validate_slab(), argument]
    4397:  bitmap_zero(map, page->objects);  [in validate_slab()]
    4399:  get_map(s, page, map);  [in validate_slab()]
    [all …]
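get_map() in the slub.c hits fills a bitmap by walking a slab's free list, after which callers such as list_slab_objects() and validate_slab() treat clear bits as allocated objects. The sketch below reproduces that free-list-to-bitmap idea in plain userspace C; the object layout and helpers are invented for illustration and are not the SLUB structures.

```c
/*
 * Walk a free list threaded through free slots, set one mark per free slot,
 * then treat unmarked slots as the allocated objects.
 */
#include <stdio.h>
#include <string.h>

#define NR_OBJECTS 16

struct object {
	struct object *next_free;   /* valid only while the slot is free */
	int payload;
};

static struct object slab[NR_OBJECTS];
static struct object *freelist;

static unsigned int obj_index(const struct object *p)
{
	return (unsigned int)(p - slab);
}

int main(void)
{
	unsigned char bitmap[NR_OBJECTS];   /* one byte per object, for simplicity */
	unsigned int i;

	/* mark every odd slot as free by threading it onto the free list */
	for (i = 0; i < NR_OBJECTS; i++) {
		if (i & 1) {
			slab[i].next_free = freelist;
			freelist = &slab[i];
		}
	}

	/* get_map() equivalent: one pass over the free list fills the bitmap */
	memset(bitmap, 0, sizeof(bitmap));
	for (struct object *p = freelist; p; p = p->next_free)
		bitmap[obj_index(p)] = 1;

	/* unmarked slots now identify allocated (in-use) objects */
	for (i = 0; i < NR_OBJECTS; i++)
		if (!bitmap[i])
			printf("object %u is allocated\n", i);

	return 0;
}
```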
frontswap.c
    190:  void __frontswap_init(unsigned type, unsigned long *map)  [in __frontswap_init(), argument]
    201:  if (WARN_ON(!map))  [in __frontswap_init()]
    208:  frontswap_map_set(sis, map);  [in __frontswap_init()]
memremap.c
    76:  #define for_each_device_pfn(pfn, map) \  [argument]
    77:  for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
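for_each_device_pfn() is a plain for loop wrapped in a macro, driven by small range helpers. A hedged userspace rendition of that macro pattern follows; struct pfn_range and its helpers are assumptions standing in for the kernel's pfn_first()/pfn_end()/pfn_next().

```c
/* Iteration-macro sketch: the macro expands to a for loop over a range. */
#include <stdio.h>

struct pfn_range {
	unsigned long first;
	unsigned long last;    /* exclusive */
};

static unsigned long range_first(const struct pfn_range *r) { return r->first; }
static unsigned long range_end(const struct pfn_range *r)   { return r->last; }

#define for_each_range_pfn(pfn, r) \
	for (pfn = range_first(r); pfn < range_end(r); pfn++)

int main(void)
{
	struct pfn_range r = { .first = 0x100, .last = 0x104 };
	unsigned long pfn;

	for_each_range_pfn(pfn, &r)
		printf("pfn 0x%lx\n", pfn);
	return 0;
}
```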
vmscan.c
    596:  struct memcg_shrinker_map *map;  [in shrink_slab_memcg(), local]
    606:  map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,  [in shrink_slab_memcg()]
    608:  if (unlikely(!map))  [in shrink_slab_memcg()]
    611:  for_each_set_bit(i, map->map, shrinker_nr_max) {  [in shrink_slab_memcg()]
    622:  clear_bit(i, map->map);  [in shrink_slab_memcg()]
    633:  clear_bit(i, map->map);  [in shrink_slab_memcg()]
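The vmscan.c hits iterate the per-memcg shrinker bitmap with for_each_set_bit() and clear stale bits as they are found. The following sketch shows the same walk over a single-word bitmap in userspace; the id_is_registered() check is a made-up stand-in for the kernel's shrinker lookup.

```c
/* Walk the set bits of a bitmap, clearing the ones that are no longer valid. */
#include <stdio.h>

#define NR_IDS (8 * sizeof(unsigned long))

static int id_is_registered(unsigned int id)
{
	return id % 3 != 0;              /* pretend ids divisible by 3 went away */
}

int main(void)
{
	unsigned long map = 0;
	unsigned int i;

	map |= 1UL << 2;
	map |= 1UL << 3;
	map |= 1UL << 9;

	for (i = 0; i < NR_IDS; i++) {
		if (!(map & (1UL << i)))
			continue;
		if (!id_is_registered(i)) {
			map &= ~(1UL << i);   /* clear_bit() equivalent */
			continue;
		}
		printf("would run shrinker %u\n", i);
	}
	return 0;
}
```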
memcontrol.c
    351:  memset(new->map, (int)0xff, old_size);  [in memcg_expand_one_shrinker_map()]
    352:  memset((void *)new->map + old_size, 0, size - old_size);  [in memcg_expand_one_shrinker_map()]
    364:  struct memcg_shrinker_map *map;  [in memcg_free_shrinker_maps(), local]
    372:  map = rcu_dereference_protected(pn->shrinker_map, true);  [in memcg_free_shrinker_maps()]
    373:  if (map)  [in memcg_free_shrinker_maps()]
    374:  kvfree(map);  [in memcg_free_shrinker_maps()]
    381:  struct memcg_shrinker_map *map;  [in memcg_alloc_shrinker_maps(), local]
    390:  map = kvzalloc(sizeof(*map) + size, GFP_KERNEL);  [in memcg_alloc_shrinker_maps()]
    391:  if (!map) {  [in memcg_alloc_shrinker_maps()]
    396:  rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);  [in memcg_alloc_shrinker_maps()]
    [all …]
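The memcontrol.c hits around lines 351-352 grow the shrinker bitmap by allocating a larger one, setting the old region to all ones (so every pre-existing shrinker id stays marked), zeroing the new tail, and republishing the pointer. Here is a rough userspace sketch of that grow-and-republish step; it uses malloc/free where the kernel uses kvzalloc and RCU-deferred freeing, and struct shrinker_map here is an assumed simplification.

```c
/* Grow a trailing bitmap: keep old bits conservatively set, clear new ones. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct shrinker_map {
	size_t size;            /* bytes of bitmap that follow */
	unsigned char map[];
};

static struct shrinker_map *expand_map(struct shrinker_map *old, size_t new_size)
{
	struct shrinker_map *new = malloc(sizeof(*new) + new_size);
	size_t old_size = old ? old->size : 0;

	if (!new)
		return NULL;
	new->size = new_size;
	memset(new->map, 0xff, old_size);                    /* keep old ids "set" */
	memset(new->map + old_size, 0, new_size - old_size); /* new ids start clear */
	free(old);                                           /* kernel defers this via RCU */
	return new;
}

int main(void)
{
	struct shrinker_map *m = expand_map(NULL, 4);

	if (!m)
		return 1;
	m = expand_map(m, 8);
	if (!m)
		return 1;
	printf("old byte: 0x%02x, new byte: 0x%02x\n", m->map[0], m->map[4]);
	free(m);
	return 0;
}
```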
vmalloc.c
    2770:  void *map = kmap_atomic(p);  [in aligned_vread(), local]
    2771:  memcpy(buf, map + offset, length);  [in aligned_vread()]
    2772:  kunmap_atomic(map);  [in aligned_vread()]
    2809:  void *map = kmap_atomic(p);  [in aligned_vwrite(), local]
    2810:  memcpy(map + offset, buf, length);  [in aligned_vwrite()]
    2811:  kunmap_atomic(map);  [in aligned_vwrite()]
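aligned_vread() and aligned_vwrite() follow a map/copy/unmap triple: kmap_atomic() a page, memcpy() through the temporary mapping, kunmap_atomic() it again. As a loose userspace analogue, the sketch below maps one page of a file with mmap(), copies out of it, and unmaps; the file path is arbitrary and only the shape of the pattern is the point.

```c
/* Map a window, copy through it, drop the mapping. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char buf[16] = { 0 };
	long page = sysconf(_SC_PAGESIZE);
	int fd = open("/etc/hostname", O_RDONLY);   /* any small readable file */

	if (fd < 0)
		return 1;

	void *map = mmap(NULL, page, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED) {
		close(fd);
		return 1;
	}

	memcpy(buf, map, sizeof(buf) - 1);   /* copy while the window is mapped */
	munmap(map, page);                   /* drop the temporary mapping */
	close(fd);

	printf("first bytes: %s\n", buf);
	return 0;
}
```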
zpool.c
    347:  return zpool->driver->map(zpool->pool, handle, mapmode);  [in zpool_map_handle()]
zbud.c
    212:  .map = zbud_zpool_map,
percpu.c
    1131:  static unsigned long pcpu_find_zero_area(unsigned long *map,  [in pcpu_find_zero_area(), argument]
    1141:  index = find_next_zero_bit(map, size, start);  [in pcpu_find_zero_area()]
    1150:  i = find_next_bit(map, end, index);  [in pcpu_find_zero_area()]
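pcpu_find_zero_area() searches a bitmap for a run of clear bits: find the next zero bit, check with find_next_bit() that the run is long enough, and restart past any set bit that interrupts it. The sketch below implements that search over a byte-per-bit array with hand-rolled helpers; it is an illustration of the approach, not the kernel routine.

```c
/* Find the first run of nr clear bits in a byte-per-bit "bitmap". */
#include <stddef.h>
#include <stdio.h>

static size_t next_zero(const unsigned char *map, size_t size, size_t start)
{
	while (start < size && map[start])
		start++;
	return start;
}

static size_t next_set(const unsigned char *map, size_t size, size_t start)
{
	while (start < size && !map[start])
		start++;
	return start;
}

/* returns size when no run of nr clear bits exists */
static size_t find_zero_area(const unsigned char *map, size_t size, size_t nr)
{
	size_t start = 0;

	while (start < size) {
		size_t index = next_zero(map, size, start);
		size_t end = index + nr;

		if (end > size)
			return size;

		size_t i = next_set(map, end, index);
		if (i < end) {          /* run interrupted: retry past the set bit */
			start = i + 1;
			continue;
		}
		return index;
	}
	return size;
}

int main(void)
{
	unsigned char map[16] = { 1, 1, 0, 1, 0, 0, 0, 1 };   /* rest is zero */

	printf("first run of 3 clear bits starts at %zu\n",
	       find_zero_area(map, 16, 3));
	return 0;
}
```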
slab.c
    1426:  static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)  [in slab_kernel_map(), argument]
    1431:  kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);  [in slab_kernel_map()]
    1436:  int map) {}  [in slab_kernel_map(), argument]
page_alloc.c
    6844:  struct page *map;  [in alloc_node_mem_map(), local]
    6854:  map = memblock_alloc_node(size, SMP_CACHE_BYTES,  [in alloc_node_mem_map()]
    6856:  if (!map)  [in alloc_node_mem_map()]
    6859:  pgdat->node_mem_map = map + offset;  [in alloc_node_mem_map()]
z3fold.c
    1553:  .map = z3fold_zpool_map,
hugetlb.c
    771:  static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)  [in set_vma_resv_map(), argument]
    777:  HPAGE_RESV_MASK) | (unsigned long)map);  [in set_vma_resv_map()]
zsmalloc.c
    453:  .map = zs_zpool_map,
Kconfig
    315:  Programs which use vm86 functionality or have some need to map