/mm/ |
D | gup.c |
  1450  struct page *head = compound_head(pages[i]);  in check_and_migrate_cma_pages() local
  1456  step = compound_nr(head) - (pages[i] - head);  in check_and_migrate_cma_pages()
  1462  if (is_migrate_cma_page(head)) {  in check_and_migrate_cma_pages()
  1463  if (PageHuge(head))  in check_and_migrate_cma_pages()
  1464  isolate_huge_page(head, &cma_page_list);  in check_and_migrate_cma_pages()
  1466  if (!PageLRU(head) && drain_allow) {  in check_and_migrate_cma_pages()
  1471  if (!isolate_lru_page(head)) {  in check_and_migrate_cma_pages()
  1472  list_add_tail(&head->lru, &cma_page_list);  in check_and_migrate_cma_pages()
  1473  mod_node_page_state(page_pgdat(head),  in check_and_migrate_cma_pages()
  1475  page_is_file_cache(head),  in check_and_migrate_cma_pages()
  [all …]
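The gup.c matches above share one idiom: given a page that may sit anywhere inside a compound page, find the head with compound_head(), then advance the scan by however many tail pages remain. A minimal userspace sketch of that skip arithmetic, with invented stand-in types (fake_page and head_index are assumptions here, not kernel names):

#include <stdio.h>

struct fake_page { int head_index; int nr; };  /* head_index: offset back to the head */

int main(void)
{
    /* one 4-page "compound page" followed by two order-0 pages */
    struct fake_page pages[6] = {
        {0, 4}, {1, 4}, {2, 4}, {3, 4}, {0, 1}, {0, 1},
    };

    for (int i = 0; i < 6; ) {
        struct fake_page *page = &pages[i];
        struct fake_page *head = page - page->head_index;   /* like compound_head() */
        /* mirrors: step = compound_nr(head) - (pages[i] - head) */
        int step = head->nr - (int)(page - head);
        printf("i=%d: head at index %ld, advancing %d entries\n",
               i, (long)(head - pages), step);
        i += step;
    }
    return 0;
}

The same arithmetic appears again in the memory_hotplug.c and page_alloc.c matches further down.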
|
D | huge_memory.c |
  2445  static void __split_huge_page_tail(struct page *head, int tail,  in __split_huge_page_tail() argument
  2448  struct page *page_tail = head + tail;  in __split_huge_page_tail()
  2459  page_tail->flags |= (head->flags &  in __split_huge_page_tail()
  2474  page_tail->mapping = head->mapping;  in __split_huge_page_tail()
  2475  page_tail->index = head->index + tail;  in __split_huge_page_tail()
  2489  page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||  in __split_huge_page_tail()
  2490  PageSwapCache(head)));  in __split_huge_page_tail()
  2492  if (page_is_young(head))  in __split_huge_page_tail()
  2494  if (page_is_idle(head))  in __split_huge_page_tail()
  2497  page_cpupid_xchg_last(page_tail, page_cpupid_last(head));  in __split_huge_page_tail()
  [all …]
|
D | memory-failure.c |
   831  #define head (1UL << PG_head)  macro
   854  { head, head, MF_MSG_HUGE, me_huge_page },
   880  #undef head
   932  struct page *head = compound_head(page);  in get_hwpoison_page() local
   934  if (!PageHuge(head) && PageTransHuge(head)) {  in get_hwpoison_page()
   941  if (!PageAnon(head)) {  in get_hwpoison_page()
   948  if (get_page_unless_zero(head)) {  in get_hwpoison_page()
   949  if (head == compound_head(page))  in get_hwpoison_page()
   954  put_page(head);  in get_hwpoison_page()
  1087  struct page *head = compound_head(p);  in memory_failure_hugetlb() local
  [all …]
|
D | migrate.c |
   699  static bool buffer_migrate_lock_buffers(struct buffer_head *head,  in buffer_migrate_lock_buffers() argument
   702  struct buffer_head *bh = head;  in buffer_migrate_lock_buffers()
   710  } while (bh != head);  in buffer_migrate_lock_buffers()
   723  bh = head;  in buffer_migrate_lock_buffers()
   732  } while (bh != head);  in buffer_migrate_lock_buffers()
   740  struct buffer_head *bh, *head;  in __buffer_migrate_page() local
   752  head = page_buffers(page);  in __buffer_migrate_page()
   753  if (!buffer_migrate_lock_buffers(head, mode))  in __buffer_migrate_page()
   763  bh = head;  in __buffer_migrate_page()
   770  } while (bh != head);  in __buffer_migrate_page()
  [all …]
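The repeated "} while (bh != head);" in the migrate.c matches reflects how buffer heads on a page form a circular singly linked ring (via b_this_page): iteration starts at the head and stops when it wraps back around. A small sketch of that loop shape, with fake_bh as an invented stand-in type:

#include <stdio.h>

struct fake_bh {
    int id;
    struct fake_bh *b_this_page;  /* next buffer in the ring */
};

int main(void)
{
    struct fake_bh a = { 0 }, b = { 1 }, c = { 2 };
    a.b_this_page = &b;
    b.b_this_page = &c;
    c.b_this_page = &a;           /* close the ring */

    struct fake_bh *head = &a, *bh = head;
    do {
        printf("visiting buffer %d\n", bh->id);
        bh = bh->b_this_page;
    } while (bh != head);         /* same termination test as migrate.c */
    return 0;
}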
|
D | ksm.c |
   159  struct list_head *head;  member
   209  struct stable_node *head;  member
   345  return dup->head == STABLE_NODE_DUP_HEAD;  in is_stable_node_dup()
   352  dup->head = STABLE_NODE_DUP_HEAD;  in stable_node_chain_add_dup()
   373  dup->head = NULL;  in stable_node_dup_del()
   662  if (stable_node->head == &migrate_nodes)  in remove_node_from_stable_tree()
   779  stable_node = rmap_item->head;  in remove_rmap_item_from_tree()
  1561  if (page_node && page_node->head != &migrate_nodes) {  in stable_tree_search()
  1641  VM_BUG_ON(page_node->head != &migrate_nodes);  in stable_tree_search()
  1726  VM_BUG_ON(page_node->head != &migrate_nodes);  in stable_tree_search()
  [all …]
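In the ksm.c matches, ->head doubles as a type tag: comparing it against a distinguished sentinel value (STABLE_NODE_DUP_HEAD, or the address of migrate_nodes) classifies the node without extra flag bits. A sketch of the sentinel-pointer trick; the sentinel below is an assumed stand-in (the kernel constructs its own special value):

#include <stdio.h>

static int sentinel_storage;                     /* any unique address works */
#define STABLE_NODE_DUP_HEAD ((void *)&sentinel_storage)

struct stable_node { void *head; };

static int is_stable_node_dup(struct stable_node *dup)
{
    return dup->head == STABLE_NODE_DUP_HEAD;    /* same test as ksm.c line 345 */
}

int main(void)
{
    struct stable_node n = { NULL };
    n.head = STABLE_NODE_DUP_HEAD;               /* mark as dup */
    printf("is dup: %d\n", is_stable_node_dup(&n));
    n.head = NULL;                               /* unmark, as stable_node_dup_del() does */
    printf("is dup: %d\n", is_stable_node_dup(&n));
    return 0;
}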
|
D | swapfile.c |
   386  return cluster_is_null(&list->head);  in cluster_list_empty()
   391  return cluster_next(&list->head);  in cluster_list_first()
   396  cluster_set_null(&list->head);  in cluster_list_init()
   405  cluster_set_next_flag(&list->head, idx, 0);  in cluster_list_add_tail()
   428  idx = cluster_next(&list->head);  in cluster_list_del_first()
   430  cluster_set_null(&list->head);  in cluster_list_del_first()
   433  cluster_set_next_flag(&list->head,  in cluster_list_del_first()
   611  cluster->index = si->free_clusters.head;  in scan_swap_map_try_ssd_cluster()
  3526  struct page *head;  in add_swap_count_continuation() local
  3574  head = vmalloc_to_page(si->swap_map + offset);  in add_swap_count_continuation()
  [all …]
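The cluster_list_* matches describe a linked list built from array indices rather than pointers: each cluster stores the index of the next one, and the list tracks head/tail indices with a reserved null value. A simplified sketch under those assumptions (field and function names below are stand-ins, not the kernel's exact encoding):

#include <stdio.h>

#define CLUSTER_NULL 0xFFFFFFFFu

struct cluster { unsigned int next; };
struct cluster_list { unsigned int head, tail; };

static int cluster_list_empty(struct cluster_list *l)
{
    return l->head == CLUSTER_NULL;
}

static void cluster_list_add_tail(struct cluster_list *l,
                                  struct cluster *info, unsigned int idx)
{
    info[idx].next = CLUSTER_NULL;
    if (cluster_list_empty(l))
        l->head = idx;
    else
        info[l->tail].next = idx;    /* link the old tail to the new one */
    l->tail = idx;
}

static unsigned int cluster_list_del_first(struct cluster_list *l,
                                           struct cluster *info)
{
    unsigned int idx = l->head;
    l->head = info[idx].next;        /* advance; may become null */
    if (l->head == CLUSTER_NULL)
        l->tail = CLUSTER_NULL;
    return idx;
}

int main(void)
{
    struct cluster info[4];
    struct cluster_list free_clusters = { CLUSTER_NULL, CLUSTER_NULL };

    for (unsigned int i = 0; i < 4; i++)
        cluster_list_add_tail(&free_clusters, info, i);
    printf("first free cluster: %u\n",
           cluster_list_del_first(&free_clusters, info));
    return 0;
}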
|
D | zsmalloc.c |
   724  struct zspage *head;  in insert_zspage() local
   727  head = list_first_entry_or_null(&class->fullness_list[fullness],  in insert_zspage()
   733  if (head) {  in insert_zspage()
   734  if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) {  in insert_zspage()
   735  list_add(&zspage->list, &head->list);  in insert_zspage()
  1655  unsigned long head;  in find_alloced_obj() local
  1665  head = obj_to_head(page, addr + offset);  in find_alloced_obj()
  1666  if (head & OBJ_ALLOCATED_TAG) {  in find_alloced_obj()
  1667  handle = head & ~OBJ_ALLOCATED_TAG;  in find_alloced_obj()
  1982  unsigned long handle, head;  in zs_page_migrate() local
  [all …]
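The find_alloced_obj() matches show low-bit pointer tagging: because handles are at least word aligned, bit 0 is free to mark "allocated", and masking it off recovers the handle. A minimal sketch of that bit dance (the tag value here is an assumed stand-in):

#include <stdio.h>

#define OBJ_ALLOCATED_TAG 1UL

int main(void)
{
    unsigned long handle = 0x1000;                    /* aligned, so bit 0 is clear */
    unsigned long head = handle | OBJ_ALLOCATED_TAG;  /* store with the tag set */

    if (head & OBJ_ALLOCATED_TAG) {                   /* test, as at line 1666 */
        unsigned long h = head & ~OBJ_ALLOCATED_TAG;  /* strip, as at line 1667 */
        printf("allocated, handle=%#lx\n", h);
    }
    return 0;
}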
|
D | vmalloc.c |
   501  struct rb_node *parent, struct rb_node **link, struct list_head *head)  in link_va() argument
   508  head = &rb_entry(parent, struct vmap_area, rb_node)->list;  in link_va()
   510  head = head->prev;  in link_va()
   535  list_add(&va->list, head);  in link_va()
   653  struct rb_root *root, struct list_head *head)  in insert_vmap_area() argument
   659  link_va(va, root, parent, link, head);  in insert_vmap_area()
   665  struct list_head *head)  in insert_vmap_area_augment() argument
   675  link_va(va, root, parent, link, head);  in insert_vmap_area_augment()
   687  struct rb_root *root, struct list_head *head)  in merge_or_add_vmap_area() argument
   715  if (next != head) {  in merge_or_add_vmap_area()
  [all …]
|
D | memory_hotplug.c |
  1265  struct page *page, *head;  in scan_movable_pages() local
  1278  head = compound_head(page);  in scan_movable_pages()
  1279  if (page_huge_active(head))  in scan_movable_pages()
  1281  skip = compound_nr(head) - (page - head);  in scan_movable_pages()
  1318  struct page *head = compound_head(page);  in do_migrate_range() local
  1319  pfn = page_to_pfn(head) + compound_nr(head) - 1;  in do_migrate_range()
  1320  isolate_huge_page(head, &source);  in do_migrate_range()
|
D | hugetlb.c |
   264  struct list_head *head = &resv->regions;  in region_add() local
   270  list_for_each_entry(rg, head, link)  in region_add()
   280  if (&rg->link == head || t < rg->from) {  in region_add()
   303  if (&rg->link == head)  in region_add()
   360  struct list_head *head = &resv->regions;  in region_chg() local
   394  list_for_each_entry(rg, head, link)  in region_chg()
   401  if (&rg->link == head || t < rg->from) {  in region_chg()
   427  if (&rg->link == head)  in region_chg()
   487  struct list_head *head = &resv->regions;  in region_del() local
   494  list_for_each_entry_safe(rg, trg, head, link)  in region_del()
  [all …]
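The region_add()/region_chg() matches walk a reservation list kept sorted by start offset and stop either at the list-head sentinel (end of list) or at the first region starting past the new range's end t. A sketch of that search over a plain singly linked list, where NULL plays the role of the head sentinel (struct region and locate() are simplified stand-ins):

#include <stdio.h>
#include <stddef.h>

struct region { long from, to; struct region *next; };

/* mirrors the "&rg->link == head || t < rg->from" termination test */
static struct region *locate(struct region *head, long t)
{
    struct region *rg;
    for (rg = head; rg != NULL; rg = rg->next)
        if (t < rg->from)
            break;       /* new range ends before this region begins */
    return rg;           /* NULL means "hit the end of the list" */
}

int main(void)
{
    struct region r2 = { 40, 50, NULL };
    struct region r1 = { 10, 20, &r2 };

    struct region *spot = locate(&r1, 30);   /* new range ends at 30 */
    if (spot)
        printf("insert before region [%ld,%ld)\n", spot->from, spot->to);
    else
        printf("append at end of list\n");
    return 0;
}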
|
D | list_lru.c |
   177  struct list_head *head)  in list_lru_isolate_move() argument
   179  list_move(item, head);  in list_lru_isolate_move()
   393  static void kvfree_rcu(struct rcu_head *head)  in kvfree_rcu() argument
   397  mlru = container_of(head, struct list_lru_memcg, rcu);  in kvfree_rcu()
|
D | slob.c |
   134  struct rcu_head head;  member
   650  static void kmem_rcu_free(struct rcu_head *head)  in kmem_rcu_free() argument
   652  struct slob_rcu *slob_rcu = (struct slob_rcu *)head;  in kmem_rcu_free()
   665  call_rcu(&slob_rcu->head, kmem_rcu_free);  in kmem_cache_free()
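Note the cast at slob.c line 652: because the rcu_head is the first member of struct slob_rcu, the callback can recover the outer struct with a plain pointer cast instead of container_of(). A sketch of that first-member trick, with _sk types as invented userspace stand-ins:

#include <stdio.h>

struct rcu_head_sk { void (*func)(struct rcu_head_sk *); };
struct slob_rcu_sk { struct rcu_head_sk head; int size; };  /* head must stay first */

static void kmem_rcu_free_sk(struct rcu_head_sk *head)
{
    /* first-member cast: valid only because 'head' is the first field */
    struct slob_rcu_sk *slob_rcu = (struct slob_rcu_sk *)head;
    printf("freeing object of size %d\n", slob_rcu->size);
}

int main(void)
{
    struct slob_rcu_sk r = { { kmem_rcu_free_sk }, 64 };
    r.head.func(&r.head);   /* stand-in for the RCU grace period firing */
    return 0;
}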
|
D | readahead.c |
   335  pgoff_t head;  in count_history_pages() local
   338  head = page_cache_prev_miss(mapping, offset - 1, max);  in count_history_pages()
   341  return offset - 1 - head;  in count_history_pages()
|
D | mmu_gather.c |
   133  static void tlb_remove_table_rcu(struct rcu_head *head)  in tlb_remove_table_rcu() argument
   138  batch = container_of(head, struct mmu_table_batch, rcu);  in tlb_remove_table_rcu()
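Unlike slob's first-member cast above, tlb_remove_table_rcu() (and the similar list_lru.c, slab_common.c, memcontrol.c, and slab.c matches) uses container_of(), which works for a member embedded at any offset. A self-contained sketch of the arithmetic behind it, with _sk stand-in types (the kernel's container_of also adds type checking, omitted here):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_sk { int pad; };
struct mmu_table_batch_sk { unsigned int nr; struct rcu_head_sk rcu; };

int main(void)
{
    struct mmu_table_batch_sk b = { .nr = 3 };
    struct rcu_head_sk *head = &b.rcu;   /* the only pointer the RCU callback gets */
    struct mmu_table_batch_sk *batch =
        container_of(head, struct mmu_table_batch_sk, rcu);  /* recover the batch */
    printf("batch has %u entries\n", batch->nr);
    return 0;
}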
|
D | slub.c |
  1170  void *head, void *tail, int bulk_cnt,  in free_debug_processing() argument
  1174  void *object = head;  in free_debug_processing()
  1352  void *head, void *tail, int bulk_cnt,  in free_debug_processing() argument
  1428  void **head, void **tail)  in slab_free_freelist_hook() argument
  1432  void *next = *head;  in slab_free_freelist_hook()
  1433  void *old_tail = *tail ? *tail : *head;  in slab_free_freelist_hook()
  1437  *head = NULL;  in slab_free_freelist_hook()
  1459  set_freepointer(s, object, *head);  in slab_free_freelist_hook()
  1460  *head = object;  in slab_free_freelist_hook()
  1466  if (*head == *tail)  in slab_free_freelist_hook()
  [all …]
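The slab_free_freelist_hook() matches rebuild a freelist by pushing each surviving object onto the head: the object is made to point at the old *head, then becomes the new *head, so the first object pushed ends up as the tail. A sketch of that push, assuming (as a simplification, not SLUB's real layout) that the free pointer lives at offset 0 of each object:

#include <stdio.h>

int main(void)
{
    void *slots[3];                    /* each slot stands in for a freed object */
    void *head = NULL, *tail = NULL;

    for (int i = 0; i < 3; i++) {
        void *object = &slots[i];
        *(void **)object = head;       /* like set_freepointer(s, object, *head) */
        head = object;                 /* like *head = object */
        if (!tail)
            tail = object;             /* first object pushed becomes the tail */
    }

    int n = 0;                         /* walk: last pushed comes out first */
    for (void *p = head; p; p = *(void **)p)
        n++;
    printf("freelist: %d objects, head=%p tail=%p\n", n, head, tail);
    return 0;
}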
|
D | shmem.c |
  1925  struct page *head = compound_head(page);  in shmem_getpage_gfp() local
  1928  for (i = 0; i < compound_nr(head); i++) {  in shmem_getpage_gfp()
  1929  clear_highpage(head + i);  in shmem_getpage_gfp()
  1930  flush_dcache_page(head + i);  in shmem_getpage_gfp()
  1932  SetPageUptodate(head);  in shmem_getpage_gfp()
  2501  struct page *head = compound_head(page);  in shmem_write_end() local
  2506  if (head + i == page)  in shmem_write_end()
  2508  clear_highpage(head + i);  in shmem_write_end()
  2509  flush_dcache_page(head + i);  in shmem_write_end()
  2517  SetPageUptodate(head);  in shmem_write_end()
  [all …]
|
D | mempolicy.c |
   975  struct page *head = compound_head(page);  in migrate_page_add() local
   979  if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {  in migrate_page_add()
   980  if (!isolate_lru_page(head)) {  in migrate_page_add()
   981  list_add_tail(&head->lru, pagelist);  in migrate_page_add()
   982  mod_node_page_state(page_pgdat(head),  in migrate_page_add()
   983  NR_ISOLATED_ANON + page_is_file_cache(head),  in migrate_page_add()
   984  hpage_nr_pages(head));  in migrate_page_add()
|
D | slab_common.c |
   726  static void kmemcg_rcufn(struct rcu_head *head)  in kmemcg_rcufn() argument
   728  struct kmem_cache *s = container_of(head, struct kmem_cache,  in kmemcg_rcufn()
|
D | page_alloc.c |
  1269  LIST_HEAD(head);  in free_pcppages_bulk()
  1301  list_add_tail(&page->lru, &head);  in free_pcppages_bulk()
  1324  list_for_each_entry_safe(page, tmp, &head, lru) {  in free_pcppages_bulk()
  8249  struct page *head = compound_head(page);  in has_unmovable_pages() local
  8252  if (!hugepage_migration_supported(page_hstate(head)))  in has_unmovable_pages()
  8255  skip_pages = compound_nr(head) - (page - head);  in has_unmovable_pages()
|
D | memcontrol.c |
   326  static void memcg_free_shrinker_map_rcu(struct rcu_head *head)  in memcg_free_shrinker_map_rcu() argument
   328  kvfree(container_of(head, struct memcg_shrinker_map, rcu));  in memcg_free_shrinker_map_rcu()
  3050  void mem_cgroup_split_huge_fixup(struct page *head)  in mem_cgroup_split_huge_fixup() argument
  3058  head[i].mem_cgroup = head->mem_cgroup;  in mem_cgroup_split_huge_fixup()
  3060  __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);  in mem_cgroup_split_huge_fixup()
|
D | slab.c |
  1404  static void kmem_rcu_free(struct rcu_head *head)  in kmem_rcu_free() argument
  1409  page = container_of(head, struct page, rcu_head);  in kmem_rcu_free()
|
/mm/kasan/ |
D | quarantine.c |
    43  struct qlist_node *head;  member
    52  return !q->head;  in qlist_empty()
    57  q->head = q->tail = NULL;  in qlist_init()
    65  q->head = qlink;  in qlist_put()
    84  to->tail->next = from->head;  in qlist_move_all()
   161  qlink = q->head;  in qlist_free_all()
   273  curr = from->head;  in qlist_move_cache()
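The quarantine.c matches outline a singly linked list tracked by both head and tail, so appending one node and splicing a whole list are each O(1). A simplified, self-contained sketch of that shape (types and locking stripped down from the kernel's version):

#include <stdio.h>
#include <stddef.h>

struct qlist_node { struct qlist_node *next; };
struct qlist_head { struct qlist_node *head, *tail; };

static void qlist_put(struct qlist_head *q, struct qlist_node *qlink)
{
    qlink->next = NULL;
    if (!q->head)
        q->head = qlink;              /* empty list: new node is the head */
    else
        q->tail->next = qlink;
    q->tail = qlink;
}

static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
    if (!from->head)
        return;
    if (!to->head) {
        *to = *from;                  /* destination empty: take the whole list */
    } else {
        to->tail->next = from->head;  /* splice, as in the line-84 match */
        to->tail = from->tail;
    }
    from->head = from->tail = NULL;
}

int main(void)
{
    struct qlist_node a = {0}, b = {0}, c = {0};
    struct qlist_head q1 = {0}, q2 = {0};

    qlist_put(&q1, &a);
    qlist_put(&q1, &b);
    qlist_put(&q2, &c);
    qlist_move_all(&q1, &q2);

    int n = 0;
    for (struct qlist_node *p = q2.head; p; p = p->next)
        n++;
    printf("q2 now holds %d nodes\n", n);
    return 0;
}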
|