Searched refs:head (Results 1 – 22 of 22) sorted by relevance

/mm/
gup.c
1476 struct page *head = compound_head(pages[i]); in check_and_migrate_cma_pages() local
1482 step = compound_nr(head) - (pages[i] - head); in check_and_migrate_cma_pages()
1488 if (is_migrate_cma_page(head)) { in check_and_migrate_cma_pages()
1489 if (PageHuge(head)) in check_and_migrate_cma_pages()
1490 isolate_huge_page(head, &cma_page_list); in check_and_migrate_cma_pages()
1492 if (!PageLRU(head) && drain_allow) { in check_and_migrate_cma_pages()
1497 if (!isolate_lru_page(head)) { in check_and_migrate_cma_pages()
1498 list_add_tail(&head->lru, &cma_page_list); in check_and_migrate_cma_pages()
1499 mod_node_page_state(page_pgdat(head), in check_and_migrate_cma_pages()
1501 page_is_file_cache(head), in check_and_migrate_cma_pages()
[all …]
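
Every gup.c hit above is part of one idiom: while scanning the pinned pages[] array, treat all tail pages of a compound page as a single unit by computing how many array slots to skip past the head. A minimal user-space simulation of that arithmetic follows; compound_head()/compound_nr() are the real kernel helpers, while struct fake_page and the sample layout are invented for this sketch.

#include <stdio.h>

/*
 * Stand-in for struct page: in this simulation the array itself plays the
 * role of the contiguous memmap, so pointer subtraction between an entry
 * and its head gives the tail offset, as in the gup.c hit at line 1482.
 */
struct fake_page {
    int head_idx;   /* index of this page's compound head */
    int nr;         /* pages in the compound page (1 if not compound) */
};

int main(void)
{
    /* pages[0..3] form one order-2 compound page; pages[4] is a base page. */
    struct fake_page pages[5] = {
        {0, 4}, {0, 4}, {0, 4}, {0, 4}, {4, 1},
    };
    long nr = sizeof(pages) / sizeof(pages[0]);

    for (long i = 0; i < nr; ) {
        struct fake_page *head = &pages[pages[i].head_idx];
        /* step = compound_nr(head) - (pages[i] - head) from the hit above */
        long step = head->nr - (&pages[i] - head);

        printf("i=%ld: compound head at index %d, skipping %ld entries\n",
               i, pages[i].head_idx, step);
        i += step;
    }
    return 0;
}
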
memory-failure.c
831 #define head (1UL << PG_head) macro
854 { head, head, MF_MSG_HUGE, me_huge_page },
880 #undef head
932 struct page *head = compound_head(page); in get_hwpoison_page() local
934 if (!PageHuge(head) && PageTransHuge(head)) { in get_hwpoison_page()
941 if (!PageAnon(head)) { in get_hwpoison_page()
948 if (get_page_unless_zero(head)) { in get_hwpoison_page()
949 if (head == compound_head(page)) in get_hwpoison_page()
954 put_page(head); in get_hwpoison_page()
1087 struct page *head = compound_head(p); in memory_failure_hugetlb() local
[all …]
huge_memory.c
2487 static void __split_huge_page_tail(struct page *head, int tail, in __split_huge_page_tail() argument
2490 struct page *page_tail = head + tail; in __split_huge_page_tail()
2501 page_tail->flags |= (head->flags & in __split_huge_page_tail()
2516 page_tail->mapping = head->mapping; in __split_huge_page_tail()
2517 page_tail->index = head->index + tail; in __split_huge_page_tail()
2531 page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || in __split_huge_page_tail()
2532 PageSwapCache(head))); in __split_huge_page_tail()
2534 if (page_is_young(head)) in __split_huge_page_tail()
2536 if (page_is_idle(head)) in __split_huge_page_tail()
2539 page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); in __split_huge_page_tail()
[all …]
ksm.c
159 struct list_head *head; member
209 struct stable_node *head; member
345 return dup->head == STABLE_NODE_DUP_HEAD; in is_stable_node_dup()
352 dup->head = STABLE_NODE_DUP_HEAD; in stable_node_chain_add_dup()
373 dup->head = NULL; in stable_node_dup_del()
662 if (stable_node->head == &migrate_nodes) in remove_node_from_stable_tree()
779 stable_node = rmap_item->head; in remove_rmap_item_from_tree()
796 rmap_item->head = NULL; in remove_rmap_item_from_tree()
1562 if (page_node && page_node->head != &migrate_nodes) { in stable_tree_search()
1642 VM_BUG_ON(page_node->head != &migrate_nodes); in stable_tree_search()
[all …]
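
The ksm.c hits (a struct list_head member, comparisons against the &migrate_nodes sentinel) use the kernel's intrusive-list pattern, as do the hugetlb.c region list and page_alloc.c's LIST_HEAD(head) further down. Below is a self-contained user-space sketch of that pattern; list_head, list_add_tail() and container_of() are re-implemented here rather than taken from <linux/list.h>, and struct region is a made-up stand-in.

#include <stdio.h>
#include <stddef.h>

/* Minimal doubly linked list node, embedded inside the objects it links. */
struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
    entry->prev = head->prev;
    entry->next = head;
    head->prev->next = entry;
    head->prev = entry;
}

struct region {
    long from, to;
    struct list_head link;   /* node lives inside the object */
};

int main(void)
{
    struct list_head head = LIST_HEAD_INIT(head);   /* empty sentinel */
    struct region a = { 0, 10 }, b = { 10, 20 };

    list_add_tail(&a.link, &head);
    list_add_tail(&b.link, &head);

    /* Walk like list_for_each_entry(): stop when back at the sentinel. */
    for (struct list_head *pos = head.next; pos != &head; pos = pos->next) {
        struct region *rg = container_of(pos, struct region, link);
        printf("[%ld, %ld)\n", rg->from, rg->to);
    }
    return 0;
}
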
zsmalloc.c
720 struct zspage *head; in insert_zspage() local
723 head = list_first_entry_or_null(&class->fullness_list[fullness], in insert_zspage()
729 if (head) { in insert_zspage()
730 if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) { in insert_zspage()
731 list_add(&zspage->list, &head->list); in insert_zspage()
1609 unsigned long head; in find_alloced_obj() local
1619 head = obj_to_head(page, addr + offset); in find_alloced_obj()
1620 if (head & OBJ_ALLOCATED_TAG) { in find_alloced_obj()
1621 handle = head & ~OBJ_ALLOCATED_TAG; in find_alloced_obj()
1966 unsigned long handle, head; in zs_page_migrate() local
[all …]
migrate.c
704 static bool buffer_migrate_lock_buffers(struct buffer_head *head, in buffer_migrate_lock_buffers() argument
707 struct buffer_head *bh = head; in buffer_migrate_lock_buffers()
715 } while (bh != head); in buffer_migrate_lock_buffers()
728 bh = head; in buffer_migrate_lock_buffers()
737 } while (bh != head); in buffer_migrate_lock_buffers()
745 struct buffer_head *bh, *head; in __buffer_migrate_page() local
757 head = page_buffers(page); in __buffer_migrate_page()
758 if (!buffer_migrate_lock_buffers(head, mode)) in __buffer_migrate_page()
768 bh = head; in __buffer_migrate_page()
775 } while (bh != head); in __buffer_migrate_page()
[all …]
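
migrate.c's hits traverse the buffer_head ring: the buffers of a page form a circular singly linked list, so the loop starts at head and runs do/while until the cursor wraps back to it, with a second pass to undo partial work on failure. A hedged user-space sketch of that loop shape; struct buf and its locked flag are invented, the kernel links real buffer_heads through b_this_page.

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-in for buffer_head: circular singly linked via 'this_page'. */
struct buf {
    int id;
    bool locked;
    struct buf *this_page;   /* next buffer on the same page, circular */
};

/* Try to lock every buffer on the ring; on failure, release what we took. */
static bool lock_buffers(struct buf *head)
{
    struct buf *bh = head;

    do {
        if (bh->locked)
            goto undo;            /* trylock failed */
        bh->locked = true;
        bh = bh->this_page;
    } while (bh != head);
    return true;

undo:
    /* Second pass over the ring, releasing only the buffers locked above. */
    for (struct buf *cur = head; cur != bh; cur = cur->this_page)
        cur->locked = false;
    return false;
}

int main(void)
{
    struct buf b0 = { 0 }, b1 = { 1 }, b2 = { 2 };

    b0.this_page = &b1;
    b1.this_page = &b2;
    b2.this_page = &b0;           /* close the ring */

    printf("locked all: %s\n", lock_buffers(&b0) ? "yes" : "no");
    return 0;
}
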
swapfile.c
399 return cluster_is_null(&list->head); in cluster_list_empty()
404 return cluster_next(&list->head); in cluster_list_first()
409 cluster_set_null(&list->head); in cluster_list_init()
418 cluster_set_next_flag(&list->head, idx, 0); in cluster_list_add_tail()
441 idx = cluster_next(&list->head); in cluster_list_del_first()
443 cluster_set_null(&list->head); in cluster_list_del_first()
446 cluster_set_next_flag(&list->head, in cluster_list_del_first()
624 cluster->index = si->free_clusters.head; in scan_swap_map_try_ssd_cluster()
3547 struct page *head; in add_swap_count_continuation() local
3595 head = vmalloc_to_page(si->swap_map + offset); in add_swap_count_continuation()
[all …]
hugetlb.c
74 static inline bool PageHugeFreed(struct page *head) in PageHugeFreed() argument
76 return page_private(head + 4) == -1UL; in PageHugeFreed()
79 static inline void SetPageHugeFreed(struct page *head) in SetPageHugeFreed() argument
81 set_page_private(head + 4, -1UL); in SetPageHugeFreed()
84 static inline void ClearPageHugeFreed(struct page *head) in ClearPageHugeFreed() argument
86 set_page_private(head + 4, 0); in ClearPageHugeFreed()
279 struct list_head *head = &resv->regions; in region_add() local
285 list_for_each_entry(rg, head, link) in region_add()
295 if (&rg->link == head || t < rg->from) { in region_add()
318 if (&rg->link == head) in region_add()
[all …]
vmalloc.c
504 struct rb_node *parent, struct rb_node **link, struct list_head *head) in link_va() argument
511 head = &rb_entry(parent, struct vmap_area, rb_node)->list; in link_va()
513 head = head->prev; in link_va()
538 list_add(&va->list, head); in link_va()
656 struct rb_root *root, struct list_head *head) in insert_vmap_area() argument
662 link_va(va, root, parent, link, head); in insert_vmap_area()
668 struct list_head *head) in insert_vmap_area_augment() argument
678 link_va(va, root, parent, link, head); in insert_vmap_area_augment()
690 struct rb_root *root, struct list_head *head) in merge_or_add_vmap_area() argument
718 if (next != head) { in merge_or_add_vmap_area()
[all …]
memory_hotplug.c
1272 struct page *page, *head; in scan_movable_pages() local
1285 head = compound_head(page); in scan_movable_pages()
1286 if (page_huge_active(head)) in scan_movable_pages()
1288 skip = compound_nr(head) - (page - head); in scan_movable_pages()
1325 struct page *head = compound_head(page); in do_migrate_range() local
1326 pfn = page_to_pfn(head) + compound_nr(head) - 1; in do_migrate_range()
1327 isolate_huge_page(head, &source); in do_migrate_range()
list_lru.c
177 struct list_head *head) in list_lru_isolate_move() argument
179 list_move(item, head); in list_lru_isolate_move()
393 static void kvfree_rcu(struct rcu_head *head) in kvfree_rcu() argument
397 mlru = container_of(head, struct list_lru_memcg, rcu); in kvfree_rcu()
slob.c
134 struct rcu_head head; member
652 static void kmem_rcu_free(struct rcu_head *head) in kmem_rcu_free() argument
654 struct slob_rcu *slob_rcu = (struct slob_rcu *)head; in kmem_rcu_free()
667 call_rcu(&slob_rcu->head, kmem_rcu_free); in kmem_cache_free()
readahead.c
335 pgoff_t head; in count_history_pages() local
338 head = page_cache_prev_miss(mapping, offset - 1, max); in count_history_pages()
341 return offset - 1 - head; in count_history_pages()
mmu_gather.c
138 static void tlb_remove_table_rcu(struct rcu_head *head) in tlb_remove_table_rcu() argument
143 batch = container_of(head, struct mmu_table_batch, rcu); in tlb_remove_table_rcu()
slub.c
1195 void *head, void *tail, int bulk_cnt, in free_debug_processing() argument
1199 void *object = head; in free_debug_processing()
1377 void *head, void *tail, int bulk_cnt, in free_debug_processing() argument
1458 void **head, void **tail, in slab_free_freelist_hook() argument
1463 void *next = *head; in slab_free_freelist_hook()
1464 void *old_tail = *tail ? *tail : *head; in slab_free_freelist_hook()
1468 *head = NULL; in slab_free_freelist_hook()
1490 set_freepointer(s, object, *head); in slab_free_freelist_hook()
1491 *head = object; in slab_free_freelist_hook()
1503 if (*head == *tail) in slab_free_freelist_hook()
[all …]
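
The slub.c hits around slab_free_freelist_hook() build a detached freelist: head and tail bracket a chain of freed objects whose next links are written into the objects themselves via set_freepointer(). A self-contained user-space sketch of such an intrusive freelist; here the free pointer lives at offset 0 of each object, whereas SLUB computes the offset per cache.

#include <stdio.h>
#include <string.h>

#define OBJ_SIZE 32
#define NR_OBJS  4

/* Write the 'next' link into the first bytes of a freed object. */
static void set_freepointer(void *object, void *next)
{
    memcpy(object, &next, sizeof(next));
}

static void *get_freepointer(void *object)
{
    void *next;

    memcpy(&next, object, sizeof(next));
    return next;
}

int main(void)
{
    static char slab[NR_OBJS][OBJ_SIZE];
    void *head = NULL;

    /* Push every object onto the freelist, like the *head = object step. */
    for (int i = 0; i < NR_OBJS; i++) {
        void *object = slab[i];

        set_freepointer(object, head);
        head = object;
    }

    /* Pop them back off: the most recently freed object comes out first. */
    while (head) {
        printf("alloc %p\n", head);
        head = get_freepointer(head);
    }
    return 0;
}
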
shmem.c
1930 struct page *head = compound_head(page); in shmem_getpage_gfp() local
1933 for (i = 0; i < compound_nr(head); i++) { in shmem_getpage_gfp()
1934 clear_highpage(head + i); in shmem_getpage_gfp()
1935 flush_dcache_page(head + i); in shmem_getpage_gfp()
1937 SetPageUptodate(head); in shmem_getpage_gfp()
2505 struct page *head = compound_head(page); in shmem_write_end() local
2510 if (head + i == page) in shmem_write_end()
2512 clear_highpage(head + i); in shmem_write_end()
2513 flush_dcache_page(head + i); in shmem_write_end()
2521 SetPageUptodate(head); in shmem_write_end()
[all …]
mempolicy.c
970 struct page *head = compound_head(page); in migrate_page_add() local
974 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { in migrate_page_add()
975 if (!isolate_lru_page(head)) { in migrate_page_add()
976 list_add_tail(&head->lru, pagelist); in migrate_page_add()
977 mod_node_page_state(page_pgdat(head), in migrate_page_add()
978 NR_ISOLATED_ANON + page_is_file_cache(head), in migrate_page_add()
979 hpage_nr_pages(head)); in migrate_page_add()
slab_common.c
725 static void kmemcg_rcufn(struct rcu_head *head) in kmemcg_rcufn() argument
727 struct kmem_cache *s = container_of(head, struct kmem_cache, in kmemcg_rcufn()
page_alloc.c
1270 LIST_HEAD(head); in free_pcppages_bulk()
1307 list_add_tail(&page->lru, &head); in free_pcppages_bulk()
1330 list_for_each_entry_safe(page, tmp, &head, lru) { in free_pcppages_bulk()
8437 struct page *head = compound_head(page); in has_unmovable_pages() local
8440 if (!hugepage_migration_supported(page_hstate(head))) in has_unmovable_pages()
8443 skip_pages = compound_nr(head) - (page - head); in has_unmovable_pages()
memcontrol.c
326 static void memcg_free_shrinker_map_rcu(struct rcu_head *head) in memcg_free_shrinker_map_rcu() argument
328 kvfree(container_of(head, struct memcg_shrinker_map, rcu)); in memcg_free_shrinker_map_rcu()
3129 void mem_cgroup_split_huge_fixup(struct page *head) in mem_cgroup_split_huge_fixup() argument
3137 head[i].mem_cgroup = head->mem_cgroup; in mem_cgroup_split_huge_fixup()
3139 __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR); in mem_cgroup_split_huge_fixup()
slab.c
1404 static void kmem_rcu_free(struct rcu_head *head) in kmem_rcu_free() argument
1409 page = container_of(head, struct page, rcu_head); in kmem_rcu_free()
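
The rcu_head hits (slab.c, slob.c, slab_common.c, list_lru.c, mmu_gather.c, memcontrol.c) share one pattern: embed a small callback head in the object, pass only that head to call_rcu(), and recover the enclosing object in the callback with container_of() (slob.c uses a plain cast because its head is the first member). The user-space sketch below shows only the recovery step; fake_call_rcu() invokes the callback immediately instead of waiting for a grace period, and struct cache_object is invented.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-in for struct rcu_head: just carries the callback pointer. */
struct cb_head {
    void (*func)(struct cb_head *head);
};

struct cache_object {
    char name[16];
    int size;
    struct cb_head rcu;    /* deliberately NOT the first member */
};

static void object_free_cb(struct cb_head *head)
{
    /* Same recovery as the slab.c and list_lru.c hits, minus the grace period. */
    struct cache_object *obj = container_of(head, struct cache_object, rcu);

    printf("freeing '%s' (size %d)\n", obj->name, obj->size);
    free(obj);
}

/* Stand-in for call_rcu(): a real implementation defers past a grace period. */
static void fake_call_rcu(struct cb_head *head, void (*func)(struct cb_head *))
{
    head->func = func;
    head->func(head);      /* immediate invocation, illustration only */
}

int main(void)
{
    struct cache_object *obj = malloc(sizeof(*obj));

    snprintf(obj->name, sizeof(obj->name), "demo");
    obj->size = 128;
    fake_call_rcu(&obj->rcu, object_free_cb);
    return 0;
}
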
/mm/kasan/
quarantine.c
43 struct qlist_node *head; member
52 return !q->head; in qlist_empty()
57 q->head = q->tail = NULL; in qlist_init()
65 q->head = qlink; in qlist_put()
84 to->tail->next = from->head; in qlist_move_all()
161 qlink = q->head; in qlist_free_all()
273 curr = from->head; in qlist_move_cache()
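
The kasan/quarantine.c hits describe a qlist: a singly linked queue tracked by head and tail pointers so that append and whole-list splice are both O(1). A user-space sketch under those assumptions; struct qnode and struct qlist are re-declared here and are not KASAN's definitions.

#include <stdio.h>
#include <stddef.h>

struct qnode { struct qnode *next; int v; };

struct qlist {
    struct qnode *head;
    struct qnode *tail;
};

/* Append one node: O(1) thanks to the tail pointer (cf. qlist_put). */
static void qlist_put(struct qlist *q, struct qnode *n)
{
    n->next = NULL;
    if (!q->head)
        q->head = n;
    else
        q->tail->next = n;
    q->tail = n;
}

/* Splice all of 'from' onto the end of 'to' and empty it (cf. qlist_move_all). */
static void qlist_move_all(struct qlist *from, struct qlist *to)
{
    if (!from->head)
        return;
    if (!to->head) {
        *to = *from;
    } else {
        to->tail->next = from->head;
        to->tail = from->tail;
    }
    from->head = from->tail = NULL;
}

int main(void)
{
    struct qlist a = { NULL, NULL }, b = { NULL, NULL };
    struct qnode n1 = { .v = 1 }, n2 = { .v = 2 }, n3 = { .v = 3 };

    qlist_put(&a, &n1);
    qlist_put(&a, &n2);
    qlist_put(&b, &n3);
    qlist_move_all(&b, &a);          /* a now holds 1, 2, 3 */

    for (struct qnode *n = a.head; n; n = n->next)
        printf("%d\n", n->v);
    return 0;
}
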