
Searched refs:head (Results 1 – 25 of 27) sorted by relevance

/mm/
debug.c
49 struct page *head = compound_head(page); in __dump_page() local
62 if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) { in __dump_page()
76 head = page; in __dump_page()
87 mapcount = PageSlab(head) ? 0 : page_mapcount(page); in __dump_page()
90 page, page_ref_count(head), mapcount, mapping, in __dump_page()
95 head, compound_order(head), in __dump_page()
96 head_compound_mapcount(head), in __dump_page()
97 head_compound_pincount(head)); in __dump_page()
100 head, compound_order(head), in __dump_page()
101 head_compound_mapcount(head)); in __dump_page()
[all …]
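
Most of the hits in this listing revolve around the compound_head() idiom: any page of a higher-order allocation can be resolved back to its head page, which carries the reference count and mapcount for the whole compound. Below is a minimal userspace sketch of that relation, assuming a simplified struct page; the kernel really encodes the head pointer and PG_head flag inside page->flags and compound_head().

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in: not the kernel's real struct page encoding. */
    struct page {
            bool is_head;
            struct page *head;      /* tail pages point back at their head */
    };

    static struct page *compound_head(struct page *page)
    {
            return page->is_head ? page : page->head;
    }

    int main(void)
    {
            struct page pages[4] = { { .is_head = true } };

            for (int i = 1; i < 4; i++)
                    pages[i].head = &pages[0];

            /* __dump_page() relies on exactly this: every tail resolves to
             * one head, so per-compound state is read from a single place. */
            printf("tail resolves to head? %s\n",
                   compound_head(&pages[2]) == &pages[0] ? "yes" : "no");
            return 0;
    }
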
gup.c
71 struct page *head = compound_head(page); in try_get_compound_head() local
73 if (WARN_ON_ONCE(page_ref_count(head) < 0)) in try_get_compound_head()
75 if (unlikely(!page_cache_add_speculative(head, refs))) in try_get_compound_head()
87 if (unlikely(compound_head(page) != head)) { in try_get_compound_head()
88 put_page_refs(head, refs); in try_get_compound_head()
92 return head; in try_get_compound_head()
258 struct page **list, struct page **head, in compound_range_next() argument
273 *head = page; in compound_range_next()
284 struct page **list, struct page **head, in compound_next() argument
299 *head = page; in compound_next()
[all …]
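
The gup.c hits follow a speculative-reference pattern: pin the presumed head, then re-check that the page still belongs to that head and back out if the compound was split concurrently. A hedged sketch of that shape with C11 atomics; try_get_head() and the two-field struct page are illustrative only, and the real code uses an add-unless-zero primitive rather than this simplified load-then-add.

    #include <stdatomic.h>
    #include <stddef.h>

    struct page {
            atomic_int refcount;
            struct page *head;      /* illustrative; the kernel derives this */
    };

    static struct page *try_get_head(struct page *page, int refs)
    {
            struct page *head = page->head;

            if (atomic_load(&head->refcount) <= 0) /* being freed: hands off */
                    return NULL;
            atomic_fetch_add(&head->refcount, refs); /* speculative grab */

            /* The compound may have been split meanwhile; re-check and
             * give back the references if the head moved under us. */
            if (page->head != head) {
                    atomic_fetch_sub(&head->refcount, refs);
                    return NULL;
            }
            return head;
    }

    int main(void)
    {
            struct page head_pg = { .refcount = 1 };
            struct page tail = { .head = &head_pg };

            head_pg.head = &head_pg;
            return try_get_head(&tail, 1) == &head_pg ? 0 : 1;
    }
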
huge_memory.c
2321 static void lru_add_page_tail(struct page *head, struct page *tail, in lru_add_page_tail() argument
2324 VM_BUG_ON_PAGE(!PageHead(head), head); in lru_add_page_tail()
2325 VM_BUG_ON_PAGE(PageCompound(tail), head); in lru_add_page_tail()
2326 VM_BUG_ON_PAGE(PageLRU(tail), head); in lru_add_page_tail()
2331 VM_WARN_ON(PageLRU(head)); in lru_add_page_tail()
2336 VM_WARN_ON(!PageLRU(head)); in lru_add_page_tail()
2338 list_add_tail(&tail->lru, &head->lru); in lru_add_page_tail()
2342 static void __split_huge_page_tail(struct page *head, int tail, in __split_huge_page_tail() argument
2345 struct page *page_tail = head + tail; in __split_huge_page_tail()
2356 page_tail->flags |= (head->flags & in __split_huge_page_tail()
[all …]
hugetlb_vmemmap.c
218 int alloc_huge_page_vmemmap(struct hstate *h, struct page *head) in alloc_huge_page_vmemmap() argument
221 unsigned long vmemmap_addr = (unsigned long)head; in alloc_huge_page_vmemmap()
224 if (!HPageVmemmapOptimized(head)) in alloc_huge_page_vmemmap()
241 ClearHPageVmemmapOptimized(head); in alloc_huge_page_vmemmap()
246 void free_huge_page_vmemmap(struct hstate *h, struct page *head) in free_huge_page_vmemmap() argument
248 unsigned long vmemmap_addr = (unsigned long)head; in free_huge_page_vmemmap()
264 SetHPageVmemmapOptimized(head); in free_huge_page_vmemmap()
memory-failure.c
1094 #define head (1UL << PG_head) macro
1112 { head, head, MF_MSG_HUGE, me_huge_page },
1137 #undef head
1185 struct page *head = compound_head(page); in __get_hwpoison_page() local
1189 ret = get_hwpoison_huge_page(head, &hugetlb); in __get_hwpoison_page()
1198 if (!HWPoisonHandlable(head)) in __get_hwpoison_page()
1201 if (get_page_unless_zero(head)) { in __get_hwpoison_page()
1202 if (head == compound_head(page)) in __get_hwpoison_page()
1207 put_page(head); in __get_hwpoison_page()
1480 struct page *head = compound_head(page); in __get_huge_page_for_hwpoison() local
[all …]
hugetlb_vmemmap.h
14 int alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
15 void free_huge_page_vmemmap(struct hstate *h, struct page *head);
27 static inline int alloc_huge_page_vmemmap(struct hstate *h, struct page *head) in alloc_huge_page_vmemmap() argument
32 static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head) in free_huge_page_vmemmap() argument
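
The header pairs the real prototypes with static-inline no-op stubs, so callers compile unchanged when the optimization is configured out. A sketch of that conventional header shape follows; the guard symbol is assumed to be CONFIG_HUGETLB_PAGE_FREE_VMEMMAP (the v5.14-era name), so treat it as illustrative.

    struct hstate;
    struct page;

    #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
    int alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
    void free_huge_page_vmemmap(struct hstate *h, struct page *head);
    #else
    /* Optimization disabled: restoring the vmemmap trivially succeeds
     * and freeing it is a no-op. */
    static inline int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
    {
            return 0;
    }

    static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
    {
    }
    #endif
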
ksm.c
159 struct list_head *head; member
209 struct stable_node *head; member
343 return dup->head == STABLE_NODE_DUP_HEAD; in is_stable_node_dup()
350 dup->head = STABLE_NODE_DUP_HEAD; in stable_node_chain_add_dup()
371 dup->head = NULL; in stable_node_dup_del()
657 if (stable_node->head == &migrate_nodes) in remove_node_from_stable_tree()
774 stable_node = rmap_item->head; in remove_rmap_item_from_tree()
791 rmap_item->head = NULL; in remove_rmap_item_from_tree()
1556 if (page_node && page_node->head != &migrate_nodes) { in stable_tree_search()
1636 VM_BUG_ON(page_node->head != &migrate_nodes); in stable_tree_search()
[all …]
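
Note the double duty ->head does in ksm.c: most of the time it is a real list pointer, but STABLE_NODE_DUP_HEAD is a sentinel address that is only ever compared against, never dereferenced. A standalone illustration of that sentinel-pointer idiom (the names below are mine, not ksm's):

    #include <stdbool.h>
    #include <stdio.h>

    struct node {
            void *head;             /* real list pointer, NULL, or a tag */
    };

    /* A unique address used purely as a marker; never dereferenced. */
    static char dup_tag;
    #define DUP_HEAD ((void *)&dup_tag)

    static bool is_dup(const struct node *n)
    {
            return n->head == DUP_HEAD;
    }

    int main(void)
    {
            struct node n = { .head = DUP_HEAD };

            printf("dup? %s\n", is_dup(&n) ? "yes" : "no");
            return 0;
    }
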
zsmalloc.c
720 struct zspage *head; in insert_zspage() local
723 head = list_first_entry_or_null(&class->fullness_list[fullness], in insert_zspage()
729 if (head && get_zspage_inuse(zspage) < get_zspage_inuse(head)) in insert_zspage()
730 list_add(&zspage->list, &head->list); in insert_zspage()
1604 unsigned long head; in find_alloced_obj() local
1614 head = obj_to_head(page, addr + offset); in find_alloced_obj()
1615 if (head & OBJ_ALLOCATED_TAG) { in find_alloced_obj()
1616 handle = head & ~OBJ_ALLOCATED_TAG; in find_alloced_obj()
1961 unsigned long handle, head; in zs_page_migrate() local
1998 head = obj_to_head(page, s_addr + pos); in zs_page_migrate()
[all …]
migrate.c
661 static bool buffer_migrate_lock_buffers(struct buffer_head *head, in buffer_migrate_lock_buffers() argument
664 struct buffer_head *bh = head; in buffer_migrate_lock_buffers()
672 } while (bh != head); in buffer_migrate_lock_buffers()
685 bh = head; in buffer_migrate_lock_buffers()
694 } while (bh != head); in buffer_migrate_lock_buffers()
702 struct buffer_head *bh, *head; in __buffer_migrate_page() local
714 head = page_buffers(page); in __buffer_migrate_page()
715 if (!buffer_migrate_lock_buffers(head, mode)) in __buffer_migrate_page()
725 bh = head; in __buffer_migrate_page()
732 } while (bh != head); in __buffer_migrate_page()
[all …]
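
The do/while (bh != head) loops above are the standard walk over a page's buffer_heads, which are linked through b_this_page into a circular singly linked ring. A toy userspace version of that traversal shape:

    #include <stdio.h>

    struct buffer_head {
            int blocknr;
            struct buffer_head *b_this_page;        /* ring link */
    };

    static void for_each_buffer(struct buffer_head *head)
    {
            struct buffer_head *bh = head;

            do {
                    printf("buffer for block %d\n", bh->blocknr);
                    bh = bh->b_this_page;
            } while (bh != head);   /* the ring closes back on the head */
    }

    int main(void)
    {
            struct buffer_head a = { .blocknr = 0 };
            struct buffer_head b = { .blocknr = 1 };
            struct buffer_head c = { .blocknr = 2 };

            a.b_this_page = &b;
            b.b_this_page = &c;
            c.b_this_page = &a;
            for_each_buffer(&a);
            return 0;
    }
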
filemap.c
2346 struct page *head; in filemap_get_read_batch() local
2349 for (head = xas_load(&xas); head; head = xas_next(&xas)) { in filemap_get_read_batch()
2350 if (xas_retry(&xas, head)) in filemap_get_read_batch()
2352 if (xas.xa_index > max || xa_is_value(head)) in filemap_get_read_batch()
2354 if (!page_cache_get_speculative(head)) in filemap_get_read_batch()
2358 if (unlikely(head != xas_reload(&xas))) in filemap_get_read_batch()
2361 if (!pagevec_add(pvec, head)) in filemap_get_read_batch()
2363 if (!PageUptodate(head)) in filemap_get_read_batch()
2365 if (PageReadahead(head)) in filemap_get_read_batch()
2367 if (PageHead(head)) { in filemap_get_read_batch()
[all …]
swapfile.c
397 return cluster_is_null(&list->head); in cluster_list_empty()
402 return cluster_next(&list->head); in cluster_list_first()
407 cluster_set_null(&list->head); in cluster_list_init()
416 cluster_set_next_flag(&list->head, idx, 0); in cluster_list_add_tail()
439 idx = cluster_next(&list->head); in cluster_list_del_first()
441 cluster_set_null(&list->head); in cluster_list_del_first()
444 cluster_set_next_flag(&list->head, in cluster_list_del_first()
629 cluster->index = si->free_clusters.head; in scan_swap_map_try_ssd_cluster()
3574 struct page *head; in add_swap_count_continuation() local
3622 head = vmalloc_to_page(si->swap_map + offset); in add_swap_count_continuation()
[all …]
memory_hotplug.c
1697 struct page *page, *head; in scan_movable_pages() local
1719 head = compound_head(page); in scan_movable_pages()
1727 if (HPageMigratable(head)) in scan_movable_pages()
1729 skip = compound_nr(head) - (pfn - page_to_pfn(head)); in scan_movable_pages()
1742 struct page *page, *head; in do_migrate_range() local
1752 head = compound_head(page); in do_migrate_range()
1755 pfn = page_to_pfn(head) + compound_nr(head) - 1; in do_migrate_range()
1756 isolate_hugetlb(head, &source); in do_migrate_range()
1759 pfn = page_to_pfn(head) + thp_nr_pages(page) - 1; in do_migrate_range()
hugetlb.c
393 struct list_head *head = &resv->regions; in add_reservation_in_range() local
404 list_for_each_entry_safe(rg, trg, head, link) { in add_reservation_in_range()
647 struct list_head *head = &resv->regions; in region_del() local
654 list_for_each_entry_safe(rg, trg, head, link) { in region_del()
774 struct list_head *head = &resv->regions; in region_count() local
780 list_for_each_entry(rg, head, link) { in region_count()
932 struct list_head *head = &resv_map->region_cache; in resv_map_release() local
939 list_for_each_entry_safe(rg, trg, head, link) { in resv_map_release()
1972 struct page *head = compound_head(page); in dissolve_free_huge_page() local
1973 struct hstate *h = page_hstate(head); in dissolve_free_huge_page()
[all …]
slob.c
134 struct rcu_head head; member
658 static void kmem_rcu_free(struct rcu_head *head) in kmem_rcu_free() argument
660 struct slob_rcu *slob_rcu = (struct slob_rcu *)head; in kmem_rcu_free()
673 call_rcu(&slob_rcu->head, kmem_rcu_free); in kmem_cache_free()
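
slob.c defers the real free past an RCU grace period by embedding a struct rcu_head in the object and passing it to call_rcu(); the callback then recovers the enclosing object. A userspace sketch of that embed-and-recover pattern follows; the kernel's kmem_rcu_free() can cast directly because head is the first member, while container_of() shown here is the general form.

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the kernel types; the RCU machinery itself is elided. */
    struct rcu_head {
            struct rcu_head *next;
            void (*func)(struct rcu_head *);
    };

    struct slob_rcu {
            struct rcu_head head;
            int size;
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void kmem_rcu_free(struct rcu_head *head)
    {
            struct slob_rcu *slob_rcu = container_of(head, struct slob_rcu, head);

            printf("freeing object of size %d\n", slob_rcu->size);
    }

    int main(void)
    {
            struct slob_rcu obj = { .size = 64 };

            kmem_rcu_free(&obj.head);       /* stands in for the grace period */
            return 0;
    }
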
slub.c
1367 void *head, void *tail, int bulk_cnt, in free_debug_processing() argument
1371 void *object = head; in free_debug_processing()
1634 void *head, void *tail, int bulk_cnt, in free_debug_processing() argument
1725 void **head, void **tail, in slab_free_freelist_hook() argument
1730 void *next = *head; in slab_free_freelist_hook()
1731 void *old_tail = *tail ? *tail : *head; in slab_free_freelist_hook()
1739 *head = NULL; in slab_free_freelist_hook()
1749 set_freepointer(s, object, *head); in slab_free_freelist_hook()
1750 *head = object; in slab_free_freelist_hook()
1762 if (*head == *tail) in slab_free_freelist_hook()
[all …]
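
slab_free_freelist_hook() threads the freelist through the freed objects themselves: the first word of each dead object becomes the next pointer, and *head is pushed down as objects are linked in. A minimal sketch of that intrusive push, where set_freepointer() is reduced to a plain store without SLUB's offset handling or hardening:

    #include <stdio.h>

    /* Store the freelist link inside the dead object itself. */
    static void set_freepointer(void *object, void *next)
    {
            *(void **)object = next;
    }

    static void push_free(void **head, void *object)
    {
            set_freepointer(object, *head);
            *head = object;
    }

    int main(void)
    {
            void *slot_a[4], *slot_b[4];    /* stand-ins for freed objects */
            void *head = NULL;

            push_free(&head, slot_a);
            push_free(&head, slot_b);       /* freelist: b -> a -> NULL */
            printf("head is slot_b? %s\n",
                   head == (void *)slot_b ? "yes" : "no");
            return 0;
    }
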
vmalloc.c
931 struct rb_node *parent, struct rb_node **link, struct list_head *head) in link_va() argument
938 head = &rb_entry(parent, struct vmap_area, rb_node)->list; in link_va()
940 head = head->prev; in link_va()
965 list_add(&va->list, head); in link_va()
1044 struct rb_root *root, struct list_head *head) in insert_vmap_area() argument
1051 link_va(va, root, parent, link, head); in insert_vmap_area()
1057 struct list_head *head) in insert_vmap_area_augment() argument
1068 link_va(va, root, parent, link, head); in insert_vmap_area_augment()
1086 struct rb_root *root, struct list_head *head) in merge_or_add_vmap_area() argument
1116 if (next != head) { in merge_or_add_vmap_area()
[all …]
mmu_gather.c
154 static void tlb_remove_table_rcu(struct rcu_head *head) in tlb_remove_table_rcu() argument
156 __tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu)); in tlb_remove_table_rcu()
vmscan.c
3075 if (lruvec->mm_state.head == &mm->lru_gen.list) in lru_gen_del_mm()
3076 lruvec->mm_state.head = lruvec->mm_state.head->prev; in lru_gen_del_mm()
3287 if (!mm_state->head) in iterate_mm_list()
3288 mm_state->head = &mm_list->fifo; in iterate_mm_list()
3290 if (mm_state->head == &mm_list->fifo) in iterate_mm_list()
3294 mm_state->head = mm_state->head->next; in iterate_mm_list()
3295 if (mm_state->head == &mm_list->fifo) { in iterate_mm_list()
3302 if (!mm_state->tail || mm_state->tail == mm_state->head) { in iterate_mm_list()
3303 mm_state->tail = mm_state->head->next; in iterate_mm_list()
3307 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); in iterate_mm_list()
[all …]
readahead.c
402 pgoff_t head; in count_history_pages() local
405 head = page_cache_prev_miss(mapping, index - 1, max); in count_history_pages()
408 return index - 1 - head; in count_history_pages()
mempolicy.c
1019 struct page *head = compound_head(page); in migrate_page_add() local
1023 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { in migrate_page_add()
1024 if (!isolate_lru_page(head)) { in migrate_page_add()
1025 list_add_tail(&head->lru, pagelist); in migrate_page_add()
1026 mod_node_page_state(page_pgdat(head), in migrate_page_add()
1027 NR_ISOLATED_ANON + page_is_file_lru(head), in migrate_page_add()
1028 thp_nr_pages(head)); in migrate_page_add()
list_lru.c
167 struct list_head *head) in list_lru_isolate_move() argument
169 list_move(item, head); in list_lru_isolate_move()
memcontrol.c
786 struct page *head = compound_head(page); /* rmap on tail pages */ in __mod_lruvec_page_state() local
792 memcg = page_memcg(head); in __mod_lruvec_page_state()
2049 struct page *head = compound_head(page); /* rmap on tail pages */ in lock_page_memcg() local
2063 memcg = page_memcg(head); in lock_page_memcg()
2077 if (memcg != page_memcg(head)) { in lock_page_memcg()
2113 struct page *head = compound_head(page); in unlock_page_memcg() local
2115 __unlock_page_memcg(page_memcg(head)); in unlock_page_memcg()
3329 void split_page_memcg(struct page *head, unsigned int nr) in split_page_memcg() argument
3331 struct mem_cgroup *memcg = page_memcg(head); in split_page_memcg()
3338 head[i].memcg_data = head->memcg_data; in split_page_memcg()
[all …]
page_alloc.c
1560 LIST_HEAD(head); in free_pcppages_bulk()
1604 list_add_tail(&page->lru, &head); in free_pcppages_bulk()
1631 list_for_each_entry_safe(page, tmp, &head, lru) { in free_pcppages_bulk()
5769 int head = PageHead(page); in __free_pages() local
5773 else if (!head) in __free_pages()
9215 struct page *head = compound_head(page); in has_unmovable_pages() local
9219 if (!hugepage_migration_supported(page_hstate(head))) in has_unmovable_pages()
9221 } else if (!PageLRU(head) && !__PageMovable(head)) { in has_unmovable_pages()
9225 skip_pages = compound_nr(head) - (page - head); in has_unmovable_pages()
rmap.c
1253 struct page *head = compound_head(page); in page_add_file_rmap() local
1257 SetPageDoubleMap(head); in page_add_file_rmap()
1259 clear_page_mlock(head); in page_add_file_rmap()
/mm/kasan/
quarantine.c
34 struct qlist_node *head; member
44 return !q->head; in qlist_empty()
49 q->head = q->tail = NULL; in qlist_init()
57 q->head = qlink; in qlist_put()
76 to->tail->next = from->head; in qlist_move_all()
181 qlink = q->head; in qlist_free_all()
307 curr = from->head; in qlist_move_cache()
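
The quarantine's qlist is a singly linked list that tracks both head and tail, so a whole batch can be spliced onto another list in O(1), which is what qlist_move_all() above does. A simplified, self-contained version of the two core operations:

    #include <stddef.h>

    struct qlist_node {
            struct qlist_node *next;
    };

    struct qlist_head {
            struct qlist_node *head, *tail;
    };

    static void qlist_put(struct qlist_head *q, struct qlist_node *qlink)
    {
            qlink->next = NULL;
            if (!q->head)
                    q->head = qlink;        /* first entry */
            else
                    q->tail->next = qlink;
            q->tail = qlink;
    }

    static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
    {
            if (!from->head)
                    return;
            if (!to->head) {
                    *to = *from;                    /* destination was empty */
            } else {
                    to->tail->next = from->head;    /* O(1) tail splice */
                    to->tail = from->tail;
            }
            from->head = from->tail = NULL;
    }

    int main(void)
    {
            struct qlist_node n1 = { 0 }, n2 = { 0 };
            struct qlist_head a = { 0 }, b = { 0 };

            qlist_put(&a, &n1);
            qlist_put(&a, &n2);
            qlist_move_all(&a, &b);         /* b now owns n1 -> n2 */
            return (b.head == &n1 && b.tail == &n2 && !a.head) ? 0 : 1;
    }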
