/mm/

memfd.c
  memfd_tag_pins():
      31  static void memfd_tag_pins(struct xa_state *xas)    [argument]
      39  xas_lock_irq(xas);
      40  xas_for_each(xas, page, ULONG_MAX) {
      48  xas_set_mark(xas, MEMFD_TAG_PINNED);
      50  xas_set(xas, page->index + cache_count);
      57  xas_pause(xas);
      58  xas_unlock_irq(xas);
      60  xas_lock_irq(xas);
      62  xas_unlock_irq(xas);
  memfd_wait_for_pins():
      76  XA_STATE(xas, &mapping->i_pages, 0);
  … (further references not shown)

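The memfd.c references show a common advanced-XArray walk: iterate every entry with xas_for_each() under xas_lock_irq(), set a search mark on entries that match some condition, and periodically xas_pause() and drop the lock so interrupts are not held off across a huge array. Below is a minimal sketch of that shape only; RESCHED_BATCH, entry_is_pinned() and the use of XA_MARK_1 are placeholders invented for the example, not the names memfd.c uses.

    #include <linux/xarray.h>
    #include <linux/sched.h>

    #define RESCHED_BATCH 4096      /* arbitrary batch size for this sketch */

    /* Hypothetical predicate standing in for a real "is this entry pinned?" check. */
    static bool entry_is_pinned(void *entry)
    {
        return entry != NULL;
    }

    static void tag_pinned_entries(struct xarray *xa)
    {
        XA_STATE(xas, xa, 0);
        void *entry;
        unsigned int batch = 0;

        xas_lock_irq(&xas);
        xas_for_each(&xas, entry, ULONG_MAX) {
            if (entry_is_pinned(entry))
                xas_set_mark(&xas, XA_MARK_1);

            if (++batch < RESCHED_BATCH)
                continue;
            batch = 0;

            /* Remember our position, release the lock, and let other
             * work run before resuming the iteration. */
            xas_pause(&xas);
            xas_unlock_irq(&xas);
            cond_resched();
            xas_lock_irq(&xas);
        }
        xas_unlock_irq(&xas);
    }
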
filemap.c
  page_cache_delete():
     122  XA_STATE(xas, &mapping->i_pages, page->index);
     125  mapping_set_update(&xas, mapping);
     129  xas_set_order(&xas, page->index, compound_order(page));
     137  xas_store(&xas, shadow);
     138  xas_init_marks(&xas);
  page_cache_delete_batch():
     297  XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
     302  mapping_set_update(&xas, mapping);
     303  xas_for_each(&xas, page, ULONG_MAX) {
     325  if (page->index == xas.xa_index)
     334  if (page->index + compound_nr(page) - 1 == xas.xa_index)
  … (further references not shown)

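The page_cache_delete() lines amount to replacing a page's slot with a shadow entry (or NULL) and clearing its search marks. A reduced sketch of just that store follows, with mapping_set_update(), xas_set_order() and the page-cache accounting left out; delete_entry() is an illustrative name, and in filemap.c the i_pages lock is already held by the caller, whereas this sketch takes it itself.

    #include <linux/xarray.h>

    /* Replace the entry at @index with @shadow (which may be NULL) and
     * reset its marks, roughly the XArray half of deleting a page. */
    static void delete_entry(struct xarray *xa, unsigned long index, void *shadow)
    {
        XA_STATE(xas, xa, index);

        xas_lock_irq(&xas);
        xas_store(&xas, shadow);    /* overwrite the old entry */
        xas_init_marks(&xas);       /* clear DIRTY/TOWRITE-style marks */
        xas_unlock_irq(&xas);
    }
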
khugepaged.c
  collapse_file():
    1550  XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
    1573  xas_lock_irq(&xas);
    1574  xas_create_range(&xas);
    1575  if (!xas_error(&xas))
    1577  xas_unlock_irq(&xas);
    1578  if (!xas_nomem(&xas, GFP_KERNEL)) {
    1597  xas_set(&xas, start);
    1599  struct page *page = xas_next(&xas);
    1601  VM_BUG_ON(index != xas.xa_index);
    1610  if (!xas_next_entry(&xas, end - 1)) {
  … (further references not shown)

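collapse_file() pre-creates every slot for a PMD-sized range, and when node allocation fails under the lock it drops the lock and lets xas_nomem() allocate with GFP_KERNEL before retrying. A minimal version of that retry loop, without khugepaged's SCAN_* bookkeeping (the function name and parameters are illustrative):

    #include <linux/xarray.h>
    #include <linux/gfp.h>

    /* Make sure slots exist for the 2^order-aligned range at @start.
     * Returns 0 or a negative errno taken from the xa_state. */
    static int create_range(struct xarray *xa, unsigned long start,
                            unsigned int order)
    {
        XA_STATE_ORDER(xas, xa, start, order);

        do {
            xas_lock_irq(&xas);
            xas_create_range(&xas);     /* may fail -ENOMEM under the lock */
            xas_unlock_irq(&xas);
            /* xas_nomem() allocates the missing node with GFP_KERNEL and
             * asks for a retry by returning true. */
        } while (xas_nomem(&xas, GFP_KERNEL));

        return xas_error(&xas);
    }

xas_nomem() only returns true when it actually allocated something, so the loop cannot spin indefinitely: either the GFP_KERNEL allocation succeeds and the retry makes progress, or the error is left on the xa_state for xas_error() to report.
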
swap_state.c
  add_to_swap_cache():
     119  XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
     130  xas_lock_irq(&xas);
     131  xas_create_range(&xas);
     132  if (xas_error(&xas))
     135  VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
     137  xas_store(&xas, page);
     138  xas_next(&xas);
     144  xas_unlock_irq(&xas);
     145  } while (xas_nomem(&xas, gfp));
     147  if (!xas_error(&xas))
  … (further references not shown)

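add_to_swap_cache() stores the same page into every slot of the range, walking forward with xas_next(), and wraps the whole locked section in the standard do { ... } while (xas_nomem(...)) retry. A simplified one-slot-at-a-time sketch of that idiom; store_range() and its parameters are invented for illustration, and the real function also sets page_private() on each tail page and updates counters.

    #include <linux/xarray.h>
    #include <linux/gfp.h>

    /* Store @item into @nr consecutive slots starting at @first. */
    static int store_range(struct xarray *xa, unsigned long first,
                           unsigned long nr, void *item, gfp_t gfp)
    {
        XA_STATE(xas, xa, first);
        unsigned long i;

        do {
            xas_lock_irq(&xas);
            xas_set(&xas, first);       /* restart from the beginning on retry */
            for (i = 0; i < nr; i++) {
                xas_store(&xas, item);  /* same entry in every slot */
                if (xas_error(&xas))
                    break;              /* usually -ENOMEM; retried below */
                xas_next(&xas);
            }
            xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        return xas_error(&xas);
    }
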
workingset.c
  shadow_lru_isolate():
     455  XA_STATE(xas, node->array, 0);
     495  xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
     496  xas.xa_offset = node->offset;
     497  xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
     498  xas_set_update(&xas, workingset_update_node);
     503  xas_store(&xas, NULL);

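shadow_lru_isolate() points the xa_state directly at the victim node's slot in its parent and registers workingset_update_node() with xas_set_update() so that the store which frees the node also updates the shadow-node LRU. The parent-pointer surgery is internal to workingset.c and is not reproduced here; this sketch only shows the xas_set_update() + xas_store(NULL) part, with a made-up callback.

    #include <linux/xarray.h>
    #include <linux/printk.h>

    /* Hypothetical node-update hook: invoked by the XArray when a store
     * changes a node's contents, which is how the real workingset code
     * keeps track of shadow nodes.  The pr_debug() is purely illustrative. */
    static void example_update_node(struct xa_node *node)
    {
        pr_debug("xa_node %p now holds %d entries\n", node, node->count);
    }

    /* Clear the entry at @index, notifying example_update_node() of any
     * node changes the store causes. */
    static void clear_with_update(struct xarray *xa, unsigned long index)
    {
        XA_STATE(xas, xa, index);

        xas_set_update(&xas, example_update_node);
        xas_lock_irq(&xas);
        xas_store(&xas, NULL);
        xas_unlock_irq(&xas);
    }
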
page-writeback.c
  tag_pages_for_writeback():
    2108  XA_STATE(xas, &mapping->i_pages, start);
    2112  xas_lock_irq(&xas);
    2113  xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
    2114  xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
    2118  xas_pause(&xas);
    2119  xas_unlock_irq(&xas);
    2121  xas_lock_irq(&xas);
    2123  xas_unlock_irq(&xas);
  __test_set_page_writeback():
    2765  XA_STATE(xas, &mapping->i_pages, page_index(page));
    2770  xas_lock_irqsave(&xas, flags);
  … (further references not shown)

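tag_pages_for_writeback() walks only the entries carrying one mark and stamps a second mark onto them, so the writeback pass can later iterate a stable TOWRITE set instead of re-finding dirty pages. A minimal sketch of that retagging; XA_MARK_0/XA_MARK_1 stand in for the real PAGECACHE_TAG_* values, and the periodic xas_pause()/cond_resched() dance of the real function is omitted (it matches the memfd sketch above).

    #include <linux/xarray.h>

    /* Give every entry in [start, end] that carries XA_MARK_0 the
     * additional mark XA_MARK_1. */
    static void retag_range(struct xarray *xa, unsigned long start,
                            unsigned long end)
    {
        XA_STATE(xas, xa, start);
        void *entry;

        xas_lock_irq(&xas);
        xas_for_each_marked(&xas, entry, end, XA_MARK_0)
            xas_set_mark(&xas, XA_MARK_1);
        xas_unlock_irq(&xas);
    }
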
migrate.c
  migrate_page_move_mapping():
     401  XA_STATE(xas, &mapping->i_pages, page_index(page));
     423  xas_lock_irq(&xas);
     424  if (page_count(page) != expected_count || xas_load(&xas) != page) {
     425  xas_unlock_irq(&xas);
     430  xas_unlock_irq(&xas);
     462  xas_store(&xas, newpage);
     467  xas_next(&xas);
     468  xas_store(&xas, newpage);
     479  xas_unlock(&xas);
  migrate_huge_page_move_mapping():
     519  XA_STATE(xas, &mapping->i_pages, page_index(page));
  … (further references not shown)

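migrate_page_move_mapping() checks, under the lock, that the slot still holds the old page (and that the page's refcount matches expectations) before storing the new page over it; tail slots of a compound page are handled with xas_next() plus further stores. The refcount and tail-page handling are omitted in this sketch, and replace_entry() is an illustrative name.

    #include <linux/xarray.h>
    #include <linux/errno.h>

    /* Swap @old_entry for @new_entry at @index, but only if the slot still
     * holds @old_entry. */
    static int replace_entry(struct xarray *xa, unsigned long index,
                             void *old_entry, void *new_entry)
    {
        XA_STATE(xas, xa, index);

        xas_lock_irq(&xas);
        if (xas_load(&xas) != old_entry) {
            xas_unlock_irq(&xas);
            return -EAGAIN;     /* somebody changed the slot under us */
        }
        xas_store(&xas, new_entry);
        xas_unlock_irq(&xas);
        return 0;
    }
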
shmem.c
  shmem_replace_entry():
     356  XA_STATE(xas, &mapping->i_pages, index);
     361  item = xas_load(&xas);
     364  xas_store(&xas, replacement);
  shmem_add_to_page_cache():
     615  XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
     631  xas_lock_irq(&xas);
     632  entry = xas_find_conflict(&xas);
     634  xas_set_err(&xas, -EEXIST);
     635  xas_create_range(&xas);
     636  if (xas_error(&xas))
     639  xas_store(&xas, page);
  … (further references not shown)

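shmem_add_to_page_cache() refuses to insert over an unexpected existing entry: xas_find_conflict() scans the target range, and on a mismatch xas_set_err(-EEXIST) puts the error on the xa_state so the normal xas_error()/xas_nomem() exit path handles it. A single-index sketch of that check, leaving out shmem's expected-swap-entry handling, xas_create_range() and the accounting; insert_unique() is an invented name.

    #include <linux/xarray.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    /* Insert @item at @index unless something already lives there. */
    static int insert_unique(struct xarray *xa, unsigned long index,
                             void *item, gfp_t gfp)
    {
        XA_STATE(xas, xa, index);

        do {
            xas_lock_irq(&xas);
            if (xas_find_conflict(&xas)) {
                xas_set_err(&xas, -EEXIST);
                goto unlock;
            }
            xas_store(&xas, item);
    unlock:
            xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        return xas_error(&xas);
    }
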
truncate.c
  __clear_shadow_entry():
      37  XA_STATE(xas, &mapping->i_pages, index);
      39  xas_set_update(&xas, workingset_update_node);
      40  if (xas_load(&xas) != entry)
      42  xas_store(&xas, NULL);

huge_memory.c
  split_huge_page_to_list():
    2823  XA_STATE(xas, &mapping->i_pages, page_index(head));
    2830  if (xas_load(&xas) != head)