
Searched refs:page (Results 1 – 11 of 11) sorted by relevance

/lib/
test_hmm.c
95 struct page *free_pages;
155 static struct dmirror_device *dmirror_page_to_device(struct page *page) in dmirror_page_to_device() argument
158 return container_of(page->pgmap, struct dmirror_chunk, in dmirror_page_to_device()
170 struct page *page; in dmirror_do_fault() local
180 page = hmm_pfn_to_page(*pfns); in dmirror_do_fault()
181 WARN_ON(!page); in dmirror_do_fault()
183 entry = page; in dmirror_do_fault()
325 struct page *page; in dmirror_do_read() local
329 page = xa_untag_pointer(entry); in dmirror_do_read()
330 if (!page) in dmirror_do_read()
[all …]
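
The test_hmm.c hits revolve around turning HMM pfn array entries back into struct page pointers. Below is a minimal sketch of that conversion step, not the test_hmm.c code itself; the walking helper and its name are assumptions for illustration.

#include <linux/hmm.h>
#include <linux/bug.h>

/* Hypothetical helper: walk the pfn array that hmm_range_fault() filled in
 * and convert each valid entry to its struct page, the way dmirror_do_fault()
 * above does before stashing the pages in an xarray. */
static void walk_hmm_pfns(struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct page *page;

		if (!(range->hmm_pfns[i] & HMM_PFN_VALID))
			continue;
		page = hmm_pfn_to_page(range->hmm_pfns[i]);
		WARN_ON(!page);		/* a valid pfn entry must map a page */
	}
}
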
iov_iter.c
72 struct page *head = NULL; \
173 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes, in copy_page_to_iter_iovec() argument
195 kaddr = kmap_atomic(page); in copy_page_to_iter_iovec()
226 kaddr = kmap(page); in copy_page_to_iter_iovec()
243 kunmap(page); in copy_page_to_iter_iovec()
257 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes, in copy_page_from_iter_iovec() argument
279 kaddr = kmap_atomic(page); in copy_page_from_iter_iovec()
310 kaddr = kmap(page); in copy_page_from_iter_iovec()
327 kunmap(page); in copy_page_from_iter_iovec()
374 pipe->bufs[idx].page, in sanity()
[all …]
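
copy_page_to_iter_iovec() and copy_page_from_iter_iovec() above both follow the usual highmem pattern: map the page, copy, unmap, with kmap_atomic() as the non-sleeping fast path and kmap()/kunmap() as the sleepable fallback. A stripped-down sketch of that pattern; the helper names and the plain memcpy destination are illustrative, not the iov_iter API.

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_page_atomic(struct page *page, size_t offset,
				  void *dst, size_t bytes)
{
	char *kaddr = kmap_atomic(page);	/* non-sleeping mapping */

	memcpy(dst, kaddr + offset, bytes);
	kunmap_atomic(kaddr);
}

static void copy_from_page_sleepable(struct page *page, size_t offset,
				     void *dst, size_t bytes)
{
	char *kaddr = kmap(page);		/* may sleep; mapping persists */

	memcpy(dst, kaddr + offset, bytes);
	kunmap(page);
}
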
test_free_pages.c
20 struct page *page = virt_to_page(addr); in test_free_pages() local
23 get_page(page); in test_free_pages()
25 put_page(page); in test_free_pages()
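
The three test_free_pages.c hits show the point of that test: hold an extra ("speculative") reference taken with get_page() while the block is freed with free_pages(), then drop it with put_page(). A minimal sketch of the same sequence; order 0 and GFP_KERNEL are chosen only for illustration.

#include <linux/gfp.h>
#include <linux/mm.h>

static void pin_and_release_example(void)
{
	unsigned long addr = __get_free_page(GFP_KERNEL);
	struct page *page;

	if (!addr)
		return;

	page = virt_to_page(addr);
	get_page(page);			/* extra, "speculative" reference */
	free_pages(addr, 0);		/* drops the allocation's reference */
	put_page(page);			/* last reference: page really freed */
}
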
buildid.c
130 struct page *page; in build_id_parse() local
138 page = find_get_page(vma->vm_file->f_mapping, 0); in build_id_parse()
139 if (!page) in build_id_parse()
143 page_addr = kmap_atomic(page); in build_id_parse()
160 put_page(page); in build_id_parse()
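
build_id_parse() above reads the first page of the file backing a VMA straight out of the page cache. A sketch of that lookup/map/release sequence, with a hypothetical parse_first_page() callback standing in for the actual ELF note parsing.

#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mm.h>

static int read_first_file_page(struct vm_area_struct *vma,
				int (*parse_first_page)(const void *buf))
{
	struct page *page;
	void *page_addr;
	int ret;

	if (!vma->vm_file)
		return -EINVAL;

	page = find_get_page(vma->vm_file->f_mapping, 0);
	if (!page)
		return -EFAULT;		/* page 0 not in the page cache */

	page_addr = kmap_atomic(page);
	ret = parse_first_page(page_addr);
	kunmap_atomic(page_addr);
	put_page(page);
	return ret;
}
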
scatterlist.c
443 struct page **pages, unsigned int n_pages, unsigned int offset, in sg_alloc_append_table_from_pages()
565 int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, in sg_alloc_table_from_pages_segment()
603 struct page *page; in sgl_alloc_order() local
627 page = alloc_pages(gfp, order); in sgl_alloc_order()
628 if (!page) { in sgl_alloc_order()
633 sg_set_page(sg, page, elem_len, 0); in sgl_alloc_order()
675 struct page *page; in sgl_free_n_order() local
681 page = sg_page(sg); in sgl_free_n_order()
682 if (page) in sgl_free_n_order()
683 __free_pages(page, order); in sgl_free_n_order()
[all …]
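
The scatterlist.c hits pair two directions of the same idea: sgl_alloc_order() backs each entry with freshly allocated pages via sg_set_page(), and sgl_free_n_order() walks the list with sg_page() to free them. A simplified sketch of that pattern over a caller-provided scatterlist; the helper names and error handling are illustrative.

#include <linux/scatterlist.h>
#include <linux/gfp.h>

static int fill_sgl_with_pages(struct scatterlist *sgl, unsigned int nents,
			       unsigned int order)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i) {
		struct page *page = alloc_pages(GFP_KERNEL, order);

		if (!page)
			return -ENOMEM;	/* caller frees whatever was set so far */
		sg_set_page(sg, page, PAGE_SIZE << order, 0);
	}
	return 0;
}

static void free_sgl_pages(struct scatterlist *sgl, unsigned int nents,
			   unsigned int order)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i) {
		struct page *page = sg_page(sg);

		if (page)
			__free_pages(page, order);
	}
}
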
test_meminit.c
65 struct page *page; in do_alloc_pages_order() local
69 page = alloc_pages(GFP_KERNEL, order); in do_alloc_pages_order()
70 buf = page_address(page); in do_alloc_pages_order()
72 __free_pages(page, order); in do_alloc_pages_order()
74 page = alloc_pages(GFP_KERNEL, order); in do_alloc_pages_order()
75 buf = page_address(page); in do_alloc_pages_order()
79 __free_pages(page, order); in do_alloc_pages_order()
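
do_alloc_pages_order() above is checking whether page allocations come back zeroed (e.g. with init_on_alloc): allocate, dirty the memory, free, allocate again, and count non-zero bytes. A condensed sketch of that idea; the allocator is not guaranteed to hand back the same pages, which the real test tolerates.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static int count_nonzero_after_realloc(int order)
{
	size_t size = PAGE_SIZE << order;
	struct page *page;
	unsigned char *buf;
	size_t i;
	int nonzero = 0;

	/* First allocation: dirty the memory with a known pattern. */
	page = alloc_pages(GFP_KERNEL, order);
	if (!page)
		return -ENOMEM;
	buf = page_address(page);
	memset(buf, 0xAA, size);
	__free_pages(page, order);

	/* Second allocation: with init_on_alloc this should come back zeroed. */
	page = alloc_pages(GFP_KERNEL, order);
	if (!page)
		return -ENOMEM;
	buf = page_address(page);
	for (i = 0; i < size; i++)
		nonzero += buf[i] != 0;
	__free_pages(page, order);

	return nonzero;
}
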
test_lockup.c
306 struct page *page; in test_alloc_pages() local
310 page = alloc_pages(alloc_pages_gfp, alloc_pages_order); in test_alloc_pages()
311 if (!page) { in test_alloc_pages()
315 list_add(&page->lru, pages); in test_alloc_pages()
321 struct page *page, *next; in test_free_pages() local
323 list_for_each_entry_safe(page, next, pages, lru) in test_free_pages()
324 __free_pages(page, alloc_pages_order); in test_free_pages()
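
test_alloc_pages()/test_free_pages() in test_lockup.c batch pages on a plain list through the page->lru field, which is free for the owner's use once a page has been allocated. A small sketch of that batching pattern; the function names are placeholders.

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm_types.h>

static int grab_pages(struct list_head *pages, unsigned int nr, gfp_t gfp,
		      unsigned int order)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		struct page *page = alloc_pages(gfp, order);

		if (!page)
			return -ENOMEM;
		list_add(&page->lru, pages);	/* lru is ours while we own the page */
	}
	return 0;
}

static void drop_pages(struct list_head *pages, unsigned int order)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru) {
		list_del(&page->lru);
		__free_pages(page, order);
	}
}
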
stackdepot.c
299 struct page *page = NULL; in __stack_depot_save() local
347 page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER); in __stack_depot_save()
348 if (page) in __stack_depot_save()
349 prealloc = page_address(page); in __stack_depot_save()
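
The stackdepot hits show a preallocation pattern: __stack_depot_save() allocates its backing pages before entering the depot's locked section and only consumes the preallocation if it is still needed. A generic sketch of that shape, with illustrative names rather than stackdepot's internals.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(pool_lock);
static void *pool_space;

static void refill_pool(gfp_t alloc_flags, unsigned int order)
{
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;

	/* Potentially slow/sleeping allocation: do it outside the lock. */
	page = alloc_pages(alloc_flags, order);
	if (page)
		prealloc = page_address(page);

	spin_lock_irqsave(&pool_lock, flags);
	if (!pool_space && prealloc) {
		pool_space = prealloc;		/* preallocation consumed */
		prealloc = NULL;
	}
	spin_unlock_irqrestore(&pool_lock, flags);

	if (prealloc)				/* lost the race: give it back */
		free_pages((unsigned long)prealloc, order);
}
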
kfifo.c
301 struct page *page; in setup_sgl_buf() local
310 page = virt_to_page(buf); in setup_sgl_buf()
315 struct page *npage; in setup_sgl_buf()
320 if (page_to_phys(page) != page_to_phys(npage) - l) { in setup_sgl_buf()
321 sg_set_page(sgl, page, l - off, off); in setup_sgl_buf()
325 page = npage; in setup_sgl_buf()
330 sg_set_page(sgl, page, len, off); in setup_sgl_buf()
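
setup_sgl_buf() above maps a virtually contiguous kfifo buffer onto scatterlist entries, starting a new entry only when page_to_phys() shows the next page is not physically adjacent to the current run. A close but simplified sketch of that loop; zero-length input handling is omitted and the helper name is an assumption.

#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/io.h>

static unsigned int buf_to_sgl(struct scatterlist *sgl, unsigned int nents,
			       void *buf, unsigned int len)
{
	struct page *page = virt_to_page(buf);
	unsigned int off = offset_in_page(buf);
	unsigned int l = 0, n = 0;

	while (len >= l + PAGE_SIZE - off) {
		struct page *npage;

		l += PAGE_SIZE;
		buf += PAGE_SIZE;
		npage = virt_to_page(buf);
		/* Next page not physically adjacent: close the current run. */
		if (page_to_phys(page) != page_to_phys(npage) - l) {
			sg_set_page(sgl, page, l - off, off);
			sgl = sg_next(sgl);
			if (++n == nents || !sgl)
				return n;
			page = npage;
			len -= l - off;
			l = off = 0;
		}
	}
	sg_set_page(sgl, page, len, off);	/* final (possibly partial) run */
	return n + 1;
}
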
test_bpf.c
8455 struct page *page; in generate_test_data() local
8476 page = alloc_page(GFP_KERNEL); in generate_test_data()
8478 if (!page) in generate_test_data()
8481 ptr = kmap(page); in generate_test_data()
8485 kunmap(page); in generate_test_data()
8486 skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA); in generate_test_data()
8492 __free_page(page); in generate_test_data()
8766 struct page *page[2]; in build_test_skb() local
8770 page[i] = alloc_page(GFP_KERNEL); in build_test_skb()
8771 if (!page[i]) { in build_test_skb()
[all …]
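
generate_test_data() and build_test_skb() above allocate pages, fill them through kmap(), and attach them to an skb as paged fragments with skb_add_rx_frag(). A minimal sketch of the single-fragment case; the fragment length and fill pattern are arbitrary.

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int add_filled_frag(struct sk_buff *skb, unsigned int frag_len)
{
	struct page *page = alloc_page(GFP_KERNEL);
	void *ptr;

	if (!page)
		return -ENOMEM;

	ptr = kmap(page);
	memset(ptr, 0xAB, frag_len);	/* test payload */
	kunmap(page);

	/* The skb takes over the page reference; truesize grows by frag_len. */
	skb_add_rx_frag(skb, 0, page, 0, frag_len, frag_len);
	return 0;
}
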
Kconfig.debug
843 bool "Debug page-flags operations"
846 Enables extra validation on page flags operations.
851 bool "Debug arch page table for semantics compliance"
857 architecture page table helper functions on various platforms in
873 Enable some costly sanity checks in virtual to page code. This can
2584 tristate "Test heap/page initialization"
2586 Test if the kernel is zero-initializing heap and page allocations.
2608 freeing a block of pages and a speculative page reference.
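
The Kconfig.debug hits are the prompts and help texts for page-related debug options. As an illustration of how such an option is typically consumed, here is a sketch using VM_BUG_ON_PGFLAGS(), which mmdebug.h turns into a real check only when the "Debug page-flags operations" option (assumed here to be CONFIG_DEBUG_VM_PGFLAGS) is enabled; the surrounding helper is hypothetical.

#include <linux/mmdebug.h>
#include <linux/page-flags.h>

static void debug_checked_setflag(struct page *page)
{
	/* With CONFIG_DEBUG_VM_PGFLAGS=y this BUGs on a tail page;
	 * without it, the check compiles away entirely. */
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	SetPageReferenced(page);
}
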