/lib/
D | iov_iter.c
      1005  static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,  in iter_xarray_populate_pages() argument
      1023  pages[ret] = find_subpage(page, xas.xa_index);  in iter_xarray_populate_pages()
      1024  get_page(pages[ret]);  in iter_xarray_populate_pages()
      1033  struct page ***pages, size_t maxsize,  in iter_xarray_get_pages() argument
      1045  count = want_pages_array(pages, maxsize, offset, maxpages);  in iter_xarray_get_pages()
      1048  nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);  in iter_xarray_get_pages()
      1097  struct page ***pages, size_t maxsize,  in __iov_iter_get_pages_alloc() argument
      1121  n = want_pages_array(pages, maxsize, *start, maxpages);  in __iov_iter_get_pages_alloc()
      1124  res = get_user_pages_fast(addr, n, gup_flags, *pages);  in __iov_iter_get_pages_alloc()
      1136  n = want_pages_array(pages, maxsize, *start, maxpages);  in __iov_iter_get_pages_alloc()
      [all …]
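These hits are the internals of the iov_iter page-extraction path: want_pages_array() sizes the destination array and get_user_pages_fast() pins the user memory behind the iterator. Callers normally reach this machinery through the public iov_iter_get_pages2() helper. A minimal caller-side sketch, assuming a v6.0+ kernel where that helper exists; demo_pin_iter_pages() and the fixed 16-entry page array are invented for illustration:

#include <linux/mm.h>
#include <linux/uio.h>

/* Hypothetical caller: pin up to 16 pages' worth of an iov_iter. */
static ssize_t demo_pin_iter_pages(struct iov_iter *iter)
{
        struct page *pages[16];
        size_t start;           /* offset of the data within pages[0] */
        ssize_t bytes;
        int i, npages;

        bytes = iov_iter_get_pages2(iter, pages, ARRAY_SIZE(pages) * PAGE_SIZE,
                                    ARRAY_SIZE(pages), &start);
        if (bytes <= 0)
                return bytes;   /* 0: empty iterator; negative: error */

        npages = DIV_ROUND_UP(start + bytes, PAGE_SIZE);
        /* ... hand (pages, start, bytes) to DMA or an sg_table ... */

        for (i = 0; i < npages; i++)
                put_page(pages[i]);     /* drop the references we were given */
        return bytes;
}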
D | scatterlist.c
       455  struct page **pages, unsigned int n_pages, unsigned int offset,  in sg_alloc_append_table_from_pages() argument
       484  if (page_to_pfn(pages[0]) == next_pfn) {  in sg_alloc_append_table_from_pages()
       486  while (n_pages && pages_are_mergeable(pages[0], last_pg)) {  in sg_alloc_append_table_from_pages()
       490  last_pg = pages[0];  in sg_alloc_append_table_from_pages()
       491  pages++;  in sg_alloc_append_table_from_pages()
       505  !pages_are_mergeable(pages[i], pages[i - 1])) {  in sg_alloc_append_table_from_pages()
       521  !pages_are_mergeable(pages[j], pages[j - 1]))  in sg_alloc_append_table_from_pages()
       538  sg_set_page(s, pages[cur_page],  in sg_alloc_append_table_from_pages()
       578  int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,  in sg_alloc_table_from_pages_segment() argument
       586  err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset,  in sg_alloc_table_from_pages_segment()
       [all …]
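sg_alloc_table_from_pages_segment() is the exported wrapper around the appending variant shown above; pages_are_mergeable() is what lets it coalesce physically contiguous pages into fewer scatterlist entries. A minimal sketch of the caller side, assuming the v5.15+ API; demo_build_sgt() and the 64 KiB segment cap are illustrative choices:

#include <linux/scatterlist.h>
#include <linux/sizes.h>

static int demo_build_sgt(struct sg_table *sgt, struct page **pages,
                          unsigned int n_pages, unsigned long len)
{
        int err;

        /* offset 0 into pages[0]; no segment longer than 64 KiB */
        err = sg_alloc_table_from_pages_segment(sgt, pages, n_pages, 0,
                                                len, SZ_64K, GFP_KERNEL);
        if (err)
                return err;

        /* ... dma_map_sgtable() and the actual I/O would go here ... */

        sg_free_table(sgt);
        return 0;
}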
D | test_lockup.c
       304  static void test_alloc_pages(struct list_head *pages)  in test_alloc_pages() argument
       315  list_add(&page->lru, pages);  in test_alloc_pages()
       319  static void test_free_pages(struct list_head *pages)  in test_free_pages() argument
       323  list_for_each_entry_safe(page, next, pages, lru)  in test_free_pages()
       325  INIT_LIST_HEAD(pages);  in test_free_pages()
       353  LIST_HEAD(pages);  in test_lockup()
       359  test_alloc_pages(&pages);  in test_lockup()
       372  test_free_pages(&pages);  in test_lockup()
       392  test_alloc_pages(&pages);  in test_lockup()
       398  test_free_pages(&pages);  in test_lockup()
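The pattern in these hits is the stock way to hold a batch of order-0 pages: thread each page through its lru list_head, then tear the whole batch down with one safe-iteration loop. A minimal sketch of the same idiom; the demo_* names are invented:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

static void demo_alloc_page_list(struct list_head *pages, unsigned int nr)
{
        while (nr--) {
                struct page *page = alloc_page(GFP_KERNEL);

                if (!page)
                        break;
                /* page->lru is free for the owner's use on allocated pages */
                list_add(&page->lru, pages);
        }
}

static void demo_free_page_list(struct list_head *pages)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, pages, lru)
                __free_page(page);
        INIT_LIST_HEAD(pages);  /* the list nodes died with their pages */
}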
D | kunit_iov_iter.c
        51  struct page **pages;  in iov_kunit_create_buffer() local
        55  pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);  in iov_kunit_create_buffer()
        56  KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);  in iov_kunit_create_buffer()
        57  *ppages = pages;  in iov_kunit_create_buffer()
        59  got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);  in iov_kunit_create_buffer()
        61  release_pages(pages, got);  in iov_kunit_create_buffer()
        65  buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);  in iov_kunit_create_buffer()
       219  struct page **pages, size_t npages,  in iov_kunit_load_bvec() argument
       236  page = pages[pr->page];  in iov_kunit_load_bvec()
       376  struct page **pages, size_t npages)  in iov_kunit_load_xarray()
       [all …]
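iov_kunit_create_buffer() shows a compact recipe: bulk-allocate the pages, then vmap() them with VM_MAP_PUT_PAGES so that a single vfree() later unmaps the buffer, drops the page references, and frees the array. A sketch of the same recipe outside KUnit; demo_vmap_bulk() is invented, and the array is kvcalloc()ed because vfree() releases it with kvfree():

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *demo_vmap_bulk(unsigned int npages)
{
        struct page **pages;
        unsigned int got;
        void *buf;

        pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
        if (got != npages) {
                release_pages(pages, got);      /* undo a partial allocation */
                kvfree(pages);
                return NULL;
        }

        buf = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
        if (!buf) {
                release_pages(pages, got);
                kvfree(pages);
                return NULL;
        }
        /* vmap() now owns pages[] and the pages; vfree(buf) releases all */
        return buf;
}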
D | test_vmalloc.c
       368  struct page **pages;  in vm_map_ram_test() local
       372  pages = kcalloc(map_nr_pages, sizeof(struct page *), GFP_KERNEL);  in vm_map_ram_test()
       373  if (!pages)  in vm_map_ram_test()
       376  nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);  in vm_map_ram_test()
       382  v_ptr = vm_map_ram(pages, map_nr_pages, NUMA_NO_NODE);  in vm_map_ram_test()
       389  __free_page(pages[i]);  in vm_map_ram_test()
       391  kfree(pages);  in vm_map_ram_test()
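vm_map_ram() is the transient cousin of vmap(): cheaper to set up, but the caller must pair it with vm_unmap_ram() for exactly the same page count. A sketch of the round trip the test performs; demo_map_ram() is an invented name:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static int demo_map_ram(unsigned int nr)
{
        struct page **pages;
        unsigned int got, i;
        void *v = NULL;

        pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        got = alloc_pages_bulk_array(GFP_KERNEL, nr, pages);
        if (got == nr)
                v = vm_map_ram(pages, nr, NUMA_NO_NODE);
        if (v) {
                memset(v, 0, (size_t)nr * PAGE_SIZE);   /* touch the mapping */
                vm_unmap_ram(v, nr);                    /* count must match */
        }

        for (i = 0; i < got; i++)
                __free_page(pages[i]);
        kfree(pages);
        return v ? 0 : -ENOMEM;
}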
D | test_hmm.c
       710  struct page **pages, struct dmirror *dmirror)  in dmirror_atomic_map() argument
       721  if (!pages[i])  in dmirror_atomic_map()
       724  entry = pages[i];  in dmirror_atomic_map()
       783  struct page *pages[64];  in dmirror_exclusive() local
       802  if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))  in dmirror_exclusive()
       805  next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);  in dmirror_exclusive()
       807  ret = make_device_exclusive_range(mm, addr, next, pages, NULL);  in dmirror_exclusive()
       814  mapped = dmirror_atomic_map(addr, next, pages, dmirror);  in dmirror_exclusive()
       816  if (pages[i]) {  in dmirror_exclusive()
       817  unlock_page(pages[i]);  in dmirror_exclusive()
       [all …]
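dmirror_exclusive() walks the range in 64-page chunks because make_device_exclusive_range() fills a fixed caller-supplied array; every non-NULL entry comes back locked and referenced, so the caller must unlock_page() and put_page() each one. A sketch of that contract, assuming the v5.14-era API (later kernels rework it into a per-page make_device_exclusive()); demo_make_exclusive() is invented:

#include <linux/mm.h>
#include <linux/rmap.h>

static int demo_make_exclusive(struct mm_struct *mm, unsigned long addr,
                               unsigned long end, void *owner)
{
        struct page *pages[64];
        unsigned long next;
        int i, ret;

        for (; addr < end; addr = next) {
                next = min(end, addr + (ARRAY_SIZE(pages) << PAGE_SHIFT));

                mmap_read_lock(mm);
                ret = make_device_exclusive_range(mm, addr, next, pages, owner);
                mmap_read_unlock(mm);
                if (ret < 0)
                        return ret;

                /* ... point the device at the now-exclusive entries ... */

                for (i = 0; i < ret; i++) {
                        if (!pages[i])
                                continue;       /* could not be made exclusive */
                        unlock_page(pages[i]);
                        put_page(pages[i]);
                }
        }
        return 0;
}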
D | Kconfig.kfence
        45  pages are required; with one containing the object and two adjacent
        46  ones used as guard pages.
        80  pages, resulting in spurious use-after-frees. The main purpose of
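The help text describes KFENCE's layout: a sampled allocation gets a dedicated page with guard pages on either side, so an off-by-one access faults immediately instead of silently corrupting a neighbour. A hedged illustration of the kind of bug this layout catches; whether any given allocation lands in the KFENCE pool depends on the sampling interval:

#include <linux/slab.h>

static void demo_kfence_candidate(void)
{
        char *buf = kmalloc(64, GFP_KERNEL);

        if (!buf)
                return;
        buf[64] = 0;    /* OOB write: traps in the guard page if sampled */
        kfree(buf);
}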
D | Kconfig.debug
       933  mechanism for non-highmem pages and on non-highmem systems.
      2918  tristate "Test freeing pages"
      2921  freeing a block of pages and a speculative page reference.
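The "Test freeing pages" entry at 2918 exercises a subtle refcounting rule: a speculative get_page() taken before free_pages() must keep the memory alive until the matching put_page(). A minimal sketch of that sequence, modelled on what such a test does; the demo_* name is invented:

#include <linux/gfp.h>
#include <linux/mm.h>

static void demo_speculative_free(void)
{
        unsigned long addr = __get_free_page(GFP_KERNEL);
        struct page *page;

        if (!addr)
                return;
        page = virt_to_page((void *)addr);

        get_page(page);         /* e.g. page cache taking a speculative ref */
        free_page(addr);        /* drops the allocation's reference only */
        put_page(page);         /* last reference: the page is freed here */
}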
/lib/raid6/

D | int.uc
       124  /* P/Q data pages */

D | neon.uc
       105  /* P/Q data pages */

D | s390vx.uc
       130  /* P/Q data pages */
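All three hits are the same comment above each template's syndrome loop: the last two pages of a stripe hold P (plain XOR parity) and Q (the Reed-Solomon syndrome over GF(2^8)), computed from the data pages. A scalar sketch of what the unrolled .uc templates vectorise, using the field's 0x11d reduction polynomial; demo_gen_syndrome() and gf_mul2() are invented names:

#include <stddef.h>
#include <stdint.h>

/* Multiply by x (generator 2) in GF(2^8) with polynomial 0x11d. */
static uint8_t gf_mul2(uint8_t b)
{
        return (b << 1) ^ ((b & 0x80) ? 0x1d : 0);
}

/* dptrs[0..disks-3]: data pages; dptrs[disks-2]: P; dptrs[disks-1]: Q */
static void demo_gen_syndrome(int disks, size_t bytes, uint8_t **dptrs)
{
        uint8_t *p = dptrs[disks - 2];
        uint8_t *q = dptrs[disks - 1];
        size_t off;
        int d;

        for (off = 0; off < bytes; off++) {
                uint8_t wp = dptrs[disks - 3][off];     /* highest data disk */
                uint8_t wq = wp;

                for (d = disks - 4; d >= 0; d--) {
                        wp ^= dptrs[d][off];              /* P: running XOR */
                        wq = gf_mul2(wq) ^ dptrs[d][off]; /* Q: Horner step */
                }
                p[off] = wp;
                q[off] = wq;
        }
}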