/kernel/linux/linux-5.10/net/ceph/pagevec.c:

      13  void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
      19          set_page_dirty_lock(pages[i]);
      20          put_page(pages[i]);
      22          kvfree(pages);
      26  void ceph_release_page_vector(struct page **pages, int num_pages)
      31          __free_pages(pages[i], 0);
      32          kfree(pages);
      41          struct page **pages;                    [local in ceph_alloc_page_vector()]
      44          pages = kmalloc_array(num_pages, sizeof(*pages), flags);
      45          if (!pages)
          [all …]
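Taken together, the matches sketch a complete page-vector lifecycle: kmalloc_array() for the pointer array, one page allocation per slot, and a symmetric free path (put_page() for referenced pages, __free_pages() for owned ones). A minimal sketch of that pairing, assuming a kernel-module context; the helper names are ours, not ceph's:

    /* Hypothetical helpers modelled on ceph_alloc/release_page_vector(). */
    #include <linux/err.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    static struct page **demo_alloc_page_vector(int num_pages, gfp_t flags)
    {
            struct page **pages;
            int i;

            pages = kmalloc_array(num_pages, sizeof(*pages), flags);
            if (!pages)
                    return ERR_PTR(-ENOMEM);
            for (i = 0; i < num_pages; i++) {
                    pages[i] = alloc_page(flags);
                    if (!pages[i]) {
                            /* unwind the pages allocated so far */
                            while (--i >= 0)
                                    __free_page(pages[i]);
                            kfree(pages);
                            return ERR_PTR(-ENOMEM);
                    }
            }
            return pages;
    }

    static void demo_release_page_vector(struct page **pages, int num_pages)
    {
            int i;

            for (i = 0; i < num_pages; i++)
                    __free_page(pages[i]);
            kfree(pages);
    }

The in-tree version differs in details (its allocation helper, for one), but the alloc/unwind/free shape is the same.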
/kernel/linux/linux-5.10/mm/percpu-vm.c:

      33          static struct page **pages;             [local in pcpu_get_pages()]
      34          size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
      38          if (!pages)
      39                  pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
      40          return pages;
      54                  struct page **pages, int page_start, int page_end)   [in pcpu_free_pages()]
      61                  struct page *page = pages[pcpu_page_idx(cpu, i)];
      82                  struct page **pages, int page_start, int page_end,   [in pcpu_alloc_pages()]
      92                  struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
     103                  __free_page(pages[pcpu_page_idx(cpu, i)]);
          [all …]
/kernel/linux/linux-5.10/mm/gup.c:

     267  void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
     279          unpin_user_pages(pages, npages);
     284          struct page *page = compound_head(pages[index]);
     321  void unpin_user_pages(struct page **pages, unsigned long npages)
     338          unpin_user_page(pages[index]);
    1029                  unsigned int gup_flags, struct page **pages,   [in __get_user_pages()]
    1041          VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
    1062                  pages ? &pages[i] : NULL);
    1074          i = follow_hugetlb_page(mm, vma, pages, vmas,
    1128          if (pages) {
          [all …]
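The unpin_user_pages*() matches are the release half of the FOLL_PIN API: pages pinned via pin_user_pages*() must be released with unpin_user_page()/unpin_user_pages(), never plain put_page(). A hedged sketch of a typical caller against the 5.10 API (the function name and buffer handling are illustrative):

    #include <linux/mm.h>
    #include <linux/slab.h>

    static int demo_touch_user_buffer(unsigned long start, int nr_pages)
    {
            struct page **pages;
            int pinned;

            pages = kvcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            pinned = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
            if (pinned < 0) {
                    kvfree(pages);
                    return pinned;
            }

            /* ... access pages[0..pinned-1], e.g. via kmap() or DMA ... */

            /* mark the pages dirty under the page lock, then drop the pins */
            unpin_user_pages_dirty_lock(pages, pinned, true);
            kvfree(pages);
            return 0;
    }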
/kernel/linux/linux-5.10/mm/gup_benchmark.c:

      24  static void put_back_pages(unsigned int cmd, struct page **pages,
      33                  put_page(pages[i]);
      39          unpin_user_pages(pages, nr_pages);
      44  static void verify_dma_pinned(unsigned int cmd, struct page **pages,
      55          page = pages[i];
      73          struct page **pages;                    [local in __gup_benchmark_ioctl()]
      82          pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
      83          if (!pages)
     110                          pages + i);
     113                  nr = get_user_pages(addr, nr, gup->flags, pages + i,
          [all …]
/kernel/linux/linux-5.10/fs/isofs/compress.c:

      42                  struct page **pages, unsigned poffset,   [in zisofs_uncompress_block()]
      68          if (!pages[i])
      70          memset(page_address(pages[i]), 0, PAGE_SIZE);
      71          flush_dcache_page(pages[i]);
      72          SetPageUptodate(pages[i]);
     122          if (pages[curpage]) {
     123                  stream.next_out = page_address(pages[curpage])
     175          if (pages[curpage]) {
     176                  flush_dcache_page(pages[curpage]);
     177                  SetPageUptodate(pages[curpage]);
          [all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c:

      12                  struct sg_table *pages)         [in huge_free_pages()]
      18          for_each_sgt_page(page, sgt_iter, pages) {
      24          sg_free_table(pages);
      25          kfree(pages);
      34          struct sg_table *pages;                 [local in huge_get_pages()]
      37          pages = kmalloc(sizeof(*pages), GFP);
      38          if (!pages)
      41          if (sg_alloc_table(pages, npages, GFP)) {
      42                  kfree(pages);
      46          sg = pages->sgl;
          [all …]
/kernel/linux/linux-5.10/include/drm/ttm/ttm_set_memory.h:

      40  static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
      42          return set_pages_array_wb(pages, addrinarray);
      45  static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
      47          return set_pages_array_wc(pages, addrinarray);
      50  static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
      52          return set_pages_array_uc(pages, addrinarray);
      78  static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
      83                  unmap_page_from_agp(pages[i]);
      87  static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
      92                  map_page_into_agp(pages[i]);
          [all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/vkms/vkms_gem.c:

      37          WARN_ON(gem->pages);                    [in vkms_gem_free_object()]
      61          if (obj->pages) {                       [in vkms_gem_fault()]
      62                  get_page(obj->pages[page_offset]);
      63                  vmf->page = obj->pages[page_offset];
     155          if (!vkms_obj->pages) {                 [in _get_pages()]
     156                  struct page **pages = drm_gem_get_pages(gem_obj);
     158                  if (IS_ERR(pages))
     159                          return pages;
     161                  if (cmpxchg(&vkms_obj->pages, NULL, pages))
     162                          drm_gem_put_pages(gem_obj, pages, false, true);
          [all …]
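_get_pages() uses a lockless first-writer-wins idiom: every racing thread builds its own page array, but only the one whose cmpxchg() swings the field from NULL keeps it; losers put their copy back. A distilled sketch, with a hypothetical object type standing in for vkms_gem_object:

    #include <drm/drm_gem.h>
    #include <linux/atomic.h>
    #include <linux/err.h>

    struct demo_gem_object {
            struct drm_gem_object base;
            struct page **pages;    /* NULL until first use */
    };

    static struct page **demo_get_pages(struct demo_gem_object *obj)
    {
            if (!obj->pages) {
                    struct page **pages = drm_gem_get_pages(&obj->base);

                    if (IS_ERR(pages))
                            return pages;
                    /* non-NULL old value: another thread won the race */
                    if (cmpxchg(&obj->pages, NULL, pages))
                            drm_gem_put_pages(&obj->base, pages, false, true);
            }
            return obj->pages;
    }

The cost of losing the race is one wasted get/put round trip; the common path takes no lock at all.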
/kernel/linux/linux-5.10/drivers/xen/xlate_mmu.c:

      48  static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,
      57                  page = pages[i / XEN_PFN_PER_PAGE];
      71          struct page **pages;                    [member]
      99          struct page *page = info->pages[info->index++];   [in remap_pte_fn()]
     148                            struct page **pages)  [in xen_xlate_remap_gfn_array()]
     163          data.pages = pages;
     184                            int nr, struct page **pages)   [in xen_xlate_unmap_gfn_range()]
     186          xen_for_each_gfn(pages, nr, unmap_gfn, NULL);
     217          struct page **pages;                    [local in xen_xlate_map_ballooned_pages()]
     226          pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);
          [all …]
/kernel/linux/linux-5.10/kernel/dma/remap.c:

      15          return area->pages;                     [in dma_common_find_pages()]
      22  void *dma_common_pages_remap(struct page **pages, size_t size,
      27          vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,
      30                  find_vm_area(vaddr)->pages = pages;
      42          struct page **pages;                    [local in dma_common_contiguous_remap()]
      46          pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
      47          if (!pages)
      50                  pages[i] = nth_page(page, i);
      51          vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
      52          kfree(pages);
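dma_common_contiguous_remap() shows the general recipe for giving one physically contiguous block a fresh kernel mapping: expand it into a page-pointer array, vmap() that, then discard the array, since the page tables are populated and the array was only needed while mapping. A sketch under those assumptions, using the generic VM_MAP flag rather than the DMA-internal VM_DMA_COHERENT:

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *demo_remap_contiguous(struct page *page, size_t size, pgprot_t prot)
    {
            int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
            struct page **pages;
            void *vaddr;
            int i;

            pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
            if (!pages)
                    return NULL;
            for (i = 0; i < count; i++)
                    pages[i] = nth_page(page, i);   /* i-th page of the block */

            vaddr = vmap(pages, count, VM_MAP, prot);
            kfree(pages);   /* PTEs are installed; the array is no longer needed */
            return vaddr;   /* release later with vunmap(vaddr) */
    }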
/kernel/linux/linux-5.10/drivers/gpu/drm/xen/xen_drm_front_gem.c:

      30          struct page **pages;                    [member]
      49          xen_obj->pages = kvmalloc_array(xen_obj->num_pages,   [in gem_alloc_pages_array()]
      51          return !xen_obj->pages ? -ENOMEM : 0;
      56          kvfree(xen_obj->pages);                 [in gem_free_pages_array()]
      57          xen_obj->pages = NULL;
     104                          xen_obj->pages);        [in gem_create()]
     120          xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
     121          if (IS_ERR(xen_obj->pages)) {
     122                  ret = PTR_ERR(xen_obj->pages);
     123                  xen_obj->pages = NULL;
          [all …]
/kernel/linux/linux-5.10/drivers/staging/media/ipu3/ipu3-dmamap.c:

      20  static void imgu_dmamap_free_buffer(struct page **pages,
      26                  __free_page(pages[count]);
      27          kvfree(pages);
      36          struct page **pages;                    [local in imgu_dmamap_alloc_buffer()]
      42          pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
      44          if (!pages)
      72                          imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);
      77                          pages[i++] = page++;
      80          return pages;
     100          struct page **pages;                    [local in imgu_dmamap_alloc()]
          [all …]
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/include/perf_test_util.h:

      45          uint64_t pages;                         [member]
      70          uint64_t pages;                         [local in guest_code()]
      77          pages = vcpu_args->pages;
      80          for (i = 0; i < pages; i++) {
      97          uint64_t pages = DEFAULT_GUEST_PHY_PAGES;   [local in create_vm()]
     101          pages += DEFAULT_STACK_PGS * vcpus;
     109          pages += (2 * pages) / PTES_PER_4K_PT;
     110          pages += ((2 * vcpus * vcpu_memory_bytes) >> PAGE_SHIFT_4K) /
     112          pages = vm_adjust_num_guest_pages(mode, pages);
     116          vm = vm_create(mode, pages, O_RDWR);
          [all …]
/kernel/linux/linux-5.10/fs/ramfs/file-nommu.c:

      65          struct page *pages;                     [local in ramfs_nommu_expand_for_mapping()]
      84          pages = alloc_pages(gfp, order);
      85          if (!pages)
      92          split_page(pages, order);
      96                  __free_page(pages + loop);
     100          data = page_address(pages);
     105                  struct page *page = pages + loop;
     124                          __free_page(pages + loop++);
     207          struct page **pages = NULL, **ptr, *page;   [local in ramfs_nommu_get_unmapped_area()]
     223          pages = kcalloc(lpages, sizeof(struct page *), GFP_KERNEL);
          [all …]
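ramfs_nommu_expand_for_mapping() needs an exact number of physically contiguous pages, so it over-allocates the next power-of-two block, split_page()s it into independent order-0 pages, and frees the unused tail. A hedged sketch of that trim-to-size idiom (the helper name is ours; the real caller also zeroes the data and attaches the pages to the page cache):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *demo_alloc_exact_pages(unsigned long npages, gfp_t gfp)
    {
            unsigned int order = get_order(npages << PAGE_SHIFT);
            struct page *pages = alloc_pages(gfp, order);
            unsigned long loop;

            if (!pages)
                    return NULL;

            /* give each constituent page its own refcount */
            split_page(pages, order);

            /* free the tail of the 2^order block we don't need */
            for (loop = npages; loop < (1UL << order); loop++)
                    __free_page(pages + loop);

            return pages;   /* npages contiguous order-0 pages */
    }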
/kernel/linux/linux-5.10/drivers/gpu/drm/ttm/ttm_page_alloc.c:

     247  static void ttm_pages_put(struct page *pages[], unsigned npages,
     253          if (ttm_set_pages_array_wb(pages, npages))
     259                  if (ttm_set_pages_wb(pages[i], pages_nr))
     262                  __free_pages(pages[i], order);
     442  static int ttm_set_pages_caching(struct page **pages,
     449                  r = ttm_set_pages_array_uc(pages, cpages);
     454                  r = ttm_set_pages_array_wc(pages, cpages);
     469  static void ttm_handle_caching_state_failure(struct list_head *pages,
     487  static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
     519                          ttm_handle_caching_state_failure(pages,
          [all …]
/kernel/linux/linux-5.10/include/xen/xen-ops.h:

      67                  unsigned int domid, bool no_translate, struct page **pages);
      72                  bool no_translate, struct page **pages)   [in xen_remap_pfn()]
      87                  struct page **pages);
      89                  int nr, struct page **pages);
     100                  struct page **pages)            [in xen_xlate_remap_gfn_array()]
     106                  int nr, struct page **pages)    [in xen_xlate_unmap_gfn_range()]
     137                  struct page **pages)            [in xen_remap_domain_gfn_array()]
     141                  prot, domid, pages);
     149                  false, pages);
     173                  struct page **pages)            [in xen_remap_domain_mfn_array()]
          [all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/vgem/vgem_drv.c:

      62          kvfree(vgem_obj->pages);                [in vgem_gem_free_object()]
      89          if (obj->pages) {                       [in vgem_gem_fault()]
      90                  get_page(obj->pages[page_offset]);
      91                  vmf->page = obj->pages[page_offset];
     269          struct page **pages;                    [local in vgem_pin_pages()]
     271                  pages = drm_gem_get_pages(&bo->base);
     272                  if (IS_ERR(pages)) {
     275                          return pages;
     278                  bo->pages = pages;
     282          return bo->pages;
          [all …]
/kernel/linux/linux-5.10/fs/squashfs/page_actor.c:

      30          if (actor->next_page == actor->pages)   [in cache_next_page()]
      42                  int pages, int length)          [in squashfs_page_actor_init()]
      49          actor->length = length ? : pages * PAGE_SIZE;
      51          actor->pages = pages;
      71          return actor->pageaddr = actor->next_page == actor->pages ? NULL :   [in direct_next_page()]
      82                  int pages, int length)          [in squashfs_page_actor_init_special()]
      89          actor->length = length ? : pages * PAGE_SIZE;
      91          actor->pages = pages;
/kernel/linux/linux-5.10/drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c:

      53                  page_obj[i].page = repool_info->pages[j];   [in get_pages_from_reserved_pool()]
      79          repool_info->pages[repool_info->index++] = page_obj->page;   [in free_pages_to_reserved_pool()]
      95          pool_info->pages = kmalloc(sizeof(struct page *) * pool_size,   [in hmm_reserved_pool_setup()]
      97          if (unlikely(!pool_info->pages)) {
     120          struct page *pages;                     [local in hmm_reserved_pool_init()]
     146                  pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
     147                  if (unlikely(!pages)) {
     163                  ret = set_pages_uc(pages, blk_pgnr);
     167                          __free_pages(pages, order);
     172                          repool_info->pages[i++] = pages + j;
          [all …]
/kernel/linux/linux-5.10/drivers/staging/android/ion/ion_cma_heap.c:

      33          struct page *pages;                     [local in ion_cma_allocate()]
      42          pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
      43          if (!pages)
      46          if (PageHighMem(pages)) {
      48                  struct page *page = pages;
      59                  memset(page_address(pages), 0, size);
      70          sg_set_page(table->sgl, pages, size, 0);
      72          buffer->priv_virt = pages;
      79          cma_release(cma_heap->cma, pages, nr_pages);
      86          struct page *pages = buffer->priv_virt;   [local in ion_cma_free()]
          [all …]
/kernel/linux/linux-5.10/arch/arm/plat-omap/sram.c:

      65          int pages;                              [local in omap_sram_push()]
      73          pages = PAGE_ALIGN(size) / PAGE_SIZE;
      75          set_memory_rw(base, pages);
      79          set_memory_ro(base, pages);
      80          set_memory_x(base, pages);
     101          int pages;                              [local in omap_map_sram()]
     125          pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
     127          set_memory_ro(base, pages);
     128          set_memory_x(base, pages);
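omap_sram_push() is a W^X dance: the SRAM text is normally read-only and executable, is briefly made writable while new code is copied in, then re-protected. A sketch of the same sequence (illustrative only; the real function also validates sizes and flushes the icache):

    #include <linux/mm.h>
    #include <linux/set_memory.h>
    #include <linux/string.h>

    static void *demo_sram_push(void *dst, const void *src, unsigned long size)
    {
            unsigned long base = (unsigned long)dst & PAGE_MASK;
            int pages = PAGE_ALIGN(size + ((unsigned long)dst - base)) / PAGE_SIZE;

            set_memory_rw(base, pages);     /* drop write protection */
            memcpy(dst, src, size);
            set_memory_ro(base, pages);     /* re-protect... */
            set_memory_x(base, pages);      /* ...but keep it executable */
            return dst;
    }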
/kernel/linux/linux-5.10/net/rds/info.c:

      65          struct page **pages;                    [member]
     122          iter->addr = kmap_atomic(*iter->pages);   [in rds_info_copy()]
     127                  "bytes %lu\n", *iter->pages, iter->addr,
     140                  iter->pages++;
     166          struct page **pages = NULL;             [local in rds_info_getsockopt()]
     190          pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
     191          if (!pages) {
     195          ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
     214          iter.pages = pages;
     237          if (pages)
          [all …]
/kernel/linux/linux-5.10/Documentation/admin-guide/mm/hugetlbpage.rst:

      30  persistent hugetlb pages in the kernel's huge page pool. It also displays
      32  and surplus huge pages in the pool of huge pages of default size.
      48  is the size of the pool of huge pages.
      50  is the number of huge pages in the pool that are not yet
      53  is short for "reserved," and is the number of huge pages for
      55  but no allocation has yet been made. Reserved huge pages
      57  huge page from the pool of huge pages at fault time.
      59  is short for "surplus," and is the number of huge pages in
      61  maximum number of surplus huge pages is controlled by
      67  pages of all sizes.
          [all …]
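These lines describe the /proc/meminfo pool counters (HugePages_Total, _Free, _Rsvd, _Surp). A small userspace illustration of the reserve-then-fault behaviour they document; it assumes the default huge page size is 2 MiB and the pool is non-empty (e.g. `echo 4 > /proc/sys/vm/nr_hugepages` as root):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2 * 1024 * 1024;   /* one default-size huge page */
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap(MAP_HUGETLB)");    /* pool empty or unsupported */
                    return 1;
            }
            /* mmap() only reserved the page (HugePages_Rsvd rises); this
             * first touch faults it in from the pool (HugePages_Free drops). */
            memset(p, 0, len);
            munmap(p, len);
            return 0;
    }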
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/i915_gem_pages.c:

      16                  struct sg_table *pages,         [in __i915_gem_object_set_pages()]
      32                  drm_clflush_sg(pages);
      36          obj->mm.get_page.sg_pos = pages->sgl;
      39          obj->mm.pages = pages;
     172          struct sg_table *pages;                 [local in __i915_gem_object_unset_pages()]
     174          pages = fetch_and_zero(&obj->mm.pages);
     175          if (IS_ERR_OR_NULL(pages))
     176                  return pages;
     199          return pages;
     204          struct sg_table *pages;                 [local in __i915_gem_object_put_pages()]
          [all …]
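fetch_and_zero() at line 174 is an i915-local helper, not a core kernel API: it reads a pointer and clears the field in one expression, transferring ownership so teardown paths never see a stale obj->mm.pages. Its definition is approximately the following (from drivers/gpu/drm/i915/i915_utils.h, quoted from memory):

    #define fetch_and_zero(ptr) ({                  \
            typeof(*ptr) __T = *(ptr);              \
            *(ptr) = (typeof(*ptr))0;               \
            __T;                                    \
    })

Note this is a plain load/store pair, not an atomic exchange; callers are expected to hold the lock protecting the field.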
/kernel/linux/linux-5.10/drivers/dma-buf/heaps/system_heap.c:

      30                  __free_page(buffer->pages[pg]);   [in system_heap_free()]
      31          kfree(buffer->pages);
      54          helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,   [in system_heap_allocate()]
      55                                               sizeof(*helper_buffer->pages),
      57          if (!helper_buffer->pages) {
      70                  helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
      71                  if (!helper_buffer->pages[pg])
      95                  __free_page(helper_buffer->pages[--pg]);
      96          kfree(helper_buffer->pages);