
Searched refs:pages (Results 1 – 25 of 437), sorted by relevance


/drivers/gpu/drm/i915/gem/selftests/
huge_gem_object.c
12 struct sg_table *pages) in huge_free_pages() argument
18 for_each_sgt_page(page, sgt_iter, pages) { in huge_free_pages()
24 sg_free_table(pages); in huge_free_pages()
25 kfree(pages); in huge_free_pages()
34 struct sg_table *pages; in huge_get_pages() local
37 pages = kmalloc(sizeof(*pages), GFP); in huge_get_pages()
38 if (!pages) in huge_get_pages()
41 if (sg_alloc_table(pages, npages, GFP)) { in huge_get_pages()
42 kfree(pages); in huge_get_pages()
46 sg = pages->sgl; in huge_get_pages()
[all …]
/drivers/gpu/drm/vkms/
vkms_gem.c
37 WARN_ON(gem->pages); in vkms_gem_free_object()
61 if (obj->pages) { in vkms_gem_fault()
62 get_page(obj->pages[page_offset]); in vkms_gem_fault()
63 vmf->page = obj->pages[page_offset]; in vkms_gem_fault()
155 if (!vkms_obj->pages) { in _get_pages()
156 struct page **pages = drm_gem_get_pages(gem_obj); in _get_pages() local
158 if (IS_ERR(pages)) in _get_pages()
159 return pages; in _get_pages()
161 if (cmpxchg(&vkms_obj->pages, NULL, pages)) in _get_pages()
162 drm_gem_put_pages(gem_obj, pages, false, true); in _get_pages()
[all …]
/drivers/xen/
xlate_mmu.c
48 static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn, in xen_for_each_gfn() argument
57 page = pages[i / XEN_PFN_PER_PAGE]; in xen_for_each_gfn()
71 struct page **pages; member
99 struct page *page = info->pages[info->index++]; in remap_pte_fn()
148 struct page **pages) in xen_xlate_remap_gfn_array() argument
163 data.pages = pages; in xen_xlate_remap_gfn_array()
184 int nr, struct page **pages) in xen_xlate_unmap_gfn_range() argument
186 xen_for_each_gfn(pages, nr, unmap_gfn, NULL); in xen_xlate_unmap_gfn_range()
217 struct page **pages; in xen_xlate_map_ballooned_pages() local
226 pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL); in xen_xlate_map_ballooned_pages()
[all …]
privcmd.c
90 static void free_page_list(struct list_head *pages) in free_page_list() argument
94 list_for_each_entry_safe(p, n, pages, lru) in free_page_list()
97 INIT_LIST_HEAD(pages); in free_page_list()
339 struct page **pages = vma->vm_private_data; in mmap_batch_fn() local
344 cur_pages = &pages[st->index]; in mmap_batch_fn()
421 struct page **pages; in alloc_empty_pages() local
423 pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL); in alloc_empty_pages()
424 if (pages == NULL) in alloc_empty_pages()
427 rc = xen_alloc_unpopulated_pages(numpgs, pages); in alloc_empty_pages()
431 kfree(pages); in alloc_empty_pages()
[all …]
/drivers/gpu/drm/xen/
xen_drm_front_gem.c
30 struct page **pages; member
49 xen_obj->pages = kvmalloc_array(xen_obj->num_pages, in gem_alloc_pages_array()
51 return !xen_obj->pages ? -ENOMEM : 0; in gem_alloc_pages_array()
56 kvfree(xen_obj->pages); in gem_free_pages_array()
57 xen_obj->pages = NULL; in gem_free_pages_array()
104 xen_obj->pages); in gem_create()
120 xen_obj->pages = drm_gem_get_pages(&xen_obj->base); in gem_create()
121 if (IS_ERR(xen_obj->pages)) { in gem_create()
122 ret = PTR_ERR(xen_obj->pages); in gem_create()
123 xen_obj->pages = NULL; in gem_create()
[all …]
/drivers/gpu/drm/ttm/
ttm_page_alloc.c
247 static void ttm_pages_put(struct page *pages[], unsigned npages, in ttm_pages_put() argument
253 if (ttm_set_pages_array_wb(pages, npages)) in ttm_pages_put()
259 if (ttm_set_pages_wb(pages[i], pages_nr)) in ttm_pages_put()
262 __free_pages(pages[i], order); in ttm_pages_put()
442 static int ttm_set_pages_caching(struct page **pages, in ttm_set_pages_caching() argument
449 r = ttm_set_pages_array_uc(pages, cpages); in ttm_set_pages_caching()
454 r = ttm_set_pages_array_wc(pages, cpages); in ttm_set_pages_caching()
469 static void ttm_handle_caching_state_failure(struct list_head *pages, in ttm_handle_caching_state_failure() argument
487 static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, in ttm_alloc_new_pages() argument
519 ttm_handle_caching_state_failure(pages, in ttm_alloc_new_pages()
[all …]
ttm_tt.c
89 ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*), in ttm_tt_alloc_page_directory()
91 if (!ttm->pages) in ttm_tt_alloc_page_directory()
98 ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages, in ttm_dma_tt_alloc_page_directory()
99 sizeof(*ttm->ttm.pages) + in ttm_dma_tt_alloc_page_directory()
102 if (!ttm->ttm.pages) in ttm_dma_tt_alloc_page_directory()
104 ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages); in ttm_dma_tt_alloc_page_directory()
166 drm_clflush_pages(ttm->pages, ttm->num_pages); in ttm_tt_set_caching()
169 cur_page = ttm->pages[i]; in ttm_tt_set_caching()
185 cur_page = ttm->pages[j]; in ttm_tt_set_caching()
254 kvfree(ttm->pages); in ttm_tt_fini()
[all …]
/drivers/staging/media/ipu3/
ipu3-dmamap.c
20 static void imgu_dmamap_free_buffer(struct page **pages, in imgu_dmamap_free_buffer() argument
26 __free_page(pages[count]); in imgu_dmamap_free_buffer()
27 kvfree(pages); in imgu_dmamap_free_buffer()
36 struct page **pages; in imgu_dmamap_alloc_buffer() local
42 pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL); in imgu_dmamap_alloc_buffer()
44 if (!pages) in imgu_dmamap_alloc_buffer()
72 imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT); in imgu_dmamap_alloc_buffer()
77 pages[i++] = page++; in imgu_dmamap_alloc_buffer()
80 return pages; in imgu_dmamap_alloc_buffer()
100 struct page **pages; in imgu_dmamap_alloc() local
[all …]
/drivers/gpu/drm/vgem/
vgem_drv.c
62 kvfree(vgem_obj->pages); in vgem_gem_free_object()
89 if (obj->pages) { in vgem_gem_fault()
90 get_page(obj->pages[page_offset]); in vgem_gem_fault()
91 vmf->page = obj->pages[page_offset]; in vgem_gem_fault()
269 struct page **pages; in vgem_pin_pages() local
271 pages = drm_gem_get_pages(&bo->base); in vgem_pin_pages()
272 if (IS_ERR(pages)) { in vgem_pin_pages()
275 return pages; in vgem_pin_pages()
278 bo->pages = pages; in vgem_pin_pages()
282 return bo->pages; in vgem_pin_pages()
[all …]
/drivers/staging/media/atomisp/pci/hmm/
hmm_reserved_pool.c
53 page_obj[i].page = repool_info->pages[j]; in get_pages_from_reserved_pool()
79 repool_info->pages[repool_info->index++] = page_obj->page; in free_pages_to_reserved_pool()
95 pool_info->pages = kmalloc(sizeof(struct page *) * pool_size, in hmm_reserved_pool_setup()
97 if (unlikely(!pool_info->pages)) { in hmm_reserved_pool_setup()
120 struct page *pages; in hmm_reserved_pool_init() local
146 pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order); in hmm_reserved_pool_init()
147 if (unlikely(!pages)) { in hmm_reserved_pool_init()
163 ret = set_pages_uc(pages, blk_pgnr); in hmm_reserved_pool_init()
167 __free_pages(pages, order); in hmm_reserved_pool_init()
172 repool_info->pages[i++] = pages + j; in hmm_reserved_pool_init()
[all …]
/drivers/gpu/drm/i915/gem/
i915_gem_pages.c
16 struct sg_table *pages, in __i915_gem_object_set_pages() argument
32 drm_clflush_sg(pages); in __i915_gem_object_set_pages()
36 obj->mm.get_page.sg_pos = pages->sgl; in __i915_gem_object_set_pages()
39 obj->mm.pages = pages; in __i915_gem_object_set_pages()
172 struct sg_table *pages; in __i915_gem_object_unset_pages() local
174 pages = fetch_and_zero(&obj->mm.pages); in __i915_gem_object_unset_pages()
175 if (IS_ERR_OR_NULL(pages)) in __i915_gem_object_unset_pages()
176 return pages; in __i915_gem_object_unset_pages()
199 return pages; in __i915_gem_object_unset_pages()
204 struct sg_table *pages; in __i915_gem_object_put_pages() local
[all …]
i915_gem_phys.c
94 struct sg_table *pages) in i915_gem_object_put_pages_phys() argument
96 dma_addr_t dma = sg_dma_address(pages->sgl); in i915_gem_object_put_pages_phys()
97 void *vaddr = sg_page(pages->sgl); in i915_gem_object_put_pages_phys()
99 __i915_gem_object_release_shmem(obj, pages, false); in i915_gem_object_put_pages_phys()
129 sg_free_table(pages); in i915_gem_object_put_pages_phys()
130 kfree(pages); in i915_gem_object_put_pages_phys()
141 void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; in phys_pwrite()
172 void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; in phys_pread()
207 struct sg_table *pages; in i915_gem_object_attach_phys() local
240 pages = __i915_gem_object_unset_pages(obj); in i915_gem_object_attach_phys()
[all …]
/drivers/staging/android/ion/heaps/
ion_cma_heap.c
32 struct page *pages; in ion_cma_allocate() local
41 pages = cma_alloc(cma_heap->cma, nr_pages, align, false); in ion_cma_allocate()
42 if (!pages) in ion_cma_allocate()
45 if (PageHighMem(pages)) { in ion_cma_allocate()
47 struct page *page = pages; in ion_cma_allocate()
58 memset(page_address(pages), 0, size); in ion_cma_allocate()
69 sg_set_page(table->sgl, pages, size, 0); in ion_cma_allocate()
71 buffer->priv_virt = pages; in ion_cma_allocate()
81 cma_release(cma_heap->cma, pages, nr_pages); in ion_cma_allocate()
88 struct page *pages = buffer->priv_virt; in ion_cma_free() local
[all …]
/drivers/gpu/drm/
drm_scatter.c
51 for (i = 0; i < entry->pages; i++) { in drm_sg_cleanup()
83 unsigned long pages, i, j; in drm_legacy_sg_alloc() local
103 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; in drm_legacy_sg_alloc()
104 DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); in drm_legacy_sg_alloc()
106 entry->pages = pages; in drm_legacy_sg_alloc()
107 entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL); in drm_legacy_sg_alloc()
113 entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL); in drm_legacy_sg_alloc()
120 entry->virtual = vmalloc_32(pages << PAGE_SHIFT); in drm_legacy_sg_alloc()
131 memset(entry->virtual, 0, pages << PAGE_SHIFT); in drm_legacy_sg_alloc()
138 for (i = (unsigned long)entry->virtual, j = 0; j < pages; in drm_legacy_sg_alloc()
[all …]
/drivers/block/xen-blkback/
blkback.c
246 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; in free_persistent_gnts() local
252 unmap_data.pages = pages; in free_persistent_gnts()
265 pages[segs_to_unmap] = persistent_gnt->page; in free_persistent_gnts()
273 gnttab_page_cache_put(&ring->free_pages, pages, in free_persistent_gnts()
288 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; in xen_blkbk_unmap_purged_grants() local
294 unmap_data.pages = pages; in xen_blkbk_unmap_purged_grants()
309 pages[segs_to_unmap] = persistent_gnt->page; in xen_blkbk_unmap_purged_grants()
314 gnttab_page_cache_put(&ring->free_pages, pages, in xen_blkbk_unmap_purged_grants()
323 gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap); in xen_blkbk_unmap_purged_grants()
647 struct grant_page **pages, in xen_blkbk_unmap_prepare() argument
[all …]
/drivers/net/ethernet/amd/xgbe/
xgbe-desc.c
141 if (ring->rx_hdr_pa.pages) { in xgbe_free_ring()
144 put_page(ring->rx_hdr_pa.pages); in xgbe_free_ring()
146 ring->rx_hdr_pa.pages = NULL; in xgbe_free_ring()
152 if (ring->rx_buf_pa.pages) { in xgbe_free_ring()
155 put_page(ring->rx_buf_pa.pages); in xgbe_free_ring()
157 ring->rx_buf_pa.pages = NULL; in xgbe_free_ring()
289 struct page *pages = NULL; in xgbe_alloc_pages() local
300 pages = alloc_pages_node(node, gfp, order); in xgbe_alloc_pages()
301 if (pages) in xgbe_alloc_pages()
308 if (!pages && (node != NUMA_NO_NODE)) { in xgbe_alloc_pages()
[all …]
/drivers/gpu/drm/virtio/
virtgpu_object.c
73 if (shmem->pages) { in virtio_gpu_cleanup_object()
76 shmem->pages, DMA_TO_DEVICE, 0); in virtio_gpu_cleanup_object()
80 sg_free_table(shmem->pages); in virtio_gpu_cleanup_object()
81 kfree(shmem->pages); in virtio_gpu_cleanup_object()
82 shmem->pages = NULL; in virtio_gpu_cleanup_object()
159 shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base); in virtio_gpu_object_shmem_init()
160 if (IS_ERR(shmem->pages)) { in virtio_gpu_object_shmem_init()
162 ret = PTR_ERR(shmem->pages); in virtio_gpu_object_shmem_init()
163 shmem->pages = NULL; in virtio_gpu_object_shmem_init()
169 shmem->pages, DMA_TO_DEVICE, 0); in virtio_gpu_object_shmem_init()
[all …]
/drivers/staging/vc04_services/interface/vchiq_arm/
vchiq_2835_arm.c
44 struct page **pages; member
292 unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages); in cleanup_pagelistinfo()
312 struct page **pages; in create_pagelist() local
332 (sizeof(u32) + sizeof(pages[0]) + in create_pagelist()
338 (num_pages * sizeof(pages[0]) + in create_pagelist()
354 pages = (struct page **)(addrs + num_pages); in create_pagelist()
355 scatterlist = (struct scatterlist *)(pages + num_pages); in create_pagelist()
371 pagelistinfo->pages = pages; in create_pagelist()
393 pages[actual_pages] = pg; in create_pagelist()
403 pages); in create_pagelist()
[all …]
/drivers/gpu/drm/etnaviv/
etnaviv_gem.c
68 etnaviv_obj->pages = p; in etnaviv_gem_shmem_get_pages()
81 if (etnaviv_obj->pages) { in put_pages()
82 drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages, in put_pages()
85 etnaviv_obj->pages = NULL; in put_pages()
95 if (!etnaviv_obj->pages) { in etnaviv_gem_get_pages()
107 etnaviv_obj->pages, npages); in etnaviv_gem_get_pages()
119 return etnaviv_obj->pages; in etnaviv_gem_get_pages()
179 struct page **pages, *page; in etnaviv_gem_fault() local
192 pages = etnaviv_gem_get_pages(etnaviv_obj); in etnaviv_gem_fault()
195 if (IS_ERR(pages)) { in etnaviv_gem_fault()
[all …]
/drivers/net/ethernet/synopsys/
dwc-xlgmac-desc.c
41 if (desc_data->rx.hdr.pa.pages) in xlgmac_unmap_desc_data()
42 put_page(desc_data->rx.hdr.pa.pages); in xlgmac_unmap_desc_data()
44 if (desc_data->rx.hdr.pa_unmap.pages) { in xlgmac_unmap_desc_data()
48 put_page(desc_data->rx.hdr.pa_unmap.pages); in xlgmac_unmap_desc_data()
51 if (desc_data->rx.buf.pa.pages) in xlgmac_unmap_desc_data()
52 put_page(desc_data->rx.buf.pa.pages); in xlgmac_unmap_desc_data()
54 if (desc_data->rx.buf.pa_unmap.pages) { in xlgmac_unmap_desc_data()
58 put_page(desc_data->rx.buf.pa_unmap.pages); in xlgmac_unmap_desc_data()
93 if (ring->rx_hdr_pa.pages) { in xlgmac_free_ring()
96 put_page(ring->rx_hdr_pa.pages); in xlgmac_free_ring()
[all …]
/drivers/gpu/drm/gma500/
gtt.c
80 struct page **pages; in psb_gtt_insert() local
83 if (r->pages == NULL) { in psb_gtt_insert()
91 pages = r->pages; in psb_gtt_insert()
95 set_pages_array_wc(pages, r->npage); in psb_gtt_insert()
100 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_insert()
105 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_insert()
140 set_pages_array_wb(r->pages, r->npage); in psb_gtt_remove()
174 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_roll()
179 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), in psb_gtt_roll()
196 struct page **pages; in psb_gtt_attach_pages() local
[all …]
/drivers/gpu/drm/omapdrm/
omap_gem.c
87 struct page **pages; member
225 struct page **pages; in omap_gem_attach_pages() local
236 if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages) in omap_gem_attach_pages()
239 pages = drm_gem_get_pages(obj); in omap_gem_attach_pages()
240 if (IS_ERR(pages)) { in omap_gem_attach_pages()
241 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); in omap_gem_attach_pages()
242 return PTR_ERR(pages); in omap_gem_attach_pages()
256 addrs[i] = dma_map_page(dev->dev, pages[i], in omap_gem_attach_pages()
281 omap_obj->pages = pages; in omap_gem_attach_pages()
288 drm_gem_put_pages(obj, pages, true, false); in omap_gem_attach_pages()
[all …]
/drivers/hwtracing/coresight/
coresight-tmc-etr.c
46 void **pages; member
172 if (tmc_pages->pages && tmc_pages->pages[i]) in tmc_pages_free()
173 __free_page(tmc_pages->pages[i]); in tmc_pages_free()
176 kfree(tmc_pages->pages); in tmc_pages_free()
178 tmc_pages->pages = NULL; in tmc_pages_free()
193 enum dma_data_direction dir, void **pages) in tmc_pages_alloc() argument
205 tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages), in tmc_pages_alloc()
207 if (!tmc_pages->pages) { in tmc_pages_alloc()
214 if (pages && pages[i]) { in tmc_pages_alloc()
215 page = virt_to_page(pages[i]); in tmc_pages_alloc()
[all …]
/drivers/gpu/drm/amd/amdgpu/
amdgpu_gart.c
222 int pages) in amdgpu_gart_unbind() argument
238 for (i = 0; i < pages; i++, p++) { in amdgpu_gart_unbind()
240 adev->gart.pages[p] = NULL; in amdgpu_gart_unbind()
274 int pages, dma_addr_t *dma_addr, uint64_t flags, in amdgpu_gart_map() argument
287 for (i = 0; i < pages; i++) { in amdgpu_gart_map()
312 int pages, struct page **pagelist, dma_addr_t *dma_addr, in amdgpu_gart_bind() argument
328 for (i = 0; i < pages; i++, p++) in amdgpu_gart_bind()
329 adev->gart.pages[p] = pagelist ? pagelist[i] : NULL; in amdgpu_gart_bind()
335 r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags, in amdgpu_gart_bind()
378 adev->gart.pages = vzalloc(array_size(sizeof(void *), in amdgpu_gart_init()
[all …]
/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
mem.c
34 u64 pages; member
58 if (mem->pages == 1 && mem->mem) in nvkm_mem_addr()
66 return nvkm_mem(memory)->pages << PAGE_SHIFT; in nvkm_mem_size()
87 while (mem->pages--) { in nvkm_mem_dtor()
89 mem->dma[mem->pages], PAGE_SIZE, in nvkm_mem_dtor()
91 __free_page(mem->mem[mem->pages]); in nvkm_mem_dtor()
137 *pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL); in nvkm_mem_map_host()
183 mem->pages = size >> PAGE_SHIFT; in nvkm_mem_new_host()
204 for (mem->pages = 0; size; size--, mem->pages++) { in nvkm_mem_new_host()
209 mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev, in nvkm_mem_new_host()
[all …]
