/drivers/gpu/drm/i915/gem/selftests/ |
D | huge_gem_object.c |
    12   struct sg_table *pages)   in huge_free_pages() argument
    17   for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))   in huge_free_pages()
    20   sg_free_table(pages);   in huge_free_pages()
    21   kfree(pages);   in huge_free_pages()
    30   struct sg_table *pages;   in huge_get_pages() local
    33   pages = kmalloc(sizeof(*pages), GFP);   in huge_get_pages()
    34   if (!pages)   in huge_get_pages()
    37   if (sg_alloc_table(pages, npages, GFP)) {   in huge_get_pages()
    38   kfree(pages);   in huge_get_pages()
    42   sg = pages->sgl;   in huge_get_pages()
    [all …]
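Taken together, these hits trace the standard sg_table lifecycle: kmalloc() the table header, size it with sg_alloc_table(), walk entries with __sg_next(), then sg_free_table() plus kfree() on teardown. A minimal sketch of that pattern (simplified error handling and GFP flags; not the selftest's actual code):

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *sgt_create(unsigned int npages)
{
	struct sg_table *pages;

	pages = kmalloc(sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(pages, npages, GFP_KERNEL)) {
		kfree(pages);	/* only the header exists at this point */
		return ERR_PTR(-ENOMEM);
	}
	return pages;
}

static void sgt_destroy(struct sg_table *pages)
{
	sg_free_table(pages);	/* releases the scatterlist chunks */
	kfree(pages);		/* then the header itself */
}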
|
/drivers/gpu/drm/vkms/ |
D | vkms_gem.c |
    35   WARN_ON(gem->pages);   in vkms_gem_free_object()
    59   if (obj->pages) {   in vkms_gem_fault()
    60   get_page(obj->pages[page_offset]);   in vkms_gem_fault()
    61   vmf->page = obj->pages[page_offset];   in vkms_gem_fault()
    152  if (!vkms_obj->pages) {   in _get_pages()
    153  struct page **pages = drm_gem_get_pages(gem_obj);   in _get_pages() local
    155  if (IS_ERR(pages))   in _get_pages()
    156  return pages;   in _get_pages()
    158  if (cmpxchg(&vkms_obj->pages, NULL, pages))   in _get_pages()
    159  drm_gem_put_pages(gem_obj, pages, false, true);   in _get_pages()
    [all …]
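_get_pages() illustrates a lockless publish idiom: fetch a page array with drm_gem_get_pages(), try to install it with cmpxchg(), and if another thread already won the race, release the redundant copy with drm_gem_put_pages(). A sketch of the idiom under assumed, simplified types (not the vkms code itself):

#include <linux/atomic.h>
#include <linux/err.h>
#include <drm/drm_gem.h>

/*
 * pages_slot is assumed to be an object field that stays NULL until
 * the first caller publishes an array; any non-NULL answer is usable.
 */
static struct page **get_pages_once(struct drm_gem_object *gem_obj,
				    struct page ***pages_slot)
{
	if (!*pages_slot) {
		struct page **pages = drm_gem_get_pages(gem_obj);

		if (IS_ERR(pages))
			return pages;

		/* Non-NULL old value means we lost the race: drop ours. */
		if (cmpxchg(pages_slot, NULL, pages))
			drm_gem_put_pages(gem_obj, pages, false, true);
	}
	return *pages_slot;
}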
|
/drivers/xen/ |
D | xlate_mmu.c |
    48   static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,   in xen_for_each_gfn() argument
    57   page = pages[i / XEN_PFN_PER_PAGE];   in xen_for_each_gfn()
    71   struct page **pages;   member
    99   struct page *page = info->pages[info->index++];   in remap_pte_fn()
    148  struct page **pages)   in xen_xlate_remap_gfn_array() argument
    163  data.pages = pages;   in xen_xlate_remap_gfn_array()
    184  int nr, struct page **pages)   in xen_xlate_unmap_gfn_range() argument
    186  xen_for_each_gfn(pages, nr, unmap_gfn, NULL);   in xen_xlate_unmap_gfn_range()
    217  struct page **pages;   in xen_xlate_map_ballooned_pages() local
    226  pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);   in xen_xlate_map_ballooned_pages()
    [all …]
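The indexing at line 57 reflects that a Xen frame (XEN_PAGE_SIZE) can be smaller than the CPU page size, so frame i lives in pages[i / XEN_PFN_PER_PAGE]. A hedged sketch of such a walker — the callback shape and the pfn arithmetic are assumptions, not the file's exact code:

#include <xen/page.h>

static void for_each_xen_pfn(struct page **pages, unsigned int nr_gfn,
			     void (*fn)(unsigned long xen_pfn, void *data),
			     void *data)
{
	unsigned int i;

	for (i = 0; i < nr_gfn; i++) {
		/* XEN_PFN_PER_PAGE Xen-sized frames fit in one CPU page. */
		struct page *page = pages[i / XEN_PFN_PER_PAGE];
		unsigned long xen_pfn = page_to_xen_pfn(page) +
					(i % XEN_PFN_PER_PAGE);

		fn(xen_pfn, data);
	}
}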
|
D | privcmd.c |
    93   static void free_page_list(struct list_head *pages)   in free_page_list() argument
    97   list_for_each_entry_safe(p, n, pages, lru)   in free_page_list()
    100  INIT_LIST_HEAD(pages);   in free_page_list()
    342  struct page **pages = vma->vm_private_data;   in mmap_batch_fn() local
    347  cur_pages = &pages[st->index];   in mmap_batch_fn()
    424  struct page **pages;   in alloc_empty_pages() local
    426  pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);   in alloc_empty_pages()
    427  if (pages == NULL)   in alloc_empty_pages()
    430  rc = alloc_xenballooned_pages(numpgs, pages);   in alloc_empty_pages()
    434  kfree(pages);   in alloc_empty_pages()
    [all …]
|
/drivers/gpu/drm/xen/ |
D | xen_drm_front_gem.c |
    29   struct page **pages;   member
    48   xen_obj->pages = kvmalloc_array(xen_obj->num_pages,   in gem_alloc_pages_array()
    50   return !xen_obj->pages ? -ENOMEM : 0;   in gem_alloc_pages_array()
    55   kvfree(xen_obj->pages);   in gem_free_pages_array()
    56   xen_obj->pages = NULL;   in gem_free_pages_array()
    103  xen_obj->pages);   in gem_create()
    119  xen_obj->pages = drm_gem_get_pages(&xen_obj->base);   in gem_create()
    120  if (IS_ERR_OR_NULL(xen_obj->pages)) {   in gem_create()
    121  ret = PTR_ERR(xen_obj->pages);   in gem_create()
    122  xen_obj->pages = NULL;   in gem_create()
    [all …]
|
/drivers/gpu/drm/ttm/ |
D | ttm_page_alloc.c |
    247  static void ttm_pages_put(struct page *pages[], unsigned npages,   in ttm_pages_put() argument
    253  if (ttm_set_pages_array_wb(pages, npages))   in ttm_pages_put()
    259  if (ttm_set_pages_wb(pages[i], pages_nr))   in ttm_pages_put()
    262  __free_pages(pages[i], order);   in ttm_pages_put()
    442  static int ttm_set_pages_caching(struct page **pages,   in ttm_set_pages_caching() argument
    449  r = ttm_set_pages_array_uc(pages, cpages);   in ttm_set_pages_caching()
    454  r = ttm_set_pages_array_wc(pages, cpages);   in ttm_set_pages_caching()
    469  static void ttm_handle_caching_state_failure(struct list_head *pages,   in ttm_handle_caching_state_failure() argument
    487  static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,   in ttm_alloc_new_pages() argument
    519  ttm_handle_caching_state_failure(pages,   in ttm_alloc_new_pages()
    [all …]
|
D | ttm_tt.c |
    87   ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),   in ttm_tt_alloc_page_directory()
    89   if (!ttm->pages)   in ttm_tt_alloc_page_directory()
    96   ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,   in ttm_dma_tt_alloc_page_directory()
    97   sizeof(*ttm->ttm.pages) +   in ttm_dma_tt_alloc_page_directory()
    100  if (!ttm->ttm.pages)   in ttm_dma_tt_alloc_page_directory()
    102  ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);   in ttm_dma_tt_alloc_page_directory()
    164  drm_clflush_pages(ttm->pages, ttm->num_pages);   in ttm_tt_set_caching()
    167  cur_page = ttm->pages[i];   in ttm_tt_set_caching()
    183  cur_page = ttm->pages[j];   in ttm_tt_set_caching()
    254  kvfree(ttm->pages);   in ttm_tt_fini()
    [all …]
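ttm_dma_tt_alloc_page_directory() co-allocates two arrays in one kvmalloc_array() call, sizing each element as a struct page * plus a dma_addr_t and pointing dma_address just past the page array; one later kvfree() releases both. A minimal sketch of that layout trick (function and parameter names are simplified stand-ins):

#include <linux/mm.h>
#include <linux/types.h>

static int alloc_page_directory(unsigned long num_pages,
				struct page ***p_pages,
				dma_addr_t **p_dma)
{
	struct page **pages;

	/* One zeroed block carries both arrays back to back. */
	pages = kvmalloc_array(num_pages,
			       sizeof(*pages) + sizeof(**p_dma),
			       GFP_KERNEL | __GFP_ZERO);
	if (!pages)
		return -ENOMEM;

	*p_pages = pages;
	*p_dma = (dma_addr_t *)(pages + num_pages);	/* tail of the block */
	return 0;
}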
|
/drivers/staging/media/ipu3/ |
D | ipu3-dmamap.c |
    20   static void imgu_dmamap_free_buffer(struct page **pages,   in imgu_dmamap_free_buffer() argument
    26   __free_page(pages[count]);   in imgu_dmamap_free_buffer()
    27   kvfree(pages);   in imgu_dmamap_free_buffer()
    36   struct page **pages;   in imgu_dmamap_alloc_buffer() local
    42   pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);   in imgu_dmamap_alloc_buffer()
    44   if (!pages)   in imgu_dmamap_alloc_buffer()
    72   imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);   in imgu_dmamap_alloc_buffer()
    77   pages[i++] = page++;   in imgu_dmamap_alloc_buffer()
    80   return pages;   in imgu_dmamap_alloc_buffer()
    99   struct page **pages;   in imgu_dmamap_alloc() local
    [all …]
|
/drivers/gpu/drm/vgem/ |
D | vgem_drv.c |
    61   kvfree(vgem_obj->pages);   in vgem_gem_free_object()
    88   if (obj->pages) {   in vgem_gem_fault()
    89   get_page(obj->pages[page_offset]);   in vgem_gem_fault()
    90   vmf->page = obj->pages[page_offset];   in vgem_gem_fault()
    291  struct page **pages;   in vgem_pin_pages() local
    293  pages = drm_gem_get_pages(&bo->base);   in vgem_pin_pages()
    294  if (IS_ERR(pages)) {   in vgem_pin_pages()
    297  return pages;   in vgem_pin_pages()
    300  bo->pages = pages;   in vgem_pin_pages()
    304  return bo->pages;   in vgem_pin_pages()
    [all …]
|
/drivers/gpu/drm/i915/gem/ |
D | i915_gem_pages.c |
    12   struct sg_table *pages,   in __i915_gem_object_set_pages() argument
    25   drm_clflush_sg(pages);   in __i915_gem_object_set_pages()
    29   obj->mm.get_page.sg_pos = pages->sgl;   in __i915_gem_object_set_pages()
    32   obj->mm.pages = pages;   in __i915_gem_object_set_pages()
    156  struct sg_table *pages;   in __i915_gem_object_unset_pages() local
    158  pages = fetch_and_zero(&obj->mm.pages);   in __i915_gem_object_unset_pages()
    159  if (IS_ERR_OR_NULL(pages))   in __i915_gem_object_unset_pages()
    160  return pages;   in __i915_gem_object_unset_pages()
    179  return pages;   in __i915_gem_object_unset_pages()
    185  struct sg_table *pages;   in __i915_gem_object_put_pages() local
    [all …]
|
/drivers/gpu/drm/lima/ |
D | lima_object.c |
    13   kfree(bo->pages);   in lima_bo_destroy()
    27   if (bo->pages)   in lima_bo_destroy()
    28   drm_gem_put_pages(&bo->gem, bo->pages, true, true);   in lima_bo_destroy()
    81   bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);   in lima_bo_create()
    82   if (!bo->pages) {   in lima_bo_create()
    88   sgt, bo->pages, bo->pages_dma_addr, npages);   in lima_bo_create()
    95   bo->pages = drm_gem_get_pages(&bo->gem);   in lima_bo_create()
    96   if (IS_ERR(bo->pages)) {   in lima_bo_create()
    97   ret = ERR_CAST(bo->pages);   in lima_bo_create()
    98   bo->pages = NULL;   in lima_bo_create()
    [all …]
|
/drivers/gpu/drm/udl/ |
D | udl_gem.c |
    112  if (!obj->pages)   in udl_gem_fault()
    115  page = obj->pages[page_offset];   in udl_gem_fault()
    121  struct page **pages;   in udl_gem_get_pages() local
    123  if (obj->pages)   in udl_gem_get_pages()
    126  pages = drm_gem_get_pages(&obj->base);   in udl_gem_get_pages()
    127  if (IS_ERR(pages))   in udl_gem_get_pages()
    128  return PTR_ERR(pages);   in udl_gem_get_pages()
    130  obj->pages = pages;   in udl_gem_get_pages()
    138  kvfree(obj->pages);   in udl_gem_put_pages()
    139  obj->pages = NULL;   in udl_gem_put_pages()
    [all …]
|
/drivers/staging/android/ion/heaps/ |
D | ion_cma_heap.c |
    32   struct page *pages;   in ion_cma_allocate() local
    41   pages = cma_alloc(cma_heap->cma, nr_pages, align, false);   in ion_cma_allocate()
    42   if (!pages)   in ion_cma_allocate()
    45   if (PageHighMem(pages)) {   in ion_cma_allocate()
    47   struct page *page = pages;   in ion_cma_allocate()
    58   memset(page_address(pages), 0, size);   in ion_cma_allocate()
    69   sg_set_page(table->sgl, pages, size, 0);   in ion_cma_allocate()
    71   buffer->priv_virt = pages;   in ion_cma_allocate()
    81   cma_release(cma_heap->cma, pages, nr_pages);   in ion_cma_allocate()
    88   struct page *pages = buffer->priv_virt;   in ion_cma_free() local
    [all …]
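ion_cma_allocate() zeroes the CMA region two ways: highmem pages have no permanent kernel mapping, so they are cleared one page at a time through a temporary kmap_atomic() window, while lowmem can be memset() directly via page_address(). A sketch of that branch (loop bounds and helper name simplified; not the heap's exact code):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static void zero_cma_pages(struct page *pages, unsigned long nr_pages,
			   size_t size)
{
	if (PageHighMem(pages)) {
		struct page *page = pages;
		unsigned long remaining = nr_pages;

		while (remaining--) {
			void *vaddr = kmap_atomic(page);	/* short-lived window */

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			page++;
		}
	} else {
		memset(page_address(pages), 0, size);	/* direct map */
	}
}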
|
/drivers/net/ethernet/amd/xgbe/ |
D | xgbe-desc.c |
    141  if (ring->rx_hdr_pa.pages) {   in xgbe_free_ring()
    144  put_page(ring->rx_hdr_pa.pages);   in xgbe_free_ring()
    146  ring->rx_hdr_pa.pages = NULL;   in xgbe_free_ring()
    152  if (ring->rx_buf_pa.pages) {   in xgbe_free_ring()
    155  put_page(ring->rx_buf_pa.pages);   in xgbe_free_ring()
    157  ring->rx_buf_pa.pages = NULL;   in xgbe_free_ring()
    289  struct page *pages = NULL;   in xgbe_alloc_pages() local
    300  pages = alloc_pages_node(node, gfp, order);   in xgbe_alloc_pages()
    301  if (pages)   in xgbe_alloc_pages()
    308  if (!pages && (node != NUMA_NO_NODE)) {   in xgbe_alloc_pages()
    [all …]
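xgbe_alloc_pages() degrades gracefully under memory pressure: it steps the allocation order down on the preferred NUMA node and, only when that node is exhausted, retries without the node constraint. A hedged sketch of the fallback (helper name and gfp flags are assumptions, not the driver's code):

#include <linux/gfp.h>
#include <linux/numa.h>

static struct page *alloc_rx_pages(int node, gfp_t gfp, int max_order)
{
	struct page *pages = NULL;
	int order;

	/* Try the preferred node, halving the request on each failure. */
	for (order = max_order; order >= 0; order--) {
		pages = alloc_pages_node(node, gfp, order);
		if (pages)
			break;
	}

	/* Preferred node exhausted: retry from the top on any node. */
	if (!pages && node != NUMA_NO_NODE)
		pages = alloc_rx_pages(NUMA_NO_NODE, gfp, max_order);

	return pages;
}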
|
/drivers/misc/mic/scif/ |
D | scif_mmap.c |
    213  struct scif_range **pages)   in scif_get_pages() argument
    252  *pages = kzalloc(sizeof(**pages), GFP_KERNEL);   in scif_get_pages()
    253  if (!*pages) {   in scif_get_pages()
    259  (*pages)->phys_addr = scif_zalloc(nr_pages * sizeof(dma_addr_t));   in scif_get_pages()
    260  if (!((*pages)->phys_addr)) {   in scif_get_pages()
    267  ((*pages)->va = scif_zalloc(nr_pages * sizeof(void *)));   in scif_get_pages()
    268  if (!(*pages)->va) {   in scif_get_pages()
    274  (*pages)->cookie = window;   in scif_get_pages()
    275  (*pages)->nr_pages = nr_pages;   in scif_get_pages()
    276  (*pages)->prot_flags = window->prot;   in scif_get_pages()
    [all …]
|
/drivers/gpu/drm/ |
D | drm_scatter.c |
    60   for (i = 0; i < entry->pages; i++) {   in drm_sg_cleanup()
    92   unsigned long pages, i, j;   in drm_legacy_sg_alloc() local
    109  pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;   in drm_legacy_sg_alloc()
    110  DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);   in drm_legacy_sg_alloc()
    112  entry->pages = pages;   in drm_legacy_sg_alloc()
    113  entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);   in drm_legacy_sg_alloc()
    119  entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);   in drm_legacy_sg_alloc()
    126  entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);   in drm_legacy_sg_alloc()
    137  memset(entry->virtual, 0, pages << PAGE_SHIFT);   in drm_legacy_sg_alloc()
    144  for (i = (unsigned long)entry->virtual, j = 0; j < pages;   in drm_legacy_sg_alloc()
    [all …]
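Line 109 is the classic round-up-to-pages computation; the kernel spells the same thing DIV_ROUND_UP(), and the shifts at lines 126 and 137 convert the page count back to bytes. As a tiny equivalence sketch (not the file's code):

#include <linux/kernel.h>	/* DIV_ROUND_UP */
#include <linux/mm.h>		/* PAGE_SIZE, PAGE_SHIFT */

static unsigned long size_to_pages(unsigned long size)
{
	/* Same as (size + PAGE_SIZE - 1) / PAGE_SIZE at line 109. */
	return DIV_ROUND_UP(size, PAGE_SIZE);
}

static unsigned long pages_to_bytes(unsigned long pages)
{
	return pages << PAGE_SHIFT;	/* >= the original byte size */
}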
|
/drivers/gpu/drm/omapdrm/ |
D | omap_gem.c |
    87   struct page **pages;   member
    225  struct page **pages;   in omap_gem_attach_pages() local
    236  if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)   in omap_gem_attach_pages()
    239  pages = drm_gem_get_pages(obj);   in omap_gem_attach_pages()
    240  if (IS_ERR(pages)) {   in omap_gem_attach_pages()
    241  dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));   in omap_gem_attach_pages()
    242  return PTR_ERR(pages);   in omap_gem_attach_pages()
    256  addrs[i] = dma_map_page(dev->dev, pages[i],   in omap_gem_attach_pages()
    281  omap_obj->pages = pages;   in omap_gem_attach_pages()
    288  drm_gem_put_pages(obj, pages, true, false);   in omap_gem_attach_pages()
    [all …]
|
/drivers/gpu/drm/virtio/ |
D | virtgpu_object.c |
    70   if (bo->pages)   in virtio_gpu_ttm_bo_destroy()
    201  struct page **pages = bo->tbo.ttm->pages;   in virtio_gpu_object_get_sg_table() local
    210  if (bo->pages)   in virtio_gpu_object_get_sg_table()
    215  bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);   in virtio_gpu_object_get_sg_table()
    216  if (!bo->pages)   in virtio_gpu_object_get_sg_table()
    223  ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,   in virtio_gpu_object_get_sg_table()
    230  kfree(bo->pages);   in virtio_gpu_object_get_sg_table()
    231  bo->pages = NULL;   in virtio_gpu_object_get_sg_table()
    237  sg_free_table(bo->pages);   in virtio_gpu_object_free_sg_table()
    238  kfree(bo->pages);   in virtio_gpu_object_free_sg_table()
    [all …]
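Here an sg_table is built over an existing TTM page array rather than over freshly allocated pages. A sketch using the plain sg_alloc_table_from_pages() variant (the __ version above additionally takes a maximum segment size); the buffer is assumed to start at offset 0 and span whole pages:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *wrap_pages_in_sgt(struct page **pages,
					  unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	/* Coalesces physically contiguous pages into fewer entries. */
	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					(unsigned long)nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return ERR_PTR(ret);
	}
	return sgt;
}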
|
/drivers/net/ethernet/synopsys/ |
D | dwc-xlgmac-desc.c |
    41   if (desc_data->rx.hdr.pa.pages)   in xlgmac_unmap_desc_data()
    42   put_page(desc_data->rx.hdr.pa.pages);   in xlgmac_unmap_desc_data()
    44   if (desc_data->rx.hdr.pa_unmap.pages) {   in xlgmac_unmap_desc_data()
    48   put_page(desc_data->rx.hdr.pa_unmap.pages);   in xlgmac_unmap_desc_data()
    51   if (desc_data->rx.buf.pa.pages)   in xlgmac_unmap_desc_data()
    52   put_page(desc_data->rx.buf.pa.pages);   in xlgmac_unmap_desc_data()
    54   if (desc_data->rx.buf.pa_unmap.pages) {   in xlgmac_unmap_desc_data()
    58   put_page(desc_data->rx.buf.pa_unmap.pages);   in xlgmac_unmap_desc_data()
    93   if (ring->rx_hdr_pa.pages) {   in xlgmac_free_ring()
    96   put_page(ring->rx_hdr_pa.pages);   in xlgmac_free_ring()
    [all …]
|
/drivers/gpu/drm/gma500/ |
D | gtt.c |
    80   struct page **pages;   in psb_gtt_insert() local
    83   if (r->pages == NULL) {   in psb_gtt_insert()
    91   pages = r->pages;   in psb_gtt_insert()
    95   set_pages_array_wc(pages, r->npage);   in psb_gtt_insert()
    100  pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),   in psb_gtt_insert()
    105  pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),   in psb_gtt_insert()
    140  set_pages_array_wb(r->pages, r->npage);   in psb_gtt_remove()
    174  pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),   in psb_gtt_roll()
    179  pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),   in psb_gtt_roll()
    196  struct page **pages;   in psb_gtt_attach_pages() local
    [all …]
|
/drivers/block/xen-blkback/ |
D | blkback.c |
    308  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];   in free_persistent_gnts() local
    314  unmap_data.pages = pages;   in free_persistent_gnts()
    327  pages[segs_to_unmap] = persistent_gnt->page;   in free_persistent_gnts()
    335  put_free_pages(ring, pages, segs_to_unmap);   in free_persistent_gnts()
    349  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];   in xen_blkbk_unmap_purged_grants() local
    355  unmap_data.pages = pages;   in xen_blkbk_unmap_purged_grants()
    370  pages[segs_to_unmap] = persistent_gnt->page;   in xen_blkbk_unmap_purged_grants()
    375  put_free_pages(ring, pages, segs_to_unmap);   in xen_blkbk_unmap_purged_grants()
    383  put_free_pages(ring, pages, segs_to_unmap);   in xen_blkbk_unmap_purged_grants()
    696  struct grant_page **pages,   in xen_blkbk_unmap_prepare() argument
    [all …]
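Both teardown paths batch grant unmaps: pages collect in a fixed on-stack array, which is flushed whenever BLKIF_MAX_SEGMENTS_PER_REQUEST entries accumulate, with one final flush for the remainder. A schematic of that batch-and-flush loop; flush_batch() and next_page() are hypothetical stand-ins for the gnttab unmap plus put_free_pages() step and the rbtree walk:

struct page;

#define BATCH_MAX 11	/* plays the role of BLKIF_MAX_SEGMENTS_PER_REQUEST */

static void flush_batch(struct page **pages, unsigned int n);
static struct page *next_page(void);

static void unmap_all(void)
{
	struct page *pages[BATCH_MAX];
	struct page *page;
	unsigned int n = 0;

	while ((page = next_page())) {
		pages[n++] = page;
		if (n == BATCH_MAX) {	/* batch full: unmap and recycle */
			flush_batch(pages, n);
			n = 0;
		}
	}
	if (n)
		flush_batch(pages, n);	/* flush the remainder */
}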
|
/drivers/hwtracing/coresight/ |
D | coresight-tmc-etr.c |
    46   void **pages;   member
    171  if (tmc_pages->pages && tmc_pages->pages[i])   in tmc_pages_free()
    172  __free_page(tmc_pages->pages[i]);   in tmc_pages_free()
    175  kfree(tmc_pages->pages);   in tmc_pages_free()
    177  tmc_pages->pages = NULL;   in tmc_pages_free()
    192  enum dma_data_direction dir, void **pages)   in tmc_pages_alloc() argument
    204  tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),   in tmc_pages_alloc()
    206  if (!tmc_pages->pages) {   in tmc_pages_alloc()
    213  if (pages && pages[i]) {   in tmc_pages_alloc()
    214  page = virt_to_page(pages[i]);   in tmc_pages_alloc()
    [all …]
|
/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
D | mem.c |
    34   u64 pages;   member
    58   if (mem->pages == 1 && mem->mem)   in nvkm_mem_addr()
    66   return nvkm_mem(memory)->pages << PAGE_SHIFT;   in nvkm_mem_size()
    87   while (mem->pages--) {   in nvkm_mem_dtor()
    89   mem->dma[mem->pages], PAGE_SIZE,   in nvkm_mem_dtor()
    91   __free_page(mem->mem[mem->pages]);   in nvkm_mem_dtor()
    137  *pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);   in nvkm_mem_map_host()
    183  mem->pages = size >> PAGE_SHIFT;   in nvkm_mem_new_host()
    204  for (mem->pages = 0; size; size--, mem->pages++) {   in nvkm_mem_new_host()
    209  mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,   in nvkm_mem_new_host()
    [all …]
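nvkm_mem_dtor() walks the arrays backwards with `while (mem->pages--)`, undoing the per-page dma_map_page() from allocation before freeing each page. A hedged sketch of the teardown half, with the struct fields passed as plain parameters (direction flag assumed):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Teardown sketch: unmap, then free, in reverse allocation order. */
static void mem_dtor(struct device *dev, struct page **mem,
		     dma_addr_t *dma, u64 pages)
{
	while (pages--) {
		dma_unmap_page(dev, dma[pages], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(mem[pages]);
	}
}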
|
/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_gart.c |
    222  int pages)   in amdgpu_gart_unbind() argument
    238  for (i = 0; i < pages; i++, p++) {   in amdgpu_gart_unbind()
    240  adev->gart.pages[p] = NULL;   in amdgpu_gart_unbind()
    274  int pages, dma_addr_t *dma_addr, uint64_t flags,   in amdgpu_gart_map() argument
    287  for (i = 0; i < pages; i++) {   in amdgpu_gart_map()
    311  int pages, struct page **pagelist, dma_addr_t *dma_addr,   in amdgpu_gart_bind() argument
    327  for (i = 0; i < pages; i++, p++)   in amdgpu_gart_bind()
    328  adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;   in amdgpu_gart_bind()
    334  r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,   in amdgpu_gart_bind()
    377  adev->gart.pages = vzalloc(array_size(sizeof(void *),   in amdgpu_gart_init()
    [all …]
|
/drivers/staging/vc04_services/interface/vchiq_arm/ |
D | vchiq_2835_arm.c |
    43   struct page **pages;   member
    340  put_page(pagelistinfo->pages[i]);   in cleanup_pagelistinfo()
    360  struct page **pages;   in create_pagelist() local
    377  (sizeof(u32) + sizeof(pages[0]) +   in create_pagelist()
    383  (num_pages * sizeof(pages[0]) +   in create_pagelist()
    399  pages = (struct page **)(addrs + num_pages);   in create_pagelist()
    400  scatterlist = (struct scatterlist *)(pages + num_pages);   in create_pagelist()
    416  pagelistinfo->pages = pages;   in create_pagelist()
    437  pages[actual_pages] = pg;   in create_pagelist()
    447  pages);   in create_pagelist()
    [all …]
|