/drivers/gpu/drm/i915/gem/selftests/
  huge_gem_object.c
      12  struct sg_table *pages)  in huge_free_pages() argument
      18  for_each_sgt_page(page, sgt_iter, pages) {  in huge_free_pages()
      24  sg_free_table(pages);  in huge_free_pages()
      25  kfree(pages);  in huge_free_pages()
      34  struct sg_table *pages;  in huge_get_pages() local
      37  pages = kmalloc(sizeof(*pages), GFP);  in huge_get_pages()
      38  if (!pages)  in huge_get_pages()
      41  if (sg_alloc_table(pages, npages, GFP)) {  in huge_get_pages()
      42  kfree(pages);  in huge_get_pages()
      46  sg = pages->sgl;  in huge_get_pages()
    [all …]
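The huge_get_pages()/huge_free_pages() hits above show the usual sg_table life cycle: kmalloc() the table header, sg_alloc_table() the entries, and tear down in the reverse order. A minimal sketch of that pairing, with illustrative demo_* names rather than the selftest's own:

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Illustrative helpers, not the selftest's own. */
static struct sg_table *demo_alloc_sgt(unsigned int npages)
{
	struct sg_table *pages;

	pages = kmalloc(sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	if (sg_alloc_table(pages, npages, GFP_KERNEL)) {
		kfree(pages);		/* entry allocation failed; undo the header */
		return NULL;
	}
	return pages;
}

static void demo_free_sgt(struct sg_table *pages)
{
	sg_free_table(pages);		/* frees the scatterlist entries */
	kfree(pages);			/* then the table header itself */
}

sg_free_table() only releases the scatterlist entries, so the separately kmalloc'd header still needs its own kfree(); the hits at lines 24-25 and 41-42 above are exactly that pairing.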
/drivers/xen/
  xlate_mmu.c
      48  static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,  in xen_for_each_gfn() argument
      57  page = pages[i / XEN_PFN_PER_PAGE];  in xen_for_each_gfn()
      71  struct page **pages;  member
      99  struct page *page = info->pages[info->index++];  in remap_pte_fn()
     148  struct page **pages)  in xen_xlate_remap_gfn_array() argument
     163  data.pages = pages;  in xen_xlate_remap_gfn_array()
     184  int nr, struct page **pages)  in xen_xlate_unmap_gfn_range() argument
     186  xen_for_each_gfn(pages, nr, unmap_gfn, NULL);  in xen_xlate_unmap_gfn_range()
     217  struct page **pages;  in xen_xlate_map_ballooned_pages() local
     226  pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);  in xen_xlate_map_ballooned_pages()
    [all …]
  privcmd.c
      90  static void free_page_list(struct list_head *pages)  in free_page_list() argument
      94  list_for_each_entry_safe(p, n, pages, lru)  in free_page_list()
      97  INIT_LIST_HEAD(pages);  in free_page_list()
     339  struct page **pages = vma->vm_private_data;  in mmap_batch_fn() local
     344  cur_pages = &pages[st->index];  in mmap_batch_fn()
     421  struct page **pages;  in alloc_empty_pages() local
     423  pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);  in alloc_empty_pages()
     424  if (pages == NULL)  in alloc_empty_pages()
     427  rc = xen_alloc_unpopulated_pages(numpgs, pages);  in alloc_empty_pages()
     431  kvfree(pages);  in alloc_empty_pages()
    [all …]
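free_page_list() above drains a list of pages; the _safe iterator is required because each entry is freed, and with it its list linkage, while the walk is still in progress. A sketch of the idiom (the demo_ prefix is illustrative):

#include <linux/list.h>
#include <linux/mm.h>

static void demo_free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	/* _safe: the next entry is fetched before p is freed */
	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);	/* leave an empty, reusable head behind */
}

Once a page is returned to the allocator its lru linkage can be reused at any time, so the next pointer has to be fetched beforehand, which is what the _safe variant does.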
/drivers/gpu/drm/xen/
  xen_drm_front_gem.c
      30  struct page **pages;  member
      49  xen_obj->pages = kvmalloc_array(xen_obj->num_pages,  in gem_alloc_pages_array()
      51  return !xen_obj->pages ? -ENOMEM : 0;  in gem_alloc_pages_array()
      56  kvfree(xen_obj->pages);  in gem_free_pages_array()
      57  xen_obj->pages = NULL;  in gem_free_pages_array()
     119  xen_obj->pages);  in gem_create()
     135  xen_obj->pages = drm_gem_get_pages(&xen_obj->base);  in gem_create()
     136  if (IS_ERR(xen_obj->pages)) {  in gem_create()
     137  ret = PTR_ERR(xen_obj->pages);  in gem_create()
     138  xen_obj->pages = NULL;  in gem_create()
    [all …]
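gem_alloc_pages_array()/gem_free_pages_array() above manage a bare array of page pointers: kvmalloc_array() transparently falls back from kmalloc to vmalloc when the array is large, and kvfree() frees either kind. A sketch with an illustrative demo_obj in place of the driver's object type:

#include <linux/mm.h>
#include <linux/slab.h>

struct demo_obj {
	size_t num_pages;
	struct page **pages;
};

static int demo_alloc_pages_array(struct demo_obj *obj)
{
	obj->pages = kvmalloc_array(obj->num_pages, sizeof(struct page *),
				    GFP_KERNEL);
	return !obj->pages ? -ENOMEM : 0;
}

static void demo_free_pages_array(struct demo_obj *obj)
{
	kvfree(obj->pages);	/* handles both kmalloc and vmalloc backing */
	obj->pages = NULL;	/* guard later cleanup against a double free */
}

Nulling the pointer after kvfree() keeps a second cleanup pass from freeing it again, the same reason line 57 above stores NULL.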
/drivers/staging/media/ipu3/
  ipu3-dmamap.c
      20  static void imgu_dmamap_free_buffer(struct page **pages,  in imgu_dmamap_free_buffer() argument
      26  __free_page(pages[count]);  in imgu_dmamap_free_buffer()
      27  kvfree(pages);  in imgu_dmamap_free_buffer()
      36  struct page **pages;  in imgu_dmamap_alloc_buffer() local
      42  pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);  in imgu_dmamap_alloc_buffer()
      44  if (!pages)  in imgu_dmamap_alloc_buffer()
      72  imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);  in imgu_dmamap_alloc_buffer()
      77  pages[i++] = page++;  in imgu_dmamap_alloc_buffer()
      80  return pages;  in imgu_dmamap_alloc_buffer()
     100  struct page **pages;  in imgu_dmamap_alloc() local
    [all …]
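imgu_dmamap_alloc_buffer() above has to unwind on mid-loop failure: only the pages already allocated may be freed before the array itself goes. A condensed sketch of that pattern (the real function allocates in higher-order chunks; this version is page-at-a-time, and the demo_ name is illustrative):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

static struct page **demo_alloc_buffer(unsigned int count)
{
	struct page **pages;
	unsigned int i;

	pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < count; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			while (i--)
				__free_page(pages[i]);	/* free only what we got */
			kvfree(pages);
			return NULL;
		}
	}
	return pages;
}

The while (i--) form walks back over exactly the slots that were filled, which is why i must not be reused between the loop and the unwind.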
/drivers/gpu/drm/vgem/
  vgem_drv.c
      64  kvfree(vgem_obj->pages);  in vgem_gem_free_object()
      91  if (obj->pages) {  in vgem_gem_fault()
      92  get_page(obj->pages[page_offset]);  in vgem_gem_fault()
      93  vmf->page = obj->pages[page_offset];  in vgem_gem_fault()
     273  struct page **pages;  in vgem_pin_pages() local
     275  pages = drm_gem_get_pages(&bo->base);  in vgem_pin_pages()
     276  if (IS_ERR(pages)) {  in vgem_pin_pages()
     279  return pages;  in vgem_pin_pages()
     282  bo->pages = pages;  in vgem_pin_pages()
     286  return bo->pages;  in vgem_pin_pages()
    [all …]
/drivers/staging/media/atomisp/pci/hmm/
  hmm_reserved_pool.c
      53  page_obj[i].page = repool_info->pages[j];  in get_pages_from_reserved_pool()
      79  repool_info->pages[repool_info->index++] = page_obj->page;  in free_pages_to_reserved_pool()
      95  pool_info->pages = kmalloc(sizeof(struct page *) * pool_size,  in hmm_reserved_pool_setup()
      97  if (unlikely(!pool_info->pages)) {  in hmm_reserved_pool_setup()
     120  struct page *pages;  in hmm_reserved_pool_init() local
     146  pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);  in hmm_reserved_pool_init()
     147  if (unlikely(!pages)) {  in hmm_reserved_pool_init()
     163  ret = set_pages_uc(pages, blk_pgnr);  in hmm_reserved_pool_init()
     167  __free_pages(pages, order);  in hmm_reserved_pool_init()
     172  repool_info->pages[i++] = pages + j;  in hmm_reserved_pool_init()
    [all …]
/drivers/media/common/videobuf2/
  frame_vector.c
      78  struct page **pages;  in put_vaddr_frames() local
      82  pages = frame_vector_pages(vec);  in put_vaddr_frames()
      88  if (WARN_ON(IS_ERR(pages)))  in put_vaddr_frames()
      91  unpin_user_pages(pages, vec->nr_frames);  in put_vaddr_frames()
     110  struct page **pages;  in frame_vector_to_pages() local
     118  pages = (struct page **)nums;  in frame_vector_to_pages()
     120  pages[i] = pfn_to_page(nums[i]);  in frame_vector_to_pages()
     136  struct page **pages;  in frame_vector_to_pfns() local
     140  pages = (struct page **)(vec->ptrs);  in frame_vector_to_pfns()
     141  nums = (unsigned long *)pages;  in frame_vector_to_pfns()
    [all …]
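frame_vector_to_pages() and frame_vector_to_pfns() above convert between pfns and page pointers in place, reusing one allocation for both representations; this leans on the kernel-wide assumption that sizeof(unsigned long) == sizeof(void *). A sketch of the forward direction (the demo_ name is illustrative):

#include <linux/mm.h>

static void demo_pfns_to_pages(unsigned long *nums, unsigned int n)
{
	/* Same storage, reinterpreted: each slot is read as a pfn,
	 * then overwritten with the corresponding struct page pointer.
	 */
	struct page **pages = (struct page **)nums;
	unsigned int i;

	for (i = 0; i < n; i++)
		pages[i] = pfn_to_page(nums[i]);
}

frame_vector_to_pfns() is the same trick run in the opposite direction.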
/drivers/gpu/drm/i915/gem/
  i915_gem_pages.c
      16  struct sg_table *pages,  in __i915_gem_object_set_pages() argument
      33  drm_clflush_sg(pages);  in __i915_gem_object_set_pages()
      37  obj->mm.get_page.sg_pos = pages->sgl;  in __i915_gem_object_set_pages()
      39  obj->mm.get_dma_page.sg_pos = pages->sgl;  in __i915_gem_object_set_pages()
      42  obj->mm.pages = pages;  in __i915_gem_object_set_pages()
     201  struct sg_table *pages;  in __i915_gem_object_unset_pages() local
     205  pages = fetch_and_zero(&obj->mm.pages);  in __i915_gem_object_unset_pages()
     206  if (IS_ERR_OR_NULL(pages))  in __i915_gem_object_unset_pages()
     207  return pages;  in __i915_gem_object_unset_pages()
     230  return pages;  in __i915_gem_object_unset_pages()
    [all …]
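__i915_gem_object_unset_pages() above takes ownership of obj->mm.pages with fetch_and_zero(), an i915-local macro that reads a pointer and nulls it in one expression (the object's lock makes this safe; it is not an atomic operation). A sketch of the same take-ownership move with the macro open-coded; demo_unset_pages() is illustrative:

#include <linux/err.h>
#include <linux/scatterlist.h>

static struct sg_table *demo_unset_pages(struct sg_table **slot)
{
	struct sg_table *pages = *slot;	/* take the current value... */

	*slot = NULL;			/* ...and leave NULL behind */

	if (IS_ERR_OR_NULL(pages))
		return pages;		/* never set, or set to an error */

	/* the real code tears down CPU/GPU mappings here */
	return pages;
}

Returning ERR_PTR values unchanged lets a caller that stored an error through the same field propagate it without a special case.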
  i915_gem_phys.c
      95  struct sg_table *pages)  in i915_gem_object_put_pages_phys() argument
      97  dma_addr_t dma = sg_dma_address(pages->sgl);  in i915_gem_object_put_pages_phys()
      98  void *vaddr = sg_page(pages->sgl);  in i915_gem_object_put_pages_phys()
     100  __i915_gem_object_release_shmem(obj, pages, false);  in i915_gem_object_put_pages_phys()
     130  sg_free_table(pages);  in i915_gem_object_put_pages_phys()
     131  kfree(pages);  in i915_gem_object_put_pages_phys()
     141  void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;  in i915_gem_object_pwrite_phys()
     171  void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;  in i915_gem_object_pread_phys()
     190  struct sg_table *pages;  in i915_gem_object_shmem_to_phys() local
     193  pages = __i915_gem_object_unset_pages(obj);  in i915_gem_object_shmem_to_phys()
    [all …]
/drivers/block/xen-blkback/
  blkback.c
     246  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];  in free_persistent_gnts() local
     252  unmap_data.pages = pages;  in free_persistent_gnts()
     265  pages[segs_to_unmap] = persistent_gnt->page;  in free_persistent_gnts()
     273  gnttab_page_cache_put(&ring->free_pages, pages,  in free_persistent_gnts()
     288  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];  in xen_blkbk_unmap_purged_grants() local
     294  unmap_data.pages = pages;  in xen_blkbk_unmap_purged_grants()
     309  pages[segs_to_unmap] = persistent_gnt->page;  in xen_blkbk_unmap_purged_grants()
     314  gnttab_page_cache_put(&ring->free_pages, pages,  in xen_blkbk_unmap_purged_grants()
     323  gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);  in xen_blkbk_unmap_purged_grants()
     647  struct grant_page **pages,  in xen_blkbk_unmap_prepare() argument
    [all …]
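free_persistent_gnts() and xen_blkbk_unmap_purged_grants() above both collect pages into a fixed on-stack array sized BLKIF_MAX_SEGMENTS_PER_REQUEST and flush it whenever it fills, plus once at the end for the partial batch. A simplified sketch of that batching shape (DEMO_BATCH, demo_flush() and demo_unmap_all() are illustrative stand-ins):

#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/string.h>

#define DEMO_BATCH 32	/* stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST */

static void demo_flush(struct page **pages, unsigned int n)
{
	/* the real code points unmap_data.pages here and unmaps grants */
}

static void demo_unmap_all(struct page **src, unsigned int total)
{
	struct page *pages[DEMO_BATCH];
	unsigned int done = 0, n;

	while (done < total) {
		n = min_t(unsigned int, total - done, DEMO_BATCH);
		memcpy(pages, src + done, n * sizeof(*pages));
		demo_flush(pages, n);	/* full batch, or the final partial one */
		done += n;
	}
}

The trailing partial flush corresponds to the post-loop gnttab_page_cache_put() at line 323 above: whatever is left when the walk ends still has to be handed back.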
/drivers/gpu/drm/
  drm_scatter.c
      51  for (i = 0; i < entry->pages; i++) {  in drm_sg_cleanup()
      83  unsigned long pages, i, j;  in drm_legacy_sg_alloc() local
     103  pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;  in drm_legacy_sg_alloc()
     104  DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);  in drm_legacy_sg_alloc()
     106  entry->pages = pages;  in drm_legacy_sg_alloc()
     107  entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);  in drm_legacy_sg_alloc()
     113  entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);  in drm_legacy_sg_alloc()
     120  entry->virtual = vmalloc_32(pages << PAGE_SHIFT);  in drm_legacy_sg_alloc()
     131  memset(entry->virtual, 0, pages << PAGE_SHIFT);  in drm_legacy_sg_alloc()
     138  for (i = (unsigned long)entry->virtual, j = 0; j < pages;  in drm_legacy_sg_alloc()
    [all …]
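Line 103 above computes the page count with the open-coded round-up (request->size + PAGE_SIZE - 1) / PAGE_SIZE; current kernel style spells the same thing DIV_ROUND_UP(size, PAGE_SIZE). A one-line sketch:

#include <linux/kernel.h>
#include <linux/mm.h>

static unsigned long demo_size_to_pages(unsigned long size)
{
	/* identical to (size + PAGE_SIZE - 1) / PAGE_SIZE */
	return DIV_ROUND_UP(size, PAGE_SIZE);
}

PFN_UP(size) from <linux/pfn.h> is an equivalent spelling when rounding byte counts up to whole pages.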
/drivers/net/ethernet/amd/xgbe/
  xgbe-desc.c
     141  if (ring->rx_hdr_pa.pages) {  in xgbe_free_ring()
     144  put_page(ring->rx_hdr_pa.pages);  in xgbe_free_ring()
     146  ring->rx_hdr_pa.pages = NULL;  in xgbe_free_ring()
     152  if (ring->rx_buf_pa.pages) {  in xgbe_free_ring()
     155  put_page(ring->rx_buf_pa.pages);  in xgbe_free_ring()
     157  ring->rx_buf_pa.pages = NULL;  in xgbe_free_ring()
     289  struct page *pages = NULL;  in xgbe_alloc_pages() local
     300  pages = alloc_pages_node(node, gfp, order);  in xgbe_alloc_pages()
     301  if (pages)  in xgbe_alloc_pages()
     308  if (!pages && (node != NUMA_NO_NODE)) {  in xgbe_alloc_pages()
    [all …]
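xgbe_alloc_pages() above first tries the ring's preferred NUMA node and only then falls back to any node, so DMA buffers land near the consuming CPU when memory allows. A sketch of the node fallback (the demo_ name is illustrative):

#include <linux/gfp.h>

static struct page *demo_alloc_pages_preferred(int node, gfp_t gfp,
					       unsigned int order)
{
	struct page *pages;

	pages = alloc_pages_node(node, gfp, order);	/* try the local node */
	if (!pages && node != NUMA_NO_NODE)
		pages = alloc_pages_node(NUMA_NO_NODE, gfp, order);

	return pages;	/* NULL only if both attempts failed */
}

The driver additionally retries with progressively smaller allocation orders; only the node fallback is sketched here.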
/drivers/gpu/drm/ttm/
  ttm_pool.c
     235  list_add(&p->lru, &pt->pages);  in ttm_pool_type_give()
     246  p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);  in ttm_pool_type_take()
     264  INIT_LIST_HEAD(&pt->pages);  in ttm_pool_type_init()
     352  struct page ***pages)  in ttm_pool_page_allocated() argument
     364  for (i = 1 << order; i; --i, ++(*pages), ++p)  in ttm_pool_page_allocated()
     365  **pages = p;  in ttm_pool_page_allocated()
     387  struct page **pages = &tt->pages[start_page];  in ttm_pool_free_range() local
     391  for (i = start_page; i < end_page; i += nr, pages += nr) {  in ttm_pool_free_range()
     394  order = ttm_pool_page_order(pool, *pages);  in ttm_pool_free_range()
     401  ttm_pool_type_give(pt, *pages);  in ttm_pool_free_range()
    [all …]
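ttm_pool_type_give()/ttm_pool_type_take() above implement a per-size page pool as a simple LIFO list threaded through each page's lru field. A sketch without the real pool's locking and shrinker accounting (the demo_ names are illustrative):

#include <linux/list.h>
#include <linux/mm.h>

struct demo_pool_type {
	struct list_head pages;
};

static void demo_pool_init(struct demo_pool_type *pt)
{
	INIT_LIST_HEAD(&pt->pages);
}

static void demo_pool_give(struct demo_pool_type *pt, struct page *p)
{
	list_add(&p->lru, &pt->pages);	/* push on the front */
}

static struct page *demo_pool_take(struct demo_pool_type *pt)
{
	struct page *p;

	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p)
		list_del(&p->lru);
	return p;
}

LIFO order hands back the most recently freed page first, the one most likely to still be cache-warm.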
/drivers/net/ethernet/synopsys/
  dwc-xlgmac-desc.c
      41  if (desc_data->rx.hdr.pa.pages)  in xlgmac_unmap_desc_data()
      42  put_page(desc_data->rx.hdr.pa.pages);  in xlgmac_unmap_desc_data()
      44  if (desc_data->rx.hdr.pa_unmap.pages) {  in xlgmac_unmap_desc_data()
      48  put_page(desc_data->rx.hdr.pa_unmap.pages);  in xlgmac_unmap_desc_data()
      51  if (desc_data->rx.buf.pa.pages)  in xlgmac_unmap_desc_data()
      52  put_page(desc_data->rx.buf.pa.pages);  in xlgmac_unmap_desc_data()
      54  if (desc_data->rx.buf.pa_unmap.pages) {  in xlgmac_unmap_desc_data()
      58  put_page(desc_data->rx.buf.pa_unmap.pages);  in xlgmac_unmap_desc_data()
      93  if (ring->rx_hdr_pa.pages) {  in xlgmac_free_ring()
      96  put_page(ring->rx_hdr_pa.pages);  in xlgmac_free_ring()
    [all …]
/drivers/gpu/drm/virtio/
  virtgpu_object.c
      72  if (shmem->pages) {  in virtio_gpu_cleanup_object()
      75  shmem->pages, DMA_TO_DEVICE, 0);  in virtio_gpu_cleanup_object()
      79  sg_free_table(shmem->pages);  in virtio_gpu_cleanup_object()
      80  kfree(shmem->pages);  in virtio_gpu_cleanup_object()
      81  shmem->pages = NULL;  in virtio_gpu_cleanup_object()
     169  shmem->pages = drm_gem_shmem_get_sg_table(&bo->base);  in virtio_gpu_object_shmem_init()
     170  if (IS_ERR(shmem->pages)) {  in virtio_gpu_object_shmem_init()
     172  ret = PTR_ERR(shmem->pages);  in virtio_gpu_object_shmem_init()
     173  shmem->pages = NULL;  in virtio_gpu_object_shmem_init()
     179  shmem->pages, DMA_TO_DEVICE, 0);  in virtio_gpu_object_shmem_init()
    [all …]
/drivers/gpu/drm/i915/selftests/
  mock_region.c
      18  struct sg_table *pages)  in mock_region_put_pages() argument
      21  sg_free_table(pages);  in mock_region_put_pages()
      22  kfree(pages);  in mock_region_put_pages()
      28  struct sg_table *pages;  in mock_region_get_pages() local
      41  pages = intel_region_ttm_resource_to_st(obj->mm.region, obj->mm.res);  in mock_region_get_pages()
      42  if (IS_ERR(pages)) {  in mock_region_get_pages()
      43  err = PTR_ERR(pages);  in mock_region_get_pages()
      47  __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));  in mock_region_get_pages()
/drivers/hwmon/pmbus/
  pmbus.c
      19  int pages;  member
      70  for (page = 0; page < info->pages; page++) {  in pmbus_find_sensor_groups()
      96  if (!info->pages) {  in pmbus_identify()
     111  info->pages = page;  in pmbus_identify()
     113  info->pages = 1;  in pmbus_identify()
     129  for (i = 0; i < info->pages; i++)  in pmbus_identify()
     185  info->pages = device_info->pages;  in pmbus_probe()
     193  .pages = 1,
     198  .pages = 0,
     203  .pages = 1,
    [all …]
/drivers/gpu/drm/omapdrm/
  omap_gem.c
      87  struct page **pages;  member
     225  struct page **pages;  in omap_gem_attach_pages() local
     236  if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)  in omap_gem_attach_pages()
     239  pages = drm_gem_get_pages(obj);  in omap_gem_attach_pages()
     240  if (IS_ERR(pages)) {  in omap_gem_attach_pages()
     241  dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));  in omap_gem_attach_pages()
     242  return PTR_ERR(pages);  in omap_gem_attach_pages()
     256  addrs[i] = dma_map_page(dev->dev, pages[i],  in omap_gem_attach_pages()
     281  omap_obj->pages = pages;  in omap_gem_attach_pages()
     288  drm_gem_put_pages(obj, pages, true, false);  in omap_gem_attach_pages()
    [all …]
/drivers/hwtracing/coresight/
  coresight-tmc-etr.c
      44  void **pages;  member
     170  if (tmc_pages->pages && tmc_pages->pages[i])  in tmc_pages_free()
     171  __free_page(tmc_pages->pages[i]);  in tmc_pages_free()
     174  kfree(tmc_pages->pages);  in tmc_pages_free()
     176  tmc_pages->pages = NULL;  in tmc_pages_free()
     191  enum dma_data_direction dir, void **pages)  in tmc_pages_alloc() argument
     203  tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),  in tmc_pages_alloc()
     205  if (!tmc_pages->pages) {  in tmc_pages_alloc()
     212  if (pages && pages[i]) {  in tmc_pages_alloc()
     213  page = virt_to_page(pages[i]);  in tmc_pages_alloc()
    [all …]
/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
  mem.c
      34  u64 pages;  member
      58  if (mem->pages == 1 && mem->mem)  in nvkm_mem_addr()
      66  return nvkm_mem(memory)->pages << PAGE_SHIFT;  in nvkm_mem_size()
      87  while (mem->pages--) {  in nvkm_mem_dtor()
      89  mem->dma[mem->pages], PAGE_SIZE,  in nvkm_mem_dtor()
      91  __free_page(mem->mem[mem->pages]);  in nvkm_mem_dtor()
     137  *pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);  in nvkm_mem_map_host()
     183  mem->pages = size >> PAGE_SHIFT;  in nvkm_mem_new_host()
     204  for (mem->pages = 0; size; size--, mem->pages++) {  in nvkm_mem_new_host()
     209  mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,  in nvkm_mem_new_host()
    [all …]
/drivers/gpu/drm/etnaviv/
  etnaviv_gem.c
      68  etnaviv_obj->pages = p;  in etnaviv_gem_shmem_get_pages()
      81  if (etnaviv_obj->pages) {  in put_pages()
      82  drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,  in put_pages()
      85  etnaviv_obj->pages = NULL;  in put_pages()
      95  if (!etnaviv_obj->pages) {  in etnaviv_gem_get_pages()
     107  etnaviv_obj->pages, npages);  in etnaviv_gem_get_pages()
     119  return etnaviv_obj->pages;  in etnaviv_gem_get_pages()
     168  struct page **pages, *page;  in etnaviv_gem_fault() local
     181  pages = etnaviv_gem_get_pages(etnaviv_obj);  in etnaviv_gem_fault()
     184  if (IS_ERR(pages)) {  in etnaviv_gem_fault()
    [all …]
/drivers/gpu/drm/i915/gt/
  shmem_utils.c
      56  struct page **pages;  in shmem_pin_map() local
      61  pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);  in shmem_pin_map()
      62  if (!pages)  in shmem_pin_map()
      66  pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,  in shmem_pin_map()
      68  if (IS_ERR(pages[i]))  in shmem_pin_map()
      72  vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);  in shmem_pin_map()
      79  put_page(pages[i]);  in shmem_pin_map()
      80  kvfree(pages);  in shmem_pin_map()
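shmem_pin_map() above pins every page of a shmem file and vmap()s them into one contiguous kernel address range; the VM_MAP_PUT_PAGES flag transfers ownership of the page references and of the pages array itself to the mapping, so a later vfree() releases everything. A sketch with trimmed error handling (the demo_ name is illustrative):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *demo_shmem_pin_map(struct file *file, unsigned long n_pages)
{
	struct page **pages;
	unsigned long i;
	void *vaddr;

	pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		/* reading the page also takes a reference (pins it) */
		pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
						       GFP_KERNEL);
		if (IS_ERR(pages[i]))
			goto err;
	}

	vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
	if (!vaddr)
		goto err;
	return vaddr;	/* the mapping now owns pages[] and the refs */

err:
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return NULL;
}

On the error path ownership has not transferred yet, so the sketch drops each pinned page and the array by hand, matching the hits at lines 79-80 above.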
/drivers/gpu/drm/gma500/
  gtt.c
      79  struct page **pages;  in psb_gtt_insert() local
      82  if (r->pages == NULL) {  in psb_gtt_insert()
      90  pages = r->pages;  in psb_gtt_insert()
      94  set_pages_array_wc(pages, r->npage);  in psb_gtt_insert()
      99  pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),  in psb_gtt_insert()
     135  set_pages_array_wb(r->pages, r->npage);  in psb_gtt_remove()
     148  struct page **pages;  in psb_gtt_attach_pages() local
     150  WARN_ON(gt->pages);  in psb_gtt_attach_pages()
     152  pages = drm_gem_get_pages(&gt->gem);  in psb_gtt_attach_pages()
     153  if (IS_ERR(pages))  in psb_gtt_attach_pages()
    [all …]
/drivers/gpu/drm/r128/
  ati_pcigart.c
      83  unsigned long pages;  in drm_ati_pcigart_cleanup() local
      96  pages = (entry->pages <= max_pages)  in drm_ati_pcigart_cleanup()
      97  ? entry->pages : max_pages;  in drm_ati_pcigart_cleanup()
      99  for (i = 0; i < pages; i++) {  in drm_ati_pcigart_cleanup()
     123  unsigned long pages;  in drm_ati_pcigart_init() local
     164  pages = (entry->pages <= max_real_pages)  in drm_ati_pcigart_init()
     165  ? entry->pages : max_real_pages;  in drm_ati_pcigart_init()
     174  for (i = 0; i < pages; i++) {  in drm_ati_pcigart_init()
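The pages = (entry->pages <= max_pages) ? entry->pages : max_pages lines above are an open-coded min(); modern kernel code uses the type-checked macro from <linux/minmax.h>. A sketch:

#include <linux/minmax.h>

static unsigned long demo_clamp_pages(unsigned long want, unsigned long max)
{
	return min(want, max);	/* same as (want <= max) ? want : max */
}

min() also type-checks its arguments at compile time, which the ternary form does not.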