/drivers/gpu/drm/ttm/

D | ttm_page_alloc.c |
  220  static int set_pages_array_wb(struct page **pages, int addrinarray)   in set_pages_array_wb() argument
  226  unmap_page_from_agp(pages[i]);   in set_pages_array_wb()
  231  static int set_pages_array_wc(struct page **pages, int addrinarray)   in set_pages_array_wc() argument
  237  map_page_into_agp(pages[i]);   in set_pages_array_wc()
  242  static int set_pages_array_uc(struct page **pages, int addrinarray)   in set_pages_array_uc() argument
  248  map_page_into_agp(pages[i]);   in set_pages_array_uc()
  276  static void ttm_pages_put(struct page *pages[], unsigned npages)   in ttm_pages_put() argument
  279  if (set_pages_array_wb(pages, npages))   in ttm_pages_put()
  282  __free_page(pages[i]);   in ttm_pages_put()
  448  static int ttm_set_pages_caching(struct page **pages,   in ttm_set_pages_caching() argument
  [all …]
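These hits trace TTM's page-caching management: set_pages_array_wb/wc/uc() move whole page arrays between write-back, write-combined, and uncached states (the static fallbacks shown wrap the AGP map/unmap hooks where the x86 helpers are unavailable), and ttm_pages_put() restores write-back before the pages are freed. A minimal sketch of that release path, assuming an x86 build where set_pages_array_wb() is available via asm/cacheflush.h in kernels of this vintage; the helper name is hypothetical:

    #include <linux/mm.h>
    #include <linux/printk.h>
    #include <asm/cacheflush.h>    /* x86 set_pages_array_wb() */

    /* Hypothetical mirror of ttm_pages_put(): restore write-back caching
     * on the whole array, then hand every page back to the allocator. */
    static void demo_pages_put(struct page *pages[], unsigned npages)
    {
        unsigned i;

        /* Pages that were WC/UC must be WB again before reuse. */
        if (set_pages_array_wb(pages, npages))
            pr_err("failed to restore write-back caching\n");

        for (i = 0; i < npages; i++)
            __free_page(pages[i]);
    }

Restoring write-back first matters because the freed pages can be handed to any other subsystem, which expects normally cacheable memory.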
D | ttm_tt.c |
  53   ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));   in ttm_tt_alloc_page_directory()
  58   ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,   in ttm_dma_tt_alloc_page_directory()
  59   sizeof(*ttm->ttm.pages) +   in ttm_dma_tt_alloc_page_directory()
  62   ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);   in ttm_dma_tt_alloc_page_directory()
  123  drm_clflush_pages(ttm->pages, ttm->num_pages);   in ttm_tt_set_caching()
  126  cur_page = ttm->pages[i];   in ttm_tt_set_caching()
  142  cur_page = ttm->pages[j];   in ttm_tt_set_caching()
  201  if (!ttm->pages) {   in ttm_tt_init()
  212  drm_free_large(ttm->pages);   in ttm_tt_fini()
  213  ttm->pages = NULL;   in ttm_tt_fini()
  [all …]
/drivers/gpu/drm/udl/

D | udl_gem.c |
  113  if (!obj->pages)   in udl_gem_fault()
  116  page = obj->pages[page_offset];   in udl_gem_fault()
  132  struct page **pages;   in udl_gem_get_pages() local
  134  if (obj->pages)   in udl_gem_get_pages()
  137  pages = drm_gem_get_pages(&obj->base);   in udl_gem_get_pages()
  138  if (IS_ERR(pages))   in udl_gem_get_pages()
  139  return PTR_ERR(pages);   in udl_gem_get_pages()
  141  obj->pages = pages;   in udl_gem_get_pages()
  149  drm_free_large(obj->pages);   in udl_gem_put_pages()
  150  obj->pages = NULL;   in udl_gem_put_pages()
  [all …]
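udl_gem_get_pages() shows the common GEM idiom of pinning the shmem-backed pages once and caching the array on the object, so the fault handler can index straight into obj->pages. A sketch under assumed names (my_gem_object is a hypothetical stand-in for the driver's object struct):

    #include <linux/err.h>
    #include <drm/drmP.h>
    #include <drm/drm_gem.h>

    struct my_gem_object {            /* hypothetical driver object */
        struct drm_gem_object base;
        struct page **pages;          /* cached array, NULL until pinned */
    };

    static int my_gem_get_pages(struct my_gem_object *obj)
    {
        struct page **pages;

        if (obj->pages)               /* already pinned, reuse */
            return 0;

        pages = drm_gem_get_pages(&obj->base);   /* pins shmem pages */
        if (IS_ERR(pages))
            return PTR_ERR(pages);

        obj->pages = pages;
        return 0;
    }

    static void my_gem_put_pages(struct my_gem_object *obj)
    {
        /* dirty/accessed both false here; pass dirty = true when the
         * CPU wrote to the pages, as the msm hit further down does */
        drm_gem_put_pages(&obj->base, obj->pages, false, false);
        obj->pages = NULL;
    }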
/drivers/gpu/drm/virtio/

D | virtgpu_object.c |
  38   if (bo->pages)   in virtio_gpu_ttm_bo_destroy()
  125  struct page **pages = bo->tbo.ttm->pages;   in virtio_gpu_object_get_sg_table() local
  129  if (bo->pages)   in virtio_gpu_object_get_sg_table()
  134  bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);   in virtio_gpu_object_get_sg_table()
  135  if (!bo->pages)   in virtio_gpu_object_get_sg_table()
  138  ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,   in virtio_gpu_object_get_sg_table()
  144  kfree(bo->pages);   in virtio_gpu_object_get_sg_table()
  145  bo->pages = NULL;   in virtio_gpu_object_get_sg_table()
  151  sg_free_table(bo->pages);   in virtio_gpu_object_free_sg_table()
  152  kfree(bo->pages);   in virtio_gpu_object_free_sg_table()
  [all …]
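virtio_gpu_object_get_sg_table() wraps the page array already held by the TTM TT in a freshly allocated sg_table via sg_alloc_table_from_pages(). The core of that, reduced to a hypothetical helper and assuming the pages cover nr_pages full pages starting at offset 0:

    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static struct sg_table *demo_sgt_from_pages(struct page **pages,
                                                unsigned int nr_pages)
    {
        struct sg_table *sgt;
        int ret;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
            return ERR_PTR(-ENOMEM);

        /* One sg entry per run of contiguous pages, not per page. */
        ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
                                        (size_t)nr_pages << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret) {
            kfree(sgt);
            return ERR_PTR(ret);
        }
        return sgt;      /* undo with sg_free_table() + kfree() */
    }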
/drivers/net/ethernet/amd/xgbe/

D | xgbe-desc.c |
  141  if (ring->rx_hdr_pa.pages) {   in xgbe_free_ring()
  144  put_page(ring->rx_hdr_pa.pages);   in xgbe_free_ring()
  146  ring->rx_hdr_pa.pages = NULL;   in xgbe_free_ring()
  152  if (ring->rx_buf_pa.pages) {   in xgbe_free_ring()
  155  put_page(ring->rx_buf_pa.pages);   in xgbe_free_ring()
  157  ring->rx_buf_pa.pages = NULL;   in xgbe_free_ring()
  266  struct page *pages = NULL;   in xgbe_alloc_pages() local
  273  pages = alloc_pages(gfp, order);   in xgbe_alloc_pages()
  274  if (pages)   in xgbe_alloc_pages()
  279  if (!pages)   in xgbe_alloc_pages()
  [all …]
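xgbe_alloc_pages() tries a high-order allocation first and drops to smaller orders on failure, settling for whatever contiguity it can get. The fallback loop as a standalone sketch (the driver also records the DMA mapping, elided here):

    #include <linux/gfp.h>

    static struct page *demo_alloc_pages_fallback(gfp_t gfp, int order)
    {
        struct page *pages = NULL;

        while (order >= 0) {
            pages = alloc_pages(gfp, order);
            if (pages)
                break;
            order--;      /* retry with a smaller contiguous chunk */
        }
        return pages;     /* NULL if even order 0 failed */
    }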
/drivers/misc/mic/scif/

D | scif_mmap.c |
  222  struct scif_range **pages)   in scif_get_pages() argument
  261  *pages = kzalloc(sizeof(**pages), GFP_KERNEL);   in scif_get_pages()
  262  if (!*pages) {   in scif_get_pages()
  268  (*pages)->phys_addr = scif_zalloc(nr_pages * sizeof(dma_addr_t));   in scif_get_pages()
  269  if (!((*pages)->phys_addr)) {   in scif_get_pages()
  276  ((*pages)->va = scif_zalloc(nr_pages * sizeof(void *)));   in scif_get_pages()
  277  if (!(*pages)->va) {   in scif_get_pages()
  283  (*pages)->cookie = window;   in scif_get_pages()
  284  (*pages)->nr_pages = nr_pages;   in scif_get_pages()
  285  (*pages)->prot_flags = window->prot;   in scif_get_pages()
  [all …]
/drivers/gpu/drm/

D | drm_scatter.c |
  55   for (i = 0; i < entry->pages; i++) {   in drm_sg_cleanup()
  87   unsigned long pages, i, j;   in drm_legacy_sg_alloc() local
  104  pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;   in drm_legacy_sg_alloc()
  105  DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);   in drm_legacy_sg_alloc()
  107  entry->pages = pages;   in drm_legacy_sg_alloc()
  108  entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);   in drm_legacy_sg_alloc()
  114  entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);   in drm_legacy_sg_alloc()
  121  entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);   in drm_legacy_sg_alloc()
  132  memset(entry->virtual, 0, pages << PAGE_SHIFT);   in drm_legacy_sg_alloc()
  139  for (i = (unsigned long)entry->virtual, j = 0; j < pages;   in drm_legacy_sg_alloc()
  [all …]
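drm_legacy_sg_alloc() rounds the requested byte size up to whole pages and sizes all of its bookkeeping from that count. The arithmetic and the kcalloc pattern, sketched with a hypothetical entry struct mirroring the fields hit above:

    #include <linux/errno.h>
    #include <linux/mm.h>       /* PAGE_SIZE, PAGE_SHIFT */
    #include <linux/slab.h>
    #include <linux/types.h>    /* dma_addr_t */

    struct demo_sg_entry {      /* hypothetical */
        unsigned long pages;
        struct page **pagelist;
        dma_addr_t *busaddr;
    };

    static int demo_sg_entry_init(struct demo_sg_entry *e, unsigned long size)
    {
        /* (size + PAGE_SIZE - 1) / PAGE_SIZE: round up to full pages */
        e->pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        e->pagelist = kcalloc(e->pages, sizeof(*e->pagelist), GFP_KERNEL);
        e->busaddr = kcalloc(e->pages, sizeof(*e->busaddr), GFP_KERNEL);
        if (!e->pagelist || !e->busaddr) {
            kfree(e->pagelist);
            kfree(e->busaddr);
            return -ENOMEM;
        }
        return 0;
    }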
D | ati_pcigart.c |
  62   unsigned long pages;   in drm_ati_pcigart_cleanup() local
  75   pages = (entry->pages <= max_pages)   in drm_ati_pcigart_cleanup()
  76   ? entry->pages : max_pages;   in drm_ati_pcigart_cleanup()
  78   for (i = 0; i < pages; i++) {   in drm_ati_pcigart_cleanup()
  103  unsigned long pages;   in drm_ati_pcigart_init() local
  144  pages = (entry->pages <= max_real_pages)   in drm_ati_pcigart_init()
  145  ? entry->pages : max_real_pages;   in drm_ati_pcigart_init()
  154  for (i = 0; i < pages; i++) {   in drm_ati_pcigart_init()
/drivers/gpu/drm/omapdrm/

D | omap_gem.c |
  30   void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
  88   struct page **pages;   member
  122  static int get_pages(struct drm_gem_object *obj, struct page ***pages);
  229  struct page **pages;   in omap_gem_attach_pages() local
  234  WARN_ON(omap_obj->pages);   in omap_gem_attach_pages()
  236  pages = drm_gem_get_pages(obj);   in omap_gem_attach_pages()
  237  if (IS_ERR(pages)) {   in omap_gem_attach_pages()
  238  dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));   in omap_gem_attach_pages()
  239  return PTR_ERR(pages);   in omap_gem_attach_pages()
  253  addrs[i] = dma_map_page(dev->dev, pages[i],   in omap_gem_attach_pages()
  [all …]
D | omap_gem_dmabuf.c |
  85   struct page **pages;   in omap_gem_dmabuf_begin_cpu_access() local
  93   return omap_gem_get_pages(obj, &pages, true);   in omap_gem_dmabuf_begin_cpu_access()
  108  struct page **pages;   in omap_gem_dmabuf_kmap_atomic() local
  109  omap_gem_get_pages(obj, &pages, false);   in omap_gem_dmabuf_kmap_atomic()
  111  return kmap_atomic(pages[page_num]);   in omap_gem_dmabuf_kmap_atomic()
  124  struct page **pages;   in omap_gem_dmabuf_kmap() local
  125  omap_gem_get_pages(obj, &pages, false);   in omap_gem_dmabuf_kmap()
  127  return kmap(pages[page_num]);   in omap_gem_dmabuf_kmap()
  134  struct page **pages;   in omap_gem_dmabuf_kunmap() local
  135  omap_gem_get_pages(obj, &pages, false);   in omap_gem_dmabuf_kunmap()
  [all …]
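The dma-buf kmap hooks above resolve the backing page array, then map one page at a time into kernel space; the atomic variant may not sleep between map and unmap. Reduced to the mapping calls themselves (page lookup and error handling elided, as in the driver):

    #include <linux/highmem.h>

    /* Sleepable mapping, as in omap_gem_dmabuf_kmap(); pair with kunmap(). */
    static void *demo_kmap(struct page **pages, unsigned long page_num)
    {
        return kmap(pages[page_num]);
    }

    /* Atomic mapping, as in omap_gem_dmabuf_kmap_atomic(); no sleeping
     * until the matching kunmap_atomic(). */
    static void *demo_kmap_atomic(struct page **pages, unsigned long page_num)
    {
        return kmap_atomic(pages[page_num]);
    }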
/drivers/staging/lustre/lustre/llite/

D | rw26.c |
  189  size_t size, struct page ***pages,   in ll_get_user_pages() argument
  196  *pages = NULL;   in ll_get_user_pages()
  203  *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);   in ll_get_user_pages()
  204  if (*pages) {   in ll_get_user_pages()
  206  (rw == READ), *pages);   in ll_get_user_pages()
  208  kvfree(*pages);   in ll_get_user_pages()
  216  static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)   in ll_free_user_pages() argument
  222  set_page_dirty_lock(pages[i]);   in ll_free_user_pages()
  223  page_cache_release(pages[i]);   in ll_free_user_pages()
  225  kvfree(pages);   in ll_free_user_pages()
  [all …]
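ll_free_user_pages() is the canonical unwind for get_user_pages()-style pinning: mark each page dirty if the kernel wrote into it (a file READ lands in user memory), drop the reference, then free the array. A sketch; page_cache_release() in the listing is this era's alias for put_page():

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static void demo_free_user_pages(struct page **pages, int npages,
                                     bool do_dirty)
    {
        int i;

        for (i = 0; i < npages; i++) {
            if (do_dirty)
                set_page_dirty_lock(pages[i]);
            put_page(pages[i]);    /* page_cache_release() upstream */
        }
        kvfree(pages);             /* array came from a kvzalloc variant */
    }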
/drivers/iommu/

D | dma-iommu.c |
  190  static void __iommu_dma_free_pages(struct page **pages, int count)   in __iommu_dma_free_pages() argument
  193  __free_page(pages[count]);   in __iommu_dma_free_pages()
  194  kvfree(pages);   in __iommu_dma_free_pages()
  199  struct page **pages;   in __iommu_dma_alloc_pages() local
  200  unsigned int i = 0, array_size = count * sizeof(*pages);   in __iommu_dma_alloc_pages()
  204  pages = kzalloc(array_size, GFP_KERNEL);   in __iommu_dma_alloc_pages()
  206  pages = vzalloc(array_size);   in __iommu_dma_alloc_pages()
  207  if (!pages)   in __iommu_dma_alloc_pages()
  239  __iommu_dma_free_pages(pages, i);   in __iommu_dma_alloc_pages()
  245  pages[i++] = page++;   in __iommu_dma_alloc_pages()
  [all …]
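__iommu_dma_alloc_pages() picks the allocator for its metadata array by size: kzalloc for anything that fits in a page, vzalloc beyond that (later kernels fold this dance into kvzalloc()). The selection logic, assuming the threshold is PAGE_SIZE as in the driver:

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static struct page **demo_alloc_page_array(int count)
    {
        size_t array_size = count * sizeof(struct page *);

        /* Small arrays from the slab, large ones from vmalloc space. */
        if (array_size <= PAGE_SIZE)
            return kzalloc(array_size, GFP_KERNEL);
        return vzalloc(array_size);    /* either way, free with kvfree() */
    }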
/drivers/block/xen-blkback/

D | blkback.c |
  279  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];   in free_persistent_gnts() local
  285  unmap_data.pages = pages;   in free_persistent_gnts()
  298  pages[segs_to_unmap] = persistent_gnt->page;   in free_persistent_gnts()
  306  put_free_pages(blkif, pages, segs_to_unmap);   in free_persistent_gnts()
  320  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];   in xen_blkbk_unmap_purged_grants() local
  326  unmap_data.pages = pages;   in xen_blkbk_unmap_purged_grants()
  341  pages[segs_to_unmap] = persistent_gnt->page;   in xen_blkbk_unmap_purged_grants()
  346  put_free_pages(blkif, pages, segs_to_unmap);   in xen_blkbk_unmap_purged_grants()
  354  put_free_pages(blkif, pages, segs_to_unmap);   in xen_blkbk_unmap_purged_grants()
  683  struct grant_page **pages,   in xen_blkbk_unmap_prepare() argument
  [all …]
/drivers/gpu/drm/gma500/

D | gtt.c |
  89   struct page **pages;   in psb_gtt_insert() local
  92   if (r->pages == NULL) {   in psb_gtt_insert()
  100  pages = r->pages;   in psb_gtt_insert()
  104  set_pages_array_wc(pages, r->npage);   in psb_gtt_insert()
  109  pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),   in psb_gtt_insert()
  114  pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),   in psb_gtt_insert()
  149  set_pages_array_wb(r->pages, r->npage);   in psb_gtt_remove()
  183  pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),   in psb_gtt_roll()
  188  pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),   in psb_gtt_roll()
  205  struct page **pages;   in psb_gtt_attach_pages() local
  [all …]
/drivers/gpu/drm/vgem/

D | vgem_drv.c |
  47   drm_gem_put_pages(&obj->base, obj->pages, false, false);   in vgem_gem_put_pages()
  48   obj->pages = NULL;   in vgem_gem_put_pages()
  64   if (vgem_obj->pages)   in vgem_gem_free_object()
  67   vgem_obj->pages = NULL;   in vgem_gem_free_object()
  74   struct page **pages;   in vgem_gem_get_pages() local
  76   if (obj->pages || obj->use_dma_buf)   in vgem_gem_get_pages()
  79   pages = drm_gem_get_pages(&obj->base);   in vgem_gem_get_pages()
  80   if (IS_ERR(pages)) {   in vgem_gem_get_pages()
  81   return PTR_ERR(pages);   in vgem_gem_get_pages()
  84   obj->pages = pages;   in vgem_gem_get_pages()
  [all …]
/drivers/gpu/drm/i915/

D | i915_gem_dmabuf.c |
  60   ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);   in i915_gem_map_dma_buf()
  64   src = obj->pages->sgl;   in i915_gem_map_dma_buf()
  66   for (i = 0; i < obj->pages->nents; i++) {   in i915_gem_map_dma_buf()
  114  struct page **pages;   in i915_gem_dmabuf_vmap() local
  134  pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));   in i915_gem_dmabuf_vmap()
  135  if (pages == NULL)   in i915_gem_dmabuf_vmap()
  139  for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)   in i915_gem_dmabuf_vmap()
  140  pages[i++] = sg_page_iter_page(&sg_iter);   in i915_gem_dmabuf_vmap()
  142  obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);   in i915_gem_dmabuf_vmap()
  143  drm_free_large(pages);   in i915_gem_dmabuf_vmap()
  [all …]
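i915_gem_dmabuf_vmap() builds one linear kernel mapping over an object whose pages live behind an sg_table: walk the table with the sg page iterator into a temporary array, vmap() it, and free the array (vmap keeps its own copy of the page pointers, which is why drm_free_large() directly follows in the listing). A sketch with assumed inputs:

    #include <linux/mm.h>           /* PAGE_KERNEL */
    #include <linux/scatterlist.h>
    #include <linux/vmalloc.h>

    static void *demo_vmap_sg(struct sg_table *st, unsigned int n_pages)
    {
        struct sg_page_iter sg_iter;
        struct page **pages;
        void *addr;
        unsigned int i = 0;

        pages = vmalloc(n_pages * sizeof(*pages));
        if (!pages)
            return NULL;

        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
            pages[i++] = sg_page_iter_page(&sg_iter);

        addr = vmap(pages, i, 0, PAGE_KERNEL);   /* undo with vunmap() */
        vfree(pages);       /* safe: vmap() copied the pointers */
        return addr;
    }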
/drivers/lguest/x86/

D | core.c |
  85   static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)   in copy_in_guest_info() argument
  93   if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {   in copy_in_guest_info()
  95   cpu->last_pages = pages;   in copy_in_guest_info()
  103  pages->state.host_cr3 = __pa(current->mm->pgd);   in copy_in_guest_info()
  108  map_switcher_in_guest(cpu, pages);   in copy_in_guest_info()
  114  pages->state.guest_tss.sp1 = cpu->esp1;   in copy_in_guest_info()
  115  pages->state.guest_tss.ss1 = cpu->ss1;   in copy_in_guest_info()
  119  copy_traps(cpu, pages->state.guest_idt, default_idt_entries);   in copy_in_guest_info()
  123  copy_gdt(cpu, pages->state.guest_gdt);   in copy_in_guest_info()
  126  copy_gdt_tls(cpu, pages->state.guest_gdt);   in copy_in_guest_info()
  [all …]
/drivers/gpu/drm/msm/

D | msm_gem.c |
  76   if (!msm_obj->pages) {   in get_pages()
  92   msm_obj->pages = p;   in get_pages()
  111  return msm_obj->pages;   in get_pages()
  118  if (msm_obj->pages) {   in put_pages()
  134  drm_gem_put_pages(obj, msm_obj->pages, true, false);   in put_pages()
  137  drm_free_large(msm_obj->pages);   in put_pages()
  140  msm_obj->pages = NULL;   in put_pages()
  205  struct page **pages;   in msm_gem_fault() local
  218  pages = get_pages(obj);   in msm_gem_fault()
  219  if (IS_ERR(pages)) {   in msm_gem_fault()
  [all …]
/drivers/gpu/drm/amd/amdgpu/

D | amdgpu_gart.c |
  225  int pages)   in amdgpu_gart_unbind() argument
  240  for (i = 0; i < pages; i++, p++) {   in amdgpu_gart_unbind()
  241  if (adev->gart.pages[p]) {   in amdgpu_gart_unbind()
  242  adev->gart.pages[p] = NULL;   in amdgpu_gart_unbind()
  273  int pages, struct page **pagelist, dma_addr_t *dma_addr,   in amdgpu_gart_bind() argument
  289  for (i = 0; i < pages; i++, p++) {   in amdgpu_gart_bind()
  291  adev->gart.pages[p] = pagelist[i];   in amdgpu_gart_bind()
  317  if (adev->gart.pages) {   in amdgpu_gart_init()
  334  adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);   in amdgpu_gart_init()
  335  if (adev->gart.pages == NULL) {   in amdgpu_gart_init()
  [all …]
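Both GART drivers in this listing (amdgpu here, radeon further down) keep a vzalloc'd CPU-side shadow array with one struct page pointer per GART slot: bind fills slots from the caller's pagelist, unbind NULLs them. The bookkeeping shape, reduced to a hypothetical struct:

    #include <linux/errno.h>
    #include <linux/vmalloc.h>

    struct demo_gart {             /* hypothetical shadow of adev->gart */
        unsigned num_cpu_pages;
        struct page **pages;       /* NULL entry == slot unbound */
    };

    static int demo_gart_init(struct demo_gart *gart, unsigned num_cpu_pages)
    {
        gart->num_cpu_pages = num_cpu_pages;
        gart->pages = vzalloc(sizeof(void *) * num_cpu_pages);
        return gart->pages ? 0 : -ENOMEM;
    }

    /* Binding records the backing page per slot; unbinding would NULL it. */
    static void demo_gart_bind(struct demo_gart *gart, unsigned first_slot,
                               int npages, struct page **pagelist)
    {
        int i;

        for (i = 0; i < npages; i++)
            gart->pages[first_slot + i] = pagelist[i];
    }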
/drivers/media/v4l2-core/

D | videobuf2-dma-sg.c |
  40   struct page **pages;   member
  68   struct page *pages;   in vb2_dma_sg_alloc_compacted() local
  77   pages = NULL;   in vb2_dma_sg_alloc_compacted()
  78   while (!pages) {   in vb2_dma_sg_alloc_compacted()
  79   pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |   in vb2_dma_sg_alloc_compacted()
  81   if (pages)   in vb2_dma_sg_alloc_compacted()
  86   __free_page(buf->pages[last_page]);   in vb2_dma_sg_alloc_compacted()
  92   split_page(pages, order);   in vb2_dma_sg_alloc_compacted()
  94   buf->pages[last_page++] = &pages[i];   in vb2_dma_sg_alloc_compacted()
  128  buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),   in vb2_dma_sg_alloc()
  [all …]
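vb2_dma_sg_alloc_compacted() uses a compaction trick: allocate the largest order that still fits, split_page() it into independent order-0 pages, and file them into the flat buf->pages array, retrying at smaller orders as memory fragments. A sketch of the loop (the driver additionally caps the order and unwinds already-filled slots on failure):

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>      /* get_order(), split_page() */

    static int demo_alloc_compacted(struct page **dst, int nr_pages)
    {
        int filled = 0;

        while (filled < nr_pages) {
            struct page *head = NULL;
            int order = get_order((unsigned long)(nr_pages - filled)
                                  << PAGE_SHIFT);
            int i;

            while (!head && order >= 0) {
                head = alloc_pages(GFP_KERNEL | __GFP_ZERO |
                                   __GFP_NOWARN, order);
                if (!head)
                    order--;      /* fall back to smaller chunks */
            }
            if (!head)
                return -ENOMEM;   /* caller frees dst[0..filled) */

            split_page(head, order);   /* 2^order independent pages */
            for (i = 0; i < (1 << order) && filled < nr_pages; i++)
                dst[filled++] = &head[i];
        }
        return 0;
    }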
/drivers/xen/

D | gntdev.c |
  95   struct page **pages;   member
  99   static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
  122  if (map->pages)   in gntdev_free_map()
  123  gnttab_free_pages(map->count, map->pages);   in gntdev_free_map()
  124  kfree(map->pages);   in gntdev_free_map()
  147  add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);   in gntdev_alloc_map()
  153  NULL == add->pages)   in gntdev_alloc_map()
  156  if (gnttab_alloc_pages(count, add->pages))   in gntdev_alloc_map()
  230  if (map->pages && !use_ptemod)   in gntdev_put_map()
  283  pfn_to_kaddr(page_to_pfn(map->pages[i]));   in map_grant_pages()
  [all …]
D | xlate_mmu.c |
  45   static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,   in xen_for_each_gfn() argument
  54   page = pages[i / XEN_PFN_PER_PAGE];   in xen_for_each_gfn()
  68   struct page **pages;   member
  97   struct page *page = info->pages[info->index++];   in remap_pte_fn()
  146  struct page **pages)   in xen_xlate_remap_gfn_array() argument
  161  data.pages = pages;   in xen_xlate_remap_gfn_array()
  182  int nr, struct page **pages)   in xen_xlate_unmap_gfn_range() argument
  184  xen_for_each_gfn(pages, nr, unmap_gfn, NULL);   in xen_xlate_unmap_gfn_range()
/drivers/lightnvm/

D | rrpc.h |
  171  unsigned pages, struct rrpc_inflight_rq *r)   in __rrpc_lock_laddr() argument
  173  sector_t laddr_end = laddr + pages - 1;   in __rrpc_lock_laddr()
  194  unsigned pages,   in rrpc_lock_laddr() argument
  197  BUG_ON((laddr + pages) > rrpc->nr_pages);   in rrpc_lock_laddr()
  199  return __rrpc_lock_laddr(rrpc, laddr, pages, r);   in rrpc_lock_laddr()
  213  unsigned int pages = rrpc_get_pages(bio);   in rrpc_lock_rq() local
  216  return rrpc_lock_laddr(rrpc, laddr, pages, r);   in rrpc_lock_rq()
  232  uint8_t pages = rqd->nr_pages;   in rrpc_unlock_rq() local
  234  BUG_ON((r->l_start + pages) > rrpc->nr_pages);   in rrpc_unlock_rq()
/drivers/gpu/drm/radeon/

D | radeon_gart.c |
  239  int pages)   in radeon_gart_unbind() argument
  251  for (i = 0; i < pages; i++, p++) {   in radeon_gart_unbind()
  252  if (rdev->gart.pages[p]) {   in radeon_gart_unbind()
  253  rdev->gart.pages[p] = NULL;   in radeon_gart_unbind()
  284  int pages, struct page **pagelist, dma_addr_t *dma_addr,   in radeon_gart_bind() argument
  299  for (i = 0; i < pages; i++, p++) {   in radeon_gart_bind()
  300  rdev->gart.pages[p] = pagelist[i];   in radeon_gart_bind()
  330  if (rdev->gart.pages) {   in radeon_gart_init()
  347  rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);   in radeon_gart_init()
  348  if (rdev->gart.pages == NULL) {   in radeon_gart_init()
  [all …]
/drivers/firewire/

D | core-iso.c |
  48   buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),   in fw_iso_buffer_alloc()
  50   if (buffer->pages == NULL)   in fw_iso_buffer_alloc()
  54   buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);   in fw_iso_buffer_alloc()
  55   if (buffer->pages[i] == NULL)   in fw_iso_buffer_alloc()
  76   address = dma_map_page(card->device, buffer->pages[i],   in fw_iso_buffer_map_dma()
  81   set_page_private(buffer->pages[i], address);   in fw_iso_buffer_map_dma()
  115  err = vm_insert_page(vma, uaddr, buffer->pages[i]);   in fw_iso_buffer_map_vma()
  132  address = page_private(buffer->pages[i]);   in fw_iso_buffer_destroy()
  137  __free_page(buffer->pages[i]);   in fw_iso_buffer_destroy()
  139  kfree(buffer->pages);   in fw_iso_buffer_destroy()
  [all …]
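fw_iso_buffer_map_dma() streams each page through dma_map_page() and stashes the returned bus address in the page's private field, so fw_iso_buffer_destroy() can recover it with page_private() without keeping a parallel array. The mapping side, as a sketch with assumed dev/direction parameters:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/mm.h>

    static int demo_map_pages_dma(struct device *dev, struct page **pages,
                                  int page_count, enum dma_data_direction dir)
    {
        int i;

        for (i = 0; i < page_count; i++) {
            dma_addr_t addr = dma_map_page(dev, pages[i], 0,
                                           PAGE_SIZE, dir);

            if (dma_mapping_error(dev, addr))
                return -ENOMEM;    /* caller unmaps pages [0..i) */

            set_page_private(pages[i], addr);   /* read back at teardown */
        }
        return 0;
    }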