/drivers/gpu/drm/tegra/ |
D | uapi.c |
    17   struct tegra_drm_mapping *mapping =  in tegra_drm_mapping_release() local
    20   if (mapping->sgt)  in tegra_drm_mapping_release()
    21   dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,  in tegra_drm_mapping_release()
    24   host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);  in tegra_drm_mapping_release()
    25   host1x_bo_put(mapping->bo);  in tegra_drm_mapping_release()
    27   kfree(mapping);  in tegra_drm_mapping_release()
    30   void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)  in tegra_drm_mapping_put() argument
    32   kref_put(&mapping->ref, tegra_drm_mapping_release);  in tegra_drm_mapping_put()
    37   struct tegra_drm_mapping *mapping;  in tegra_drm_channel_context_close() local
    40   xa_for_each(&context->mappings, id, mapping)  in tegra_drm_channel_context_close()
    [all …]
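The uapi.c hits above show the usual kref lifetime pattern: tegra_drm_mapping_put() drops a reference and the release callback unmaps, unpins and frees the mapping. A minimal sketch of that pattern follows; the demo_mapping type and demo_* helpers are hypothetical, only the kref_init()/kref_put() calls are the real <linux/kref.h> API.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical mapping object; only the kref usage mirrors the hits above. */
struct demo_mapping {
	struct kref ref;
	void *bo;	/* stand-in for the pinned buffer object */
};

static void demo_mapping_release(struct kref *ref)
{
	struct demo_mapping *mapping = container_of(ref, struct demo_mapping, ref);

	/* A real driver would unmap and unpin the buffer here first. */
	kfree(mapping);
}

static struct demo_mapping *demo_mapping_create(void)
{
	struct demo_mapping *mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);

	if (!mapping)
		return NULL;

	kref_init(&mapping->ref);	/* refcount starts at 1 */
	return mapping;
}

static void demo_mapping_put(struct demo_mapping *mapping)
{
	/* The release callback runs only when the last reference is dropped. */
	kref_put(&mapping->ref, demo_mapping_release);
}

Every holder calls demo_mapping_put() when it is done; whichever caller drops the last reference triggers the release.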
D | submit.c |
    119  struct tegra_drm_mapping *mapping;  in tegra_drm_mapping_get() local
    123  mapping = xa_load(&context->mappings, id);  in tegra_drm_mapping_get()
    124  if (mapping)  in tegra_drm_mapping_get()
    125  kref_get(&mapping->ref);  in tegra_drm_mapping_get()
    129  return mapping;  in tegra_drm_mapping_get()
    205  struct drm_tegra_submit_buf *buf, struct tegra_drm_mapping *mapping)  in submit_write_reloc() argument
    208  dma_addr_t iova = mapping->iova + buf->reloc.target_offset;  in submit_write_reloc()
    258  struct tegra_drm_mapping *mapping;  in submit_process_bufs() local
    266  mapping = tegra_drm_mapping_get(context, buf->mapping);  in submit_process_bufs()
    267  if (!mapping) {  in submit_process_bufs()
    [all …]
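submit.c resolves the mapping ID passed by userspace with xa_load() and takes a reference while the context lock is held, so the mapping cannot be released between the lookup and the kref_get(). A hedged sketch of that lookup, with hypothetical demo_* types standing in for the driver's structures:

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/xarray.h>

/* Hypothetical context and mapping types. */
struct demo_mapping {
	struct kref ref;
};

struct demo_context {
	struct mutex lock;
	struct xarray mappings;		/* ID -> struct demo_mapping * */
};

static struct demo_mapping *demo_mapping_get(struct demo_context *context, u32 id)
{
	struct demo_mapping *mapping;

	mutex_lock(&context->lock);

	mapping = xa_load(&context->mappings, id);
	if (mapping)
		kref_get(&mapping->ref);	/* caller now owns a reference */

	mutex_unlock(&context->lock);

	return mapping;
}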
/drivers/gpu/drm/panfrost/ |
D | panfrost_gem.c |
    59  struct panfrost_gem_mapping *iter, *mapping = NULL;  in panfrost_gem_mapping_get() local
    65  mapping = iter;  in panfrost_gem_mapping_get()
    71  return mapping;  in panfrost_gem_mapping_get()
    75  panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)  in panfrost_gem_teardown_mapping() argument
    77  if (mapping->active)  in panfrost_gem_teardown_mapping()
    78  panfrost_mmu_unmap(mapping);  in panfrost_gem_teardown_mapping()
    80  spin_lock(&mapping->mmu->mm_lock);  in panfrost_gem_teardown_mapping()
    81  if (drm_mm_node_allocated(&mapping->mmnode))  in panfrost_gem_teardown_mapping()
    82  drm_mm_remove_node(&mapping->mmnode);  in panfrost_gem_teardown_mapping()
    83  spin_unlock(&mapping->mmu->mm_lock);  in panfrost_gem_teardown_mapping()
    [all …]
D | panfrost_mmu.c |
    288  int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)  in panfrost_mmu_map() argument
    290  struct panfrost_gem_object *bo = mapping->obj;  in panfrost_mmu_map()
    296  if (WARN_ON(mapping->active))  in panfrost_mmu_map()
    306  mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,  in panfrost_mmu_map()
    308  mapping->active = true;  in panfrost_mmu_map()
    313  void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)  in panfrost_mmu_unmap() argument
    315  struct panfrost_gem_object *bo = mapping->obj;  in panfrost_mmu_unmap()
    318  struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;  in panfrost_mmu_unmap()
    319  u64 iova = mapping->mmnode.start << PAGE_SHIFT;  in panfrost_mmu_unmap()
    320  size_t len = mapping->mmnode.size << PAGE_SHIFT;  in panfrost_mmu_unmap()
    [all …]
D | panfrost_perfcnt.c |
    29   struct panfrost_gem_mapping *mapping;  member
    53   gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;  in panfrost_perfcnt_dump_locked()
    101  perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),  in panfrost_perfcnt_enable_locked()
    103  if (!perfcnt->mapping) {  in panfrost_perfcnt_enable_locked()
    132  as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);  in panfrost_perfcnt_enable_locked()
    169  panfrost_gem_mapping_put(perfcnt->mapping);  in panfrost_perfcnt_enable_locked()
    197  drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, &map);  in panfrost_perfcnt_disable_locked()
    199  panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);  in panfrost_perfcnt_disable_locked()
    200  panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);  in panfrost_perfcnt_disable_locked()
    201  panfrost_gem_mapping_put(perfcnt->mapping);  in panfrost_perfcnt_disable_locked()
    [all …]
/drivers/media/usb/uvc/ |
D | uvc_ctrl.c |
    380  static s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping,  in uvc_ctrl_get_zoom() argument
    398  static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping,  in uvc_ctrl_set_zoom() argument
    405  static s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping,  in uvc_ctrl_get_rel_speed() argument
    408  unsigned int first = mapping->offset / 8;  in uvc_ctrl_get_rel_speed()
    425  static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping,  in uvc_ctrl_set_rel_speed() argument
    428  unsigned int first = mapping->offset / 8;  in uvc_ctrl_set_rel_speed()
    759  static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,  in uvc_get_le_value() argument
    762  int bits = mapping->size;  in uvc_get_le_value()
    763  int offset = mapping->offset;  in uvc_get_le_value()
    784  if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)  in uvc_get_le_value()
    [all …]
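uvc_get_le_value() reads a field of mapping->size bits at bit offset mapping->offset from the little-endian control payload and sign-extends it when the data type is signed. The following is a simplified sketch of that idea, not the driver function; demo_get_le_bits() and its per-bit loop are illustrative only.

#include <linux/types.h>

/*
 * Pull 'bits' bits starting at bit 'offset' out of a little-endian control
 * payload and optionally sign-extend the result.
 */
static s32 demo_get_le_bits(const u8 *data, unsigned int offset,
			    unsigned int bits, bool is_signed)
{
	u32 value = 0;
	unsigned int i;

	for (i = 0; i < bits; i++) {
		unsigned int bit = offset + i;

		if (data[bit / 8] & (1U << (bit % 8)))
			value |= 1U << i;
	}

	/* Sign-extend when the control reports a signed data type. */
	if (is_signed && bits < 32 && (value & (1U << (bits - 1))))
		value |= ~0U << bits;

	return (s32)value;
}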
/drivers/gpu/drm/exynos/ |
D | exynos_drm_dma.c |
    67   ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);  in drm_iommu_attach_device()
    69   ret = iommu_attach_device(priv->mapping, subdrv_dev);  in drm_iommu_attach_device()
    93   iommu_detach_device(priv->mapping, subdrv_dev);  in drm_iommu_detach_device()
    110  if (!priv->mapping) {  in exynos_drm_register_dma()
    111  void *mapping = NULL;  in exynos_drm_register_dma() local
    114  mapping = arm_iommu_create_mapping(&platform_bus_type,  in exynos_drm_register_dma()
    117  mapping = iommu_get_domain_for_dev(priv->dma_dev);  in exynos_drm_register_dma()
    119  if (!mapping)  in exynos_drm_register_dma()
    121  priv->mapping = mapping;  in exynos_drm_register_dma()
    141  arm_iommu_release_mapping(priv->mapping);  in exynos_drm_cleanup_dma()
    [all …]
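The exynos hits show the driver attaching the device either through the legacy ARM DMA-IOMMU mapping or through the generic IOMMU API, depending on the kernel configuration. A rough sketch of that decision, assuming CONFIG_ARM_DMA_USE_IOMMU selects the first path; demo_iommu_attach() and the void * mapping argument are illustrative only.

#include <linux/device.h>
#include <linux/iommu.h>
#ifdef CONFIG_ARM_DMA_USE_IOMMU
#include <asm/dma-iommu.h>
#endif

/*
 * On ARM with the legacy DMA-IOMMU layer the dma_iommu_mapping is used;
 * otherwise the device is attached to a generic iommu_domain. The mapping
 * argument is kept as void * here only to show both calls side by side.
 */
static int demo_iommu_attach(struct device *dev, void *mapping)
{
#ifdef CONFIG_ARM_DMA_USE_IOMMU
	return arm_iommu_attach_device(dev, mapping);
#else
	return iommu_attach_device(mapping, dev);
#endif
}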
/drivers/gpu/drm/etnaviv/ |
D | etnaviv_gem.c |
    218  struct etnaviv_vram_mapping *mapping;  in etnaviv_gem_get_vram_mapping() local
    220  list_for_each_entry(mapping, &obj->vram_list, obj_node) {  in etnaviv_gem_get_vram_mapping()
    221  if (mapping->context == context)  in etnaviv_gem_get_vram_mapping()
    222  return mapping;  in etnaviv_gem_get_vram_mapping()
    228  void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)  in etnaviv_gem_mapping_unreference() argument
    230  struct etnaviv_gem_object *etnaviv_obj = mapping->object;  in etnaviv_gem_mapping_unreference()
    233  WARN_ON(mapping->use == 0);  in etnaviv_gem_mapping_unreference()
    234  mapping->use -= 1;  in etnaviv_gem_mapping_unreference()
    245  struct etnaviv_vram_mapping *mapping;  in etnaviv_gem_mapping_get() local
    250  mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);  in etnaviv_gem_mapping_get()
    [all …]
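etnaviv keeps one VRAM mapping per MMU context on a per-object list and tracks how many submits use it with a plain use counter under the object lock. A minimal sketch of that bookkeeping, with hypothetical demo_* types:

#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mutex.h>

/* Hypothetical types; only the list walk and use counting mirror the hits. */
struct demo_vram_mapping {
	struct list_head obj_node;
	void *context;		/* MMU context this mapping belongs to */
	unsigned int use;	/* number of submits currently using it */
};

struct demo_gem_object {
	struct mutex lock;
	struct list_head vram_list;
};

/* Caller holds obj->lock. */
static struct demo_vram_mapping *
demo_get_vram_mapping(struct demo_gem_object *obj, void *context)
{
	struct demo_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

static void demo_mapping_unreference(struct demo_gem_object *obj,
				     struct demo_vram_mapping *mapping)
{
	mutex_lock(&obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&obj->lock);
}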
D | etnaviv_mmu.c |
    123  struct etnaviv_vram_mapping *mapping)  in etnaviv_iommu_remove_mapping() argument
    125  struct etnaviv_gem_object *etnaviv_obj = mapping->object;  in etnaviv_iommu_remove_mapping()
    129  etnaviv_iommu_unmap(context, mapping->vram_node.start,  in etnaviv_iommu_remove_mapping()
    131  drm_mm_remove_node(&mapping->vram_node);  in etnaviv_iommu_remove_mapping()
    230  struct etnaviv_vram_mapping *mapping, u64 va)  in etnaviv_iommu_map_gem() argument
    247  mapping->iova = iova;  in etnaviv_iommu_map_gem()
    248  list_add_tail(&mapping->mmu_node, &context->mappings);  in etnaviv_iommu_map_gem()
    254  node = &mapping->vram_node;  in etnaviv_iommu_map_gem()
    265  mapping->iova = node->start;  in etnaviv_iommu_map_gem()
    274  list_add_tail(&mapping->mmu_node, &context->mappings);  in etnaviv_iommu_map_gem()
    [all …]
D | etnaviv_cmdbuf.c |
    65   struct etnaviv_vram_mapping *mapping,  in etnaviv_cmdbuf_suballoc_map() argument
    68   return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base,  in etnaviv_cmdbuf_suballoc_map()
    73   struct etnaviv_vram_mapping *mapping)  in etnaviv_cmdbuf_suballoc_unmap() argument
    75   etnaviv_iommu_put_suballoc_va(context, mapping);  in etnaviv_cmdbuf_suballoc_unmap()
    134  struct etnaviv_vram_mapping *mapping)  in etnaviv_cmdbuf_get_va() argument
    136  return mapping->iova + buf->suballoc_offset;  in etnaviv_cmdbuf_get_va()
/drivers/sh/clk/ |
D | core.c |
    340  struct clk_mapping *mapping = clk->mapping;  in clk_establish_mapping() local
    345  if (!mapping) {  in clk_establish_mapping()
    352  clk->mapping = &dummy_mapping;  in clk_establish_mapping()
    361  mapping = clkp->mapping;  in clk_establish_mapping()
    362  BUG_ON(!mapping);  in clk_establish_mapping()
    368  if (!mapping->base && mapping->phys) {  in clk_establish_mapping()
    369  kref_init(&mapping->ref);  in clk_establish_mapping()
    371  mapping->base = ioremap(mapping->phys, mapping->len);  in clk_establish_mapping()
    372  if (unlikely(!mapping->base))  in clk_establish_mapping()
    374  } else if (mapping->base) {  in clk_establish_mapping()
    [all …]
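clk_establish_mapping() lazily ioremap()s the clock's register window the first time a clock needs it and reference-counts the mapping so sibling clocks can share it. A simplified sketch, assuming a hypothetical demo_clk_mapping type with the same fields; error handling is reduced to a single -ENXIO return.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kref.h>

/* Hypothetical type mirroring the fields used by clk_establish_mapping(). */
struct demo_clk_mapping {
	phys_addr_t phys;
	unsigned long len;
	void __iomem *base;
	struct kref ref;
};

static int demo_establish_mapping(struct demo_clk_mapping *mapping)
{
	if (!mapping->base && mapping->phys) {
		/* First user: map the register window and start the refcount. */
		kref_init(&mapping->ref);

		mapping->base = ioremap(mapping->phys, mapping->len);
		if (!mapping->base)
			return -ENXIO;
	} else if (mapping->base) {
		/* Already mapped by a sibling clock: just take a reference. */
		kref_get(&mapping->ref);
	}

	return 0;
}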
/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt_xdp.c |
    27   dma_addr_t mapping, u32 len)  in bnxt_xmit_bd() argument
    42   txbd->tx_bd_haddr = cpu_to_le64(mapping);  in bnxt_xmit_bd()
    50   dma_addr_t mapping, u32 len, u16 rx_prod)  in __bnxt_xmit_xdp() argument
    54   tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);  in __bnxt_xmit_xdp()
    61   dma_addr_t mapping, u32 len,  in __bnxt_xmit_xdp_redirect() argument
    66   tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);  in __bnxt_xmit_xdp_redirect()
    69   dma_unmap_addr_set(tx_buf, mapping, mapping);  in __bnxt_xmit_xdp_redirect()
    90   dma_unmap_addr(tx_buf, mapping),  in bnxt_tx_int_xdp()
    121  dma_addr_t mapping;  in bnxt_rx_xdp() local
    134  mapping = rx_buf->mapping - bp->rx_dma_offset;  in bnxt_rx_xdp()
    [all …]
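The bnxt_xdp.c hits show the transmit path writing the DMA address into the little-endian descriptor and recording it with dma_unmap_addr_set() so the completion path can unmap it later. A small sketch of those two steps with hypothetical descriptor and buffer-info types:

#include <asm/byteorder.h>
#include <linux/dma-mapping.h>

/* Hypothetical descriptor and buffer-info types. */
struct demo_tx_bd {
	__le64 tx_bd_haddr;
};

struct demo_tx_buf {
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

static void demo_fill_tx_bd(struct demo_tx_bd *txbd, struct demo_tx_buf *tx_buf,
			    dma_addr_t mapping)
{
	/* The NIC reads the descriptor as little-endian. */
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	/* Remember the DMA address so the completion path can unmap it. */
	dma_unmap_addr_set(tx_buf, mapping, mapping);
}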
/drivers/net/wireless/marvell/mwifiex/ |
D | util.h |
    69  struct mwifiex_dma_mapping *mapping)  in mwifiex_store_mapping() argument
    73  memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));  in mwifiex_store_mapping()
    77  struct mwifiex_dma_mapping *mapping)  in mwifiex_get_mapping() argument
    81  memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));  in mwifiex_get_mapping()
    86  struct mwifiex_dma_mapping mapping;  in MWIFIEX_SKB_DMA_ADDR() local
    88  mwifiex_get_mapping(skb, &mapping);  in MWIFIEX_SKB_DMA_ADDR()
    90  return mapping.addr;  in MWIFIEX_SKB_DMA_ADDR()
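util.h stores the DMA address and length for an skb directly in the skb's control buffer (skb->cb), so they can be read back at unmap time without a lookup table. A sketch of that pattern; struct demo_dma_mapping and the demo_* helpers are hypothetical stand-ins for the mwifiex types.

#include <linux/build_bug.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical stand-in for the mapping record kept in skb->cb. */
struct demo_dma_mapping {
	dma_addr_t addr;
	size_t len;
};

static void demo_store_mapping(struct sk_buff *skb,
			       const struct demo_dma_mapping *mapping)
{
	/* skb->cb is only 48 bytes; make sure the record fits. */
	BUILD_BUG_ON(sizeof(*mapping) > sizeof(skb->cb));
	memcpy(skb->cb, mapping, sizeof(*mapping));
}

static void demo_get_mapping(const struct sk_buff *skb,
			     struct demo_dma_mapping *mapping)
{
	memcpy(mapping, skb->cb, sizeof(*mapping));
}

static dma_addr_t demo_skb_dma_addr(const struct sk_buff *skb)
{
	struct demo_dma_mapping mapping;

	demo_get_mapping(skb, &mapping);
	return mapping.addr;
}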
/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_vm.c |
    1876  struct amdgpu_bo_va_mapping *mapping;  in amdgpu_vm_bo_update() local
    1931  list_for_each_entry(mapping, &bo_va->invalids, list) {  in amdgpu_vm_bo_update()
    1937  if (!(mapping->flags & AMDGPU_PTE_READABLE))  in amdgpu_vm_bo_update()
    1939  if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))  in amdgpu_vm_bo_update()
    1943  amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);  in amdgpu_vm_bo_update()
    1945  trace_amdgpu_vm_bo_update(mapping);  in amdgpu_vm_bo_update()
    1948  resv, mapping->start,  in amdgpu_vm_bo_update()
    1949  mapping->last, update_flags,  in amdgpu_vm_bo_update()
    1950  mapping->offset, mem,  in amdgpu_vm_bo_update()
    1976  list_for_each_entry(mapping, &bo_va->valids, list)  in amdgpu_vm_bo_update()
    [all …]
D | amdgpu_trace.h |
    245  struct amdgpu_bo_va_mapping *mapping),
    246  TP_ARGS(bo_va, mapping),
    257  __entry->start = mapping->start;
    258  __entry->last = mapping->last;
    259  __entry->offset = mapping->offset;
    260  __entry->flags = mapping->flags;
    269  struct amdgpu_bo_va_mapping *mapping),
    270  TP_ARGS(bo_va, mapping),
    281  __entry->start = mapping->start;
    282  __entry->last = mapping->last;
    [all …]
/drivers/gpu/drm/i915/gem/ |
D | i915_gem_shmem.c |
    34   struct address_space *mapping;  in shmem_get_pages() local
    76   mapping = obj->base.filp->f_mapping;  in shmem_get_pages()
    77   mapping_set_unevictable(mapping);  in shmem_get_pages()
    78   noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);  in shmem_get_pages()
    93   page = shmem_read_mapping_page_gfp(mapping, i, gfp);  in shmem_get_pages()
    115  gfp = mapping_gfp_mask(mapping);  in shmem_get_pages()
    210  mapping_clear_unevictable(mapping);  in shmem_get_pages()
    257  struct address_space *mapping;  in shmem_writeback() local
    273  mapping = obj->base.filp->f_mapping;  in shmem_writeback()
    279  page = find_lock_page(mapping, i);  in shmem_writeback()
    [all …]
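The i915 shmem hits mark the backing address_space unevictable and first try to populate pages with a gfp mask that has reclaim stripped out, falling back to the mapping's full gfp mask when that fails. A simplified sketch of that allocation strategy; demo_read_page() collapses the driver's retry loop into a single fallback.

#include <linux/err.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

static struct page *demo_read_page(struct address_space *mapping, pgoff_t index)
{
	gfp_t noreclaim;
	struct page *page;

	/* Keep these pages off the LRU while the device might be using them. */
	mapping_set_unevictable(mapping);

	/* Fast path: allocate without entering reclaim. */
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	page = shmem_read_mapping_page_gfp(mapping, index, noreclaim);
	if (IS_ERR(page)) {
		/* Fallback: allow the mapping's full gfp mask, reclaim included. */
		page = shmem_read_mapping_page_gfp(mapping, index,
						   mapping_gfp_mask(mapping));
	}

	return page;
}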
/drivers/net/ethernet/dec/tulip/ |
D | interrupt.c |
    70   dma_addr_t mapping;  in tulip_refill_rx() local
    77   mapping = dma_map_single(&tp->pdev->dev, skb->data,  in tulip_refill_rx()
    79   if (dma_mapping_error(&tp->pdev->dev, mapping)) {  in tulip_refill_rx()
    85   tp->rx_buffers[entry].mapping = mapping;  in tulip_refill_rx()
    87   tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);  in tulip_refill_rx()
    214  tp->rx_buffers[entry].mapping,  in tulip_poll()
    227  tp->rx_buffers[entry].mapping,  in tulip_poll()
    235  if (tp->rx_buffers[entry].mapping !=  in tulip_poll()
    240  (unsigned long long)tp->rx_buffers[entry].mapping,  in tulip_poll()
    246  tp->rx_buffers[entry].mapping,  in tulip_poll()
    [all …]
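tulip_refill_rx() maps each fresh receive buffer with dma_map_single(), checks dma_mapping_error() before touching the descriptor, and only then publishes the address to the hardware ring. A generic sketch of that refill step with hypothetical ring and buffer types:

#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Hypothetical per-slot bookkeeping for the receive ring. */
struct demo_rx_buffer {
	struct sk_buff *skb;
	dma_addr_t mapping;
};

static int demo_refill_one(struct device *dma_dev, struct demo_rx_buffer *buf,
			   __le32 *desc_buffer1, unsigned int buf_len)
{
	dma_addr_t mapping;

	mapping = dma_map_single(dma_dev, buf->skb->data, buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, mapping)) {
		/* Keep the skb so the caller can retry the refill later. */
		return -ENOMEM;
	}

	buf->mapping = mapping;
	*desc_buffer1 = cpu_to_le32(mapping);	/* descriptor is little-endian */

	return 0;
}

Checking dma_mapping_error() before writing the descriptor matters: publishing an unmapped or bogus address would let the NIC DMA into an invalid location.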
/drivers/mfd/ |
D | htc-pasic3.c |
    22   void __iomem *mapping;  member
    38   void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);  in pasic3_write_register()
    39   void __iomem *data = asic->mapping + (REG_DATA << bus_shift);  in pasic3_write_register()
    53   void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);  in pasic3_read_register()
    54   void __iomem *data = asic->mapping + (REG_DATA << bus_shift);  in pasic3_read_register()
    153  asic->mapping = ioremap(r->start, resource_size(r));  in pasic3_probe()
    154  if (!asic->mapping) {  in pasic3_probe()
    191  iounmap(asic->mapping);  in pasic3_remove()
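The pasic3 registers are reached indirectly: one ioremap()ed window holds an address register and a data register spaced by a bus-specific shift, and every access writes the register number first. A sketch of that access pattern; the DEMO_REG_* offsets and the plain readb()/writeb() accessors are illustrative, not the driver's exact code.

#include <linux/io.h>

#define DEMO_REG_ADDR	0
#define DEMO_REG_DATA	1

static void demo_write_register(void __iomem *mapping, unsigned int bus_shift,
				u8 reg, u8 val)
{
	void __iomem *addr = mapping + (DEMO_REG_ADDR << bus_shift);
	void __iomem *data = mapping + (DEMO_REG_DATA << bus_shift);

	writeb(reg, addr);	/* select the register ... */
	writeb(val, data);	/* ... then write its value */
}

static u8 demo_read_register(void __iomem *mapping, unsigned int bus_shift,
			     u8 reg)
{
	void __iomem *addr = mapping + (DEMO_REG_ADDR << bus_shift);
	void __iomem *data = mapping + (DEMO_REG_DATA << bus_shift);

	writeb(reg, addr);	/* select the register ... */
	return readb(data);	/* ... then read its value back */
}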
/drivers/net/xen-netback/ |
D | hash.c |
    329  memset(vif->hash.mapping[vif->hash.mapping_sel], 0,  in xenvif_set_hash_mapping_size()
    338  u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];  in xenvif_set_hash_mapping() local
    344  .len = len * sizeof(*mapping),  in xenvif_set_hash_mapping()
    349  len > XEN_PAGE_SIZE / sizeof(*mapping))  in xenvif_set_hash_mapping()
    352  copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);  in xenvif_set_hash_mapping()
    353  copy_op[0].dest.offset = xen_offset_in_page(mapping + off);  in xenvif_set_hash_mapping()
    357  copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);  in xenvif_set_hash_mapping()
    364  memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],  in xenvif_set_hash_mapping()
    365  vif->hash.size * sizeof(*mapping));  in xenvif_set_hash_mapping()
    376  if (mapping[off++] >= vif->num_queues)  in xenvif_set_hash_mapping()
    [all …]
/drivers/gpu/drm/vmwgfx/ |
D | vmwgfx_page_dirty.c |
    87   struct address_space *mapping = vbo->base.bdev->dev_mapping;  in vmw_bo_dirty_scan_pagetable() local
    91   (mapping,  in vmw_bo_dirty_scan_pagetable()
    103  wp_shared_mapping_range(mapping,  in vmw_bo_dirty_scan_pagetable()
    105  clean_record_shared_mapping_range(mapping,  in vmw_bo_dirty_scan_pagetable()
    125  struct address_space *mapping = vbo->base.bdev->dev_mapping;  in vmw_bo_dirty_scan_mkwrite() local
    147  clean_record_shared_mapping_range(mapping, offset, end, offset,  in vmw_bo_dirty_scan_mkwrite()
    191  struct address_space *mapping = vbo->base.bdev->dev_mapping;  in vmw_bo_dirty_pre_unmap() local
    196  wp_shared_mapping_range(mapping, start + offset, end - start);  in vmw_bo_dirty_pre_unmap()
    197  clean_record_shared_mapping_range(mapping, start + offset,  in vmw_bo_dirty_pre_unmap()
    215  struct address_space *mapping = vbo->base.bdev->dev_mapping;  in vmw_bo_dirty_unmap() local
    [all …]
/drivers/net/wwan/iosm/ |
D | iosm_ipc_pcie.c |
    490  size_t size, dma_addr_t *mapping, int direction)  in ipc_pcie_addr_map() argument
    493  *mapping = dma_map_single(&ipc_pcie->pci->dev, data, size,  in ipc_pcie_addr_map()
    495  if (dma_mapping_error(&ipc_pcie->pci->dev, *mapping)) {  in ipc_pcie_addr_map()
    504  dma_addr_t mapping, int direction)  in ipc_pcie_addr_unmap() argument
    506  if (!mapping)  in ipc_pcie_addr_unmap()
    509  dma_unmap_single(&ipc_pcie->pci->dev, mapping, size, direction);  in ipc_pcie_addr_unmap()
    527  IPC_CB(skb)->mapping = 0;  in ipc_pcie_alloc_local_skb()
    533  gfp_t flags, dma_addr_t *mapping,  in ipc_pcie_alloc_skb() argument
    544  if (ipc_pcie_addr_map(ipc_pcie, skb->data, size, mapping, direction)) {  in ipc_pcie_alloc_skb()
    552  IPC_CB(skb)->mapping = *mapping;  in ipc_pcie_alloc_skb()
    [all …]
/drivers/nvdimm/ |
D | region_devs.c |
    71   struct nd_mapping *nd_mapping = &nd_region->mapping[i];  in nd_region_activate()
    98   struct nd_mapping *nd_mapping = &nd_region->mapping[i];  in nd_region_activate()
    130  struct nd_mapping *nd_mapping = &nd_region->mapping[i];  in nd_region_release()
    201  struct nd_mapping *nd_mapping = &nd_region->mapping[i];  in nd_region_to_nstype()
    224  struct nd_mapping *nd_mapping = &nd_region->mapping[0];  in region_size()
    312  struct nd_mapping *nd_mapping = &nd_region->mapping[0];  in set_cookie_show()
    344  struct nd_mapping *nd_mapping = &nd_region->mapping[i];  in nd_region_available_dpa()
    375  struct nd_mapping *nd_mapping = &nd_region->mapping[i];  in nd_region_allocatable_dpa()
    714  nd_mapping = &nd_region->mapping[n];  in mappingN()
    723  static ssize_t mapping##idx##_show(struct device *dev, \
    [all …]
/drivers/iommu/ |
D | virtio-iommu.c |
    317  struct viommu_mapping *mapping;  in viommu_add_mapping() local
    319  mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);  in viommu_add_mapping()
    320  if (!mapping)  in viommu_add_mapping()
    323  mapping->paddr = paddr;  in viommu_add_mapping()
    324  mapping->iova.start = iova;  in viommu_add_mapping()
    325  mapping->iova.last = iova + size - 1;  in viommu_add_mapping()
    326  mapping->flags = flags;  in viommu_add_mapping()
    329  interval_tree_insert(&mapping->iova, &vdomain->mappings);  in viommu_add_mapping()
    351  struct viommu_mapping *mapping = NULL;  in viommu_del_mappings() local
    358  mapping = container_of(node, struct viommu_mapping, iova);  in viommu_del_mappings()
    [all …]
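viommu_add_mapping() records each IOVA range as an interval-tree node keyed by an inclusive [start, last] pair, which keeps overlap lookups and deletions cheap. A sketch of that bookkeeping; struct demo_iommu_mapping is hypothetical, but interval_tree_insert() is the real <linux/interval_tree.h> API.

#include <linux/errno.h>
#include <linux/interval_tree.h>
#include <linux/slab.h>

/* Hypothetical mapping record; the interval_tree calls are the real API. */
struct demo_iommu_mapping {
	phys_addr_t paddr;
	u32 flags;
	struct interval_tree_node iova;	/* .start/.last hold the IOVA range */
};

static int demo_add_mapping(struct rb_root_cached *mappings, unsigned long iova,
			    phys_addr_t paddr, size_t size, u32 flags)
{
	struct demo_iommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr = paddr;
	mapping->flags = flags;
	mapping->iova.start = iova;
	mapping->iova.last = iova + size - 1;	/* inclusive end */

	interval_tree_insert(&mapping->iova, mappings);

	return 0;
}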
/drivers/infiniband/ulp/ipoib/ |
D | ipoib_ib.c |
    94   u64 mapping[IPOIB_UD_RX_SG])  in ipoib_ud_dma_unmap_rx()
    96   ib_dma_unmap_single(priv->ca, mapping[0],  in ipoib_ud_dma_unmap_rx()
    107  priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];  in ipoib_ib_post_receive()
    108  priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];  in ipoib_ib_post_receive()
    114  ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);  in ipoib_ib_post_receive()
    127  u64 *mapping;  in ipoib_alloc_rx_skb() local
    141  mapping = priv->rx_ring[id].mapping;  in ipoib_alloc_rx_skb()
    142  mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,  in ipoib_alloc_rx_skb()
    144  if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))  in ipoib_alloc_rx_skb()
    178  u64 mapping[IPOIB_UD_RX_SG];  in ipoib_ib_handle_rx_wc() local
    [all …]
/drivers/dax/ |
D | bus.c |
    609  struct dax_mapping *mapping = to_dax_mapping(dev);  in dax_mapping_release() local
    613  ida_free(&dev_dax->ida, mapping->id);  in dax_mapping_release()
    614  kfree(mapping);  in dax_mapping_release()
    621  struct dax_mapping *mapping = to_dax_mapping(dev);  in unregister_dax_mapping() local
    629  dev_dax->ranges[mapping->range_id].mapping = NULL;  in unregister_dax_mapping()
    630  mapping->range_id = -1;  in unregister_dax_mapping()
    638  struct dax_mapping *mapping = to_dax_mapping(dev);  in get_dax_range() local
    643  if (mapping->range_id < 0) {  in get_dax_range()
    648  return &dev_dax->ranges[mapping->range_id];  in get_dax_range()
    653  struct dax_mapping *mapping = dax_range->mapping;  in put_dax_range() local
    [all …]