| /kernel/linux/linux-5.10/crypto/async_tx/ |
| D | async_xor.c |
      23    do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,    in do_async_xor() argument
      32    int src_cnt = unmap->to_cnt;    in do_async_xor()
      34    dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];    in do_async_xor()
      35    dma_addr_t *src_list = unmap->addr;    in do_async_xor()
      63    if (src_list > unmap->addr)    in do_async_xor()
      66    xor_src_cnt, unmap->len,    in do_async_xor()
      77    xor_src_cnt, unmap->len,    in do_async_xor()
      82    dma_set_unmap(tx, unmap);    in do_async_xor()
      190   struct dmaengine_unmap_data *unmap = NULL;    in async_xor_offs() local
      195   unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);    in async_xor_offs()
      [all …]
|
| D | async_pq.c |
      37    struct dmaengine_unmap_data *unmap,    in do_async_gen_syndrome() argument
      76    dma_dest[0] = unmap->addr[disks - 2];    in do_async_gen_syndrome()
      77    dma_dest[1] = unmap->addr[disks - 1];    in do_async_gen_syndrome()
      79    &unmap->addr[src_off],    in do_async_gen_syndrome()
      81    &scfs[src_off], unmap->len,    in do_async_gen_syndrome()
      89    dma_set_unmap(tx, unmap);    in do_async_gen_syndrome()
      185   struct dmaengine_unmap_data *unmap = NULL;    in async_gen_syndrome() local
      190   unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);    in async_gen_syndrome()
      193   if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&    in async_gen_syndrome()
      209   unmap->len = len;    in async_gen_syndrome()
      [all …]
|
| D | async_memcpy.c |
      40    struct dmaengine_unmap_data *unmap = NULL;    in async_memcpy() local
      43    unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);    in async_memcpy()
      45    if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {    in async_memcpy()
      53    unmap->to_cnt = 1;    in async_memcpy()
      54    unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,    in async_memcpy()
      56    unmap->from_cnt = 1;    in async_memcpy()
      57    unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,    in async_memcpy()
      59    unmap->len = len;    in async_memcpy()
      61    tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],    in async_memcpy()
      62    unmap->addr[0], len,    in async_memcpy()
      [all …]
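
The async_tx hits in this section (async_xor.c, async_pq.c, async_memcpy.c, async_raid6_recov.c) all follow the same dmaengine bookkeeping pattern: acquire a struct dmaengine_unmap_data, record the DMA-mapped pages in it, attach it to the descriptor with dma_set_unmap(), and let the descriptor completion drop the last reference. A minimal sketch of that pattern for the memcpy case; the function name sketch_async_copy is hypothetical and error handling is abbreviated, but the dmaengine calls are the ones used above.

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Sketch only: assumes "chan" is a DMA channel with memcpy capability and
 * that src/dest are pages owned by the caller. */
static dma_cookie_t sketch_async_copy(struct dma_chan *chan,
				      struct page *dest, struct page *src,
				      size_t len)
{
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;

	/* One source and one destination address to track. */
	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->addr[0] = dma_map_page(device->dev, src, 0, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(device->dev, dest, 0, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->len = len;

	tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					    unmap->addr[0], len, 0);
	if (!tx) {
		dmaengine_unmap_put(unmap);	/* last ref: pages get unmapped */
		return -ENOMEM;
	}

	/* The descriptor takes its own reference; the pages are unmapped
	 * when the transfer completes. */
	dma_set_unmap(tx, unmap);
	cookie = dmaengine_submit(tx);
	dmaengine_unmap_put(unmap);		/* drop our local reference */
	return cookie;
}
```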
|
| D | async_raid6_recov.c |
      25    struct dmaengine_unmap_data *unmap = NULL;    in async_sum_product() local
      31    unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);    in async_sum_product()
      33    if (unmap) {    in async_sum_product()
      41    unmap->addr[0] = dma_map_page(dev, srcs[0], src_offs[0],    in async_sum_product()
      43    unmap->addr[1] = dma_map_page(dev, srcs[1], src_offs[1],    in async_sum_product()
      45    unmap->to_cnt = 2;    in async_sum_product()
      47    unmap->addr[2] = dma_map_page(dev, dest, d_off,    in async_sum_product()
      49    unmap->bidi_cnt = 1;    in async_sum_product()
      51    pq[1] = unmap->addr[2];    in async_sum_product()
      53    unmap->len = len;    in async_sum_product()
      [all …]
|
| /kernel/linux/linux-6.6/crypto/async_tx/ |
| D | async_xor.c |
      23    do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,    in do_async_xor() argument
      32    int src_cnt = unmap->to_cnt;    in do_async_xor()
      34    dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];    in do_async_xor()
      35    dma_addr_t *src_list = unmap->addr;    in do_async_xor()
      63    if (src_list > unmap->addr)    in do_async_xor()
      66    xor_src_cnt, unmap->len,    in do_async_xor()
      77    xor_src_cnt, unmap->len,    in do_async_xor()
      82    dma_set_unmap(tx, unmap);    in do_async_xor()
      190   struct dmaengine_unmap_data *unmap = NULL;    in async_xor_offs() local
      195   unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);    in async_xor_offs()
      [all …]
|
| D | async_pq.c |
      37    struct dmaengine_unmap_data *unmap,    in do_async_gen_syndrome() argument
      76    dma_dest[0] = unmap->addr[disks - 2];    in do_async_gen_syndrome()
      77    dma_dest[1] = unmap->addr[disks - 1];    in do_async_gen_syndrome()
      79    &unmap->addr[src_off],    in do_async_gen_syndrome()
      81    &scfs[src_off], unmap->len,    in do_async_gen_syndrome()
      89    dma_set_unmap(tx, unmap);    in do_async_gen_syndrome()
      185   struct dmaengine_unmap_data *unmap = NULL;    in async_gen_syndrome() local
      190   unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);    in async_gen_syndrome()
      193   if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&    in async_gen_syndrome()
      209   unmap->len = len;    in async_gen_syndrome()
      [all …]
|
| D | async_memcpy.c |
      40    struct dmaengine_unmap_data *unmap = NULL;    in async_memcpy() local
      43    unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);    in async_memcpy()
      45    if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {    in async_memcpy()
      53    unmap->to_cnt = 1;    in async_memcpy()
      54    unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,    in async_memcpy()
      56    unmap->from_cnt = 1;    in async_memcpy()
      57    unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,    in async_memcpy()
      59    unmap->len = len;    in async_memcpy()
      61    tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],    in async_memcpy()
      62    unmap->addr[0], len,    in async_memcpy()
      [all …]
|
| D | async_raid6_recov.c |
      25    struct dmaengine_unmap_data *unmap = NULL;    in async_sum_product() local
      31    unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);    in async_sum_product()
      33    if (unmap) {    in async_sum_product()
      41    unmap->addr[0] = dma_map_page(dev, srcs[0], src_offs[0],    in async_sum_product()
      43    unmap->addr[1] = dma_map_page(dev, srcs[1], src_offs[1],    in async_sum_product()
      45    unmap->to_cnt = 2;    in async_sum_product()
      47    unmap->addr[2] = dma_map_page(dev, dest, d_off,    in async_sum_product()
      49    unmap->bidi_cnt = 1;    in async_sum_product()
      51    pq[1] = unmap->addr[2];    in async_sum_product()
      53    unmap->len = len;    in async_sum_product()
      [all …]
|
| /kernel/linux/linux-6.6/tools/testing/selftests/mm/ |
| D | ksm_functional_tests.c |
      157   goto unmap;    in mmap_and_merge_range()
      162   goto unmap;    in mmap_and_merge_range()
      175   goto unmap;    in mmap_and_merge_range()
      183   goto unmap;    in mmap_and_merge_range()
      190   goto unmap;    in mmap_and_merge_range()
      193   goto unmap;    in mmap_and_merge_range()
      197   goto unmap;    in mmap_and_merge_range()
      203   goto unmap;    in mmap_and_merge_range()
      212   goto unmap;    in mmap_and_merge_range()
      216   unmap:    in mmap_and_merge_range()
      [all …]
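
The repeated "goto unmap;" hits here (and in the mlock2 tests below) are the usual selftest cleanup idiom: every failure after a successful mmap() jumps to a single label that munmap()s the region. A stripped-down userspace sketch of that shape; the helper name and the specific checks are illustrative, not the actual test logic.

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

/* Illustrative only: returns a prepared mapping or MAP_FAILED. */
static char *map_and_prepare(size_t size)
{
	char *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED)
		return MAP_FAILED;

	if (madvise(map, size, MADV_MERGEABLE)) {
		perror("MADV_MERGEABLE");
		goto unmap;	/* every later failure unwinds through here */
	}

	/* ... further setup steps, each ending in "goto unmap" on error ... */

	return map;
unmap:
	munmap(map, size);
	return MAP_FAILED;
}
```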
|
| D | mlock2-tests.c |
      209   goto unmap;    in test_mlock_lock()
      213   goto unmap;    in test_mlock_lock()
      218   goto unmap;    in test_mlock_lock()
      223   unmap:    in test_mlock_lock()
      272   goto unmap;    in test_mlock_onfault()
      276   goto unmap;    in test_mlock_onfault()
      285   goto unmap;    in test_mlock_onfault()
      289   unmap:    in test_mlock_onfault()
      316   goto unmap;    in test_lock_onfault_of_present()
      322   goto unmap;    in test_lock_onfault_of_present()
      [all …]
|
| D | mremap_dontunmap.c |
      59    "unable to unmap destination mapping");    in kernel_support_for_mremap_dontunmap()
      63    "unable to unmap source mapping");    in kernel_support_for_mremap_dontunmap()
      122   "unable to unmap destination mapping");    in mremap_dontunmap_simple()
      124   "unable to unmap source mapping");    in mremap_dontunmap_simple()
      154   "unable to unmap source mapping");    in mremap_dontunmap_simple_shmem()
      173   "unable to unmap destination mapping");    in mremap_dontunmap_simple_shmem()
      175   "unable to unmap source mapping");    in mremap_dontunmap_simple_shmem()
      207   // The dest mapping will have been unmap by mremap so we expect the Xs    in mremap_dontunmap_simple_fixed()
      219   "unable to unmap destination mapping");    in mremap_dontunmap_simple_fixed()
      221   "unable to unmap source mapping");    in mremap_dontunmap_simple_fixed()
      [all …]
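
mremap_dontunmap.c exercises MREMAP_DONTUNMAP, which moves the pages to the new address but leaves the old range mapped instead of unmapping it, so both halves must be munmap()ed afterwards; the "unable to unmap ..." messages matched above fire when that cleanup fails. A hedged sketch of the system-call sequence being tested (not the test itself):

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4	/* older libc headers may lack this */
#endif

int main(void)
{
	const size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);
	void *src, *dst;

	src = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED)
		return 1;

	/* Move the pages but keep the source VMA mapped (DONTUNMAP). */
	dst = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, 0);
	if (dst == MAP_FAILED) {
		perror("mremap");	/* kernels before 5.7 reject DONTUNMAP */
		munmap(src, len);
		return 1;
	}

	/* Both ranges now exist and must be unmapped separately. */
	if (munmap(dst, len))
		perror("unable to unmap destination mapping");
	if (munmap(src, len))
		perror("unable to unmap source mapping");
	return 0;
}
```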
|
| /kernel/linux/linux-5.10/tools/testing/selftests/vm/ |
| D | mlock2-tests.c |
      210   goto unmap;    in test_mlock_lock()
      214   goto unmap;    in test_mlock_lock()
      219   goto unmap;    in test_mlock_lock()
      224   unmap:    in test_mlock_lock()
      273   goto unmap;    in test_mlock_onfault()
      277   goto unmap;    in test_mlock_onfault()
      286   goto unmap;    in test_mlock_onfault()
      290   unmap:    in test_mlock_onfault()
      317   goto unmap;    in test_lock_onfault_of_present()
      323   goto unmap;    in test_lock_onfault_of_present()
      [all …]
|
| D | mremap_dontunmap.c |
      62    "unable to unmap destination mapping");    in kernel_support_for_mremap_dontunmap()
      66    "unable to unmap source mapping");    in kernel_support_for_mremap_dontunmap()
      125   "unable to unmap destination mapping");    in mremap_dontunmap_simple()
      127   "unable to unmap source mapping");    in mremap_dontunmap_simple()
      159   // The dest mapping will have been unmap by mremap so we expect the Xs    in mremap_dontunmap_simple_fixed()
      171   "unable to unmap destination mapping");    in mremap_dontunmap_simple_fixed()
      173   "unable to unmap source mapping");    in mremap_dontunmap_simple_fixed()
      221   "unable to unmap destination mapping");    in mremap_dontunmap_partial_mapping()
      223   "unable to unmap source mapping");    in mremap_dontunmap_partial_mapping()
      280   "unable to unmap destination mapping");    in mremap_dontunmap_partial_mapping_overwrite()
      [all …]
|
| /kernel/linux/linux-5.10/drivers/xen/xenbus/ |
| D | xenbus_client.c |
      80    struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];    member
      92    int (*unmap)(struct xenbus_device *dev, void *vaddr);    member
      536   gnttab_set_unmap_op(&info->unmap[j],    in __xenbus_map_ring()
      543   if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))    in __xenbus_map_ring()
      548   if (info->unmap[i].status != GNTST_okay) {    in __xenbus_map_ring()
      562   * @vaddrs: addresses to unmap
      564   * Unmap memory in this domain that was imported from another domain.
      571   struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];    in xenbus_unmap_ring() local
      579   gnttab_set_unmap_op(&unmap[i], vaddrs[i],    in xenbus_unmap_ring()
      582   if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))    in xenbus_unmap_ring()
      [all …]
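
The xenbus_client.c hits show the standard teardown for granted ring pages: fill one gnttab_unmap_grant_ref per mapped page with gnttab_set_unmap_op(), hand the batch to the GNTTABOP_unmap_grant_ref hypercall, then check each per-entry status. A hedged kernel-side sketch of that sequence; the function name is hypothetical and it assumes the grant handles and virtual addresses were saved at map time.

```c
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <asm/xen/hypercall.h>

/* Sketch only: unmap "nr" grant mappings recorded when the ring was mapped. */
static int sketch_unmap_ring_grants(unsigned long *vaddrs,
				    grant_handle_t *handles, unsigned int nr)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int i;
	int err = 0;

	for (i = 0; i < nr; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	/* Batch the whole ring into a single hypercall. */
	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, nr))
		BUG();

	/* The hypercall returning 0 only means it ran; check each entry. */
	for (i = 0; i < nr; i++) {
		if (unmap[i].status != GNTST_okay) {
			pr_err("unmap of grant %u failed: %d\n",
			       i, unmap[i].status);
			err = -EINVAL;
		}
	}
	return err;
}
```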
|
| /kernel/linux/linux-6.6/drivers/xen/xenbus/ |
| D | xenbus_client.c |
      80    struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];    member
      92    int (*unmap)(struct xenbus_device *dev, void *vaddr);    member
      581   gnttab_set_unmap_op(&info->unmap[j],    in __xenbus_map_ring()
      588   BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j));    in __xenbus_map_ring()
      592   if (info->unmap[i].status != GNTST_okay) {    in __xenbus_map_ring()
      606   * @vaddrs: addresses to unmap
      608   * Unmap memory in this domain that was imported from another domain.
      615   struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];    in xenbus_unmap_ring() local
      623   gnttab_set_unmap_op(&unmap[i], vaddrs[i],    in xenbus_unmap_ring()
      626   BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i));    in xenbus_unmap_ring()
      [all …]
|
| /kernel/linux/linux-5.10/arch/arm/xen/ |
| D | p2m.c |
      97    struct gnttab_unmap_grant_ref unmap;    in set_foreign_p2m_mapping() local
      111   unmap.host_addr = map_ops[i].host_addr,    in set_foreign_p2m_mapping()
      112   unmap.handle = map_ops[i].handle;    in set_foreign_p2m_mapping()
      115   unmap.dev_bus_addr = map_ops[i].dev_bus_addr;    in set_foreign_p2m_mapping()
      117   unmap.dev_bus_addr = 0;    in set_foreign_p2m_mapping()
      123   unmap.status = 1;    in set_foreign_p2m_mapping()
      126   &unmap, 1);    in set_foreign_p2m_mapping()
      127   if (rc || unmap.status != GNTST_okay)    in set_foreign_p2m_mapping()
      128   pr_err_once("gnttab unmap failed: rc=%d st=%d\n",    in set_foreign_p2m_mapping()
      129   rc, unmap.status);    in set_foreign_p2m_mapping()
|
| /kernel/linux/linux-6.6/arch/arm/xen/ |
| D | p2m.c |
      98    struct gnttab_unmap_grant_ref unmap;    in set_foreign_p2m_mapping() local
      112   unmap.host_addr = map_ops[i].host_addr,    in set_foreign_p2m_mapping()
      113   unmap.handle = map_ops[i].handle;    in set_foreign_p2m_mapping()
      116   unmap.dev_bus_addr = map_ops[i].dev_bus_addr;    in set_foreign_p2m_mapping()
      118   unmap.dev_bus_addr = 0;    in set_foreign_p2m_mapping()
      124   unmap.status = 1;    in set_foreign_p2m_mapping()
      127   &unmap, 1);    in set_foreign_p2m_mapping()
      128   if (rc || unmap.status != GNTST_okay)    in set_foreign_p2m_mapping()
      129   pr_err_once("gnttab unmap failed: rc=%d st=%d\n",    in set_foreign_p2m_mapping()
      130   rc, unmap.status);    in set_foreign_p2m_mapping()
|
| /kernel/linux/linux-6.6/drivers/iommu/iommufd/ |
| D | vfio_compat.c |
      208   struct vfio_iommu_type1_dma_unmap unmap;    in iommufd_vfio_unmap_dma() local
      213   if (copy_from_user(&unmap, arg, minsz))    in iommufd_vfio_unmap_dma()
      216   if (unmap.argsz < minsz || unmap.flags & ~supported_flags)    in iommufd_vfio_unmap_dma()
      223   if (unmap.flags & VFIO_DMA_UNMAP_FLAG_ALL) {    in iommufd_vfio_unmap_dma()
      224   if (unmap.iova != 0 || unmap.size != 0) {    in iommufd_vfio_unmap_dma()
      236   unsigned long iovas[] = { unmap.iova + unmap.size - 1,    in iommufd_vfio_unmap_dma()
      237   unmap.iova - 1 };    in iommufd_vfio_unmap_dma()
      240   unmap.iova ? 2 : 1);    in iommufd_vfio_unmap_dma()
      244   rc = iopt_unmap_iova(&ioas->iopt, unmap.iova, unmap.size,    in iommufd_vfio_unmap_dma()
      247   unmap.size = unmapped;    in iommufd_vfio_unmap_dma()
      [all …]
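
iommufd's VFIO compatibility layer re-implements VFIO_IOMMU_UNMAP_DMA: it copies the user's vfio_iommu_type1_dma_unmap header, validates argsz and flags, then either unmaps everything (VFIO_DMA_UNMAP_FLAG_ALL, which requires iova and size to be zero) or the given iova/size range, writing the unmapped byte count back into size. From userspace, the call it is emulating looks roughly like this sketch (container setup omitted, helper name illustrative):

```c
#include <linux/vfio.h>
#include <string.h>
#include <sys/ioctl.h>

/* Illustrative: unmap [iova, iova + size) from a VFIO type1 container.
 * Returns the number of bytes actually unmapped, or -1 on error. */
static long long vfio_unmap_range(int container_fd, unsigned long long iova,
				  unsigned long long size)
{
	struct vfio_iommu_type1_dma_unmap unmap;

	memset(&unmap, 0, sizeof(unmap));
	unmap.argsz = sizeof(unmap);	/* must cover at least the minimal struct */
	unmap.flags = 0;		/* or VFIO_DMA_UNMAP_FLAG_ALL with iova = size = 0 */
	unmap.iova = iova;
	unmap.size = size;

	if (ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap))
		return -1;

	/* The kernel writes back how many bytes it actually unmapped. */
	return (long long)unmap.size;
}
```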
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/ |
| D | drm_gpuva_mgr.c |
      71    * calculate a sequence of operations to satisfy a given map or unmap request.
      80    * containing map, unmap and remap operations for a given newly requested
      87    * amount of unmap operations, a maximum of two remap operations and a single
      95    * &drm_gpuva to unmap is physically contiguous with the original mapping
      102   * one unmap operation and one or two map operations, such that drivers can
      110   * to call back into the driver in order to unmap a range of GPU VA space. The
      112   * enclosed by the given range unmap operations are created. For mappings which
      410   * &drm_gpuva_op in order to satisfy a given map or unmap request and how to
      458   * va = op->remap.unmap->va;
      488   * va = op->unmap->va;
      [all …]
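
The drm_gpuva_mgr kerneldoc matched above describes how a map or unmap request is decomposed into a list of DRM_GPUVA_OP_MAP / DRM_GPUVA_OP_REMAP / DRM_GPUVA_OP_UNMAP operations that the driver then applies. The following is only a hedged sketch of the per-operation dispatch a driver might do: the driver_* hooks are hypothetical, and the field layout (op->map, op->remap.prev/next/unmap, op->unmap.va) is assumed from the 6.6 header and the fragments shown here, not guaranteed.

```c
#include <linux/errno.h>
#include <drm/drm_gpuva_mgr.h>

/* Hypothetical driver hooks; only the dispatch shape is the point. */
int driver_bind(void *priv, struct drm_gpuva_op_map *map);
int driver_rebind(void *priv, struct drm_gpuva *orig_va,
		  struct drm_gpuva_op_map *prev, struct drm_gpuva_op_map *next);
int driver_unbind(void *priv, struct drm_gpuva *va);

static int sketch_apply_gpuva_op(struct drm_gpuva_op *op, void *priv)
{
	switch (op->op) {
	case DRM_GPUVA_OP_MAP:
		/* Create page tables for the newly requested mapping. */
		return driver_bind(priv, &op->map);
	case DRM_GPUVA_OP_REMAP:
		/* The original mapping is torn down via the embedded unmap op
		 * and replaced by up to two trimmed mappings (prev/next). */
		return driver_rebind(priv, op->remap.unmap->va,
				     op->remap.prev, op->remap.next);
	case DRM_GPUVA_OP_UNMAP:
		/* Plain removal of an existing &drm_gpuva. */
		return driver_unbind(priv, op->unmap.va);
	default:
		return -EINVAL;
	}
}
```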
|
| /kernel/linux/linux-6.6/drivers/net/ethernet/brocade/bna/ |
| D | bnad.c |
      93    struct bnad_tx_unmap *unmap;    in bnad_tx_buff_unmap() local
      97    unmap = &unmap_q[index];    in bnad_tx_buff_unmap()
      98    nvecs = unmap->nvecs;    in bnad_tx_buff_unmap()
      100   skb = unmap->skb;    in bnad_tx_buff_unmap()
      101   unmap->skb = NULL;    in bnad_tx_buff_unmap()
      102   unmap->nvecs = 0;    in bnad_tx_buff_unmap()
      104   dma_unmap_addr(&unmap->vectors[0], dma_addr),    in bnad_tx_buff_unmap()
      106   dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);    in bnad_tx_buff_unmap()
      115   unmap = &unmap_q[index];    in bnad_tx_buff_unmap()
      119   dma_unmap_addr(&unmap->vectors[vector], dma_addr),    in bnad_tx_buff_unmap()
      [all …]
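
bnad_tx_buff_unmap() walks a per-ring unmap queue, unmapping each stored DMA address and then clearing it with dma_unmap_addr_set() so it is not unmapped twice. The generic pattern behind dma_unmap_addr()/dma_unmap_addr_set() is sketched below in a driver-agnostic form (struct and function names here are illustrative, not bnad's):

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Sketch of the dma_unmap_addr() bookkeeping pattern: the fields compile
 * away on platforms that do not need unmap information. */
struct sketch_tx_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

static void sketch_unmap_tx_vector(struct device *dev,
				   struct sketch_tx_vector *vec)
{
	dma_unmap_page(dev,
		       dma_unmap_addr(vec, dma_addr),
		       dma_unmap_len(vec, dma_len),
		       DMA_TO_DEVICE);
	/* Clear the stored handle so a later pass will not unmap it again. */
	dma_unmap_addr_set(vec, dma_addr, 0);
}
```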
|
| /kernel/linux/linux-5.10/drivers/net/ethernet/brocade/bna/ |
| D | bnad.c |
      93    struct bnad_tx_unmap *unmap;    in bnad_tx_buff_unmap() local
      97    unmap = &unmap_q[index];    in bnad_tx_buff_unmap()
      98    nvecs = unmap->nvecs;    in bnad_tx_buff_unmap()
      100   skb = unmap->skb;    in bnad_tx_buff_unmap()
      101   unmap->skb = NULL;    in bnad_tx_buff_unmap()
      102   unmap->nvecs = 0;    in bnad_tx_buff_unmap()
      104   dma_unmap_addr(&unmap->vectors[0], dma_addr),    in bnad_tx_buff_unmap()
      106   dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);    in bnad_tx_buff_unmap()
      115   unmap = &unmap_q[index];    in bnad_tx_buff_unmap()
      119   dma_unmap_addr(&unmap->vectors[vector], dma_addr),    in bnad_tx_buff_unmap()
      [all …]
|
| /kernel/linux/linux-6.6/include/drm/ |
| D | drm_gpuva_mgr.h |
      347   * @DRM_GPUVA_OP_UNMAP: the unmap op type
      397   * struct drm_gpuva_op_unmap - GPU VA unmap operation
      399   * This structure represents a single unmap operation generated by the
      404   * @va: the &drm_gpuva to unmap
      428   * mapping(s), hence it consists of a maximum of two map and one unmap
      431   * The @unmap operation takes care of removing the original existing mapping.
      439   * unmap and map operations, is to give drivers the chance of extracting driver
      440   * specific data for creating the new mappings from the unmap operations's
      456   * @unmap: the unmap operation for the original existing mapping
      458   struct drm_gpuva_op_unmap *unmap;    member
      [all …]
|
| /kernel/linux/linux-6.6/drivers/xen/ |
| D | xen-front-pgdir-shbuf.c |
      60    /* Unmap grant references of the buffer. */
      61    int (*unmap)(struct xen_front_pgdir_shbuf *buf);    member
      106   * Unmap granted references of the shared buffer.
      110   * shared by the frontend itself) or unmap the provided granted
      118   if (buf->ops && buf->ops->unmap)    in xen_front_pgdir_shbuf_unmap()
      119   return buf->ops->unmap(buf);    in xen_front_pgdir_shbuf_unmap()
      121   /* No need to unmap own grant references. */    in xen_front_pgdir_shbuf_unmap()
      194   * Unmap the buffer previously mapped with grant references
      227   "Failed to unmap page %d: %d\n",    in backend_unmap()
      233   "Failed to unmap grant references, ret %d", ret);    in backend_unmap()
      [all …]
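
xen_front_pgdir_shbuf_unmap() treats the .unmap hook as optional: buffers mapped from backend grants supply one, while buffers built from the frontend's own pages leave it NULL because their grants never need unmapping. A hedged, generic sketch of that optional-callback dispatch (all names here are illustrative):

```c
/* Illustrative only: the optional-ops dispatch seen in the fragments above. */
struct shbuf;

struct shbuf_ops {
	int (*map)(struct shbuf *buf);
	int (*unmap)(struct shbuf *buf);	/* optional */
};

struct shbuf {
	const struct shbuf_ops *ops;
};

static int shbuf_unmap(struct shbuf *buf)
{
	if (buf->ops && buf->ops->unmap)
		return buf->ops->unmap(buf);

	/* No unmap hook: nothing was mapped from the other domain. */
	return 0;
}
```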
|
| /kernel/linux/linux-5.10/drivers/xen/ |
| D | xen-front-pgdir-shbuf.c |
      68    /* Unmap grant references of the buffer. */
      69    int (*unmap)(struct xen_front_pgdir_shbuf *buf);    member
      114   * Unmap granted references of the shared buffer.
      118   * shared by the frontend itself) or unmap the provided granted
      126   if (buf->ops && buf->ops->unmap)    in xen_front_pgdir_shbuf_unmap()
      127   return buf->ops->unmap(buf);    in xen_front_pgdir_shbuf_unmap()
      129   /* No need to unmap own grant references. */    in xen_front_pgdir_shbuf_unmap()
      203   * Unmap the buffer previously mapped with grant references
      236   "Failed to unmap page %d: %d\n",    in backend_unmap()
      242   "Failed to unmap grant references, ret %d", ret);    in backend_unmap()
      [all …]
|
| /kernel/linux/linux-6.6/include/linux/ |
| D | zpool.h |
      24    * changed memory back out on unmap. Write-only does not copy
      30    ZPOOL_MM_RO, /* read-only (no copy-out at unmap time) */
      69    * @unmap: unmap a handle.
      92    void (*unmap)(void *pool, unsigned long handle);    member
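
zpool's driver ops pair map with unmap: a handle is mapped to obtain a kernel pointer, and ZPOOL_MM_RO means nothing is copied back when it is unmapped again. A short hedged sketch of the caller-side pairing, using the zpool API as it appears in these trees (the helper name is illustrative):

```c
#include <linux/string.h>
#include <linux/zpool.h>

/* Sketch: read "len" bytes stored under "handle" out of a zpool.
 * ZPOOL_MM_RO means the buffer is not written back at unmap time. */
static void sketch_zpool_read(struct zpool *pool, unsigned long handle,
			      void *dst, size_t len)
{
	void *src = zpool_map_handle(pool, handle, ZPOOL_MM_RO);

	memcpy(dst, src, len);
	zpool_unmap_handle(pool, handle);	/* always pair with the map */
}
```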
|