/drivers/gpu/drm/virtio/ |
D | virtgpu_gem.c |
    165  struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)  in virtio_gpu_array_alloc() argument
    169  objs = kmalloc(struct_size(objs, objs, nents), GFP_KERNEL);  in virtio_gpu_array_alloc()
    173  objs->nents = 0;  in virtio_gpu_array_alloc()
    174  objs->total = nents;  in virtio_gpu_array_alloc()
    184  virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)  in virtio_gpu_array_from_handles() argument
    189  objs = virtio_gpu_array_alloc(nents);  in virtio_gpu_array_from_handles()
    193  for (i = 0; i < nents; i++) {  in virtio_gpu_array_from_handles()
    196  objs->nents = i;  in virtio_gpu_array_from_handles()
    201  objs->nents = i;  in virtio_gpu_array_from_handles()
    208  if (WARN_ON_ONCE(objs->nents == objs->total))  in virtio_gpu_array_add_obj()
    [all …]
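The virtgpu hits above follow the flexible-array idiom: nents sizes the trailing objs[] array through struct_size(), and the object then tracks both the filled count (objs->nents) and the capacity (objs->total). A minimal sketch of that idiom, with hypothetical names (obj_array, obj_array_alloc) rather than the driver's own types:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical container mirroring the pattern in the hits above:
 * a fixed header plus a flexible array sized by "nents". */
struct obj_array {
	u32 nents;	/* entries filled so far */
	u32 total;	/* entries allocated */
	void *objs[];	/* flexible array member */
};

static struct obj_array *obj_array_alloc(u32 nents)
{
	struct obj_array *arr;

	/* struct_size() computes sizeof(*arr) + nents * sizeof(arr->objs[0])
	 * with overflow checking. */
	arr = kmalloc(struct_size(arr, objs, nents), GFP_KERNEL);
	if (!arr)
		return NULL;

	arr->nents = 0;		/* nothing filled yet */
	arr->total = nents;	/* capacity requested by the caller */
	return arr;
}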
|
D | virtgpu_object.c |
    138  unsigned int *nents)  in virtio_gpu_object_shmem_init() argument
    150  *nents = pages->nents;  in virtio_gpu_object_shmem_init()
    152  *nents = pages->orig_nents;  in virtio_gpu_object_shmem_init()
    154  *ents = kvmalloc_array(*nents,  in virtio_gpu_object_shmem_init()
    188  unsigned int nents;  in virtio_gpu_object_create() local
    205  ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);  in virtio_gpu_object_create()
    226  ents, nents);  in virtio_gpu_object_create()
    230  virtio_gpu_object_attach(vgdev, bo, ents, nents);  in virtio_gpu_object_create()
    234  virtio_gpu_object_attach(vgdev, bo, ents, nents);  in virtio_gpu_object_create()
|
/drivers/crypto/ccree/ |
D | cc_buffer_mgr.c |
    25   int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];  member
    77   unsigned int nents = 0;  in cc_get_sgl_nents() local
    82   nents++;  in cc_get_sgl_nents()
    90   dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);  in cc_get_sgl_nents()
    91   return nents;  in cc_get_sgl_nents()
    109  u32 nents;  in cc_copy_sg_portion() local
    111  nents = sg_nents_for_len(sg, end);  in cc_copy_sg_portion()
    112  sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,  in cc_copy_sg_portion()
    236  unsigned int nents, struct scatterlist *sgl,  in cc_add_sg_entry() argument
    243  index, nents, sgl, data_len, is_last_table);  in cc_add_sg_entry()
    [all …]
|
/drivers/gpu/drm/i915/ |
D | i915_scatterlist.c |
    21   if (orig_st->nents == orig_st->orig_nents)  in i915_sg_trim()
    24   if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))  in i915_sg_trim()
    28   for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {  in i915_sg_trim()
    113  st->nents = 0;  in i915_rsgt_from_mm_node()
    122  if (st->nents)  in i915_rsgt_from_mm_node()
    130  st->nents++;  in i915_rsgt_from_mm_node()
    199  st->nents = 0;  in i915_rsgt_from_buddy_resource()
    212  if (st->nents)  in i915_rsgt_from_buddy_resource()
    220  st->nents++;  in i915_rsgt_from_buddy_resource()
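i915_sg_trim() compares st->nents (entries actually in use) with st->orig_nents (entries the table was allocated with) and, when they differ, reallocates the table at the smaller size. A reduced sketch of the same idea, assuming it runs before the table is DMA-mapped; sg_table_trim() is an illustrative name, not an existing kernel helper:

#include <linux/errno.h>
#include <linux/scatterlist.h>

static int sg_table_trim(struct sg_table *sgt)
{
	struct sg_table trimmed;
	struct scatterlist *src, *dst;
	unsigned int i;

	if (sgt->nents == sgt->orig_nents)
		return 0;	/* already tight, nothing to do */

	if (sg_alloc_table(&trimmed, sgt->nents, GFP_KERNEL))
		return -ENOMEM;

	/* Copy only the used entries into the smaller table. */
	dst = trimmed.sgl;
	for_each_sg(sgt->sgl, src, sgt->nents, i) {
		sg_set_page(dst, sg_page(src), src->length, src->offset);
		dst = sg_next(dst);
	}

	sg_free_table(sgt);	/* drop the oversized table */
	*sgt = trimmed;
	return 0;
}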
|
/drivers/spi/ |
D | spi-dw-dma.c |
    292  u32 nents;  in dw_spi_dma_wait_tx_done() local
    294  nents = dw_readl(dws, DW_SPI_TXFLR);  in dw_spi_dma_wait_tx_done()
    296  delay.value = nents * dws->n_bytes * BITS_PER_BYTE;  in dw_spi_dma_wait_tx_done()
    340  unsigned int nents)  in dw_spi_dma_submit_tx() argument
    346  txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,  in dw_spi_dma_submit_tx()
    377  u32 nents;  in dw_spi_dma_wait_rx_done() local
    388  nents = dw_readl(dws, DW_SPI_RXFLR);  in dw_spi_dma_wait_rx_done()
    389  ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;  in dw_spi_dma_wait_rx_done()
    441  unsigned int nents)  in dw_spi_dma_submit_rx() argument
    447  rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,  in dw_spi_dma_submit_rx()
    [all …]
|
D | spi-ep93xx.c |
    285  int i, ret, nents;  in ep93xx_spi_dma_prepare() local
    325  nents = DIV_ROUND_UP(len, PAGE_SIZE);  in ep93xx_spi_dma_prepare()
    326  if (nents != sgt->nents) {  in ep93xx_spi_dma_prepare()
    329  ret = sg_alloc_table(sgt, nents, GFP_KERNEL);  in ep93xx_spi_dma_prepare()
    335  for_each_sg(sgt->sgl, sg, sgt->nents, i) {  in ep93xx_spi_dma_prepare()
    355  nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);  in ep93xx_spi_dma_prepare()
    356  if (!nents)  in ep93xx_spi_dma_prepare()
    359  txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction,  in ep93xx_spi_dma_prepare()
    362  dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);  in ep93xx_spi_dma_prepare()
    391  dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);  in ep93xx_spi_dma_finish()
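The ep93xx hits show the usual slave-DMA preparation sequence: derive nents with DIV_ROUND_UP(), allocate the table, describe the buffer one page per entry, map it, and build the descriptor from the mapped count returned by dma_map_sg() rather than from sgt->nents. A hedged sketch of that sequence; prep_tx_sg() and its parameters are invented for illustration, and the caller is assumed to own and later free the sg_table:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

static struct dma_async_tx_descriptor *
prep_tx_sg(struct dma_chan *chan, struct sg_table *sgt, u8 *buf, size_t len)
{
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *txd;
	struct scatterlist *sg;
	unsigned int nents = DIV_ROUND_UP(len, PAGE_SIZE);
	int i, mapped;

	if (sg_alloc_table(sgt, nents, GFP_KERNEL))
		return NULL;

	/* One page-sized chunk per entry; the last entry may be shorter. */
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t chunk = min_t(size_t, len, PAGE_SIZE);

		sg_set_buf(sg, buf, chunk);
		buf += chunk;
		len -= chunk;
	}

	mapped = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	if (!mapped)
		return NULL;

	/* The descriptor walks the mapped segments, not sgt->nents entries. */
	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, mapped, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	return txd;
}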
|
/drivers/hwtracing/intel_th/ |
D | msu-sink.c |
    54   unsigned int nents;  in msu_sink_alloc_window() local
    62   nents = DIV_ROUND_UP(size, PAGE_SIZE);  in msu_sink_alloc_window()
    64   ret = sg_alloc_table(*sgt, nents, GFP_KERNEL);  in msu_sink_alloc_window()
    70   for_each_sg((*sgt)->sgl, sg_ptr, nents, i) {  in msu_sink_alloc_window()
    80   return nents;  in msu_sink_alloc_window()
    90   for_each_sg(sgt->sgl, sg_ptr, sgt->nents, i) {  in msu_sink_free_window()
|
/drivers/parisc/ |
D | iommu-helpers.h |
    15   iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,  in iommu_fill_pdir() argument
    30   while (nents-- > 0) {  in iommu_fill_pdir()
    34   DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,  in iommu_fill_pdir()
    102  struct scatterlist *startsg, int nents,  in iommu_coalesce_chunks() argument
    114  while (nents > 0) {  in iommu_coalesce_chunks()
    131  while(--nents > 0) {  in iommu_coalesce_chunks()
|
D | sba_iommu.c |
    278   sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)  in sba_dump_sg() argument
    280   while (nents-- > 0) {  in sba_dump_sg()
    282   nents,  in sba_dump_sg()
    950   sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,  in sba_map_sg() argument
    957   DBG_RUN_SG("%s() START %d entries\n", __func__, nents);  in sba_map_sg()
    964   if (nents == 1) {  in sba_map_sg()
    976   sba_dump_sg(ioc, sglist, nents);  in sba_map_sg()
    993   iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);  in sba_map_sg()
    1003  filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);  in sba_map_sg()
    1011  sba_dump_sg(ioc, sglist, nents);  in sba_map_sg()
    [all …]
|
D | ccio-dma.c |
    903   ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,  in ccio_map_sg() argument
    918   DBG_RUN_SG("%s() START %d entries\n", __func__, nents);  in ccio_map_sg()
    921   if (nents == 1) {  in ccio_map_sg()
    929   for(i = 0; i < nents; i++)  in ccio_map_sg()
    946   coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);  in ccio_map_sg()
    956   filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);  in ccio_map_sg()
    983   ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,  in ccio_unmap_sg() argument
    996   __func__, nents, sg_virt(sglist), sglist->length);  in ccio_unmap_sg()
    1002  while (nents && sg_dma_len(sglist)) {  in ccio_unmap_sg()
    1010  nents--;  in ccio_unmap_sg()
    [all …]
|
/drivers/target/iscsi/cxgbit/ |
D | cxgbit_ddp.c |
    153  unsigned int nents)  in cxgbit_ddp_sgl_check() argument
    155  unsigned int last_sgidx = nents - 1;  in cxgbit_ddp_sgl_check()
    158  for (i = 0; i < nents; i++, sg = sg_next(sg)) {  in cxgbit_ddp_sgl_check()
    177  unsigned int sgcnt = ttinfo->nents;  in cxgbit_ddp_reserve()
    184  xferlen, ttinfo->nents);  in cxgbit_ddp_reserve()
    246  ttinfo->nents = cmd->se_cmd.t_data_nents;  in cxgbit_get_r2t_ttt()
    251  csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);  in cxgbit_get_r2t_ttt()
    254  ttinfo->nents = 0;  in cxgbit_get_r2t_ttt()
    286  ttinfo->nents, DMA_FROM_DEVICE);  in cxgbit_unmap_cmd()
    287  ttinfo->nents = 0;  in cxgbit_unmap_cmd()
|
/drivers/crypto/cavium/nitrox/ |
D | nitrox_req.h |
    555  static inline void *alloc_req_buf(int nents, int extralen, gfp_t gfp)  in alloc_req_buf() argument
    559  size = sizeof(struct scatterlist) * nents;  in alloc_req_buf()
    621  int nents, int ivsize)  in alloc_src_req_buf() argument
    625  nkreq->src = alloc_req_buf(nents, ivsize, creq->gfp);  in alloc_src_req_buf()
    643  int nents, int ivsize,  in nitrox_creq_set_src_sg() argument
    652  sg_init_table(sg, nents);  in nitrox_creq_set_src_sg()
    667  int nents)  in alloc_dst_req_buf() argument
    672  nkreq->dst = alloc_req_buf(nents, extralen, creq->gfp);  in alloc_dst_req_buf()
    701  int nents, int ivsize,  in nitrox_creq_set_dst_sg() argument
    710  sg_init_table(sg, nents);  in nitrox_creq_set_dst_sg()
|
D | nitrox_aead.c |
    97   int nents = sg_nents_for_len(src, buflen);  in alloc_src_sglist() local
    100  if (nents < 0)  in alloc_src_sglist()
    101  return nents;  in alloc_src_sglist()
    104  nents += 1;  in alloc_src_sglist()
    106  ret = alloc_src_req_buf(nkreq, nents, ivsize);  in alloc_src_sglist()
    111  nitrox_creq_set_src_sg(nkreq, nents, ivsize, src, buflen);  in alloc_src_sglist()
    119  int nents = sg_nents_for_len(dst, buflen);  in alloc_dst_sglist() local
    122  if (nents < 0)  in alloc_dst_sglist()
    123  return nents;  in alloc_dst_sglist()
    126  nents += 3;  in alloc_dst_sglist()
    [all …]
|
D | nitrox_reqmgr.c |
    163  int i, nents, ret = 0;  in dma_map_inbufs() local
    165  nents = dma_map_sg(dev, req->src, sg_nents(req->src),  in dma_map_inbufs()
    167  if (!nents)  in dma_map_inbufs()
    170  for_each_sg(req->src, sg, nents, i)  in dma_map_inbufs()
    174  sr->in.sgmap_cnt = nents;  in dma_map_inbufs()
    191  int nents, ret = 0;  in dma_map_outbufs() local
    193  nents = dma_map_sg(dev, req->dst, sg_nents(req->dst),  in dma_map_outbufs()
    195  if (!nents)  in dma_map_outbufs()
    199  sr->out.sgmap_cnt = nents;  in dma_map_outbufs()
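The nitrox mapping code passes sg_nents() (the CPU-side entry count of a possibly chained list) into dma_map_sg() and stores the returned, possibly smaller, segment count for building the hardware SG list. A small sketch of that bookkeeping; struct sg_mapping and the helpers are hypothetical, the point being that unmapping takes the original count again while the device-visible walk uses the mapped one:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

struct sg_mapping {
	struct scatterlist *sgl;
	int orig_nents;		/* count handed to dma_map_sg() */
	int mapped_nents;	/* count dma_map_sg() returned */
};

static int map_src(struct device *dev, struct sg_mapping *m,
		   struct scatterlist *src)
{
	m->sgl = src;
	m->orig_nents = sg_nents(src);

	m->mapped_nents = dma_map_sg(dev, src, m->orig_nents,
				     DMA_BIDIRECTIONAL);
	if (!m->mapped_nents)
		return -EIO;

	return 0;
}

static void unmap_src(struct device *dev, struct sg_mapping *m)
{
	/* Unmap with the original entry count, not the mapped one. */
	dma_unmap_sg(dev, m->sgl, m->orig_nents, DMA_BIDIRECTIONAL);
	m->mapped_nents = 0;
}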
|
/drivers/mmc/core/ |
D | sdio_ops.c |
    122  unsigned int nents, left_size, i;  in mmc_io_rw_extended() local
    152  nents = DIV_ROUND_UP(left_size, seg_size);  in mmc_io_rw_extended()
    153  if (nents > 1) {  in mmc_io_rw_extended()
    154  if (sg_alloc_table(&sgtable, nents, GFP_KERNEL))  in mmc_io_rw_extended()
    158  data.sg_len = nents;  in mmc_io_rw_extended()
    196  if (nents > 1)  in mmc_io_rw_extended()
|
/drivers/iommu/ |
D | dma-iommu.c |
    534   int nents, enum dma_data_direction dir)  in dev_use_sg_swiotlb() argument
    551   for_each_sg(sg, s, nents, i)  in dev_use_sg_swiotlb()
    1123  static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,  in __finalise_sg() argument
    1131  for_each_sg(sg, s, nents, i) {  in __finalise_sg()
    1191  static void __invalidate_sg(struct scatterlist *sg, int nents)  in __invalidate_sg() argument
    1196  for_each_sg(sg, s, nents, i) {  in __invalidate_sg()
    1211  int nents, enum dma_data_direction dir, unsigned long attrs)  in iommu_dma_unmap_sg_swiotlb() argument
    1216  for_each_sg(sg, s, nents, i)  in iommu_dma_unmap_sg_swiotlb()
    1222  int nents, enum dma_data_direction dir, unsigned long attrs)  in iommu_dma_map_sg_swiotlb() argument
    1229  for_each_sg(sg, s, nents, i) {  in iommu_dma_map_sg_swiotlb()
    [all …]
|
/drivers/infiniband/hw/usnic/ |
D | usnic_uiom.c |
    74   for_each_sg(chunk->page_list, sg, chunk->nents, i) {  in usnic_uiom_put_pages()
    160  chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);  in usnic_uiom_get_pages()
    161  sg_init_table(chunk->page_list, chunk->nents);  in usnic_uiom_get_pages()
    162  for_each_sg(chunk->page_list, sg, chunk->nents, i) {  in usnic_uiom_get_pages()
    169  cur_base += chunk->nents * PAGE_SIZE;  in usnic_uiom_get_pages()
    170  ret -= chunk->nents;  in usnic_uiom_get_pages()
    171  off += chunk->nents;  in usnic_uiom_get_pages()
    259  for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {  in usnic_uiom_map_sorted_intervals()
    310  if (i == chunk->nents) {  in usnic_uiom_map_sorted_intervals()
|
/drivers/infiniband/core/ |
D | rw.c |
    97   u32 nents = min(sg_cnt, pages_per_mr);  in rdma_rw_init_one_mr() local
    106  ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);  in rdma_rw_init_one_mr()
    107  if (ret < 0 || ret < nents) {  in rdma_rw_init_one_mr()
    142  u32 nents = min(sg_cnt, pages_per_mr);  in rdma_rw_init_mr_wrs() local
    174  sg_cnt -= nents;  in rdma_rw_init_mr_wrs()
    175  for (j = 0; j < nents; j++)  in rdma_rw_init_mr_wrs()
    306  sg_cnt = sgt.nents;  in rdma_rw_ctx_init()
    418  ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sgt.nents, NULL, prot_sg,  in rdma_rw_ctx_signature_init()
    419  prot_sgt.nents, NULL, SZ_4K);  in rdma_rw_ctx_signature_init()
    422  sgt.nents + prot_sgt.nents);  in rdma_rw_ctx_signature_init()
    [all …]
|
/drivers/gpu/drm/i915/selftests/ |
D | scatterlist.c |
    51   for_each_sg(pt->st.sgl, sg, pt->st.nents, n) {  in expect_pfn_sg()
    53   unsigned int npages = npages_fn(n, pt->st.nents, rnd);  in expect_pfn_sg()
    89   for_each_sg_page(pt->st.sgl, &sgiter, pt->st.nents, 0) {  in expect_pfn_sg_page_iter()
    263  pt->st.nents = n;  in alloc_table()
    350  pt.st.nents != prime) {  in igt_sg_trim()
    352  pt.st.nents, pt.st.orig_nents, prime);  in igt_sg_trim()
|
/drivers/staging/media/ipu3/ |
D | ipu3-dmamap.c |
    185  int nents, struct imgu_css_map *map)  in imgu_dmamap_map_sg() argument
    193  for_each_sg(sglist, sg, nents, i) {  in imgu_dmamap_map_sg()
    197  if (i != nents - 1 && !PAGE_ALIGNED(sg->length))  in imgu_dmamap_map_sg()
    205  nents, size >> shift);  in imgu_dmamap_map_sg()
    216  sglist, nents) < size)  in imgu_dmamap_map_sg()
|
/drivers/usb/storage/ |
D | protocol.c |
    128  unsigned int nents = scsi_sg_count(srb);  in usb_stor_access_xfer_buf() local
    131  nents = sg_nents(sg);  in usb_stor_access_xfer_buf()
    135  sg_miter_start(&miter, sg, nents, dir == FROM_XFER_BUF ?  in usb_stor_access_xfer_buf()
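usb_stor_access_xfer_buf() iterates the transfer buffer with the sg_miter API, counting the entries with scsi_sg_count() or sg_nents() first. A self-contained sketch of that access pattern; copy_buf_to_sg() is an illustrative name, not the usb-storage helper itself:

#include <linux/minmax.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/types.h>

static size_t copy_buf_to_sg(struct scatterlist *sgl, const u8 *buf,
			     size_t buflen)
{
	struct sg_mapping_iter miter;
	unsigned int nents = sg_nents(sgl);
	size_t done = 0;

	/* SG_MITER_TO_SG: data flows from "buf" into the scatterlist. */
	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
	while (done < buflen && sg_miter_next(&miter)) {
		size_t len = min(miter.length, buflen - done);

		memcpy(miter.addr, buf + done, len);
		done += len;
	}
	sg_miter_stop(&miter);

	return done;
}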
|
/drivers/crypto/bcm/ |
D | util.c |
    54   unsigned int nents = sg_nents(src);  in sg_copy_part_to_buf() local
    56   copied = sg_pcopy_to_buffer(src, nents, dest, len, skip);  in sg_copy_part_to_buf()
    60   flow_log("sg with %u entries and skip %u\n", nents, skip);  in sg_copy_part_to_buf()
    73   unsigned int nents = sg_nents(dest);  in sg_copy_part_from_buf() local
    75   copied = sg_pcopy_from_buffer(dest, nents, src, len, skip);  in sg_copy_part_from_buf()
    79   flow_log("sg with %u entries and skip %u\n", nents, skip);  in sg_copy_part_from_buf()
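The bcm util.c helpers above wrap sg_pcopy_to_buffer()/sg_pcopy_from_buffer(), which copy a byte range out of or into a scatterlist starting at a byte offset ("skip") and return the number of bytes actually copied. A short sketch of the read direction; read_middle() is an invented wrapper name:

#include <linux/errno.h>
#include <linux/scatterlist.h>

static int read_middle(struct scatterlist *src, void *dest,
		       size_t len, off_t skip)
{
	unsigned int nents = sg_nents(src);
	size_t copied = sg_pcopy_to_buffer(src, nents, dest, len, skip);

	/* A short copy means the list ended before skip + len bytes. */
	return copied == len ? 0 : -EMSGSIZE;
}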
|
/drivers/crypto/qce/ |
D | dma.c |
    78   int nents, unsigned long flags,  in qce_dma_prep_sg() argument
    85   if (!sg || !nents)  in qce_dma_prep_sg()
    88   desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);  in qce_dma_prep_sg()
|
/drivers/xen/ |
D | grant-dma-ops.c |
    235  int nents, enum dma_data_direction dir,  in xen_grant_dma_unmap_sg() argument
    244  for_each_sg(sg, s, nents, i)  in xen_grant_dma_unmap_sg()
    250  int nents, enum dma_data_direction dir,  in xen_grant_dma_map_sg() argument
    259  for_each_sg(sg, s, nents, i) {  in xen_grant_dma_map_sg()
    268  return nents;  in xen_grant_dma_map_sg()
|
/drivers/dma/hsu/ |
D | hsu.c |
    85   count = desc->nents - desc->active;  in hsu_dma_chan_start()
    229  } else if (desc->active < desc->nents) {  in hsu_dma_do_irq()
    244  static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)  in hsu_dma_alloc_desc() argument
    252  desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);  in hsu_dma_alloc_desc()
    290  desc->nents = sg_len;  in hsu_dma_prep_slave_sg()
    315  for (i = desc->active; i < desc->nents; i++)  in hsu_dma_active_desc_size()
|