/drivers/virt/
D | fsl_hypervisor.c
    153  struct fh_sg_list *sg_list = NULL;   in ioctl_memcpy()  (local)
    243  sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));   in ioctl_memcpy()
    264  sg_list[0].source = page_to_phys(pages[0]) + lb_offset;   in ioctl_memcpy()
    265  sg_list[0].target = param.remote_paddr;   in ioctl_memcpy()
    267  sg_list[0].source = param.remote_paddr;   in ioctl_memcpy()
    268  sg_list[0].target = page_to_phys(pages[0]) + lb_offset;   in ioctl_memcpy()
    270  sg_list[0].size = min_t(uint64_t, param.count, PAGE_SIZE - lb_offset);   in ioctl_memcpy()
    272  remote_paddr = param.remote_paddr + sg_list[0].size;   in ioctl_memcpy()
    273  count = param.count - sg_list[0].size;   in ioctl_memcpy()
    278  sg_list[i].source = page_to_phys(pages[i]);   in ioctl_memcpy()
    [all …]
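The ioctl_memcpy() hits show a physical scatter list built from pinned user pages, with entry 0 sized to the remainder of the first page (PAGE_SIZE - offset) and later entries covering a page each. A userspace sketch of that chunking; struct sg_ent and build_chunks() are illustrative stand-ins, not the driver's fh_sg_list types:

```c
/*
 * Userspace sketch of the page-boundary split in ioctl_memcpy():
 * chunk 0 covers the tail of the first page, later chunks one page
 * each.  Types and names here are stand-ins, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct sg_ent {
	uint64_t source;	/* driver: page_to_phys(page) + offset */
	uint64_t target;	/* driver: remote physical address */
	uint64_t size;
};

static unsigned int build_chunks(uint64_t vaddr, uint64_t count,
				 struct sg_ent *sg, unsigned int max)
{
	uint64_t lb_offset = vaddr & (PAGE_SIZE - 1);
	unsigned int i = 0;

	/* first entry: from the start offset to the end of the page */
	sg[i].size = count < PAGE_SIZE - lb_offset ? count
						   : PAGE_SIZE - lb_offset;
	count -= sg[i].size;
	i++;

	/* remaining entries: one page each, the last possibly short */
	while (count && i < max) {
		sg[i].size = count < PAGE_SIZE ? count : PAGE_SIZE;
		count -= sg[i].size;
		i++;
	}
	return i;
}

int main(void)
{
	struct sg_ent sg[8];
	unsigned int i, n = build_chunks(0x1000f00, 9000, sg, 8);

	for (i = 0; i < n; i++)	/* 256, 4096, 4096, 552 */
		printf("chunk %u: %llu bytes\n", i,
		       (unsigned long long)sg[i].size);
	return 0;
}
```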
|
/drivers/infiniband/hw/cxgb3/
D | iwch_qp.c
    73   if ((plen + wr->sg_list[i].length) < plen)   in build_rdma_send()
    76   plen += wr->sg_list[i].length;   in build_rdma_send()
    77   wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);   in build_rdma_send()
    78   wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);   in build_rdma_send()
    79   wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);   in build_rdma_send()
    110  if ((plen + wr->sg_list[i].length) < plen) {   in build_rdma_write()
    113  plen += wr->sg_list[i].length;   in build_rdma_write()
    115  cpu_to_be32(wr->sg_list[i].lkey);   in build_rdma_write()
    117  cpu_to_be32(wr->sg_list[i].length);   in build_rdma_write()
    119  cpu_to_be64(wr->sg_list[i].addr);   in build_rdma_write()
    [all …]
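The repeated `(plen + wr->sg_list[i].length) < plen` test is the standard unsigned-wraparound check when summing SGE lengths: for unsigned arithmetic, a sum smaller than one of its operands means the addition overflowed. A standalone demonstration:

```c
/*
 * Demonstration of the wraparound test in build_rdma_send(): for
 * unsigned types, (a + b) < a is true exactly when a + b overflowed.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t plen = 0xFFFFFF00u;	/* running total of SGE lengths */
	uint32_t len  = 0x200u;		/* next SGE length */

	if (plen + len < plen)		/* wrapped past UINT32_MAX */
		printf("overflow: total length does not fit in u32\n");
	else
		printf("total = %u\n", plen + len);
	return 0;
}
```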
|
/drivers/scsi/qla2xxx/
D | qla_bsg.c
    41   bsg_job->request_payload.sg_list,   in qla2x00_bsg_sp_free()
    46   bsg_job->reply_payload.sg_list,   in qla2x00_bsg_sp_free()
    49   dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,   in qla2x00_bsg_sp_free()
    52   dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,   in qla2x00_bsg_sp_free()
    183  bsg_job->reply_payload.sg_list,   in qla24xx_proc_fcp_prio_cfg_cmd()
    210  sg_copy_to_buffer(bsg_job->request_payload.sg_list,   in qla24xx_proc_fcp_prio_cfg_cmd()
    336  dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,   in qla2x00_process_els()
    343  rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,   in qla2x00_process_els()
    395  dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,   in qla2x00_process_els()
    397  dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,   in qla2x00_process_els()
    [all …]
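These hits pair dma_map_sg() in qla2x00_process_els() with dma_unmap_sg() on both the request and reply payloads. A kernel-style sketch of that pairing, buildable only in a kernel tree; map_bsg_payloads() is a hypothetical helper, not a driver function:

```c
/*
 * Every successful dma_map_sg() is matched by dma_unmap_sg() with the
 * same device, scatterlist, original nents, and direction.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

static int map_bsg_payloads(struct device *dev,
			    struct scatterlist *req_sg, int req_nents,
			    struct scatterlist *rsp_sg, int rsp_nents)
{
	int req_cnt, rsp_cnt;

	req_cnt = dma_map_sg(dev, req_sg, req_nents, DMA_TO_DEVICE);
	if (!req_cnt)			/* 0 means the mapping failed */
		return -ENOMEM;

	rsp_cnt = dma_map_sg(dev, rsp_sg, rsp_nents, DMA_FROM_DEVICE);
	if (!rsp_cnt) {
		/* unwind with the nents passed in, not the returned count */
		dma_unmap_sg(dev, req_sg, req_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	/* the returned counts, not nents, size the hardware SGL */
	pr_debug("mapped %d request and %d reply entries\n",
		 req_cnt, rsp_cnt);

	/* ... issue the command; on completion unmap both lists ... */
	return 0;
}
```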
|
/drivers/infiniband/hw/qib/
D | qib_ruc.c
    93   ss->sg_list = qp->r_sg_list;   in qib_init_sge()
    96   if (wqe->sg_list[i].length == 0)   in qib_init_sge()
    99   if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,   in qib_init_sge()
    100  &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))   in qib_init_sge()
    102  qp->r_len += wqe->sg_list[i].length;   in qib_init_sge()
    112  struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;   in qib_init_sge()
    437  sqp->s_sge.sge = wqe->sg_list[0];   in qib_ruc_loopback()
    438  sqp->s_sge.sg_list = wqe->sg_list + 1;   in qib_ruc_loopback()
    475  qp->r_sge.sg_list = NULL;   in qib_ruc_loopback()
    489  sqp->s_sge.sg_list = NULL;   in qib_ruc_loopback()
    [all …]
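qib_init_sge() skips zero-length SGEs and stores validated entries in a split layout: the first SGE lives directly in the state struct, the rest in an array, hence the `j ? &ss->sg_list[j - 1] : &ss->sge` selector (the same idiom recurs in the hfi1 and rdmavt hits below). A simplified model; the types are trimmed stand-ins for the rdmavt ones:

```c
/*
 * Split SGE storage: SGE 0 is kept inline in the state struct, the
 * remaining SGEs live in a separate array.
 */
#include <stdint.h>

struct sge {
	uint64_t addr;
	uint32_t length;
	uint32_t lkey;
};

struct sge_state {
	struct sge sge;		/* SGE 0, kept inline in the state */
	struct sge *sg_list;	/* SGEs 1..n-1 */
	uint8_t num_sge;
};

/* slot j of the logical SGE array, j == 0 being the inline entry */
static struct sge *sge_slot(struct sge_state *ss, unsigned int j)
{
	return j ? &ss->sg_list[j - 1] : &ss->sge;
}
```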
|
D | qib_ud.c
    181  ssge.sg_list = swqe->sg_list + 1;   in qib_ud_loopback()
    182  ssge.sge = *swqe->sg_list;   in qib_ud_loopback()
    199  *sge = *ssge.sg_list++;   in qib_ud_loopback()
    333  qp->s_sge.sge = wqe->sg_list[0];   in qib_make_ud_req()
    334  qp->s_sge.sg_list = wqe->sg_list + 1;   in qib_make_ud_req()
|
D | qib_uc.c
    100  qp->s_sge.sge = wqe->sg_list[0];   in qib_make_uc_req()
    101  qp->s_sge.sg_list = wqe->sg_list + 1;   in qib_make_uc_req()
    431  qp->r_sge.sg_list = NULL;   in qib_uc_rcv()
|
/drivers/infiniband/hw/hfi1/
D | ruc.c
    109  ss->sg_list = qp->r_sg_list;   in init_sge()
    112  if (wqe->sg_list[i].length == 0)   in init_sge()
    115  if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,   in init_sge()
    116  &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))   in init_sge()
    118  qp->r_len += wqe->sg_list[i].length;   in init_sge()
    128  struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;   in init_sge()
    439  sqp->s_sge.sge = wqe->sg_list[0];   in ruc_loopback()
    440  sqp->s_sge.sg_list = wqe->sg_list + 1;   in ruc_loopback()
    500  qp->r_sge.sg_list = NULL;   in ruc_loopback()
    514  sqp->s_sge.sg_list = NULL;   in ruc_loopback()
    [all …]
|
D | ud.c
    197  ssge.sg_list = swqe->sg_list + 1;   in ud_loopback()
    198  ssge.sge = *swqe->sg_list;   in ud_loopback()
    215  *sge = *ssge.sg_list++;   in ud_loopback()
    362  qp->s_sge.sge = wqe->sg_list[0];   in hfi1_make_ud_req()
    363  qp->s_sge.sg_list = wqe->sg_list + 1;   in hfi1_make_ud_req()
|
D | uc.c
    143  qp->s_sge.sge = wqe->sg_list[0];   in hfi1_make_uc_req()
    144  qp->s_sge.sg_list = wqe->sg_list + 1;   in hfi1_make_uc_req()
    489  qp->r_sge.sg_list = NULL;   in hfi1_uc_rcv()
|
/drivers/dma/
D | imx-dma.c
    165  struct scatterlist *sg_list;   (member)
    806  kfree(imxdmac->sg_list);   in imxdma_free_chan_resources()
    807  imxdmac->sg_list = NULL;   in imxdma_free_chan_resources()
    881  kfree(imxdmac->sg_list);   in imxdma_prep_dma_cyclic()
    883  imxdmac->sg_list = kcalloc(periods + 1,   in imxdma_prep_dma_cyclic()
    885  if (!imxdmac->sg_list)   in imxdma_prep_dma_cyclic()
    888  sg_init_table(imxdmac->sg_list, periods);   in imxdma_prep_dma_cyclic()
    891  imxdmac->sg_list[i].page_link = 0;   in imxdma_prep_dma_cyclic()
    892  imxdmac->sg_list[i].offset = 0;   in imxdma_prep_dma_cyclic()
    893  imxdmac->sg_list[i].dma_address = dma_addr;   in imxdma_prep_dma_cyclic()
    [all …]
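imxdma_prep_dma_cyclic() allocates one scatterlist entry per period (plus one for wraparound), initializes the table, and points each entry at the next period of the DMA buffer. A kernel-style sketch of the core loop; build_cyclic_sg() is a hypothetical helper, and the driver's wrap entry and error paths are elided:

```c
/*
 * One scatterlist entry per period, each covering the next period_len
 * bytes of an already-mapped DMA buffer.
 */
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *build_cyclic_sg(dma_addr_t buf, size_t period_len,
					   unsigned int periods)
{
	struct scatterlist *sgl;
	unsigned int i;

	sgl = kcalloc(periods, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, periods);	/* zeroes entries, sets end marker */
	for (i = 0; i < periods; i++) {
		sgl[i].dma_address = buf + i * period_len;
		sgl[i].length = period_len;
	}
	return sgl;
}
```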
|
/drivers/infiniband/hw/qedr/
D | qedr_cm.c
    109  qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;   in qedr_ll2_rx_cb()
    270  send_size += swr->sg_list[i].length;   in qedr_gsi_build_header()
    416  packet->payload[i].baddr = swr->sg_list[i].addr;   in qedr_gsi_build_packet()
    417  packet->payload[i].len = swr->sg_list[i].length;   in qedr_gsi_build_packet()
    539  buf.baddr = wr->sg_list[0].addr;   in qedr_gsi_post_recv()
    540  buf.len = wr->sg_list[0].length;   in qedr_gsi_post_recv()
    552  qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];   in qedr_gsi_post_recv()
    589  wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;   in qedr_gsi_poll_cq()
|
/drivers/scsi/aacraid/
D | commctrl.c
    489  void *sg_list[32];   in aac_send_raw_srb()  (local)
    516  memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */   in aac_send_raw_srb()
    569  if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {   in aac_send_raw_srb()
    628  sg_list[i] = p; // save so we can clean up later   in aac_send_raw_srb()
    679  sg_list[i] = p; // save so we can clean up later   in aac_send_raw_srb()
    733  sg_list[i] = p; // save so we can clean up later   in aac_send_raw_srb()
    769  sg_list[i] = p; // save so we can clean up later   in aac_send_raw_srb()
    812  if(copy_to_user(sg_user[i], sg_list[i], byte_count)){   in aac_send_raw_srb()
    831  kfree(sg_list[i]);   in aac_send_raw_srb()
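aac_send_raw_srb() zeroes a fixed array of buffer pointers up front, records every allocation as it is made, and frees whatever is non-NULL on exit, so a failure at any step can fall through to one cleanup loop. A userspace rendering of the idiom; process() and MAX_SG are illustrative:

```c
/*
 * Allocation-unwind pattern: track every buffer in a zeroed pointer
 * array so one cleanup loop handles both success and failure paths.
 */
#include <stdlib.h>
#include <string.h>

#define MAX_SG 32

static int process(size_t sizes[], int count)
{
	void *sg_list[MAX_SG];
	int i, rc = 0;

	memset(sg_list, 0, sizeof(sg_list));	/* cleanup may run early */

	if (count > MAX_SG)
		return -1;

	for (i = 0; i < count; i++) {
		void *p = malloc(sizes[i]);

		if (!p) {
			rc = -1;
			goto cleanup;
		}
		sg_list[i] = p;		/* save so we can clean up later */
	}
	/* ... use the buffers ... */

cleanup:
	for (i = 0; i < MAX_SG; i++)
		free(sg_list[i]);	/* free(NULL) is a no-op */
	return rc;
}
```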
|
/drivers/crypto/caam/
D | sg_sw_sec4.h
    74   static inline int sg_count(struct scatterlist *sg_list, int nbytes)   in sg_count()  (argument)
    76   int sg_nents = sg_nents_for_len(sg_list, nbytes);   in sg_count()
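sg_count() here is a thin wrapper over sg_nents_for_len(), which reports how many scatterlist entries are needed to cover a byte count, or a negative errno if the list is too short. A userspace model of that semantic; struct toy_sg and nents_for_len() are stand-ins for the scatterlist API:

```c
/*
 * Model of sg_nents_for_len(): count entries until nbytes are covered;
 * fail if the list runs out first (the kernel returns -EINVAL there).
 */
#include <stdio.h>

struct toy_sg { int length; };

static int nents_for_len(const struct toy_sg *sg, int n, int nbytes)
{
	int i;

	for (i = 0; i < n && nbytes > 0; i++)
		nbytes -= sg[i].length;

	return nbytes > 0 ? -1 : i;
}

int main(void)
{
	struct toy_sg sg[] = { { 512 }, { 1024 }, { 4096 } };

	printf("%d\n", nents_for_len(sg, 3, 1500));	/* 2 entries */
	printf("%d\n", nents_for_len(sg, 3, 9000));	/* -1: too short */
	return 0;
}
```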
|
/drivers/infiniband/hw/i40iw/
D | i40iw_verbs.c
    2011  static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)   in i40iw_copy_sg_list()  (argument)
    2016  sg_list[i].tag_off = sgl[i].addr;   in i40iw_copy_sg_list()
    2017  sg_list[i].len = sgl[i].length;   in i40iw_copy_sg_list()
    2018  sg_list[i].stag = sgl[i].lkey;   in i40iw_copy_sg_list()
    2070  info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;   in i40iw_post_send()
    2071  info.op.inline_send.len = ib_wr->sg_list[0].length;   in i40iw_post_send()
    2075  info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;   in i40iw_post_send()
    2090  info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;   in i40iw_post_send()
    2091  info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;   in i40iw_post_send()
    2094  info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;   in i40iw_post_send()
    [all …]
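i40iw_copy_sg_list() translates the generic verbs SGE (addr/length/lkey) into the hardware's field names (tag_off/len/stag). A standalone rendering; both structs are trimmed stand-ins for struct ib_sge and struct i40iw_sge:

```c
/*
 * Field-by-field translation from the generic verbs SGE layout into a
 * hardware-specific one.
 */
#include <stdint.h>

struct verbs_sge { uint64_t addr;    uint32_t length; uint32_t lkey; };
struct hw_sge    { uint64_t tag_off; uint32_t len;    uint32_t stag; };

static void copy_sg_list(struct hw_sge *sg_list,
			 const struct verbs_sge *sgl, int num_sges)
{
	int i;

	for (i = 0; i < num_sges; i++) {
		sg_list[i].tag_off = sgl[i].addr;	/* buffer address */
		sg_list[i].len     = sgl[i].length;	/* byte count */
		sg_list[i].stag    = sgl[i].lkey;	/* local memory key */
	}
}
```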
|
D | i40iw_user.h
    202  i40iw_sgl sg_list;   (member)
    212  i40iw_sgl sg_list;   (member)
    282  i40iw_sgl sg_list;   (member)
|
/drivers/infiniband/hw/cxgb4/
D | qp.c
    399  if ((plen + wr->sg_list[i].length) > max)   in build_immd()
    401  srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;   in build_immd()
    402  plen += wr->sg_list[i].length;   in build_immd()
    403  rem = wr->sg_list[i].length;   in build_immd()
    429  struct fw_ri_isgl *isglp, struct ib_sge *sg_list,   in build_isgl()  (argument)
    438  if ((plen + sg_list[i].length) < plen)   in build_isgl()
    440  plen += sg_list[i].length;   in build_isgl()
    441  *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |   in build_isgl()
    442  sg_list[i].length);   in build_isgl()
    445  *flitp = cpu_to_be64(sg_list[i].addr);   in build_isgl()
    [all …]
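build_isgl() packs each SGE into two big-endian 64-bit "flits": the first combines lkey and length, the second carries the address. A kernel-style sketch; pack_sge_flits() and the local struct sge are illustrative, not driver types:

```c
/*
 * Big-endian flit packing: (lkey << 32 | length), then addr, per SGE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct sge {
	u64 addr;
	u32 length;
	u32 lkey;
};

static void pack_sge_flits(__be64 *flitp, const struct sge *sg, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		/* upper 32 bits: lkey; lower 32 bits: byte length */
		*flitp++ = cpu_to_be64(((u64)sg[i].lkey << 32) | sg[i].length);
		*flitp++ = cpu_to_be64(sg[i].addr);
	}
}
```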
|
/drivers/xen/
D | efi.c
    225  unsigned long count, unsigned long sg_list)   in xen_efi_update_capsule()  (argument)
    235  efi_data(op).u.update_capsule.sg_list = sg_list;   in xen_efi_update_capsule()
|
/drivers/net/ethernet/ibm/ehea/
D | ehea_qmr.h
    120  struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];   (member)
    129  struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];   (member)
    146  struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];   (member)
|
/drivers/infiniband/sw/rdmavt/
D | qp.c
    413   struct rvt_sge *sge = &wqe->sg_list[i];   in rvt_clear_mr_refs()
    1454  wqe->sg_list[i] = wr->sg_list[i];   in rvt_post_recv()
    1501  wr->sg_list[0].length < sizeof(u64) ||   in rvt_qp_valid_operation()
    1502  wr->sg_list[0].addr & (sizeof(u64) - 1)))   in rvt_qp_valid_operation()
    1674  u32 length = wr->sg_list[i].length;   in rvt_post_one_wr()
    1679  ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],   in rvt_post_one_wr()
    1680  &wr->sg_list[i], acc);   in rvt_post_one_wr()
    1739  struct rvt_sge *sge = &wqe->sg_list[--j];   in rvt_post_one_wr()
    1845  wqe->sg_list[i] = wr->sg_list[i];   in rvt_post_srq_recv()
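The rvt_qp_valid_operation() hits at lines 1501-1502 enforce that an atomic work request's first SGE can hold an 8-byte result at an 8-byte-aligned address, since the old value is written back through it as a u64. A standalone version of that check; atomic_sge_ok() is a hypothetical helper:

```c
/*
 * Atomic WR validation: the first SGE must be >= 8 bytes and 8-byte
 * aligned.
 */
#include <stdint.h>

struct sge { uint64_t addr; uint32_t length; uint32_t lkey; };

static int atomic_sge_ok(const struct sge *sge)
{
	if (sge->length < sizeof(uint64_t))
		return 0;			/* too short for a u64 */
	if (sge->addr & (sizeof(uint64_t) - 1))
		return 0;			/* misaligned */
	return 1;
}
```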
|
/drivers/scsi/qla4xxx/
D | ql4_bsg.c
    63   sg_copy_from_buffer(bsg_job->reply_payload.sg_list,   in qla4xxx_read_flash()
    123  sg_copy_to_buffer(bsg_job->request_payload.sg_list,   in qla4xxx_update_flash()
    187  sg_copy_from_buffer(bsg_job->reply_payload.sg_list,   in qla4xxx_get_acb_state()
    258  sg_copy_from_buffer(bsg_job->reply_payload.sg_list,   in qla4xxx_read_nvram()
    322  sg_copy_to_buffer(bsg_job->request_payload.sg_list,   in qla4xxx_update_nvram()
    436  sg_copy_from_buffer(bsg_job->reply_payload.sg_list,   in qla4xxx_bsg_get_acb()
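Each of these handlers gathers the request payload into a flat buffer with sg_copy_to_buffer() before the firmware call, or scatters a flat reply back with sg_copy_from_buffer() afterwards. A kernel-style sketch of the round trip; stage_payloads() is a hypothetical helper:

```c
/*
 * Staging a scattered bsg payload through one contiguous buffer that
 * the firmware can address.
 */
#include <linux/scatterlist.h>

static void stage_payloads(struct scatterlist *req_sg, unsigned int req_nents,
			   struct scatterlist *rsp_sg, unsigned int rsp_nents,
			   void *flat_buf, size_t len)
{
	/* caller's scattered request -> contiguous staging buffer */
	sg_copy_to_buffer(req_sg, req_nents, flat_buf, len);

	/* ... hand flat_buf to the firmware and wait ... */

	/* contiguous reply -> back into the caller's scatterlist */
	sg_copy_from_buffer(rsp_sg, rsp_nents, flat_buf, len);
}
```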
|
/drivers/scsi/
D | 3w-9xxx.c
    1348  …if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_i…   in twa_interrupt()
    1349  …scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);   in twa_interrupt()
    1393  newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);   in twa_load_sgl()
    1394  newcommand->sg_list[0].length = cpu_to_le32(length);   in twa_load_sgl()
    1855  command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);   in DEF_SCSI_QCMD()
    1856  command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);   in DEF_SCSI_QCMD()
    1863  command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));   in DEF_SCSI_QCMD()
    1864  command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));   in DEF_SCSI_QCMD()
    1865  if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {   in DEF_SCSI_QCMD()
    1876  command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);   in DEF_SCSI_QCMD()
    [all …]
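Lines 1348-1349 handle a short transfer: when the adapter reports fewer bytes than the midlayer asked for, the difference is recorded as the SCSI residual. A kernel-style sketch of just that step; report_short_transfer() is a hypothetical helper:

```c
/*
 * Record an underrun so the SCSI core sees a short transfer rather
 * than silent data loss.
 */
#include <scsi/scsi_cmnd.h>

static void report_short_transfer(struct scsi_cmnd *cmd, u32 transferred)
{
	if (transferred < scsi_bufflen(cmd))
		scsi_set_resid(cmd, scsi_bufflen(cmd) - transferred);
}
```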
|
/drivers/infiniband/core/
D | mad.c
    1028  mad_send_wr->sg_list[0].length = hdr_len;   in ib_create_send_mad()
    1029  mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;   in ib_create_send_mad()
    1034  mad_send_wr->sg_list[1].length = data_len;   in ib_create_send_mad()
    1036  mad_send_wr->sg_list[1].length = mad_size - hdr_len;   in ib_create_send_mad()
    1038  mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;   in ib_create_send_mad()
    1043  mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;   in ib_create_send_mad()
    1159  sge = mad_send_wr->sg_list;   in ib_send_mad()
    2458  mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);   in ib_mad_send_done()
    2461  mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);   in ib_mad_send_done()
    2851  struct ib_sge sg_list;   in ib_mad_post_receive_mads()  (local)
    [all …]
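ib_create_send_mad() builds a two-entry gather list, SGE 0 for the MAD header and SGE 1 for the payload, both under the PD's local DMA lkey, then points the send WR at the pair. A kernel-style sketch; build_mad_sges() is a hypothetical helper:

```c
/*
 * Two-entry gather list: header and payload posted as one send WR.
 */
#include <rdma/ib_verbs.h>

static void build_mad_sges(struct ib_sge sg_list[2], struct ib_send_wr *wr,
			   u64 hdr_dma, u32 hdr_len,
			   u64 data_dma, u32 data_len, u32 lkey)
{
	sg_list[0].addr   = hdr_dma;
	sg_list[0].length = hdr_len;	/* management datagram header */
	sg_list[0].lkey   = lkey;	/* pd->local_dma_lkey in the driver */

	sg_list[1].addr   = data_dma;
	sg_list[1].length = data_len;	/* payload, or mad_size - hdr_len */
	sg_list[1].lkey   = lkey;

	wr->sg_list = sg_list;
	wr->num_sge = 2;
}
```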
|
D | rw.c
    141  reg->wr.wr.sg_list = &reg->sge;   in rdma_rw_init_mr_wrs()
    208  rdma_wr->wr.sg_list = sge;   in rdma_rw_init_map_wrs()
    252  rdma_wr->wr.sg_list = &ctx->single.sge;   in rdma_rw_init_single_wr()
    421  ctx->sig->sig_wr.wr.sg_list = &ctx->sig->data.sge;   in rdma_rw_ctx_signature_init()
    438  rdma_wr->wr.sg_list = &ctx->sig->sig_sge;   in rdma_rw_ctx_signature_init()
|
/drivers/firmware/efi/
D | runtime-wrappers.c
    266  unsigned long sg_list)   in virt_efi_update_capsule()  (argument)
    275  status = efi_call_virt(update_capsule, capsules, count, sg_list);   in virt_efi_update_capsule()
|
/drivers/infiniband/hw/mlx4/
D | srq.c
    355  scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);   in mlx4_ib_post_srq_recv()
    356  scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);   in mlx4_ib_post_srq_recv()
    357  scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);   in mlx4_ib_post_srq_recv()
|