
Searched refs:sg_list (Results 1 – 25 of 105) sorted by relevance


/drivers/staging/rdma/ehca/
ehca_reqs.c
85 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); in ehca_write_rwqe()
91 wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr = in ehca_write_rwqe()
92 recv_wr->sg_list[cnt_ds].addr; in ehca_write_rwqe()
93 wqe_p->u.all_rcv.sg_list[cnt_ds].lkey = in ehca_write_rwqe()
94 recv_wr->sg_list[cnt_ds].lkey; in ehca_write_rwqe()
95 wqe_p->u.all_rcv.sg_list[cnt_ds].length = in ehca_write_rwqe()
96 recv_wr->sg_list[cnt_ds].length; in ehca_write_rwqe()
119 struct ib_sge *sge = ud_wr->wr.sg_list; in trace_ud_wr()
175 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); in ehca_write_swqe()
248 wqe_p->u.ud_av.sg_list[idx].vaddr = in ehca_write_swqe()
[all …]
ehca_qes.h
156 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES]; member
164 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES]; member
168 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES - member
176 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES]; member
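The ehca matches above show the common RDMA receive path: each consumer-supplied ib_sge from recv_wr->sg_list is copied field by field into the hardware WQE's vendor scatter-gather entries (vaddr/lkey/length). A minimal standalone sketch of that copy; struct vsge and copy_sg_list are hypothetical stand-ins, not ehca names:

    #include <stdint.h>

    struct ib_sge {            /* layout as in <rdma/ib_verbs.h> */
        uint64_t addr;
        uint32_t length;
        uint32_t lkey;
    };

    struct vsge {              /* hypothetical vendor SG entry */
        uint64_t vaddr;
        uint32_t lkey;
        uint32_t length;
    };

    /* copy each posted SGE into the hardware-visible WQE array */
    static void copy_sg_list(struct vsge *dst, const struct ib_sge *sg_list,
                             int num_sge)
    {
        for (int i = 0; i < num_sge; i++) {
            dst[i].vaddr  = sg_list[i].addr;
            dst[i].lkey   = sg_list[i].lkey;
            dst[i].length = sg_list[i].length;
        }
    }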
/drivers/virt/
fsl_hypervisor.c
153 struct fh_sg_list *sg_list = NULL; in ioctl_memcpy() local
246 sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list)); in ioctl_memcpy()
267 sg_list[0].source = page_to_phys(pages[0]) + lb_offset; in ioctl_memcpy()
268 sg_list[0].target = param.remote_paddr; in ioctl_memcpy()
270 sg_list[0].source = param.remote_paddr; in ioctl_memcpy()
271 sg_list[0].target = page_to_phys(pages[0]) + lb_offset; in ioctl_memcpy()
273 sg_list[0].size = min_t(uint64_t, param.count, PAGE_SIZE - lb_offset); in ioctl_memcpy()
275 remote_paddr = param.remote_paddr + sg_list[0].size; in ioctl_memcpy()
276 count = param.count - sg_list[0].size; in ioctl_memcpy()
281 sg_list[i].source = page_to_phys(pages[i]); in ioctl_memcpy()
[all …]
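fsl_hypervisor.c builds its sg_list so the first entry covers only the tail of the first pinned page (the local buffer need not be page-aligned), letting every later entry map a whole page; the source/target swap in the matches above handles the two copy directions. A standalone sketch of the first-entry arithmetic, with illustrative names rather than the driver's:

    #include <stdint.h>

    #define PAGE_SIZE 4096u    /* assumed page size for the sketch */

    struct sg_ent {
        uint64_t source;
        uint64_t target;
        uint64_t size;
    };

    /* first entry: from the offset within the first page to page end,
     * capped at the total byte count of the copy */
    static void build_first_entry(struct sg_ent *sg, uint64_t local_paddr,
                                  uint64_t remote_paddr, uint64_t count,
                                  uint64_t lb_offset)
    {
        sg[0].source = local_paddr + lb_offset;   /* swapped for reads */
        sg[0].target = remote_paddr;
        sg[0].size   = count < PAGE_SIZE - lb_offset ?
                       count : PAGE_SIZE - lb_offset;
    }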
/drivers/scsi/qla2xxx/
qla_bsg.c
41 bsg_job->request_payload.sg_list, in qla2x00_bsg_sp_free()
46 bsg_job->reply_payload.sg_list, in qla2x00_bsg_sp_free()
49 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, in qla2x00_bsg_sp_free()
52 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, in qla2x00_bsg_sp_free()
183 bsg_job->reply_payload.sg_list, in qla24xx_proc_fcp_prio_cfg_cmd()
210 sg_copy_to_buffer(bsg_job->request_payload.sg_list, in qla24xx_proc_fcp_prio_cfg_cmd()
336 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, in qla2x00_process_els()
339 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, in qla2x00_process_els()
345 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, in qla2x00_process_els()
348 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, in qla2x00_process_els()
[all …]
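The qla_bsg.c matches illustrate the dma_map_sg()/dma_unmap_sg() discipline for bsg payloads: unmapping must use the original entry count that was passed to the mapping call, not the (possibly smaller) count it returned. A kernel-style sketch of the pairing; map_payload is a hypothetical helper:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    static int map_payload(struct device *dev, struct scatterlist *sgl,
                           int nents)
    {
        /* returns the number of DMA segments, 0 on failure */
        int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

        if (!mapped)
            return -ENOMEM;

        /* ... build and issue the command using the mapped segments ... */

        /* unmap with the ORIGINAL nents, not the mapped count */
        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
        return 0;
    }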
/drivers/infiniband/hw/cxgb3/
iwch_qp.c
73 if ((plen + wr->sg_list[i].length) < plen) in build_rdma_send()
76 plen += wr->sg_list[i].length; in build_rdma_send()
77 wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey); in build_rdma_send()
78 wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length); in build_rdma_send()
79 wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr); in build_rdma_send()
110 if ((plen + wr->sg_list[i].length) < plen) { in build_rdma_write()
113 plen += wr->sg_list[i].length; in build_rdma_write()
115 cpu_to_be32(wr->sg_list[i].lkey); in build_rdma_write()
117 cpu_to_be32(wr->sg_list[i].length); in build_rdma_write()
119 cpu_to_be64(wr->sg_list[i].addr); in build_rdma_write()
[all …]
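The `(plen + wr->sg_list[i].length) < plen` test in iwch_qp.c is an unsigned-overflow check: if adding an SGE length wraps the 32-bit running total, the work request is too long and is rejected. A standalone sketch of the same idiom:

    #include <stdint.h>
    #include <stdbool.h>

    /* sum SGE lengths, failing if the u32 total would wrap */
    static bool total_sge_len(const uint32_t *lengths, int num_sge,
                              uint32_t *total)
    {
        uint32_t plen = 0;

        for (int i = 0; i < num_sge; i++) {
            if (plen + lengths[i] < plen)   /* wrapped: overflow */
                return false;
            plen += lengths[i];
        }
        *total = plen;
        return true;
    }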
/drivers/infiniband/hw/qib/
qib_ruc.c
93 ss->sg_list = qp->r_sg_list; in qib_init_sge()
96 if (wqe->sg_list[i].length == 0) in qib_init_sge()
99 if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge, in qib_init_sge()
100 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) in qib_init_sge()
102 qp->r_len += wqe->sg_list[i].length; in qib_init_sge()
112 struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; in qib_init_sge()
428 sqp->s_sge.sge = wqe->sg_list[0]; in qib_ruc_loopback()
429 sqp->s_sge.sg_list = wqe->sg_list + 1; in qib_ruc_loopback()
466 qp->r_sge.sg_list = NULL; in qib_ruc_loopback()
480 sqp->s_sge.sg_list = NULL; in qib_ruc_loopback()
[all …]
qib_ud.c
172 ssge.sg_list = swqe->sg_list + 1; in qib_ud_loopback()
173 ssge.sge = *swqe->sg_list; in qib_ud_loopback()
190 *sge = *ssge.sg_list++; in qib_ud_loopback()
320 qp->s_sge.sge = wqe->sg_list[0]; in qib_make_ud_req()
321 qp->s_sge.sg_list = wqe->sg_list + 1; in qib_make_ud_req()
qib_srq.c
83 wqe->sg_list[i] = wr->sg_list[i]; in qib_post_srq_receive()
289 p->sg_list[i] = wqe->sg_list[i]; in qib_modify_srq()
qib_verbs.c
189 *sge = *ss->sg_list++; in qib_copy_sge()
230 *sge = *ss->sg_list++; in qib_skip_sge()
253 struct qib_sge *sg_list = ss->sg_list; in qib_count_sge() local
277 sge = *sg_list++; in qib_count_sge()
315 *sge = *ss->sg_list++; in qib_copy_from_sge()
383 wr->sg_list[0].length < sizeof(u64) || in qib_post_one_send()
384 wr->sg_list[0].addr & (sizeof(u64) - 1))) in qib_post_one_send()
423 u32 length = wr->sg_list[i].length; in qib_post_one_send()
428 ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j], in qib_post_one_send()
429 &wr->sg_list[i], acc); in qib_post_one_send()
[all …]
qib_uc.c
99 qp->s_sge.sge = wqe->sg_list[0]; in qib_make_uc_req()
100 qp->s_sge.sg_list = wqe->sg_list + 1; in qib_make_uc_req()
434 qp->r_sge.sg_list = NULL; in qib_uc_rcv()
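qib (like ipath and hfi1 below) keeps per-WQE SGE state in a pair: the entry currently being consumed is held by value in ss->sge, while ss->sg_list points at the remaining entries. That is why initialization reads `sge = wqe->sg_list[0]` and `sg_list = wqe->sg_list + 1`, and why stepping forward is `*sge = *ss->sg_list++`. A standalone sketch of that state machine, with hypothetical type names:

    #include <stdint.h>

    struct sge {
        uint64_t vaddr;
        uint32_t length;
    };

    struct sge_state {
        struct sge *sg_list;   /* entries not yet started */
        struct sge  sge;       /* entry currently being consumed */
        int         num_sge;   /* entries left, including .sge */
    };

    static void sge_state_init(struct sge_state *ss, struct sge *list, int n)
    {
        ss->sge     = list[0];      /* first entry by value */
        ss->sg_list = list + 1;     /* rest by pointer */
        ss->num_sge = n;
    }

    /* consume bytes from the current entry; step to the next when empty */
    static void sge_advance(struct sge_state *ss, uint32_t consumed)
    {
        ss->sge.vaddr  += consumed;
        ss->sge.length -= consumed;
        if (ss->sge.length == 0 && --ss->num_sge)
            ss->sge = *ss->sg_list++;
    }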
/drivers/staging/rdma/ipath/
ipath_ruc.c
130 if (wqe->sg_list[i].length == 0) in ipath_init_sge()
133 if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge, in ipath_init_sge()
134 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) in ipath_init_sge()
136 *lengthp += wqe->sg_list[i].length; in ipath_init_sge()
209 qp->r_sge.sg_list = qp->r_sg_list; in ipath_get_rwqe()
327 sqp->s_sge.sge = wqe->sg_list[0]; in ipath_ruc_loopback()
328 sqp->s_sge.sg_list = wqe->sg_list + 1; in ipath_ruc_loopback()
369 qp->r_sge.sge = wqe->sg_list[0]; in ipath_ruc_loopback()
370 qp->r_sge.sg_list = wqe->sg_list + 1; in ipath_ruc_loopback()
413 *sge = *sqp->s_sge.sg_list++; in ipath_ruc_loopback()
ipath_srq.c
83 wqe->sg_list[i] = wr->sg_list[i]; in ipath_post_srq_receive()
293 p->sg_list[i] = wqe->sg_list[i]; in ipath_modify_srq()
ipath_ud.c
133 rsge.sg_list = qp->r_ud_sg_list; in ipath_ud_loopback()
183 sge = swqe->sg_list; in ipath_ud_loopback()
322 qp->s_sge.sge = wqe->sg_list[0]; in ipath_make_ud_req()
323 qp->s_sge.sg_list = wqe->sg_list + 1; in ipath_make_ud_req()
ipath_uc.c
98 qp->s_sge.sge = wqe->sg_list[0]; in ipath_make_uc_req()
99 qp->s_sge.sg_list = wqe->sg_list + 1; in ipath_make_uc_req()
453 qp->r_sge.sg_list = NULL; in ipath_uc_rcv()
/drivers/staging/rdma/hfi1/
ruc.c
111 ss->sg_list = qp->r_sg_list; in init_sge()
114 if (wqe->sg_list[i].length == 0) in init_sge()
117 if (!hfi1_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge, in init_sge()
118 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) in init_sge()
120 qp->r_len += wqe->sg_list[i].length; in init_sge()
130 struct hfi1_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; in init_sge()
450 sqp->s_sge.sge = wqe->sg_list[0]; in ruc_loopback()
451 sqp->s_sge.sg_list = wqe->sg_list + 1; in ruc_loopback()
488 qp->r_sge.sg_list = NULL; in ruc_loopback()
502 sqp->s_sge.sg_list = NULL; in ruc_loopback()
[all …]
srq.c
100 wqe->sg_list[i] = wr->sg_list[i]; in hfi1_post_srq_receive()
306 p->sg_list[i] = wqe->sg_list[i]; in hfi1_modify_srq()
verbs.c
299 *sge = *ss->sg_list++; in hfi1_copy_sge()
340 *sge = *ss->sg_list++; in hfi1_skip_sge()
401 wr->sg_list[0].length < sizeof(u64) || in post_one_send()
402 wr->sg_list[0].addr & (sizeof(u64) - 1))) in post_one_send()
437 u32 length = wr->sg_list[i].length; in post_one_send()
442 ok = hfi1_lkey_ok(rkt, pd, &wqe->sg_list[j], in post_one_send()
443 &wr->sg_list[i], acc); in post_one_send()
468 struct hfi1_sge *sge = &wqe->sg_list[--j]; in post_one_send()
569 wqe->sg_list[i] = wr->sg_list[i]; in post_receive()
724 *sge = *ss->sg_list++; in update_sge()
[all …]
ud.c
194 ssge.sg_list = swqe->sg_list + 1; in ud_loopback()
195 ssge.sge = *swqe->sg_list; in ud_loopback()
212 *sge = *ssge.sg_list++; in ud_loopback()
350 qp->s_sge.sge = wqe->sg_list[0]; in hfi1_make_ud_req()
351 qp->s_sge.sg_list = wqe->sg_list + 1; in hfi1_make_ud_req()
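The post_one_send() checks in hfi1's verbs.c (mirrored in qib_verbs.c above) require the first SGE to be at least 8 bytes long and 8-byte aligned; `addr & (sizeof(u64) - 1)` tests the low three address bits. A standalone sketch of the predicate:

    #include <stdint.h>
    #include <stdbool.h>

    /* first SGE must hold a full, naturally aligned u64 */
    static bool first_sge_ok(uint64_t addr, uint32_t length)
    {
        return length >= sizeof(uint64_t) &&
               (addr & (sizeof(uint64_t) - 1)) == 0;
    }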
/drivers/dma/
imx-dma.c
165 struct scatterlist *sg_list; member
804 kfree(imxdmac->sg_list); in imxdma_free_chan_resources()
805 imxdmac->sg_list = NULL; in imxdma_free_chan_resources()
879 kfree(imxdmac->sg_list); in imxdma_prep_dma_cyclic()
881 imxdmac->sg_list = kcalloc(periods + 1, in imxdma_prep_dma_cyclic()
883 if (!imxdmac->sg_list) in imxdma_prep_dma_cyclic()
886 sg_init_table(imxdmac->sg_list, periods); in imxdma_prep_dma_cyclic()
889 imxdmac->sg_list[i].page_link = 0; in imxdma_prep_dma_cyclic()
890 imxdmac->sg_list[i].offset = 0; in imxdma_prep_dma_cyclic()
891 imxdmac->sg_list[i].dma_address = dma_addr; in imxdma_prep_dma_cyclic()
[all …]
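imx-dma keeps a driver-owned scatterlist for cyclic transfers: one entry per period, allocated with one spare slot (per the `periods + 1` above) and pointed directly at DMA addresses, since the buffer is already mapped. A kernel-style sketch under those assumptions; alloc_cyclic_sg is a hypothetical helper and the GFP flag is an assumption:

    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static struct scatterlist *alloc_cyclic_sg(dma_addr_t buf,
                                               size_t period_len,
                                               unsigned int periods)
    {
        struct scatterlist *sgl;
        unsigned int i;

        /* one entry per period plus a spare, zeroed by kcalloc() */
        sgl = kcalloc(periods + 1, sizeof(*sgl), GFP_ATOMIC);
        if (!sgl)
            return NULL;

        sg_init_table(sgl, periods);
        for (i = 0; i < periods; i++) {
            /* buffer is pre-mapped, so fill dma_address directly,
             * as the driver above does */
            sgl[i].page_link   = 0;
            sgl[i].offset      = 0;
            sgl[i].dma_address = buf + i * period_len;
            sgl[i].length      = period_len;
        }
        return sgl;
    }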
/drivers/scsi/aacraid/
commctrl.c
489 void *sg_list[32]; in aac_send_raw_srb() local
516 memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */ in aac_send_raw_srb()
569 if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) { in aac_send_raw_srb()
628 sg_list[i] = p; // save so we can clean up later in aac_send_raw_srb()
680 sg_list[i] = p; // save so we can clean up later in aac_send_raw_srb()
734 sg_list[i] = p; // save so we can clean up later in aac_send_raw_srb()
770 sg_list[i] = p; // save so we can clean up later in aac_send_raw_srb()
813 if(copy_to_user(sg_user[i], sg_list[i], byte_count)){ in aac_send_raw_srb()
832 kfree(sg_list[i]); in aac_send_raw_srb()
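aacraid's aac_send_raw_srb() records every bounce buffer it allocates in the local sg_list[] array the moment the allocation succeeds, so a single exit path can copy results back to user space and kfree() all of them even after a partial failure; that is why the array is zeroed up front. A kernel-style sketch of the pattern; run_srb and its parameters are hypothetical:

    #include <linux/slab.h>
    #include <linux/uaccess.h>
    #include <linux/errno.h>

    #define MAX_SG 32

    static int run_srb(void __user *const *sg_user, const size_t *bytes,
                       int n)
    {
        void *sg_list[MAX_SG] = { NULL };  /* cleanup can kfree() blindly */
        int i, rc = 0;

        if (n > MAX_SG)
            return -EINVAL;

        for (i = 0; i < n; i++) {
            void *p = kmalloc(bytes[i], GFP_KERNEL);

            if (!p) {
                rc = -ENOMEM;
                goto cleanup;
            }
            sg_list[i] = p;    /* save so we can clean up later */
        }

        /* ... issue the SRB and wait for completion ... */

        for (i = 0; i < n; i++)
            if (copy_to_user(sg_user[i], sg_list[i], bytes[i]))
                rc = -EFAULT;
    cleanup:
        for (i = 0; i < MAX_SG; i++)
            kfree(sg_list[i]);
        return rc;
    }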
/drivers/crypto/caam/
sg_sw_sec4.h
73 static inline int sg_count(struct scatterlist *sg_list, int nbytes) in sg_count() argument
75 int sg_nents = sg_nents_for_len(sg_list, nbytes); in sg_count()
/drivers/infiniband/hw/cxgb4/
qp.c
399 if ((plen + wr->sg_list[i].length) > max) in build_immd()
401 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; in build_immd()
402 plen += wr->sg_list[i].length; in build_immd()
403 rem = wr->sg_list[i].length; in build_immd()
429 struct fw_ri_isgl *isglp, struct ib_sge *sg_list, in build_isgl() argument
438 if ((plen + sg_list[i].length) < plen) in build_isgl()
440 plen += sg_list[i].length; in build_isgl()
441 *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) | in build_isgl()
442 sg_list[i].length); in build_isgl()
445 *flitp = cpu_to_be64(sg_list[i].addr); in build_isgl()
[all …]
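cxgb4's build_isgl() packs each ib_sge into two big-endian 64-bit flits: lkey in the high half and length in the low half of the first flit, the address in the second. A kernel-style sketch of just the packing (the real function also wraps flitp at the end of the queue, omitted here):

    #include <linux/types.h>
    #include <rdma/ib_verbs.h>

    static void pack_isgl(__be64 *flitp, const struct ib_sge *sg_list,
                          int num_sge)
    {
        int i;

        for (i = 0; i < num_sge; i++) {
            /* flit 0: lkey (high 32 bits) | length (low 32 bits) */
            *flitp++ = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
                                   sg_list[i].length);
            /* flit 1: 64-bit virtual address */
            *flitp++ = cpu_to_be64(sg_list[i].addr);
        }
    }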
/drivers/net/ethernet/ibm/ehea/
ehea_qmr.h
120 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES]; member
129 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1]; member
146 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES]; member
/drivers/staging/rdma/amso1100/
c2_qp.c
845 ib_wr->sg_list, in c2_post_send()
868 ib_wr->sg_list, in c2_post_send()
888 cpu_to_be32(ib_wr->sg_list->lkey); in c2_post_send()
890 cpu_to_be64(ib_wr->sg_list->addr); in c2_post_send()
896 cpu_to_be32(ib_wr->sg_list->length); in c2_post_send()
980 ib_wr->sg_list, in c2_post_receive()
/drivers/scsi/qla4xxx/
ql4_bsg.c
63 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, in qla4xxx_read_flash()
123 sg_copy_to_buffer(bsg_job->request_payload.sg_list, in qla4xxx_update_flash()
187 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, in qla4xxx_get_acb_state()
258 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, in qla4xxx_read_nvram()
322 sg_copy_to_buffer(bsg_job->request_payload.sg_list, in qla4xxx_update_nvram()
436 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, in qla4xxx_bsg_get_acb()
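The ql4_bsg.c matches show the standard bsg data path: sg_copy_to_buffer() gathers the request payload from its scatterlist into a flat kernel buffer before the operation, and sg_copy_from_buffer() scatters the result back into the reply payload afterwards. A kernel-style sketch assuming the bsg-lib struct bsg_job layout; flash_io and its flat buffer are hypothetical:

    #include <linux/scatterlist.h>
    #include <linux/bsg-lib.h>

    static void flash_io(struct bsg_job *job, void *flash_buf, size_t len)
    {
        /* gather: request payload scatterlist -> flat buffer */
        sg_copy_to_buffer(job->request_payload.sg_list,
                          job->request_payload.sg_cnt, flash_buf, len);

        /* ... perform the flash read/write with flash_buf ... */

        /* scatter: flat buffer -> reply payload scatterlist */
        sg_copy_from_buffer(job->reply_payload.sg_list,
                            job->reply_payload.sg_cnt, flash_buf, len);
    }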
