
Searched refs:sg_head (Results 1 – 17 of 17) sorted by relevance

/kernel/linux/linux-5.10/drivers/infiniband/core/
umem.c
53 ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents, in __ib_umem_release()
56 for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) { in __ib_umem_release()
61 sg_free_table(&umem->sg_head); in __ib_umem_release()
108 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) { in ib_umem_find_best_pgsz()
223 sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret, in ib_umem_get()
227 umem->sg_nents = umem->sg_head.nents; in ib_umem_get()
239 ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents, in ib_umem_get()
305 ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length, in ib_umem_copy_from()
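The umem.c matches above cover one complete sg_head lifecycle: build the table from the pinned pages, DMA-map it, walk the mapped entries, then unmap and free. A minimal sketch of that pattern follows; demo_map_pages() is a made-up name, and it uses the plain sg_alloc_table_from_pages()/dma_map_sg() helpers rather than the __sg_alloc_table_from_pages() and ib_dma_map_sg_attrs() calls shown at lines 223 and 239.

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

/* Illustrative helper, not from umem.c: same sg_head lifecycle. */
static int demo_map_pages(struct device *dev, struct page **pages,
                          unsigned int npages, struct sg_table *sg_head)
{
        struct scatterlist *sg;
        int nents, i, ret;

        /* Build the table from the pinned pages (cf. line 223 above). */
        ret = sg_alloc_table_from_pages(sg_head, pages, npages, 0,
                                        (unsigned long)npages << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret)
                return ret;

        /* DMA-map the whole table (cf. line 239 above). */
        nents = dma_map_sg(dev, sg_head->sgl, sg_head->nents, DMA_BIDIRECTIONAL);
        if (!nents) {
                sg_free_table(sg_head);
                return -ENOMEM;
        }

        /* Walk the mapped entries the way the for_each_sg() callers do. */
        for_each_sg(sg_head->sgl, sg, nents, i)
                pr_debug("entry %d: dma %pad len %u\n",
                         i, &sg_dma_address(sg), sg_dma_len(sg));

        /* Tear down in the same order as __ib_umem_release() (lines 53-61). */
        dma_unmap_sg(dev, sg_head->sgl, sg_head->nents, DMA_BIDIRECTIONAL);
        sg_free_table(sg_head);
        return 0;
}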
/kernel/linux/linux-5.10/drivers/vdpa/mlx5/core/
mr.c
43 for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) { in populate_mtts()
250 err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL); in map_direct_mr()
254 sg = mr->sg_head.sgl; in map_direct_mr()
275 mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); in map_direct_mr()
288 dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); in map_direct_mr()
290 sg_free_table(&mr->sg_head); in map_direct_mr()
299 dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); in unmap_direct_mr()
300 sg_free_table(&mr->sg_head); in unmap_direct_mr()
mlx5_vdpa.h
19 struct sg_table sg_head; member
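The vdpa mr.c matches follow the same lifecycle, but size sg_head up front with sg_alloc_table() and keep the count returned by dma_map_sg_attrs() for later for_each_sg() walks (lines 43 and 275). A rough sketch of that shape; demo_direct_map() and its pages array are invented for illustration and do not mirror map_direct_mr()'s real iova walking.

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

/* Illustrative only: allocate a fixed-size sg_head, fill it, map it. */
static int demo_direct_map(struct device *dma, struct page **pages,
                           unsigned int nsg, struct sg_table *sg_head)
{
        struct scatterlist *sg;
        unsigned int i;
        int nent, err;

        err = sg_alloc_table(sg_head, nsg, GFP_KERNEL);
        if (err)
                return err;

        /* Fill one page per entry (the real code walks an iova range). */
        sg = sg_head->sgl;
        for (i = 0; i < nsg; i++, sg = sg_next(sg))
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        nent = dma_map_sg_attrs(dma, sg_head->sgl, nsg, DMA_BIDIRECTIONAL, 0);
        if (!nent) {
                sg_free_table(sg_head);
                return -ENOMEM;
        }
        /* Callers keep nent for for_each_sg() and undo the mapping with
         * dma_unmap_sg_attrs() + sg_free_table(), as lines 288-300 do. */
        return nent;
}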
/kernel/linux/linux-5.10/include/rdma/
ib_umem.h
26 struct sg_table sg_head; member
54 __rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz); in __rdma_umem_block_iter_start()
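ib_umem.h also wraps sg_head behind the block iterator started at line 54, which is the interface drivers are expected to use instead of touching sg_head.sgl directly. A small sketch of that consumer side; the function name and the PAGE_SIZE block size are illustrative only.

#include <linux/kernel.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Illustrative walker: visit each PAGE_SIZE-aligned DMA block of a umem. */
static void demo_walk_umem_blocks(struct ib_umem *umem)
{
        struct ib_block_iter biter;
        dma_addr_t addr;

        rdma_umem_for_each_dma_block(umem, &biter, PAGE_SIZE) {
                addr = rdma_block_iter_dma_address(&biter);
                pr_debug("block at dma %pad\n", &addr);
        }
}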
/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/
hns_roce_db.c
46 db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset; in hns_roce_db_map_user()
47 db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset; in hns_roce_db_map_user()
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/
doorbell.c
78 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); in mlx5_ib_db_map_user()
mem.c
66 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in mlx5_ib_cont_pages()
131 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in __mlx5_ib_populate_pas()
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/
doorbell.c
78 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); in mlx4_ib_db_map_user()
mr.c
203 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) { in mlx4_ib_umem_write_mtt()
276 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) { in mlx4_ib_umem_calc_optimal_mtt_size()
/kernel/linux/linux-5.10/drivers/crypto/nx/
nx.c
78 struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head, in nx_build_sg_list() argument
108 for (sg = sg_head; sg_len < *len; sg++) { in nx_build_sg_list()
126 if ((sg - sg_head) == sgmax) { in nx_build_sg_list()
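nx.c is the odd one out: its sg_head is a driver-private nx_sg array rather than a struct sg_table, filled entry by entry until the requested length is covered or the sgmax cap at line 126 is hit. A standalone sketch of that loop shape, using a made-up struct demo_sg instead of the real struct nx_sg layout.

#include <linux/types.h>
#include <linux/kernel.h>

/* Hypothetical stand-in for the driver's hardware sg entry. */
struct demo_sg {
        u64 addr;
        u32 len;
};

/* Fill entries covering *len bytes starting at buf, capped at sgmax
 * entries; report back how much was actually covered. */
static unsigned int demo_build_sg_list(struct demo_sg *sg_head,
                                       u8 *buf, unsigned int *len,
                                       u32 sgmax)
{
        const unsigned int chunk = 4096; /* arbitrary per-entry size */
        unsigned int sg_len = 0, n = 0;

        while (sg_len < *len && n < sgmax) {
                sg_head[n].addr = (u64)(uintptr_t)(buf + sg_len);
                sg_head[n].len = min(chunk, *len - sg_len);
                sg_len += sg_head[n].len;
                n++;
        }

        *len = sg_len;  /* bytes covered */
        return n;       /* entries used */
}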
/kernel/linux/linux-5.10/drivers/scsi/
advansys.c
326 ASC_SG_HEAD *sg_head; member
335 ASC_SG_HEAD *sg_head; member
2516 (ulong)q->sg_head, q->q1.sg_queue_cnt); in asc_prt_asc_scsi_q()
2518 if (q->sg_head) { in asc_prt_asc_scsi_q()
2519 sgp = q->sg_head; in asc_prt_asc_scsi_q()
7575 asc_sg_head = kzalloc(sizeof(asc_scsi_q->sg_head) + in asc_build_req()
7584 asc_scsi_q->sg_head = asc_sg_head; in asc_build_req()
8004 ASC_SG_HEAD *sg_head; in AscPutReadySgListQueue() local
8016 sg_head = scsiq->sg_head; in AscPutReadySgListQueue()
8019 scsiq->q1.data_addr = cpu_to_le32(sg_head->sg_list[0].addr); in AscPutReadySgListQueue()
[all …]
/kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/
rxe_mr.c
166 for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) { in rxe_mem_init_user()
/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/
mr.c
412 for_each_sg_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) { in rvt_reg_user_mr()
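The rxe and rdmavt matches both walk sg_head one page at a time with for_each_sg_page() and pull out each struct page via sg_page_iter_page(). A minimal sketch of that iteration; the helper name and the out[] array are invented for illustration.

#include <linux/scatterlist.h>

/* Illustrative: collect every page referenced by an sg_table. */
static unsigned int demo_collect_pages(struct sg_table *sg_head,
                                       struct page **out)
{
        struct sg_page_iter sg_iter;
        unsigned int n = 0;

        for_each_sg_page(sg_head->sgl, &sg_iter, sg_head->orig_nents, 0)
                out[n++] = sg_page_iter_page(&sg_iter);

        return n;
}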
/kernel/linux/linux-5.10/drivers/infiniband/hw/i40iw/
i40iw_verbs.c
1310 iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl); in i40iw_copy_user_pgaddrs()
/kernel/linux/patches/linux-5.10/hispark_taurus_patch/
hispark_taurus.patch
15471 +static int hiedmac_add_sg(struct list_head *sg_head,
15484 + free_dsg(sg_head);
15489 + list_add_tail(&dsg->node, sg_head);
15496 +static int hiedmac_add_sg_slave(struct list_head *sg_head,
15513 + return hiedmac_add_sg(sg_head, dst, src, length);
15516 +static int hiedmac_fill_sg_for_slave(struct list_head *sg_head,
15534 + ret = hiedmac_add_sg_slave(sg_head, slave_addr, addr, length, direction);
15541 +static inline int hiedmac_fill_sg_for_m2m_copy(struct list_head *sg_head,
15545 + return hiedmac_add_sg(sg_head, dst, src, len);
15548 +static int hiedmac_fill_sg_for_cyclic(struct list_head *sg_head,
[all …]
/kernel/linux/linux-5.10/drivers/infiniband/hw/qedr/
verbs.c
1480 sg = srq->prod_umem->sg_head.sgl; in qedr_init_srq_user_params()
/kernel/linux/patches/linux-4.19/hispark_taurus_patch/
hispark_taurus.patch
298430 +static int hiedmac_add_sg(struct list_head *sg_head,
298443 + free_dsg(sg_head);
298448 + list_add_tail(&dsg->node, sg_head);
298455 +static int hiedmac_add_sg_slave(struct list_head *sg_head,
298472 + return hiedmac_add_sg(sg_head, dst, src, length);
298475 +static int hiedmac_fill_sg_for_slave(struct list_head *sg_head,
298493 + ret = hiedmac_add_sg_slave(sg_head, slave_addr, addr, length, direction);
298500 +static inline int hiedmac_fill_sg_for_m2m_copy(struct list_head *sg_head,
298504 + return hiedmac_add_sg(sg_head, dst, src, len);
298507 +static int hiedmac_fill_sg_for_cyclic(struct list_head *sg_head,
[all …]