/drivers/infiniband/ulp/isert/ |
D | ib_isert.c |
    782 isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) in isert_login_post_send() argument
    788 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, in isert_login_post_send()
    791 tx_desc->tx_cqe.done = isert_login_send_done; in isert_login_post_send()
    794 send_wr.wr_cqe = &tx_desc->tx_cqe; in isert_login_post_send()
    795 send_wr.sg_list = tx_desc->tx_sg; in isert_login_post_send()
    796 send_wr.num_sge = tx_desc->num_sge; in isert_login_post_send()
    809 struct iser_tx_desc *tx_desc) in __isert_create_send_desc() argument
    812 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); in __isert_create_send_desc()
    813 tx_desc->iser_header.flags = ISCSI_CTRL; in __isert_create_send_desc()
    815 tx_desc->num_sge = 1; in __isert_create_send_desc()
    [all …]
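The ib_isert.c hits trace one pattern: the login descriptor's header buffer is synced for the device, then a send work request is pointed at the descriptor's completion entry and scatter list before being posted on the QP. A minimal sketch of that flow, assuming hypothetical my_tx_desc and MY_HEADERS_LEN stand-ins for the iser types (the verbs calls themselves are the real kernel API):

    #include <rdma/ib_verbs.h>

    #define MY_HEADERS_LEN 76            /* assumed header size, illustrative */

    struct my_tx_desc {                  /* hypothetical stand-in for iser_tx_desc */
            u64 dma_addr;
            struct ib_cqe tx_cqe;        /* .done() runs on send completion */
            struct ib_sge tx_sg[2];
            int num_sge;
    };

    static int my_login_post_send(struct ib_qp *qp, struct ib_device *ib_dev,
                                  struct my_tx_desc *tx_desc)
    {
            struct ib_send_wr send_wr = {};
            const struct ib_send_wr *bad_wr;

            /* make the CPU-written headers visible to the HCA before posting */
            ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
                                          MY_HEADERS_LEN, DMA_TO_DEVICE);

            send_wr.wr_cqe = &tx_desc->tx_cqe;
            send_wr.opcode = IB_WR_SEND;
            send_wr.sg_list = tx_desc->tx_sg;
            send_wr.num_sge = tx_desc->num_sge;
            send_wr.send_flags = IB_SEND_SIGNALED;

            return ib_post_send(qp, &send_wr, &bad_wr);
    }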
|
/drivers/crypto/ccp/ |
D | ccp-dmaengine.c |
    111 if (!async_tx_test_ack(&desc->tx_desc)) in ccp_cleanup_desc_resources()
    148 desc->tx_desc.cookie, cmd); in ccp_issue_next_cmd()
    155 ret, desc->tx_desc.cookie, cmd); in ccp_issue_next_cmd()
    170 __func__, desc->tx_desc.cookie, cmd); in ccp_free_active_cmd()
    193 struct dma_async_tx_descriptor *tx_desc; in ccp_handle_active_desc() local
    212 tx_desc = &desc->tx_desc; in ccp_handle_active_desc()
    214 tx_desc = NULL; in ccp_handle_active_desc()
    225 desc->tx_desc.cookie, desc->status); in ccp_handle_active_desc()
    227 dma_cookie_complete(tx_desc); in ccp_handle_active_desc()
    228 dma_descriptor_unmap(tx_desc); in ccp_handle_active_desc()
    [all …]
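Taken together, the ccp hits outline the provider-side life cycle of a dma_async_tx_descriptor: test the ack, complete the cookie, drop the automatic mappings, invoke the client callback. A hedged sketch of the completion step, with my_desc as an invented wrapper (the helpers are the in-tree dmaengine ones, including the drivers/dma-private "dmaengine.h" header):

    #include <linux/dmaengine.h>
    #include "dmaengine.h"        /* drivers/dma private cookie/callback helpers */

    struct my_desc {
            struct dma_async_tx_descriptor tx_desc;
            /* driver bookkeeping (list node, status, ...) would live here */
    };

    static void my_complete_desc(struct my_desc *desc)
    {
            struct dma_async_tx_descriptor *tx = &desc->tx_desc;

            dma_cookie_complete(tx);      /* mark tx->cookie as completed */
            dma_descriptor_unmap(tx);     /* undo automatic DMA unmap data */
            dmaengine_desc_get_callback_invoke(tx, NULL);
    }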
|
/drivers/net/ethernet/mellanox/mlx4/ |
D | en_tx.c |
    236 struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE); in mlx4_en_stamp_wqe() local
    239 __be32 *ptr = (__be32 *)tx_desc; in mlx4_en_stamp_wqe()
    243 if (likely((void *)tx_desc + in mlx4_en_stamp_wqe()
    276 struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE); in mlx4_en_free_tx_desc() local
    277 struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; in mlx4_en_free_tx_desc()
    307 if (likely((void *)tx_desc + in mlx4_en_free_tx_desc()
    400 struct mlx4_en_tx_desc *tx_desc; in mlx4_en_handle_err_cqe() local
    414 tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE); in mlx4_en_handle_err_cqe()
    415 print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false); in mlx4_en_handle_err_cqe()
    676 static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, in build_inline_wqe() argument
    [all …]
|
/drivers/infiniband/ulp/iser/ |
D | iser_initiator.c |
    164 struct iser_tx_desc *tx_desc) in iser_create_send_desc() argument
    169 tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); in iser_create_send_desc()
    171 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); in iser_create_send_desc()
    172 tx_desc->iser_header.flags = ISER_VER; in iser_create_send_desc()
    173 tx_desc->num_sge = 1; in iser_create_send_desc()
    373 struct iser_tx_desc *tx_desc = &iser_task->desc; in iser_send_command() local
    379 tx_desc->type = ISCSI_TX_SCSI_COMMAND; in iser_send_command()
    380 tx_desc->cqe.done = iser_cmd_comp; in iser_send_command()
    381 iser_create_send_desc(iser_conn, tx_desc); in iser_send_command()
    421 err = iser_post_send(&iser_conn->ib_conn, tx_desc, in iser_send_command()
    [all …]
|
D | iscsi_iser.c |
    184 struct iser_tx_desc *tx_desc) in iser_initialize_task_headers() argument
    201 dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc, in iser_initialize_task_headers()
    208 tx_desc->inv_wr.next = NULL; in iser_initialize_task_headers()
    209 tx_desc->reg_wr.wr.next = NULL; in iser_initialize_task_headers()
    210 tx_desc->mapped = true; in iser_initialize_task_headers()
    211 tx_desc->dma_addr = dma_addr; in iser_initialize_task_headers()
    212 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; in iser_initialize_task_headers()
    213 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; in iser_initialize_task_headers()
    214 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; in iser_initialize_task_headers()
    370 struct iser_tx_desc *tx_desc = &iser_task->desc; in iscsi_iser_cleanup_task() local
    [all …]
|
D | iser_memory.c |
    239 struct iser_tx_desc *tx_desc = &iser_task->desc; in iser_reg_sig_mr() local
    243 struct ib_reg_wr *wr = &tx_desc->reg_wr; in iser_reg_sig_mr()
    254 iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr); in iser_reg_sig_mr()
    267 wr->wr.next = &tx_desc->send_wr; in iser_reg_sig_mr()
    296 struct iser_tx_desc *tx_desc = &iser_task->desc; in iser_fast_reg_mr() local
    299 struct ib_reg_wr *wr = &tx_desc->reg_wr; in iser_fast_reg_mr()
    303 iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr); in iser_fast_reg_mr()
    314 wr->wr.next = &tx_desc->send_wr; in iser_fast_reg_mr()
|
/drivers/net/ethernet/hisilicon/hns3/ |
D | hns3_debugfs.c |
    175 struct hns3_desc *rx_desc, *tx_desc; in hns3_dbg_bd_info() local
    207 tx_desc = &ring->desc[tx_index]; in hns3_dbg_bd_info()
    208 addr = le64_to_cpu(tx_desc->addr); in hns3_dbg_bd_info()
    211 dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag)); in hns3_dbg_bd_info()
    213 le16_to_cpu(tx_desc->tx.send_size)); in hns3_dbg_bd_info()
    214 dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso); in hns3_dbg_bd_info()
    215 dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len); in hns3_dbg_bd_info()
    216 dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len); in hns3_dbg_bd_info()
    217 dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len); in hns3_dbg_bd_info()
    219 le16_to_cpu(tx_desc->tx.outer_vlan_tag)); in hns3_dbg_bd_info()
    [all …]
|
/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_main.c |
    752 struct fm10k_tx_desc *tx_desc; in fm10k_tso() local
    781 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tso()
    782 tx_desc->hdrlen = hdrlen; in fm10k_tso()
    783 tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); in fm10k_tso()
    799 struct fm10k_tx_desc *tx_desc; in fm10k_tx_csum() local
    875 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tx_csum()
    876 tx_desc->hdrlen = 0; in fm10k_tx_csum()
    877 tx_desc->mss = 0; in fm10k_tx_csum()
    898 struct fm10k_tx_desc *tx_desc, u16 i, in fm10k_tx_desc_push() argument
    906 tx_desc->buffer_addr = cpu_to_le64(dma); in fm10k_tx_desc_push()
    [all …]
|
/drivers/net/ethernet/intel/ice/ |
D | ice_txrx.c |
    33 struct ice_tx_desc *tx_desc; in ice_prgm_fdir_fltr() local
    69 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_prgm_fdir_fltr()
    79 tx_desc->buf_addr = cpu_to_le64(dma); in ice_prgm_fdir_fltr()
    86 tx_desc->cmd_type_offset_bsz = in ice_prgm_fdir_fltr()
    95 first->next_to_watch = tx_desc; in ice_prgm_fdir_fltr()
    209 struct ice_tx_desc *tx_desc; in ice_clean_tx_irq() local
    213 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_clean_tx_irq()
    256 while (tx_desc != eop_desc) { in ice_clean_tx_irq()
    258 tx_desc++; in ice_clean_tx_irq()
    263 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_tx_irq()
    [all …]
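The ice_clean_tx_irq() hits outline the standard Intel-style TX cleanup loop: the send path records the packet's end-of-packet descriptor in next_to_watch, and the IRQ path walks descriptor/buffer pairs up to it, wrapping at the end of the ring. An illustrative-only version with invented types (a real driver would also check completion status and unmap fragments where the comments indicate):

    #include <linux/types.h>

    struct my_tx_desc { __le64 buf_addr; __le64 cmd_type_offset_bsz; };

    struct my_tx_buf {
            struct my_tx_desc *next_to_watch;  /* EOP descriptor of this packet */
    };

    struct my_tx_ring {
            struct my_tx_desc *desc;
            struct my_tx_buf *tx_buf;
            u16 count;
            u16 next_to_clean;
    };

    static void my_clean_tx(struct my_tx_ring *ring)
    {
            u16 i = ring->next_to_clean;
            struct my_tx_buf *tx_buf = &ring->tx_buf[i];
            struct my_tx_desc *tx_desc = &ring->desc[i];
            struct my_tx_buf *first = tx_buf;
            struct my_tx_desc *eop_desc = tx_buf->next_to_watch;

            if (!eop_desc)
                    return;                    /* nothing pending */

            /* ... a real driver first checks that eop_desc has completed ... */

            while (tx_desc != eop_desc) {      /* walk the packet's fragments */
                    tx_desc++;
                    tx_buf++;
                    if (++i == ring->count) {  /* wrap back to slot 0 */
                            i = 0;
                            tx_desc = &ring->desc[0];
                            tx_buf = &ring->tx_buf[0];
                    }
                    /* ... unmap the fragment tracked by tx_buf ... */
            }

            first->next_to_watch = NULL;       /* packet fully reclaimed */
            if (++i == ring->count)            /* step past the EOP slot */
                    i = 0;
            ring->next_to_clean = i;
    }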
|
D | ice_txrx_lib.c |
    211 struct ice_tx_desc *tx_desc; in ice_xmit_xdp_ring() local
    233 tx_desc = ICE_TX_DESC(xdp_ring, i); in ice_xmit_xdp_ring()
    234 tx_desc->buf_addr = cpu_to_le64(dma); in ice_xmit_xdp_ring()
    235 tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, in ice_xmit_xdp_ring()
    247 tx_buf->next_to_watch = tx_desc; in ice_xmit_xdp_ring()
|
D | ice_xsk.c |
    696 struct ice_tx_desc *tx_desc = NULL; in ice_xmit_zc() local
    721 tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use); in ice_xmit_zc()
    722 tx_desc->buf_addr = cpu_to_le64(dma); in ice_xmit_zc()
    723 tx_desc->cmd_type_offset_bsz = in ice_xmit_zc()
    731 if (tx_desc) { in ice_xmit_zc()
    764 struct ice_tx_desc *tx_desc; in ice_clean_tx_irq_zc() local
    769 tx_desc = ICE_TX_DESC(xdp_ring, ntc); in ice_clean_tx_irq_zc()
    774 if (!(tx_desc->cmd_type_offset_bsz & in ice_clean_tx_irq_zc()
    788 tx_desc->cmd_type_offset_bsz = 0; in ice_clean_tx_irq_zc()
    790 tx_desc++; in ice_clean_tx_irq_zc()
    [all …]
|
/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_xsk.c |
    386 union ixgbe_adv_tx_desc *tx_desc = NULL; in ixgbe_xmit_zc() local
    413 tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use); in ixgbe_xmit_zc()
    414 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbe_xmit_zc()
    421 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in ixgbe_xmit_zc()
    422 tx_desc->read.olinfo_status = in ixgbe_xmit_zc()
    430 if (tx_desc) { in ixgbe_xmit_zc()
    454 union ixgbe_adv_tx_desc *tx_desc; in ixgbe_clean_xdp_tx_irq() local
    459 tx_desc = IXGBE_TX_DESC(tx_ring, ntc); in ixgbe_clean_xdp_tx_irq()
    462 if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) in ixgbe_clean_xdp_tx_irq()
    476 tx_desc++; in ixgbe_clean_xdp_tx_irq()
    [all …]
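These ixgbe hits show the two halves of the AF_XDP zero-copy path: the send side writes the address and command words through the descriptor's "read" view, and the cleanup side polls the hardware "write-back" view for the descriptor-done (DD) bit before recycling the slot. A rough sketch with invented bit values and field names (the union-of-views shape is the real idiom):

    #include <linux/types.h>

    #define MY_TXD_CMD_EOP  0x01000000   /* invented end-of-packet command bit */
    #define MY_TXD_STAT_DD  0x00000001   /* invented descriptor-done status bit */
    #define MY_PAYLEN_SHIFT 14           /* illustrative payload-length shift */

    union my_adv_tx_desc {               /* two views of one 16-byte ring slot */
            struct {
                    __le64 buffer_addr;
                    __le32 cmd_type_len;
                    __le32 olinfo_status;
            } read;                      /* filled by software before transmit */
            struct {
                    __le64 rsvd;
                    __le32 nxtseq;
                    __le32 status;
            } wb;                        /* written back by hardware */
    };

    static void my_fill_zc_desc(union my_adv_tx_desc *tx_desc,
                                dma_addr_t dma, u32 len)
    {
            tx_desc->read.buffer_addr = cpu_to_le64(dma);
            tx_desc->read.cmd_type_len = cpu_to_le32(MY_TXD_CMD_EOP | len);
            tx_desc->read.olinfo_status = cpu_to_le32(len << MY_PAYLEN_SHIFT);
    }

    static bool my_desc_done(const union my_adv_tx_desc *tx_desc)
    {
            /* hardware sets DD in the write-back status once the send is done */
            return tx_desc->wb.status & cpu_to_le32(MY_TXD_STAT_DD);
    }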
|
/drivers/staging/mt7621-dma/ |
D | hsdma-mt7621.c |
    234 struct hsdma_desc *tx_desc; in hsdma_dump_desc() local
    242 tx_desc = &chan->tx_ring[i]; in hsdma_dump_desc()
    247 i, tx_desc->addr0, tx_desc->flags, in hsdma_dump_desc()
    248 tx_desc->addr1, rx_desc->addr0, rx_desc->flags); in hsdma_dump_desc()
    319 struct hsdma_desc *tx_desc, *rx_desc; in mtk_hsdma_start_transfer() local
    331 tx_desc = &chan->tx_ring[chan->tx_idx]; in mtk_hsdma_start_transfer()
    339 tx_desc->addr1 = src; in mtk_hsdma_start_transfer()
    340 tx_desc->flags |= HSDMA_DESC_PLEN1(tlen); in mtk_hsdma_start_transfer()
    342 tx_desc->addr0 = src; in mtk_hsdma_start_transfer()
    343 tx_desc->flags = HSDMA_DESC_PLEN0(tlen); in mtk_hsdma_start_transfer()
    [all …]
|
/drivers/staging/rtl8712/ |
D | rtl8712_xmit.c |
    251 struct tx_desc *ptx_desc = (struct tx_desc *)pxmitbuf->pbuf; in r8712_construct_txaggr_cmd_desc()
    285 struct tx_desc *ptx_desc = (struct tx_desc *)pxmitbuf->pbuf; in r8712_append_mpdu_unit()
    345 (((struct tx_desc *)pxmitbuf->pbuf)->txdw0 & 0x0000ffff); in r8712_xmitframe_aggr_next()
    352 (((struct tx_desc *)pxmitbuf->pbuf)->txdw0 & 0x0000ffff); in r8712_xmitframe_aggr_next()
    360 struct tx_desc *ptxdesc = pxmitbuf->pbuf; in r8712_dump_aggr_xframe()
    405 struct tx_desc *ptxdesc = (struct tx_desc *)pmem; in update_txdesc()
    413 struct tx_desc txdesc_mp; in update_txdesc()
    415 memcpy(&txdesc_mp, ptxdesc, sizeof(struct tx_desc)); in update_txdesc()
    416 memset(ptxdesc, 0, sizeof(struct tx_desc)); in update_txdesc()
    537 struct tx_desc *ptxdesc_mp; in update_txdesc()
|
D | rtl8712_xmit.h |
    40 #define tx_cmd tx_desc
    80 struct tx_desc { struct
    94 struct tx_desc txdesc; argument
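The header hit at line 80 is the descriptor layout itself. In this family of USB WLAN drivers a TX descriptor is essentially a fixed block of little-endian 32-bit words that software fills through mask/shift macros; a rough, assumed shape (field comments are guesses, not the rtl8712 definitions):

    #include <linux/types.h>

    struct my_tx_desc_shape {
            __le32 txdw0;   /* e.g. packet length and ownership bits */
            __le32 txdw1;   /* e.g. queue select, security type */
            __le32 txdw2;
            __le32 txdw3;
            __le32 txdw4;   /* e.g. rate-control flags */
            __le32 txdw5;
            __le32 txdw6;
            __le32 txdw7;
    };

The #define tx_cmd tx_desc at line 40 suggests the same layout doubles as the firmware command block format.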
|
/drivers/net/ethernet/seeq/ |
D | sgiseeq.c |
    96 struct sgiseeq_tx_desc *tx_desc; member
    196 sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT; in seeq_init_ring()
    197 dma_sync_desc_dev(dev, &sp->tx_desc[i]); in seeq_init_ring()
    230 if (sp->tx_desc[i].skb) { in seeq_purge_ring()
    231 dev_kfree_skb(sp->tx_desc[i].skb); in seeq_purge_ring()
    232 sp->tx_desc[i].skb = NULL; in seeq_purge_ring()
    253 struct sgiseeq_tx_desc *t = gpriv->tx_desc; in sgiseeq_dump_rings()
    312 hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc); in init_seeq()
    443 td = &sp->tx_desc[i]; in kick_tx()
    448 td = &sp->tx_desc[i]; in kick_tx()
    [all …]
|
/drivers/net/ethernet/marvell/mvpp2/ |
D | mvpp2_main.c |
    173 struct mvpp2_tx_desc *tx_desc) in mvpp2_txdesc_dma_addr_get() argument
    176 return le32_to_cpu(tx_desc->pp21.buf_dma_addr); in mvpp2_txdesc_dma_addr_get()
    178 return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) & in mvpp2_txdesc_dma_addr_get()
    183 struct mvpp2_tx_desc *tx_desc, in mvpp2_txdesc_dma_addr_set() argument
    192 tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr); in mvpp2_txdesc_dma_addr_set()
    193 tx_desc->pp21.packet_offset = offset; in mvpp2_txdesc_dma_addr_set()
    197 tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK); in mvpp2_txdesc_dma_addr_set()
    198 tx_desc->pp22.buf_dma_addr_ptp |= val; in mvpp2_txdesc_dma_addr_set()
    199 tx_desc->pp22.packet_offset = offset; in mvpp2_txdesc_dma_addr_set()
    204 struct mvpp2_tx_desc *tx_desc) in mvpp2_txdesc_size_get() argument
    [all …]
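The mvpp2 hits are all accessors: one helper per descriptor field hides the layout difference between two hardware generations (the pp21/pp22 unions), so the rest of the driver never branches on the variant. A sketch of that dispatch idiom, with invented names (my_port, MY_HW_V1, MY_DESC_DMA_MASK) rather than the mvpp2 definitions:

    #include <linux/bits.h>
    #include <linux/types.h>

    #define MY_DESC_DMA_MASK GENMASK_ULL(39, 0)  /* invented 40-bit DMA mask */

    enum my_hw_version { MY_HW_V1, MY_HW_V2 };

    struct my_port { enum my_hw_version hw_version; };

    struct my_tx_desc {
            union {
                    struct { __le32 buf_dma_addr; } v1;      /* 32-bit layout */
                    struct { __le64 buf_dma_addr_ptp; } v2;  /* 64-bit + PTP bits */
            };
    };

    static dma_addr_t my_txdesc_dma_addr_get(struct my_port *port,
                                             struct my_tx_desc *tx_desc)
    {
            if (port->hw_version == MY_HW_V1)
                    return le32_to_cpu(tx_desc->v1.buf_dma_addr);

            /* newer layout packs extra state into the high bits; mask it off */
            return le64_to_cpu(tx_desc->v2.buf_dma_addr_ptp) & MY_DESC_DMA_MASK;
    }

Centralising the branch in get/set helpers keeps the hot paths free of per-generation special cases and lets one binary drive both variants.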
|
/drivers/net/ethernet/intel/iavf/ |
D | iavf_txrx.c |
    198 struct iavf_tx_desc *tx_desc; in iavf_clean_tx_irq() local
    203 tx_desc = IAVF_TX_DESC(tx_ring, i); in iavf_clean_tx_irq()
    216 iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); in iavf_clean_tx_irq()
    243 while (tx_desc != eop_desc) { in iavf_clean_tx_irq()
    245 tx_ring, tx_desc, tx_buf); in iavf_clean_tx_irq()
    248 tx_desc++; in iavf_clean_tx_irq()
    253 tx_desc = IAVF_TX_DESC(tx_ring, 0); in iavf_clean_tx_irq()
    268 tx_desc++; in iavf_clean_tx_irq()
    273 tx_desc = IAVF_TX_DESC(tx_ring, 0); in iavf_clean_tx_irq()
    276 prefetch(tx_desc); in iavf_clean_tx_irq()
    [all …]
|
/drivers/net/ethernet/oki-semi/pch_gbe/ |
D | pch_gbe_main.c |
    1090 struct pch_gbe_tx_desc *tx_desc; in pch_gbe_tx_queue() local
    1176 tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num); in pch_gbe_tx_queue()
    1177 tx_desc->buffer_addr = (buffer_info->dma); in pch_gbe_tx_queue()
    1178 tx_desc->length = (tmp_skb->len); in pch_gbe_tx_queue()
    1179 tx_desc->tx_words_eob = ((tmp_skb->len + 3)); in pch_gbe_tx_queue()
    1180 tx_desc->tx_frame_ctrl = (frame_ctrl); in pch_gbe_tx_queue()
    1181 tx_desc->gbec_status = (DSC_INIT16); in pch_gbe_tx_queue()
    1449 struct pch_gbe_tx_desc *tx_desc; in pch_gbe_alloc_tx_buffers() local
    1459 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); in pch_gbe_alloc_tx_buffers()
    1460 tx_desc->gbec_status = (DSC_INIT16); in pch_gbe_alloc_tx_buffers()
    [all …]
|
/drivers/net/ethernet/intel/i40e/ |
D | i40e_txrx.c |
    93 struct i40e_tx_desc *tx_desc; in i40e_program_fdir_filter() local
    128 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_program_fdir_filter()
    139 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_program_fdir_filter()
    145 tx_desc->cmd_type_offset_bsz = in i40e_program_fdir_filter()
    154 first->next_to_watch = tx_desc; in i40e_program_fdir_filter()
    781 struct i40e_tx_desc *tx_desc; in i40e_clean_tx_irq() local
    786 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_clean_tx_irq()
    801 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); in i40e_clean_tx_irq()
    803 if (tx_head == tx_desc) in i40e_clean_tx_irq()
    830 while (tx_desc != eop_desc) { in i40e_clean_tx_irq()
    [all …]
|
/drivers/net/ethernet/hisilicon/ |
D | hip04_eth.c |
    173 struct tx_desc { struct
    226 struct tx_desc *tx_desc; member
    451 struct tx_desc *desc; in hip04_tx_reclaim()
    461 desc = &priv->tx_desc[tx_tail]; in hip04_tx_reclaim()
    511 struct tx_desc *desc = &priv->tx_desc[tx_head]; in hip04_mac_start_xmit()
    540 phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc); in hip04_mac_start_xmit()
    542 offsetof(struct tx_desc, send_addr)); in hip04_mac_start_xmit()
    855 priv->tx_desc = dma_alloc_coherent(d, in hip04_alloc_ring()
    856 TX_DESC_NUM * sizeof(struct tx_desc), in hip04_alloc_ring()
    858 if (!priv->tx_desc) in hip04_alloc_ring()
    [all …]
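The hip04 hits at 855-858 and 540 show the other common ring style: descriptor memory comes from dma_alloc_coherent(), so CPU stores are visible to the device without explicit syncs, and the device-side address of slot N is simply base + N * sizeof(desc). A hypothetical sketch of that setup (my_priv and the field layout are invented; dma_alloc_coherent is the real API):

    #include <linux/dma-mapping.h>

    #define MY_TX_DESC_NUM 256           /* invented ring size */

    struct my_tx_desc { __le32 send_addr; __le32 send_size; __le32 cfg; };

    struct my_priv {
            struct my_tx_desc *tx_desc;  /* CPU view of the ring */
            dma_addr_t tx_desc_dma;      /* device view of the same memory */
    };

    static int my_alloc_tx_ring(struct device *d, struct my_priv *priv)
    {
            priv->tx_desc = dma_alloc_coherent(d,
                            MY_TX_DESC_NUM * sizeof(struct my_tx_desc),
                            &priv->tx_desc_dma, GFP_KERNEL);
            return priv->tx_desc ? 0 : -ENOMEM;
    }

    /* address handed to the hardware for ring slot "head" */
    static dma_addr_t my_slot_phys(struct my_priv *priv, unsigned int head)
    {
            return priv->tx_desc_dma + head * sizeof(struct my_tx_desc);
    }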
|
/drivers/net/ethernet/ |
D | ec_bhf.c |
    103 struct tx_desc { struct
    144 struct tx_desc *tx_descs;
    170 static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc) in ec_bhf_send_packet()
    178 static int ec_bhf_desc_sent(struct tx_desc *desc) in ec_bhf_desc_sent()
    286 struct tx_desc *desc; in ec_bhf_start_xmit()
    361 priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc); in ec_bhf_setup_tx_descs()
    362 priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf; in ec_bhf_setup_tx_descs()
    409 FIFO_SIZE * sizeof(struct tx_desc)); in ec_bhf_open()
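The ec_bhf hits show a third pattern: the descriptor array is simply overlaid on a chunk of device-visible buffer memory (lines 361-362), and completion is polled from a per-descriptor status flag rather than signalled by an interrupt. A hedged sketch with assumed field names:

    #include <linux/bits.h>
    #include <linux/compiler.h>
    #include <linux/types.h>

    #define MY_DESC_SENT BIT(0)          /* invented "sent" flag */

    struct my_tx_desc {
            u32 header;
            u8  status;                  /* device sets a "sent" bit here */
            u8  data[256];
    };

    struct my_priv {
            struct my_tx_desc *tx_descs;
            unsigned int tx_dcount;
    };

    static void my_setup_tx_descs(struct my_priv *priv, void *buf, size_t len)
    {
            priv->tx_dcount = len / sizeof(struct my_tx_desc); /* how many fit */
            priv->tx_descs = (struct my_tx_desc *)buf;         /* overlay array */
    }

    static bool my_desc_sent(const struct my_tx_desc *desc)
    {
            return READ_ONCE(desc->status) & MY_DESC_SENT;
    }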
|
/drivers/dma/xilinx/ |
D | xilinx_dpdma.c |
    601 struct xilinx_dpdma_tx_desc *tx_desc) in xilinx_dpdma_chan_dump_tx_desc() argument
    610 list_for_each_entry(sw_desc, &tx_desc->descriptors, node) { in xilinx_dpdma_chan_dump_tx_desc()
    647 struct xilinx_dpdma_tx_desc *tx_desc; in xilinx_dpdma_chan_alloc_tx_desc() local
    649 tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT); in xilinx_dpdma_chan_alloc_tx_desc()
    650 if (!tx_desc) in xilinx_dpdma_chan_alloc_tx_desc()
    653 INIT_LIST_HEAD(&tx_desc->descriptors); in xilinx_dpdma_chan_alloc_tx_desc()
    654 tx_desc->chan = chan; in xilinx_dpdma_chan_alloc_tx_desc()
    655 tx_desc->error = false; in xilinx_dpdma_chan_alloc_tx_desc()
    657 return tx_desc; in xilinx_dpdma_chan_alloc_tx_desc()
    699 struct xilinx_dpdma_tx_desc *tx_desc; in xilinx_dpdma_chan_prep_interleaved_dma() local
    [all …]
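The allocation hit at 647-657 illustrates a dmaengine constraint: prep callbacks may run in atomic context, so the software descriptor is allocated with GFP_NOWAIT and its chain of hardware sub-descriptors starts out as an empty list. A sketch with stand-in types:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_dma_chan;                  /* opaque channel, illustrative */

    struct my_dma_tx_desc {
            struct list_head descriptors;  /* chain of hw sub-descriptors */
            struct my_dma_chan *chan;
            bool error;
    };

    static struct my_dma_tx_desc *my_alloc_tx_desc(struct my_dma_chan *chan)
    {
            struct my_dma_tx_desc *tx_desc;

            tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT); /* no sleeping here */
            if (!tx_desc)
                    return NULL;

            INIT_LIST_HEAD(&tx_desc->descriptors); /* empty sub-descriptor chain */
            tx_desc->chan = chan;
            tx_desc->error = false;

            return tx_desc;
    }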
|
/drivers/spi/ |
D | spi-pxa2xx-dma.c |
    143 struct dma_async_tx_descriptor *tx_desc, *rx_desc; in pxa2xx_spi_dma_prepare() local
    146 tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer); in pxa2xx_spi_dma_prepare()
    147 if (!tx_desc) { in pxa2xx_spi_dma_prepare()
    167 dmaengine_submit(tx_desc); in pxa2xx_spi_dma_prepare()
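These four hits are the client side of the dmaengine API: prepare a slave transfer, submit the returned descriptor to get a cookie, then kick the engine. A hedged sketch of that sequence (names are hypothetical except the dmaengine calls themselves; error handling is trimmed):

    #include <linux/dmaengine.h>

    static int my_start_tx_dma(struct dma_chan *chan, struct scatterlist *sgl,
                               unsigned int nents)
    {
            struct dma_async_tx_descriptor *tx_desc;
            dma_cookie_t cookie;

            tx_desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
                                              DMA_PREP_INTERRUPT);
            if (!tx_desc)
                    return -EBUSY;          /* channel could not prep: back off */

            cookie = dmaengine_submit(tx_desc);
            if (dma_submit_error(cookie))
                    return -EIO;

            dma_async_issue_pending(chan);  /* actually start the hardware */
            return 0;
    }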
|
/drivers/staging/rtl8188eu/hal/ |
D | rtl8188eu_xmit.c |
    29 static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc) in rtl8188eu_cal_txdesc_chksum()
    51 struct tx_desc *ptxdesc; in rtl8188e_fill_fake_txdesc()
    54 ptxdesc = (struct tx_desc *)desc; in rtl8188e_fill_fake_txdesc()
    86 static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc) in fill_txdesc_sectype()
    167 struct tx_desc *ptxdesc = (struct tx_desc *)pmem; in update_txdesc()
    173 ptxdesc = (struct tx_desc *)(pmem + PACKET_OFFSET_SZ); in update_txdesc()
    178 memset(ptxdesc, 0, sizeof(struct tx_desc)); in update_txdesc()
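The checksum helper at line 29 is typical of USB WLAN descriptors: the firmware verifies the descriptor with a simple XOR over its leading 16-bit words. A sketch of what such a helper usually computes (the exact word count and destination field vary by chip; treat this as an assumption, not the rtl8188eu layout):

    #include <linux/types.h>

    static u16 my_cal_txdesc_chksum(const __le16 *ptr, int nwords)
    {
            u16 checksum = 0;
            int i;

            /* XOR the descriptor's leading halfwords; firmware recomputes
             * the same value to validate the descriptor */
            for (i = 0; i < nwords; i++)
                    checksum ^= le16_to_cpu(ptr[i]);

            return checksum;
    }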
|