Searched refs:tcb (Results 1 – 15 of 15) sorted by relevance

/drivers/net/ethernet/brocade/bna/
bnad.c 161 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb) in bnad_txq_cleanup() argument
163 struct bnad_tx_unmap *unmap_q = tcb->unmap_q; in bnad_txq_cleanup()
167 for (i = 0; i < tcb->q_depth; i++) { in bnad_txq_cleanup()
171 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); in bnad_txq_cleanup()
183 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) in bnad_txcmpl_process() argument
187 struct bnad_tx_unmap *unmap_q = tcb->unmap_q; in bnad_txcmpl_process()
192 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) in bnad_txcmpl_process()
195 hw_cons = *(tcb->hw_consumer_index); in bnad_txcmpl_process()
196 cons = tcb->consumer_index; in bnad_txcmpl_process()
197 q_depth = tcb->q_depth; in bnad_txcmpl_process()
[all …]
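The bnad.c hits above show the classic two-index TX completion ring: hardware writes its consumer index back to host memory (hw_consumer_index) and the driver reclaims every entry between its cached consumer_index and that value. A minimal, self-contained sketch of the pattern; the type, field names, and the power-of-two depth are assumptions for illustration, not the driver's actual code:

#include <stdint.h>

struct tx_ring {
    uint32_t q_depth;                      /* assumed power of two */
    uint32_t consumer_index;               /* driver's cached cursor */
    volatile uint32_t *hw_consumer_index;  /* written back by hardware */
};

static uint32_t tx_reclaim(struct tx_ring *r)
{
    uint32_t hw_cons = *r->hw_consumer_index;
    uint32_t done = 0;

    while (r->consumer_index != hw_cons) {
        /* unmap and free the buffer at consumer_index here */
        r->consumer_index = (r->consumer_index + 1) & (r->q_depth - 1);
        done++;
    }
    return done;
}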
bnad_ethtool.c 766 struct bna_tcb *tcb = NULL; in bnad_per_q_stats_fill() local
833 if (bnad->tx_info[i].tcb[j] && in bnad_per_q_stats_fill()
834 bnad->tx_info[i].tcb[j]->txq) { in bnad_per_q_stats_fill()
835 tcb = bnad->tx_info[i].tcb[j]; in bnad_per_q_stats_fill()
836 buf[bi++] = tcb->txq->tx_packets; in bnad_per_q_stats_fill()
837 buf[bi++] = tcb->txq->tx_bytes; in bnad_per_q_stats_fill()
838 buf[bi++] = tcb->producer_index; in bnad_per_q_stats_fill()
839 buf[bi++] = tcb->consumer_index; in bnad_per_q_stats_fill()
840 buf[bi++] = *(tcb->hw_consumer_index); in bnad_per_q_stats_fill()
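bnad_per_q_stats_fill() above packs a fixed set of counters per live TX queue into the flat u64 buffer that ethtool's get_ethtool_stats expects. A condensed sketch of that fill pattern; struct txq_view is a stand-in for the driver's real layout:

#include <stdint.h>

struct txq_view {
    uint64_t tx_packets, tx_bytes;
    uint32_t producer_index, consumer_index;
    uint32_t *hw_consumer_index;
};

static int fill_txq_stats(uint64_t *buf, int bi, const struct txq_view *q)
{
    buf[bi++] = q->tx_packets;
    buf[bi++] = q->tx_bytes;
    buf[bi++] = q->producer_index;
    buf[bi++] = q->consumer_index;
    buf[bi++] = *q->hw_consumer_index;
    return bi;   /* caller keeps threading the buffer index, as bi does in the driver */
}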
bna_tx_rx.c 2965 txq->tcb->priority = txq->priority; in bna_tx_sm_started_entry()
3281 txq->tcb->sw_qpt = (void **) swqpt_mem->kva; in bna_txq_qpt_setup()
3282 txq->tcb->sw_q = page_mem->kva; in bna_txq_qpt_setup()
3288 txq->tcb->sw_qpt[i] = kva; in bna_txq_qpt_setup()
3331 txq->tcb = NULL; in bna_tx_free()
3414 txq->tcb->i_dbell->doorbell_addr = in bna_bfi_tx_enet_start_rsp()
3417 txq->tcb->q_dbell = in bna_bfi_tx_enet_start_rsp()
3423 (*txq->tcb->hw_consumer_index) = 0; in bna_bfi_tx_enet_start_rsp()
3424 txq->tcb->producer_index = txq->tcb->consumer_index = 0; in bna_bfi_tx_enet_start_rsp()
3578 txq->tcb = (struct bna_tcb *) in bna_tx_create()
[all …]
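bna_txq_qpt_setup() above populates a software queue page table: one kernel virtual address per queue page, so a queue entry can later be located as page plus offset. A hedged sketch of that idea; the page size and all names here are illustrative assumptions:

#include <stddef.h>

#define QPT_PAGE_SIZE 4096   /* assumed queue page size */

static void qpt_setup(void **sw_qpt, void *page_mem, int page_count)
{
    int i;

    /* record the kva of each successive queue page */
    for (i = 0; i < page_count; i++)
        sw_qpt[i] = (char *)page_mem + (size_t)i * QPT_PAGE_SIZE;
}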
bnad.h 207 struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX]; member
bna_types.h 461 struct bna_tcb *tcb; member
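The two "member" hits fit together as follows: the bnad layer keeps an array of tcb pointers per TX object, while each bna_txq carries a single tcb. A trimmed sketch with only the fields visible in the hits; the array bound and struct names are abbreviations, not the real definitions:

#define BNAD_MAX_TXQ_PER_TX 8   /* illustrative; the real value lives in bnad.h */

struct bna_tcb;                 /* opaque for this sketch */

struct bnad_tx_info_sketch {
    struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];   /* per-queue control blocks (bnad.h) */
};

struct bna_txq_sketch {
    struct bna_tcb *tcb;   /* set in bna_tx_create(), cleared in bna_tx_free() */
};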
/drivers/staging/et131x/
et131x.c 355 struct tcb { struct
356 struct tcb *next; /* Next entry in ring */ argument
368 struct tcb *tcb_ring; argument
371 struct tcb *tcb_qhead;
372 struct tcb *tcb_qtail;
380 struct tcb *send_head;
381 struct tcb *send_tail;
2053 struct tcb *tcb; in et131x_init_send() local
2059 tcb = adapter->tx_ring.tcb_ring; in et131x_init_send()
2061 tx_ring->tcb_qhead = tcb; in et131x_init_send()
[all …]
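In et131x.c, tcb is a transmit control block, and et131x_init_send() threads the backing tcb_ring array into a free list via ->next, with tcb_qhead/tcb_qtail as the ends of the free queue. A self-contained sketch of that initialization; struct contents beyond the link are omitted and all names are illustrative:

#include <stddef.h>

struct tcb_s {
    struct tcb_s *next;          /* next entry in the free list */
};

struct tx_ring_s {
    struct tcb_s *tcb_ring;      /* backing array */
    struct tcb_s *tcb_qhead;     /* free-list head */
    struct tcb_s *tcb_qtail;     /* free-list tail */
};

static void init_send(struct tx_ring_s *tx, int num_tcb)
{
    struct tcb_s *tcb = tx->tcb_ring;
    int i;

    tx->tcb_qhead = tcb;
    for (i = 0; i < num_tcb - 1; i++)
        tcb[i].next = &tcb[i + 1];  /* chain each block to its neighbor */
    tcb[num_tcb - 1].next = NULL;
    tx->tcb_qtail = &tcb[num_tcb - 1];
}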
/drivers/isdn/gigaset/
ser-gigaset.c 108 struct cmdbuf_t *cb, *tcb; in send_cb() local
137 tcb = cb; in send_cb()
148 if (tcb->wake_tasklet) in send_cb()
149 tasklet_schedule(tcb->wake_tasklet); in send_cb()
150 kfree(tcb); in send_cb()
usb-gigaset.c 435 struct cmdbuf_t *tcb; in send_cb() local
443 tcb = cb; in send_cb()
459 if (tcb->wake_tasklet) in send_cb()
460 tasklet_schedule(tcb->wake_tasklet); in send_cb()
461 kfree(tcb); in send_cb()
common.c 821 struct cmdbuf_t *cb, *tcb; in cleanup_cs() local
840 tcb = cb; in cleanup_cs()
842 kfree(tcb); in cleanup_cs()
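In the gigaset hits, tcb is not a control block at all, just a temporary: a second pointer remembers the current command buffer so it can be released after the cursor advances. A generic sketch of that free-while-iterating pattern, using userspace free() in place of kfree():

#include <stdlib.h>

struct cmdbuf {
    struct cmdbuf *next;
};

static void free_cmdbuf_list(struct cmdbuf *cb)
{
    struct cmdbuf *tcb;

    while (cb) {
        tcb = cb;          /* remember the current buffer */
        cb = cb->next;     /* advance first ... */
        free(tcb);         /* ... then release it */
    }
}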
/drivers/scsi/
ncr53c8xx.c 1126 struct tcb;
1162 struct tcb { struct
1654 struct tcb target[MAX_TARGET]; /* Target data */
1937 static void ncr_negotiate (struct ncb* np, struct tcb* tp);
3972 struct tcb *tp = &np->target[i]; in ncr_prepare_setting()
4062 struct tcb *tp = &np->target[cp->target]; in ncr_prepare_nego()
4122 struct tcb *tp = &np->target[sdev->id]; in ncr_queue_command()
4707 struct tcb *tp; in ncr_detach()
4819 struct tcb *tp; in ncr_complete()
5094 struct tcb *tp = &np->target[cp->target]; in ncr_ccb_skipped()
[all …]
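In ncr53c8xx.c, struct tcb is a per-target control block: the board structure embeds one tcb per possible SCSI target, indexed by target id (e.g. &np->target[sdev->id] above). A sketch of that lookup; MAX_TARGET and the field names follow the hits, while the struct contents are placeholders:

#define MAX_TARGET 16   /* per the driver's array bound; value assumed here */

struct tcb_t {
    int period;         /* placeholder for per-target negotiation state */
};

struct ncb_t {
    struct tcb_t target[MAX_TARGET];   /* one control block per target id */
};

static struct tcb_t *target_cb(struct ncb_t *np, int id)
{
    return &np->target[id];
}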
/drivers/pwm/
Makefile 3 obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
Kconfig 50 will be called pwm-atmel-tcb.
/drivers/infiniband/hw/cxgb4/
cm.c 1513 req->tcb.t_state_to_astid = in send_fw_act_open_req()
1516 req->tcb.cplrxdataack_cplpassacceptrpl = in send_fw_act_open_req()
1518 req->tcb.tx_max = (__force __be32) jiffies; in send_fw_act_open_req()
1519 req->tcb.rcv_adv = htons(1); in send_fw_act_open_req()
1522 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) | in send_fw_act_open_req()
1534 req->tcb.opt2 = (__force __be32) (PACE(1) | in send_fw_act_open_req()
1540 req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1); in send_fw_act_open_req()
1542 req->tcb.opt2 |= (__force __be32) SACK_EN(1); in send_fw_act_open_req()
1544 req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1); in send_fw_act_open_req()
1545 req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0); in send_fw_act_open_req()
[all …]
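Here tcb is a TCP Control Block image inside a firmware work request: send_fw_act_open_req() ORs field macros together in CPU byte order (the __force casts suppress sparse warnings while it does so) and converts to big-endian once at the end, as line 1545 shows for opt0. A generic sketch of that build-then-swap pattern; the macros and their shifts are stand-ins, not Chelsio's encodings:

#include <stdint.h>

#define OPT_FLAG_A(x) ((uint64_t)(x) << 3)   /* illustrative field encodings */
#define OPT_FLAG_B(x) ((uint64_t)(x) << 7)

static uint64_t cpu_to_be64_sketch(uint64_t v)
{
    return __builtin_bswap64(v);             /* assumes a little-endian host */
}

static uint64_t build_opt0(int bypass, int smac)
{
    uint64_t opt0 = OPT_FLAG_A(bypass) | OPT_FLAG_B(smac);

    return cpu_to_be64_sketch(opt0);         /* single byte swap at the end */
}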
/drivers/net/ethernet/intel/
e100.c 521 } tcb; member
1791 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); in e100_xmit_prepare()
1792 cb->u.tcb.tcb_byte_count = 0; in e100_xmit_prepare()
1793 cb->u.tcb.threshold = nic->tx_threshold; in e100_xmit_prepare()
1794 cb->u.tcb.tbd_count = 1; in e100_xmit_prepare()
1795 cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); in e100_xmit_prepare()
1796 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); in e100_xmit_prepare()
1860 le32_to_cpu(cb->u.tcb.tbd.buf_addr), in e100_tx_clean()
1861 le16_to_cpu(cb->u.tcb.tbd.size), in e100_tx_clean()
1887 le32_to_cpu(cb->u.tcb.tbd.buf_addr), in e100_clean_cbs()
[all …]
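The e100 hits show a transmit command block living in a union inside the shared control block, with the TBD array's bus address derived from the cb's own DMA address via offsetof() (line 1791 above). A trimmed, self-contained sketch of that layout; types and fields are simplified, and the driver's cpu_to_le32()/cpu_to_le16() conversions are noted in comments:

#include <stdint.h>
#include <stddef.h>

struct tbd {
    uint32_t buf_addr;     /* little-endian on the wire */
    uint16_t size;
};

struct cb {
    uint32_t dma_addr;     /* bus address of this cb */
    union {
        struct {
            uint32_t tbd_array;
            uint16_t tcb_byte_count;
            uint8_t  threshold;
            uint8_t  tbd_count;
            struct tbd tbd;
        } tcb;
    } u;
};

static void prep_tx(struct cb *cb, uint32_t buf_dma, uint16_t len)
{
    /* point the hardware at the TBD embedded in this same cb */
    cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
    cb->u.tcb.tbd_count = 1;
    cb->u.tcb.tbd.buf_addr = buf_dma;   /* cpu_to_le32() in the driver */
    cb->u.tcb.tbd.size = len;           /* cpu_to_le16() in the driver */
}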
/drivers/net/ethernet/chelsio/cxgb4/
t4fw_api.h 458 } tcb; member
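This t4fw_api.h hit is the union member that the cxgb4 cm.c code above fills in: a TCB image carried inside a firmware work request. A skeleton with only the fields visible in the hits; the field widths are guesses from the htons/be32/be64 usage in cm.c, and the real struct has more members:

#include <stdint.h>

struct fw_wr_sketch {
    union {
        struct {
            uint32_t t_state_to_astid;
            uint16_t cplrxdataack_cplpassacceptrpl;
            uint32_t tx_max;
            uint16_t rcv_adv;              /* htons(1) in cm.c */
            uint64_t opt0;                 /* big-endian once built */
            uint32_t opt2;
        } tcb;                             /* field names from the search hits */
    } u;
};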