/drivers/net/ethernet/brocade/bna/

D | bnad.c
     137  bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)        in bnad_txq_cleanup() argument
     139  struct bnad_tx_unmap *unmap_q = tcb->unmap_q;                   in bnad_txq_cleanup()
     143  for (i = 0; i < tcb->q_depth; i++) {                            in bnad_txq_cleanup()
     147  bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);             in bnad_txq_cleanup()
     159  bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)     in bnad_txcmpl_process() argument
     163  struct bnad_tx_unmap *unmap_q = tcb->unmap_q;                   in bnad_txcmpl_process()
     168  if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))                in bnad_txcmpl_process()
     171  hw_cons = *(tcb->hw_consumer_index);                            in bnad_txcmpl_process()
     173  cons = tcb->consumer_index;                                     in bnad_txcmpl_process()
     174  q_depth = tcb->q_depth;                                         in bnad_txcmpl_process()
     [all …]

D | bnad_ethtool.c
     757  struct bna_tcb *tcb = NULL;                                     in bnad_per_q_stats_fill() local
     826  if (bnad->tx_info[i].tcb[j] &&                                  in bnad_per_q_stats_fill()
     827  bnad->tx_info[i].tcb[j]->txq) {                                 in bnad_per_q_stats_fill()
     828  tcb = bnad->tx_info[i].tcb[j];                                  in bnad_per_q_stats_fill()
     829  buf[bi++] = tcb->txq->tx_packets;                               in bnad_per_q_stats_fill()
     830  buf[bi++] = tcb->txq->tx_bytes;                                 in bnad_per_q_stats_fill()
     831  buf[bi++] = tcb->producer_index;                                in bnad_per_q_stats_fill()
     832  buf[bi++] = tcb->consumer_index;                                in bnad_per_q_stats_fill()
     833  buf[bi++] = *(tcb->hw_consumer_index);                          in bnad_per_q_stats_fill()

D | bna_tx_rx.c
    2874  txq->tcb->priority = txq->priority;                             in bna_tx_sm_started_entry()
    3177  txq->tcb->sw_qpt = (void **) swqpt_mem->kva;                    in bna_txq_qpt_setup()
    3178  txq->tcb->sw_q = page_mem->kva;                                 in bna_txq_qpt_setup()
    3184  txq->tcb->sw_qpt[i] = kva;                                      in bna_txq_qpt_setup()
    3222  txq->tcb = NULL;                                                in bna_tx_free()
    3284  txq->tcb->i_dbell->doorbell_addr =                              in bna_bfi_tx_enet_start_rsp()
    3287  txq->tcb->q_dbell =                                             in bna_bfi_tx_enet_start_rsp()
    3293  (*txq->tcb->hw_consumer_index) = 0;                             in bna_bfi_tx_enet_start_rsp()
    3294  txq->tcb->producer_index = txq->tcb->consumer_index = 0;        in bna_bfi_tx_enet_start_rsp()
    3442  txq->tcb = (struct bna_tcb *)                                   in bna_tx_create()
    [all …]

D | bnad.h
     205  struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];                       member

D | bna_types.h
     445  struct bna_tcb *tcb;                                            member

/drivers/net/ethernet/agere/

D | et131x.c
     349  struct tcb {                                                    struct
     350  struct tcb *next;  /* Next entry in ring */                     argument
     361  struct tcb *tcb_ring;                                           argument
     364  struct tcb *tcb_qhead;
     365  struct tcb *tcb_qtail;
     368  struct tcb *send_head;
     369  struct tcb *send_tail;
    1757  struct tcb *tcb = tx_ring->tcb_ring;                            in et131x_init_send() local
    1759  tx_ring->tcb_qhead = tcb;                                       in et131x_init_send()
    1761  memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);                   in et131x_init_send()
    [all …]

/drivers/staging/isdn/gigaset/

D | ser-gigaset.c
     104  struct cmdbuf_t *cb, *tcb;                                      in send_cb() local
     132  tcb = cb;                                                       in send_cb()
     143  if (tcb->wake_tasklet)                                          in send_cb()
     144  tasklet_schedule(tcb->wake_tasklet);                            in send_cb()
     145  kfree(tcb);                                                     in send_cb()

D | common.c
     817  struct cmdbuf_t *cb, *tcb;                                      in cleanup_cs() local
     836  tcb = cb;                                                       in cleanup_cs()
     838  kfree(tcb);                                                     in cleanup_cs()

/drivers/scsi/

D | ncr53c8xx.c
    1114  struct tcb;
    1150  struct tcb {                                                    struct
    1642  struct tcb target[MAX_TARGET];  /* Target data */
    1925  static void ncr_negotiate (struct ncb* np, struct tcb* tp);
    3963  struct tcb *tp = &np->target[i];                                in ncr_prepare_setting()
    4053  struct tcb *tp = &np->target[cp->target];                       in ncr_prepare_nego()
    4113  struct tcb *tp = &np->target[sdev->id];                         in ncr_queue_command()
    4698  struct tcb *tp;                                                 in ncr_detach()
    4810  struct tcb *tp;                                                 in ncr_complete()
    5085  struct tcb *tp = &np->target[cp->target];                       in ncr_ccb_skipped()
    [all …]

/drivers/infiniband/hw/cxgb4/

D | cm.c
    1980  req->tcb.t_state_to_astid =                                     in send_fw_act_open_req()
    1983  req->tcb.cplrxdataack_cplpassacceptrpl =                        in send_fw_act_open_req()
    1985  req->tcb.tx_max = (__force __be32) jiffies;                     in send_fw_act_open_req()
    1986  req->tcb.rcv_adv = htons(1);                                    in send_fw_act_open_req()
    2000  req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |               in send_fw_act_open_req()
    2012  req->tcb.opt2 = (__force __be32) (PACE_V(1) |                   in send_fw_act_open_req()
    2018  req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;                  in send_fw_act_open_req()
    2020  req->tcb.opt2 |= (__force __be32)SACK_EN_F;                     in send_fw_act_open_req()
    2022  req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;                in send_fw_act_open_req()
    2023  req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);        in send_fw_act_open_req()
    [all …]

/drivers/net/ethernet/intel/

D | e100.c
     496  } tcb;                                                          member
    1766  cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);    in e100_xmit_prepare()
    1767  cb->u.tcb.tcb_byte_count = 0;                                   in e100_xmit_prepare()
    1768  cb->u.tcb.threshold = nic->tx_threshold;                        in e100_xmit_prepare()
    1769  cb->u.tcb.tbd_count = 1;                                        in e100_xmit_prepare()
    1770  cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);                 in e100_xmit_prepare()
    1771  cb->u.tcb.tbd.size = cpu_to_le16(skb->len);                     in e100_xmit_prepare()
    1835  le32_to_cpu(cb->u.tcb.tbd.buf_addr),                            in e100_tx_clean()
    1836  le16_to_cpu(cb->u.tcb.tbd.size),                                in e100_tx_clean()
    1862  le32_to_cpu(cb->u.tcb.tbd.buf_addr),                            in e100_clean_cbs()
    [all …]

/drivers/pwm/

D | Makefile
       7  obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o

D | Kconfig
      76  will be called pwm-atmel-tcb.

/drivers/clocksource/

D | Makefile
       6  obj-$(CONFIG_ATMEL_TCB_CLKSRC) += timer-atmel-tcb.o

/drivers/net/ethernet/chelsio/cxgb4/

D | t4fw_api.h
     576  } tcb;                                                          member