Lines matching refs: txq (cross-reference listing; the functions below are from the Freescale FEC Ethernet driver, fec_main.c)
276 #define IS_TSO_HEADER(txq, addr) \ argument
277 ((addr >= txq->tso_hdrs_dma) && \
278 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
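IS_TSO_HEADER (line 276) is how the driver tells buffers it must dma_unmap from buffers it must not: addresses inside the queue's coherent TSO-header block are never unmapped (see its uses at lines 864 and 1270 below). A minimal userspace sketch of the same half-open range check, assuming TSO_HEADER_SIZE is the 128-byte slot size from net/tso.h and modeling only the two fields the macro touches:

    #include <stdbool.h>
    #include <stdint.h>

    #define TSO_HEADER_SIZE 128  /* assumed: the net/tso.h slot size */

    struct txq_model {
        uint64_t tso_hdrs_dma;   /* DMA base of the coherent header block */
        unsigned int ring_size;  /* one header slot per descriptor */
    };

    /* Half-open interval test: [base, base + ring_size * TSO_HEADER_SIZE) */
    static bool is_tso_header(const struct txq_model *q, uint64_t addr)
    {
        return addr >= q->tso_hdrs_dma &&
               addr <  q->tso_hdrs_dma + (uint64_t)q->ring_size * TSO_HEADER_SIZE;
    }
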
302 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) in fec_enet_get_free_txdesc_num() argument
306 entries = (((const char *)txq->dirty_tx - in fec_enet_get_free_txdesc_num()
307 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; in fec_enet_get_free_txdesc_num()
309 return entries >= 0 ? entries : entries + txq->bd.ring_size; in fec_enet_get_free_txdesc_num()
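fec_enet_get_free_txdesc_num (line 302) turns the byte difference between the reclaim pointer (dirty_tx) and the producer (bd.cur) into a descriptor count with a shift by dsize_log2, holds one slot back so the producer never runs into the reclaim pointer, and wraps negative results by ring_size. The same arithmetic on plain indices, with illustrative values:

    #include <stdio.h>

    /* Free entries between a consumer (dirty) and producer (cur) index in a
     * ring of ring_size slots, keeping one slot in reserve as the driver does. */
    static int free_txdesc(int dirty, int cur, int ring_size)
    {
        int entries = dirty - cur - 1;
        return entries >= 0 ? entries : entries + ring_size;
    }

    int main(void)
    {
        /* e.g. ring of 8: producer at 5, consumer at 2 -> 4 free slots */
        printf("%d\n", free_txdesc(2, 5, 8));  /* prints 4 */
        return 0;
    }
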
335 struct fec_enet_priv_tx_q *txq; in fec_dump() local
341 txq = fep->tx_queue[0]; in fec_dump()
342 bdp = txq->bd.base; in fec_dump()
347 bdp == txq->bd.cur ? 'S' : ' ', in fec_dump()
348 bdp == txq->dirty_tx ? 'H' : ' ', in fec_dump()
352 txq->tx_skbuff[index]); in fec_dump()
353 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_dump()
355 } while (bdp != txq->bd.base); in fec_dump()
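fec_dump (line 335) walks the whole ring from base, marking the software producer position with 'S' (bd.cur) and the reclaim position with 'H' (dirty_tx), until the next-descriptor helper wraps back to base. A sketch of that wrap-around walk over an index ring (the positions are illustrative):

    #include <stdio.h>

    #define RING 4

    int main(void)
    {
        int cur = 2, dirty = 0;           /* illustrative positions */
        int i = 0;
        do {
            printf("%c%c desc %d\n",
                   i == cur   ? 'S' : ' ',  /* software producer */
                   i == dirty ? 'H' : ' ',  /* reclaim point */
                   i);
            i = (i + 1) % RING;           /* fec_enet_get_nextdesc() analogue */
        } while (i != 0);                 /* stop once we wrap back to base */
        return 0;
    }
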
381 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_submit_frag_skb() argument
386 struct bufdesc *bdp = txq->bd.cur; in fec_enet_txq_submit_frag_skb()
400 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_submit_frag_skb()
421 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_submit_frag_skb()
430 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_submit_frag_skb()
433 memcpy(txq->tx_bounce[index], bufaddr, frag_len); in fec_enet_txq_submit_frag_skb()
434 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_submit_frag_skb()
459 bdp = txq->bd.cur; in fec_enet_txq_submit_frag_skb()
461 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_submit_frag_skb()
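fec_enet_txq_submit_frag_skb (line 381) consumes one descriptor per page fragment and falls back to a per-index bounce buffer (tx_bounce[index], lines 433-434) when a fragment's address is unsuitable for DMA; in the driver the trigger includes an alignment test and a byte-swap quirk, modeled here as a bare alignment check with an illustrative mask:

    #include <stdint.h>
    #include <string.h>

    #define ALIGN_MASK 0x3  /* illustrative: require 4-byte alignment */

    /* Model of the bounce-buffer fallback: if the payload pointer is not
     * DMA-friendly, copy it into a preallocated buffer and use that instead. */
    static const void *maybe_bounce(const void *buf, size_t len,
                                    void *bounce /* tx_bounce[index] analogue */)
    {
        if ((uintptr_t)buf & ALIGN_MASK) {
            memcpy(bounce, buf, len);
            return bounce;   /* map the bounce buffer, not the fragment */
        }
        return buf;
    }
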
468 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_submit_skb() argument
482 entries_free = fec_enet_get_free_txdesc_num(txq); in fec_enet_txq_submit_skb()
497 bdp = txq->bd.cur; in fec_enet_txq_submit_skb()
506 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_submit_skb()
509 memcpy(txq->tx_bounce[index], skb->data, buflen); in fec_enet_txq_submit_skb()
510 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_submit_skb()
526 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); in fec_enet_txq_submit_skb()
554 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_submit_skb()
563 index = fec_enet_get_bd_index(last_bdp, &txq->bd); in fec_enet_txq_submit_skb()
565 txq->tx_skbuff[index] = skb; in fec_enet_txq_submit_skb()
579 bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); in fec_enet_txq_submit_skb()
587 txq->bd.cur = bdp; in fec_enet_txq_submit_skb()
590 writel(0, txq->bd.reg_desc_active); in fec_enet_txq_submit_skb()
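fec_enet_txq_submit_skb (line 468) records the skb at the index of its last descriptor (line 565), so reclaim frees it only once the whole chain has completed, then advances bd.cur past the chain (line 587) and writes 0 to reg_desc_active (line 590) to make the MAC rescan the ring. A sketch of that commit order with indices standing in for descriptor pointers (the names are analogues, not the driver's):

    #include <stddef.h>

    #define RING 64

    struct tx_commit {
        void *skb_slot[RING];             /* tx_skbuff analogue */
        int cur;                          /* bd.cur as an index */
        volatile unsigned int *doorbell;  /* reg_desc_active analogue */
    };

    /* Illustrative commit order for one skb: bookkeeping first, then the
     * producer advance, then the doorbell write that makes the MAC rescan. */
    static void commit_skb(struct tx_commit *r, void *skb, int last_idx)
    {
        r->skb_slot[last_idx] = skb;      /* freed later at TX-complete time */
        r->cur = (last_idx + 1) % RING;   /* bd.cur = nextdesc(last_bdp) */
        *r->doorbell = 0;                 /* writel(0, reg_desc_active) */
    }
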
596 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, in fec_enet_txq_put_data_tso() argument
614 memcpy(txq->tx_bounce[index], data, size); in fec_enet_txq_put_data_tso()
615 data = txq->tx_bounce[index]; in fec_enet_txq_put_data_tso()
634 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_put_data_tso()
656 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_put_hdr_tso() argument
672 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; in fec_enet_txq_put_hdr_tso()
673 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; in fec_enet_txq_put_hdr_tso()
676 memcpy(txq->tx_bounce[index], skb->data, hdr_len); in fec_enet_txq_put_hdr_tso()
677 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_put_hdr_tso()
697 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_put_hdr_tso()
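fec_enet_txq_put_hdr_tso (line 656) takes the protocol header for each TSO segment from one coherent allocation, TSO_HEADER_SIZE bytes per descriptor index, so the CPU and DMA addresses for slot i are just the respective base plus i * TSO_HEADER_SIZE (lines 672-673). A sketch of the slot addressing, again assuming the 128-byte net/tso.h slot size:

    #include <stdint.h>

    #define TSO_HEADER_SIZE 128  /* assumed: the net/tso.h slot size */

    /* Per-index slots inside the single coherent TSO header block. */
    static inline void *hdr_cpu(void *tso_hdrs, unsigned int index)
    {
        return (char *)tso_hdrs + index * TSO_HEADER_SIZE;
    }

    static inline uint64_t hdr_dma(uint64_t tso_hdrs_dma, unsigned int index)
    {
        return tso_hdrs_dma + (uint64_t)index * TSO_HEADER_SIZE;
    }
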
709 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_submit_tso() argument
715 struct bufdesc *bdp = txq->bd.cur; in fec_enet_txq_submit_tso()
720 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) { in fec_enet_txq_submit_tso()
740 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_submit_tso()
745 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; in fec_enet_txq_submit_tso()
747 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); in fec_enet_txq_submit_tso()
755 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_submit_tso()
756 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_submit_tso()
757 ret = fec_enet_txq_put_data_tso(txq, skb, ndev, in fec_enet_txq_submit_tso()
769 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_submit_tso()
773 txq->tx_skbuff[index] = skb; in fec_enet_txq_submit_tso()
776 txq->bd.cur = bdp; in fec_enet_txq_submit_tso()
780 !readl(txq->bd.reg_desc_active) || in fec_enet_txq_submit_tso()
781 !readl(txq->bd.reg_desc_active) || in fec_enet_txq_submit_tso()
782 !readl(txq->bd.reg_desc_active) || in fec_enet_txq_submit_tso()
783 !readl(txq->bd.reg_desc_active)) in fec_enet_txq_submit_tso()
784 writel(0, txq->bd.reg_desc_active); in fec_enet_txq_submit_tso()
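The four back-to-back readl() calls at lines 780-784 guard the final doorbell write; in mainline this pattern is the workaround for a TDAR erratum (FEC_QUIRK_ERR007885), where the active register can read busy even though the transmitter has stopped, so the kick is issued if any of four samples reads idle. The excerpt shows only the reads, so take the quirk name as context; the control flow itself is:

    #include <stdbool.h>

    /* Model of the quirk guard: ring the doorbell if the quirk does not
     * apply, or if the active register reads back 0 at least once in four
     * samples. read_active/ring stand in for readl()/writel() on
     * reg_desc_active. */
    static void kick_tx(bool has_quirk,
                        unsigned int (*read_active)(void),
                        void (*ring)(void))
    {
        if (!has_quirk ||
            !read_active() || !read_active() ||
            !read_active() || !read_active())
            ring();
    }
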
799 struct fec_enet_priv_tx_q *txq; in fec_enet_start_xmit() local
804 txq = fep->tx_queue[queue]; in fec_enet_start_xmit()
808 ret = fec_enet_txq_submit_tso(txq, skb, ndev); in fec_enet_start_xmit()
810 ret = fec_enet_txq_submit_skb(txq, skb, ndev); in fec_enet_start_xmit()
814 entries_free = fec_enet_get_free_txdesc_num(txq); in fec_enet_start_xmit()
815 if (entries_free <= txq->tx_stop_threshold) in fec_enet_start_xmit()
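fec_enet_start_xmit (line 799, the ndo_start_xmit hook) picks the per-queue txq, submits via the TSO or plain path, and stops the subqueue once free descriptors drop to tx_stop_threshold; the completion path (lines 1334-1335 below) wakes it again at tx_wake_threshold, giving stop/wake hysteresis. A sketch of that hysteresis, with the kernel netif calls reduced to a flag:

    #include <stdbool.h>

    struct txq_state {
        int stop_threshold;  /* stop when free <= this (worst-case skb) */
        int wake_threshold;  /* wake when free >= this (hysteresis gap) */
        bool stopped;
    };

    /* Called from the xmit path after consuming descriptors. */
    static void after_submit(struct txq_state *q, int entries_free)
    {
        if (entries_free <= q->stop_threshold)
            q->stopped = true;       /* netif_tx_stop_queue() analogue */
    }

    /* Called from the TX-completion path after reclaiming descriptors. */
    static void after_reclaim(struct txq_state *q, int entries_free)
    {
        if (q->stopped && entries_free >= q->wake_threshold)
            q->stopped = false;      /* netif_tx_wake_queue() analogue */
    }
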
826 struct fec_enet_priv_tx_q *txq; in fec_enet_bd_init() local
856 txq = fep->tx_queue[q]; in fec_enet_bd_init()
857 bdp = txq->bd.base; in fec_enet_bd_init()
858 txq->bd.cur = bdp; in fec_enet_bd_init()
860 for (i = 0; i < txq->bd.ring_size; i++) { in fec_enet_bd_init()
864 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) in fec_enet_bd_init()
869 if (txq->tx_skbuff[i]) { in fec_enet_bd_init()
870 dev_kfree_skb_any(txq->tx_skbuff[i]); in fec_enet_bd_init()
871 txq->tx_skbuff[i] = NULL; in fec_enet_bd_init()
874 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_bd_init()
878 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); in fec_enet_bd_init()
880 txq->dirty_tx = bdp; in fec_enet_bd_init()
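fec_enet_bd_init (line 826) rewinds each TX ring: every descriptor is returned to software ownership, buffers that are not TSO-header slots are unmapped (the IS_TSO_HEADER test from line 276), pending skbs are freed, and dirty_tx is parked on the last descriptor, one step behind bd.cur at base (lines 878-880). A sketch of the reset on an index ring:

    #include <stddef.h>

    #define RING 8

    struct bd_model { void *skb; int ready; };

    /* Ring re-init: clear every descriptor, drop pending skbs, rewind the
     * producer to slot 0, park the reclaim pointer on the last slot. */
    static void ring_reinit(struct bd_model bd[RING], int *cur, int *dirty)
    {
        for (int i = 0; i < RING; i++) {
            bd[i].ready = 0;      /* hand nothing to the MAC */
            bd[i].skb = NULL;     /* dev_kfree_skb_any() would run here */
        }
        *cur = 0;                 /* txq->bd.cur = txq->bd.base */
        *dirty = RING - 1;        /* dirty_tx = prevdesc(base): last slot */
    }
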
896 struct fec_enet_priv_tx_q *txq; in fec_enet_enable_ring() local
912 txq = fep->tx_queue[i]; in fec_enet_enable_ring()
913 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); in fec_enet_enable_ring()
925 struct fec_enet_priv_tx_q *txq; in fec_enet_reset_skb() local
929 txq = fep->tx_queue[i]; in fec_enet_reset_skb()
931 for (j = 0; j < txq->bd.ring_size; j++) { in fec_enet_reset_skb()
932 if (txq->tx_skbuff[j]) { in fec_enet_reset_skb()
933 dev_kfree_skb_any(txq->tx_skbuff[j]); in fec_enet_reset_skb()
934 txq->tx_skbuff[j] = NULL; in fec_enet_reset_skb()
1244 struct fec_enet_priv_tx_q *txq; in fec_enet_tx_queue() local
1251 txq = fep->tx_queue[queue_id]; in fec_enet_tx_queue()
1254 bdp = txq->dirty_tx; in fec_enet_tx_queue()
1257 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_tx_queue()
1259 while (bdp != READ_ONCE(txq->bd.cur)) { in fec_enet_tx_queue()
1266 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_tx_queue()
1268 skb = txq->tx_skbuff[index]; in fec_enet_tx_queue()
1269 txq->tx_skbuff[index] = NULL; in fec_enet_tx_queue()
1270 if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) in fec_enet_tx_queue()
1326 txq->dirty_tx = bdp; in fec_enet_tx_queue()
1329 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_tx_queue()
1334 entries_free = fec_enet_get_free_txdesc_num(txq); in fec_enet_tx_queue()
1335 if (entries_free >= txq->tx_wake_threshold) in fec_enet_tx_queue()
1341 if (bdp != txq->bd.cur && in fec_enet_tx_queue()
1342 readl(txq->bd.reg_desc_active) == 0) in fec_enet_tx_queue()
1343 writel(0, txq->bd.reg_desc_active); in fec_enet_tx_queue()
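fec_enet_tx_queue (line 1244) is the completion side: it starts one descriptor past dirty_tx, walks toward a READ_ONCE snapshot of bd.cur, frees each completed skb (skipping the unmap for TSO-header addresses, line 1270), and finally re-kicks the MAC if descriptors are still outstanding but reg_desc_active reads idle (lines 1341-1343; in mainline that re-kick is an erratum workaround, ERR006358). A sketch under those assumptions:

    #include <stdlib.h>

    #define RING 8

    struct tx_reclaim {
        void *skb[RING];
        int owned_by_mac[RING];   /* READY bit analogue */
        int dirty;                /* dirty_tx as an index */
        unsigned int active;      /* reg_desc_active analogue */
    };

    /* Walk from one past dirty_tx toward a snapshot of the producer,
     * freeing completed skbs; then re-kick if work remains but the
     * active register has gone idle. */
    static void reclaim(struct tx_reclaim *r, int producer)
    {
        int i = (r->dirty + 1) % RING;
        while (i != producer) {
            if (r->owned_by_mac[i])
                break;                /* MAC has not finished this one */
            free(r->skb[i]);          /* dev_kfree_skb_any() stand-in */
            r->skb[i] = NULL;
            r->dirty = i;             /* dirty_tx trails the walk */
            i = (i + 1) % RING;
        }
        if (i != producer && r->active == 0)
            r->active = 0;            /* writel(0, ...): the write itself
                                       * restarts DMA on real hardware */
    }
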
2819 struct fec_enet_priv_tx_q *txq; in fec_enet_free_buffers() local
2841 txq = fep->tx_queue[q]; in fec_enet_free_buffers()
2842 for (i = 0; i < txq->bd.ring_size; i++) { in fec_enet_free_buffers()
2843 kfree(txq->tx_bounce[i]); in fec_enet_free_buffers()
2844 txq->tx_bounce[i] = NULL; in fec_enet_free_buffers()
2845 skb = txq->tx_skbuff[i]; in fec_enet_free_buffers()
2846 txq->tx_skbuff[i] = NULL; in fec_enet_free_buffers()
2856 struct fec_enet_priv_tx_q *txq; in fec_enet_free_queue() local
2860 txq = fep->tx_queue[i]; in fec_enet_free_queue()
2862 txq->bd.ring_size * TSO_HEADER_SIZE, in fec_enet_free_queue()
2863 txq->tso_hdrs, in fec_enet_free_queue()
2864 txq->tso_hdrs_dma); in fec_enet_free_queue()
2878 struct fec_enet_priv_tx_q *txq; in fec_enet_alloc_queue() local
2881 txq = kzalloc(sizeof(*txq), GFP_KERNEL); in fec_enet_alloc_queue()
2882 if (!txq) { in fec_enet_alloc_queue()
2887 fep->tx_queue[i] = txq; in fec_enet_alloc_queue()
2888 txq->bd.ring_size = TX_RING_SIZE; in fec_enet_alloc_queue()
2891 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; in fec_enet_alloc_queue()
2892 txq->tx_wake_threshold = in fec_enet_alloc_queue()
2893 (txq->bd.ring_size - txq->tx_stop_threshold) / 2; in fec_enet_alloc_queue()
2895 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, in fec_enet_alloc_queue()
2896 txq->bd.ring_size * TSO_HEADER_SIZE, in fec_enet_alloc_queue()
2897 &txq->tso_hdrs_dma, in fec_enet_alloc_queue()
2899 if (!txq->tso_hdrs) { in fec_enet_alloc_queue()
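fec_enet_alloc_queue (line 2878) sizes the ring, derives the stop/wake thresholds (stop at FEC_MAX_SKB_DESCS free so a worst-case skb always fits, wake halfway back up the remaining ring), and makes a single dma_alloc_coherent of ring_size * TSO_HEADER_SIZE bytes for the TSO headers, which fec_enet_free_queue (lines 2860-2864 above) releases with the matching dma_free_coherent. The threshold arithmetic with illustrative constants (the real values come from fec.h and depend on MAX_SKB_FRAGS):

    #include <stdio.h>

    /* Illustrative constants, not the fec.h definitions. */
    #define TX_RING_SIZE      512
    #define FEC_MAX_SKB_DESCS 38

    int main(void)
    {
        int ring_size = TX_RING_SIZE;
        int stop = FEC_MAX_SKB_DESCS;        /* tx_stop_threshold */
        int wake = (ring_size - stop) / 2;   /* tx_wake_threshold */
        printf("stop at <=%d free, wake at >=%d free\n", stop, wake);
        return 0;
    }
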
2971 struct fec_enet_priv_tx_q *txq; in fec_enet_alloc_txq_buffers() local
2973 txq = fep->tx_queue[queue]; in fec_enet_alloc_txq_buffers()
2974 bdp = txq->bd.base; in fec_enet_alloc_txq_buffers()
2975 for (i = 0; i < txq->bd.ring_size; i++) { in fec_enet_alloc_txq_buffers()
2976 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); in fec_enet_alloc_txq_buffers()
2977 if (!txq->tx_bounce[i]) in fec_enet_alloc_txq_buffers()
2988 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_alloc_txq_buffers()
2992 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); in fec_enet_alloc_txq_buffers()
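fec_enet_alloc_txq_buffers (line 2971) gives every ring slot its own FEC_ENET_TX_FRSIZE bounce buffer and, after the loop, steps back one descriptor (line 2992) to reach the slot that gets the ring's wrap marker. A sketch of the allocate-or-unwind loop with an illustrative buffer size:

    #include <stdlib.h>

    #define RING      8
    #define TX_FRSIZE 2048  /* illustrative stand-in for FEC_ENET_TX_FRSIZE */

    /* Allocate one bounce buffer per descriptor; on failure, unwind. */
    static int alloc_bounce(void *bounce[RING])
    {
        for (int i = 0; i < RING; i++) {
            bounce[i] = malloc(TX_FRSIZE);   /* kmalloc(..., GFP_KERNEL) */
            if (!bounce[i]) {
                while (i--)
                    free(bounce[i]);         /* fec_enet_free_buffers() path */
                return -1;                   /* -ENOMEM in the driver */
            }
        }
        return 0;
    }
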
3388 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; in fec_enet_init() local
3389 unsigned size = dsize * txq->bd.ring_size; in fec_enet_init()
3391 txq->bd.qid = i; in fec_enet_init()
3392 txq->bd.base = cbd_base; in fec_enet_init()
3393 txq->bd.cur = cbd_base; in fec_enet_init()
3394 txq->bd.dma = bd_dma; in fec_enet_init()
3395 txq->bd.dsize = dsize; in fec_enet_init()
3396 txq->bd.dsize_log2 = dsize_log2; in fec_enet_init()
3397 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; in fec_enet_init()
3400 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); in fec_enet_init()
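fec_enet_init (line 3388) carves each queue's descriptor ring out of one coherent block: base and cur start together, dma is the bus address later programmed into FEC_X_DES_START (line 913), dsize_log2 lets pointer differences become index differences with a shift (line 307), and last is computed as one descriptor before the next queue's base, i.e. the final descriptor of this ring. A sketch of that bookkeeping:

    #include <stdint.h>

    struct bd_ring {
        void *base, *cur, *last;
        uint64_t dma;
        unsigned int dsize, dsize_log2, ring_size;
        int qid;
    };

    /* Carve one queue's descriptor ring out of a larger coherent block;
     * cbd_base/bd_dma advance by 'size' per queue, as in the driver. */
    static void init_ring(struct bd_ring *r, int qid, void *cbd_base,
                          uint64_t bd_dma, unsigned int dsize,
                          unsigned int dsize_log2, unsigned int ring_size)
    {
        unsigned int size = dsize * ring_size;

        r->qid = qid;
        r->base = r->cur = cbd_base;
        r->dma = bd_dma;
        r->dsize = dsize;
        r->dsize_log2 = dsize_log2;  /* pointer diff >> dsize_log2 = index diff */
        r->ring_size = ring_size;
        r->last = (char *)cbd_base + size - dsize;  /* final descriptor */
    }
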