Lines matching refs: txq
308 #define IS_TSO_HEADER(txq, addr) \ argument
309 ((addr >= txq->tso_hdrs_dma) && \
310 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
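The macro at 308-310 guards unmap decisions: TSO headers live in one per-queue coherent block, so a DMA address inside [tso_hdrs_dma, tso_hdrs_dma + ring_size * TSO_HEADER_SIZE) must not be handed to dma_unmap_single(). A minimal userspace model of the same half-open range test; the struct and constants below are stand-ins, not the driver's definitions:

```c
/* Userspace model of the IS_TSO_HEADER() range check; struct and
 * TSO_HEADER_SIZE here are stand-ins, not the driver's definitions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TSO_HEADER_SIZE 128     /* per-descriptor header slot (assumed) */

struct txq_model {
	uint64_t tso_hdrs_dma;  /* DMA base of the coherent header block */
	unsigned int ring_size; /* one header slot per descriptor */
};

static bool is_tso_header(const struct txq_model *txq, uint64_t addr)
{
	return addr >= txq->tso_hdrs_dma &&
	       addr <  txq->tso_hdrs_dma +
		       (uint64_t)txq->ring_size * TSO_HEADER_SIZE;
}

int main(void)
{
	struct txq_model txq = { .tso_hdrs_dma = 0x80000000u, .ring_size = 512 };

	printf("%d\n", is_tso_header(&txq, 0x80000000u));             /* 1: first slot */
	printf("%d\n", is_tso_header(&txq, 0x80000000u + 512 * 128)); /* 0: one past end */
	return 0;
}
```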
334 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) in fec_enet_get_free_txdesc_num() argument
338 entries = (((const char *)txq->dirty_tx - in fec_enet_get_free_txdesc_num()
339 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; in fec_enet_get_free_txdesc_num()
341 return entries >= 0 ? entries : entries + txq->bd.ring_size; in fec_enet_get_free_txdesc_num()
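The free count at 334-341 is the byte distance from the cleaner (dirty_tx) back to the producer (bd.cur), converted to descriptors by shifting with dsize_log2, minus one reserved slot so a full ring stays distinguishable from an empty one; a negative result means the producer has wrapped, so ring_size is added back. A userspace model using plain indices in place of the pointer arithmetic:

```c
/* Userspace model of fec_enet_get_free_txdesc_num(): distance from the
 * cleaner (dirty_tx) back to the producer (cur), minus one reserved slot. */
#include <stdio.h>

static int free_txdesc(int dirty_tx, int cur, int ring_size)
{
	int entries = dirty_tx - cur - 1; /* index math stands in for the
	                                     pointer difference shifted by
	                                     dsize_log2 in the driver */
	return entries >= 0 ? entries : entries + ring_size;
}

int main(void)
{
	/* ring of 8: producer at 2, cleaner at 6 -> 3 free slots */
	printf("%d\n", free_txdesc(6, 2, 8));
	/* wrapped: producer at 6, cleaner at 2 -> still 3 free slots */
	printf("%d\n", free_txdesc(2, 6, 8));
	return 0;
}
```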
367 struct fec_enet_priv_tx_q *txq; in fec_dump() local
373 txq = fep->tx_queue[0]; in fec_dump()
374 bdp = txq->bd.base; in fec_dump()
379 bdp == txq->bd.cur ? 'S' : ' ', in fec_dump()
380 bdp == txq->dirty_tx ? 'H' : ' ', in fec_dump()
384 txq->tx_skbuff[index]); in fec_dump()
385 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_dump()
387 } while (bdp != txq->bd.base); in fec_dump()
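The dump loop at 374-387 walks the ring exactly once with a do/while, tagging bd.cur with 'S' and dirty_tx with 'H', and stops when the walk returns to bd.base. A sketch of that loop shape, with illustrative types in place of the driver's bufdesc helpers:

```c
/* Shape of the fec_dump() loop: visit every descriptor once, mark the
 * producer (S) and cleaner (H), stop on return to base. The linked
 * next pointer stands in for fec_enet_get_nextdesc(). */
#include <stdio.h>

struct bd { struct bd *next; };

static void walk_ring(struct bd *base, struct bd *cur, struct bd *dirty)
{
	struct bd *bdp = base;

	do {
		printf("%c\n", bdp == cur ? 'S' : bdp == dirty ? 'H' : ' ');
		bdp = bdp->next; /* driver also prints status/len/addr here */
	} while (bdp != base);
}

int main(void)
{
	struct bd r[3] = { { &r[1] }, { &r[2] }, { &r[0] } }; /* 3-slot ring */

	walk_ring(&r[0], &r[1], &r[2]);
	return 0;
}
```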
413 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_submit_frag_skb() argument
418 struct bufdesc *bdp = txq->bd.cur; in fec_enet_txq_submit_frag_skb()
432 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_submit_frag_skb()
453 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_submit_frag_skb()
463 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_submit_frag_skb()
466 memcpy(txq->tx_bounce[index], bufaddr, frag_len); in fec_enet_txq_submit_frag_skb()
467 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_submit_frag_skb()
492 bdp = txq->bd.cur; in fec_enet_txq_submit_frag_skb()
494 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_submit_frag_skb()
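In the fragment path (413-494), a buffer whose address trips a DMA alignment quirk is first copied into the pre-allocated per-slot tx_bounce buffer (466-467) and the copy is mapped instead; on a mapping failure the loop unwinds forward from bd.cur (492-494). A hedged sketch of that bounce fallback; the mask, size, and names are illustrative:

```c
/* Bounce-buffer fallback sketch: copy a misaligned fragment into a
 * pre-allocated per-slot buffer and map that instead. Mask and size
 * are stand-ins (BOUNCE_SIZE models FEC_ENET_TX_FRSIZE). */
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

#define BOUNCE_SIZE 2048

static void *maybe_bounce(void *buf, size_t len, uintptr_t align_mask,
			  uint8_t bounce[BOUNCE_SIZE])
{
	if (((uintptr_t)buf & align_mask) && len <= BOUNCE_SIZE) {
		memcpy(bounce, buf, len); /* copy, then DMA-map the bounce */
		return bounce;
	}
	return buf; /* already aligned: map the original buffer */
}

int main(void)
{
	static uint8_t bounce[BOUNCE_SIZE];
	alignas(16) static uint8_t payload[64];

	/* mask 0xf models a 16-byte alignment requirement */
	void *mapped = maybe_bounce(payload + 1, 63, 0xf, bounce);
	return mapped == bounce ? 0 : 1; /* misaligned -> bounced */
}
```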
501 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_submit_skb() argument
515 entries_free = fec_enet_get_free_txdesc_num(txq); in fec_enet_txq_submit_skb()
530 bdp = txq->bd.cur; in fec_enet_txq_submit_skb()
539 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_submit_skb()
542 memcpy(txq->tx_bounce[index], skb->data, buflen); in fec_enet_txq_submit_skb()
543 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_submit_skb()
559 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); in fec_enet_txq_submit_skb()
587 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_submit_skb()
596 index = fec_enet_get_bd_index(last_bdp, &txq->bd); in fec_enet_txq_submit_skb()
598 txq->tx_skbuff[index] = skb; in fec_enet_txq_submit_skb()
612 bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); in fec_enet_txq_submit_skb()
620 txq->bd.cur = bdp; in fec_enet_txq_submit_skb()
623 writel(0, txq->bd.reg_desc_active); in fec_enet_txq_submit_skb()
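Lines 530-623 show the single-skb path: descriptors are filled, the skb is recorded at the index of the last descriptor used (598) so it is freed only once all its fragments complete, bd.cur moves past that descriptor (620), and any write to reg_desc_active (623) tells the MAC to rescan the ring. A sketch of the ownership-handoff ordering this path depends on, with stand-in types and a volatile write modeling writel():

```c
/* Ordering sketch: descriptor payload fields must be visible in memory
 * before the READY bit hands the slot to the MAC; the doorbell write
 * comes last. Atomic release and the volatile write are stand-ins for
 * the kernel's barriers and writel(). */
#include <stdatomic.h>
#include <stdint.h>

struct desc { _Atomic uint16_t status; uint32_t addr, len; };
#define BD_READY 0x8000

static void submit(struct desc *d, uint32_t addr, uint32_t len,
		   volatile uint32_t *doorbell)
{
	d->addr = addr;
	d->len  = len;
	/* release: addr/len reach memory before READY can be observed */
	atomic_store_explicit(&d->status, BD_READY, memory_order_release);
	*doorbell = 0; /* any write to reg_desc_active restarts the scan */
}

int main(void)
{
	static struct desc d;
	static volatile uint32_t tdar;

	submit(&d, 0x1000, 64, &tdar);
	return !(atomic_load(&d.status) & BD_READY);
}
```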
629 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, in fec_enet_txq_put_data_tso() argument
647 memcpy(txq->tx_bounce[index], data, size); in fec_enet_txq_put_data_tso()
648 data = txq->tx_bounce[index]; in fec_enet_txq_put_data_tso()
667 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_put_data_tso()
689 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_put_hdr_tso() argument
705 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; in fec_enet_txq_put_hdr_tso()
706 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; in fec_enet_txq_put_hdr_tso()
709 memcpy(txq->tx_bounce[index], skb->data, hdr_len); in fec_enet_txq_put_hdr_tso()
710 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_put_hdr_tso()
730 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_put_hdr_tso()
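For TSO (689-730), each segment's header is built in a fixed slot of the coherent tso_hdrs block, index * TSO_HEADER_SIZE from the base (705-706), so no per-segment dma_map/unmap is needed; these are the addresses IS_TSO_HEADER() later recognizes and skips. A slot-addressing sketch with assumed sizes:

```c
/* Slot-addressing sketch: each descriptor index owns one fixed header
 * slot in a single coherent allocation. Sizes and types are stand-ins. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TSO_HEADER_SIZE 128 /* assumed slot size */

static inline void *hdr_cpu(void *tso_hdrs_base, unsigned int index)
{
	return (uint8_t *)tso_hdrs_base + (size_t)index * TSO_HEADER_SIZE;
}

static inline uint64_t hdr_dma(uint64_t tso_hdrs_dma, unsigned int index)
{
	return tso_hdrs_dma + (uint64_t)index * TSO_HEADER_SIZE;
}

int main(void)
{
	static uint8_t block[512 * TSO_HEADER_SIZE]; /* models the coherent alloc */

	printf("%p %llx\n", hdr_cpu(block, 3),
	       (unsigned long long)hdr_dma(0x80000000ull, 3));
	return 0;
}
```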
742 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_submit_tso() argument
748 struct bufdesc *bdp = txq->bd.cur; in fec_enet_txq_submit_tso()
753 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) { in fec_enet_txq_submit_tso()
773 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_submit_tso()
778 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; in fec_enet_txq_submit_tso()
780 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); in fec_enet_txq_submit_tso()
788 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_submit_tso()
789 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_submit_tso()
790 ret = fec_enet_txq_put_data_tso(txq, skb, ndev, in fec_enet_txq_submit_tso()
802 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_submit_tso()
806 txq->tx_skbuff[index] = skb; in fec_enet_txq_submit_tso()
809 txq->bd.cur = bdp; in fec_enet_txq_submit_tso()
813 !readl(txq->bd.reg_desc_active) || in fec_enet_txq_submit_tso()
814 !readl(txq->bd.reg_desc_active) || in fec_enet_txq_submit_tso()
815 !readl(txq->bd.reg_desc_active) || in fec_enet_txq_submit_tso()
816 !readl(txq->bd.reg_desc_active)) in fec_enet_txq_submit_tso()
817 writel(0, txq->bd.reg_desc_active); in fec_enet_txq_submit_tso()
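The quadruple readl() at 813-816 is an erratum workaround (gated in the driver by a quirk flag that does not appear in these refs): reg_desc_active is rewritten only if one of several consecutive reads already sees it clear, because on affected silicon the active bit can drop while descriptors are still pending. The short-circuit shape, modeled with volatile reads:

```c
/* Shape of the doorbell workaround: rewrite reg_desc_active only when
 * a read observes it already clear; || short-circuits, so reading
 * stops at the first zero. The quirk check guarding this in the
 * driver is omitted here. */
#include <stdint.h>

static void kick_tx(volatile uint32_t *reg_desc_active)
{
	if (!*reg_desc_active ||
	    !*reg_desc_active ||
	    !*reg_desc_active ||
	    !*reg_desc_active)
		*reg_desc_active = 0; /* any write restarts descriptor fetch */
}

int main(void)
{
	static volatile uint32_t tdar = 1;

	kick_tx(&tdar); /* reads 1 four times: no rewrite */
	tdar = 0;
	kick_tx(&tdar); /* first read sees 0: rewrite happens */
	return (int)tdar;
}
```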
832 struct fec_enet_priv_tx_q *txq; in fec_enet_start_xmit() local
837 txq = fep->tx_queue[queue]; in fec_enet_start_xmit()
841 ret = fec_enet_txq_submit_tso(txq, skb, ndev); in fec_enet_start_xmit()
843 ret = fec_enet_txq_submit_skb(txq, skb, ndev); in fec_enet_start_xmit()
847 entries_free = fec_enet_get_free_txdesc_num(txq); in fec_enet_start_xmit()
848 if (entries_free <= txq->tx_stop_threshold) in fec_enet_start_xmit()
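Lines 847-848 are the stop side of TX flow control: the subqueue stops once free descriptors fall to tx_stop_threshold, and the clean path (1376) wakes it only at the higher tx_wake_threshold, giving hysteresis. A runnable model with stand-in thresholds derived from the formula visible at 3062-3063:

```c
/* Hysteresis sketch: stop at a low-water mark after xmit, wake at a
 * higher mark from the TX clean path. Values are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct txq_fc {
	int stop_thresh; /* worst-case descriptors one skb may need */
	int wake_thresh; /* strictly above stop_thresh */
	bool stopped;
};

static void after_xmit(struct txq_fc *q, int entries_free)
{
	if (entries_free <= q->stop_thresh)
		q->stopped = true;  /* netif_tx_stop_queue() in the driver */
}

static void after_clean(struct txq_fc *q, int entries_free)
{
	if (q->stopped && entries_free >= q->wake_thresh)
		q->stopped = false; /* netif_tx_wake_queue() */
}

int main(void)
{
	struct txq_fc q = { .stop_thresh = 20, .stopped = false };
	q.wake_thresh = (512 - q.stop_thresh) / 2; /* formula from the refs: 246 */

	after_xmit(&q, 18);   printf("stopped=%d\n", q.stopped); /* 1 */
	after_clean(&q, 100); printf("stopped=%d\n", q.stopped); /* still 1 */
	after_clean(&q, 246); printf("stopped=%d\n", q.stopped); /* 0 */
	return 0;
}
```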
859 struct fec_enet_priv_tx_q *txq; in fec_enet_bd_init() local
889 txq = fep->tx_queue[q]; in fec_enet_bd_init()
890 bdp = txq->bd.base; in fec_enet_bd_init()
891 txq->bd.cur = bdp; in fec_enet_bd_init()
893 for (i = 0; i < txq->bd.ring_size; i++) { in fec_enet_bd_init()
897 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) in fec_enet_bd_init()
902 if (txq->tx_skbuff[i]) { in fec_enet_bd_init()
903 dev_kfree_skb_any(txq->tx_skbuff[i]); in fec_enet_bd_init()
904 txq->tx_skbuff[i] = NULL; in fec_enet_bd_init()
907 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_bd_init()
911 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); in fec_enet_bd_init()
913 txq->dirty_tx = bdp; in fec_enet_bd_init()
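fec_enet_bd_init() (889-913) returns each TX ring to a pristine state: still-mapped buffers are unmapped unless they point into the TSO header block (897), pending skbs are freed, and dirty_tx is parked on the last descriptor, one behind the producer at the base (911-913). A simplified reset sketch with stand-in structures:

```c
/* Ring-reset sketch: clear every slot, re-arm WRAP on the final one,
 * producer restarts at base, cleaner sits on the last descriptor
 * ("everything reclaimed"). Structures are simplified stand-ins. */
#include <stddef.h>

struct bd   { unsigned short status; };
struct ring { struct bd *base; struct bd *cur; struct bd *dirty; size_t n; };
#define BD_WRAP 0x2000

static void ring_reset(struct ring *r)
{
	for (size_t i = 0; i < r->n; i++)
		r->base[i].status = 0;      /* drop READY on every slot */
	r->base[r->n - 1].status = BD_WRAP; /* last descriptor wraps */
	r->cur   = r->base;                 /* producer restarts at base */
	r->dirty = &r->base[r->n - 1];      /* cleaner = one before cur */
}

int main(void)
{
	struct bd slots[4] = { { 0x8000 }, { 0x8000 }, { 0 }, { 0x2000 } };
	struct ring r = { slots, &slots[2], &slots[1], 4 };

	ring_reset(&r);
	return !(r.cur == slots && r.dirty == &slots[3]);
}
```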
929 struct fec_enet_priv_tx_q *txq; in fec_enet_enable_ring() local
945 txq = fep->tx_queue[i]; in fec_enet_enable_ring()
946 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); in fec_enet_enable_ring()
958 struct fec_enet_priv_tx_q *txq; in fec_enet_reset_skb() local
962 txq = fep->tx_queue[i]; in fec_enet_reset_skb()
964 for (j = 0; j < txq->bd.ring_size; j++) { in fec_enet_reset_skb()
965 if (txq->tx_skbuff[j]) { in fec_enet_reset_skb()
966 dev_kfree_skb_any(txq->tx_skbuff[j]); in fec_enet_reset_skb()
967 txq->tx_skbuff[j] = NULL; in fec_enet_reset_skb()
1285 struct fec_enet_priv_tx_q *txq; in fec_enet_tx_queue() local
1292 txq = fep->tx_queue[queue_id]; in fec_enet_tx_queue()
1295 bdp = txq->dirty_tx; in fec_enet_tx_queue()
1298 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_tx_queue()
1300 while (bdp != READ_ONCE(txq->bd.cur)) { in fec_enet_tx_queue()
1307 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_tx_queue()
1309 skb = txq->tx_skbuff[index]; in fec_enet_tx_queue()
1310 txq->tx_skbuff[index] = NULL; in fec_enet_tx_queue()
1311 if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) in fec_enet_tx_queue()
1367 txq->dirty_tx = bdp; in fec_enet_tx_queue()
1370 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_tx_queue()
1375 entries_free = fec_enet_get_free_txdesc_num(txq); in fec_enet_tx_queue()
1376 if (entries_free >= txq->tx_wake_threshold) in fec_enet_tx_queue()
1382 if (bdp != txq->bd.cur && in fec_enet_tx_queue()
1383 readl(txq->bd.reg_desc_active) == 0) in fec_enet_tx_queue()
1384 writel(0, txq->bd.reg_desc_active); in fec_enet_tx_queue()
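The clean path (1295-1384) starts one descriptor past dirty_tx, stops at a snapshot of bd.cur taken with READ_ONCE() (1300), frees skbs and unmaps buffers that are not TSO header slots (1311), and advances dirty_tx slot by slot; afterwards it may rewrite the doorbell if unconsumed work remains (1382-1384). A userspace model of that reclaim walk; BD_READY and the types are stand-ins:

```c
/* Reclaim-loop sketch: consume completed slots strictly between
 * dirty_tx and a snapshot of cur; a slot still owned by hardware
 * (READY set) ends the walk early. Simplified stand-in types. */
#include <stddef.h>

struct bd { unsigned short status; void *skb; };
#define BD_READY 0x8000

static size_t reclaim(struct bd *ring, size_t n, size_t *dirty, size_t cur)
{
	size_t freed = 0;
	size_t i = (*dirty + 1) % n;   /* first candidate after dirty_tx */

	while (i != cur) {             /* snapshot of the producer */
		if (ring[i].status & BD_READY)
			break;         /* hardware hasn't finished it */
		ring[i].skb = NULL;    /* driver frees the skb / unmaps here */
		*dirty = i;            /* slot is now reusable */
		i = (i + 1) % n;
		freed++;
	}
	return freed;
}

int main(void)
{
	static char skb;
	struct bd ring[4] = {
		{ 0, NULL }, { 0, &skb }, { BD_READY, &skb }, { 0, NULL },
	};
	size_t dirty = 0;

	/* producer snapshot at 3: slot 1 completes; slot 2 still READY */
	return reclaim(ring, 4, &dirty, 3) == 1 && dirty == 1 ? 0 : 1;
}
```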
2989 struct fec_enet_priv_tx_q *txq; in fec_enet_free_buffers() local
3011 txq = fep->tx_queue[q]; in fec_enet_free_buffers()
3012 for (i = 0; i < txq->bd.ring_size; i++) { in fec_enet_free_buffers()
3013 kfree(txq->tx_bounce[i]); in fec_enet_free_buffers()
3014 txq->tx_bounce[i] = NULL; in fec_enet_free_buffers()
3015 skb = txq->tx_skbuff[i]; in fec_enet_free_buffers()
3016 txq->tx_skbuff[i] = NULL; in fec_enet_free_buffers()
3026 struct fec_enet_priv_tx_q *txq; in fec_enet_free_queue() local
3030 txq = fep->tx_queue[i]; in fec_enet_free_queue()
3032 txq->bd.ring_size * TSO_HEADER_SIZE, in fec_enet_free_queue()
3033 txq->tso_hdrs, in fec_enet_free_queue()
3034 txq->tso_hdrs_dma); in fec_enet_free_queue()
3048 struct fec_enet_priv_tx_q *txq; in fec_enet_alloc_queue() local
3051 txq = kzalloc(sizeof(*txq), GFP_KERNEL); in fec_enet_alloc_queue()
3052 if (!txq) { in fec_enet_alloc_queue()
3057 fep->tx_queue[i] = txq; in fec_enet_alloc_queue()
3058 txq->bd.ring_size = TX_RING_SIZE; in fec_enet_alloc_queue()
3061 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; in fec_enet_alloc_queue()
3062 txq->tx_wake_threshold = in fec_enet_alloc_queue()
3063 (txq->bd.ring_size - txq->tx_stop_threshold) / 2; in fec_enet_alloc_queue()
3065 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, in fec_enet_alloc_queue()
3066 txq->bd.ring_size * TSO_HEADER_SIZE, in fec_enet_alloc_queue()
3067 &txq->tso_hdrs_dma, in fec_enet_alloc_queue()
3069 if (!txq->tso_hdrs) { in fec_enet_alloc_queue()
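fec_enet_alloc_queue() (3051-3069) sizes everything from the ring: the coherent TSO header block is ring_size * TSO_HEADER_SIZE bytes, one slot per descriptor, and is released with the same size in fec_enet_free_queue() (3032-3034); the wake threshold is derived from the stop threshold as shown at 3062-3063. A sizing check with assumed constants:

```c
/* Sizing sketch for the per-queue coherent TSO header block and the
 * flow-control thresholds. Constants are illustrative stand-ins. */
#include <stdio.h>

#define TX_RING_SIZE    512
#define TSO_HEADER_SIZE 128
#define MAX_SKB_DESCS    20 /* stand-in stop threshold */

int main(void)
{
	unsigned int stop  = MAX_SKB_DESCS;
	unsigned int wake  = (TX_RING_SIZE - stop) / 2;
	unsigned int bytes = TX_RING_SIZE * TSO_HEADER_SIZE;

	printf("tso_hdrs bytes: %u\n", bytes);   /* 65536 */
	printf("stop=%u wake=%u\n", stop, wake); /* 20 246 */
	return 0;
}
```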
3141 struct fec_enet_priv_tx_q *txq; in fec_enet_alloc_txq_buffers() local
3143 txq = fep->tx_queue[queue]; in fec_enet_alloc_txq_buffers()
3144 bdp = txq->bd.base; in fec_enet_alloc_txq_buffers()
3145 for (i = 0; i < txq->bd.ring_size; i++) { in fec_enet_alloc_txq_buffers()
3146 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); in fec_enet_alloc_txq_buffers()
3147 if (!txq->tx_bounce[i]) in fec_enet_alloc_txq_buffers()
3158 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_alloc_txq_buffers()
3162 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); in fec_enet_alloc_txq_buffers()
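fec_enet_alloc_txq_buffers() (3145-3147) allocates one bounce buffer per descriptor and bails out through a cleanup path if any allocation fails. An unwind-on-failure sketch in plain userspace C; FRSIZE is a stand-in for FEC_ENET_TX_FRSIZE:

```c
/* Allocation sketch with unwind-on-failure: if any per-slot bounce
 * buffer allocation fails, previously allocated slots are released.
 * Simplified userspace stand-in for the driver's err_alloc path. */
#include <stdlib.h>

#define FRSIZE 2048 /* stand-in for FEC_ENET_TX_FRSIZE */

static int alloc_bounce(void **bounce, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		bounce[i] = malloc(FRSIZE);
		if (!bounce[i]) {
			while (i--) { /* unwind everything allocated so far */
				free(bounce[i]);
				bounce[i] = NULL;
			}
			return -1;    /* driver returns -ENOMEM */
		}
	}
	return 0;
}

int main(void)
{
	void *bounce[512] = { 0 };

	return alloc_bounce(bounce, 512); /* 0 on success */
}
```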
3565 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; in fec_enet_init() local
3566 unsigned size = dsize * txq->bd.ring_size; in fec_enet_init()
3568 txq->bd.qid = i; in fec_enet_init()
3569 txq->bd.base = cbd_base; in fec_enet_init()
3570 txq->bd.cur = cbd_base; in fec_enet_init()
3571 txq->bd.dma = bd_dma; in fec_enet_init()
3572 txq->bd.dsize = dsize; in fec_enet_init()
3573 txq->bd.dsize_log2 = dsize_log2; in fec_enet_init()
3574 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; in fec_enet_init()
3577 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); in fec_enet_init()
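Finally, fec_enet_init() (3568-3577) records the ring geometry: cur and base start together, dsize and dsize_log2 make index computation a shift (as exploited at 339), and bd.last sits one descriptor size before the end of the ring. A stand-in arithmetic check:

```c
/* Geometry sketch: with a power-of-two descriptor size, a descriptor's
 * index is a pointer difference shifted by log2(dsize), and "last" is
 * one dsize before the end of the ring. Values are stand-ins. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int dsize = 32, dsize_log2 = 5, ring = 512;
	uintptr_t base = 0x1000;
	uintptr_t end  = base + (uintptr_t)ring * dsize; /* one past the ring */
	uintptr_t last = end - dsize;                    /* models bd.last */
	uintptr_t bdp  = base + 7 * dsize;

	printf("index=%lu last_slot=%lu\n",
	       (unsigned long)((bdp - base) >> dsize_log2), /* 7 */
	       (unsigned long)(last - base) / dsize);       /* 511 */
	return 0;
}
```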