Lines Matching refs:txq

433 	struct tx_queue txq[8];  member
465 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) in txq_to_mp() argument
467 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); in txq_to_mp()
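
The txq_to_mp() helper above recovers the per-port private structure from a pointer to one element of the txq[8] array embedded in it (the member declared at line 433). Below is a minimal, self-contained user-space sketch of the same pointer arithmetic; the struct names and fields are illustrative stand-ins, not the driver's real definitions, and the explicit arithmetic is what the kernel's container_of() boils down to for an indexed array member.

    #include <stddef.h>
    #include <stdio.h>

    struct tx_queue { int index; };

    struct eth_private {
        int port_num;
        struct tx_queue txq[8];     /* mirrors the txq[8] member at line 433 */
    };

    /* Equivalent to container_of(txq, struct eth_private, txq[txq->index]):
     * step back to txq[0], then subtract the offset of the array itself. */
    static struct eth_private *txq_to_priv(struct tx_queue *txq)
    {
        return (struct eth_private *)
            ((char *)(txq - txq->index) - offsetof(struct eth_private, txq));
    }

    int main(void)
    {
        struct eth_private p = { .port_num = 2 };

        p.txq[5].index = 5;
        printf("port %d\n", txq_to_priv(&p.txq[5])->port_num);  /* port 2 */
        return 0;
    }
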
486 static void txq_reset_hw_ptr(struct tx_queue *txq) in txq_reset_hw_ptr() argument
488 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reset_hw_ptr()
491 addr = (u32)txq->tx_desc_dma; in txq_reset_hw_ptr()
492 addr += txq->tx_curr_desc * sizeof(struct tx_desc); in txq_reset_hw_ptr()
493 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); in txq_reset_hw_ptr()
496 static void txq_enable(struct tx_queue *txq) in txq_enable() argument
498 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_enable()
499 wrlp(mp, TXQ_COMMAND, 1 << txq->index); in txq_enable()
502 static void txq_disable(struct tx_queue *txq) in txq_disable() argument
504 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_disable()
505 u8 mask = 1 << txq->index; in txq_disable()
512 static void txq_maybe_wake(struct tx_queue *txq) in txq_maybe_wake() argument
514 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_maybe_wake()
515 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_maybe_wake()
519 if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) in txq_maybe_wake()
729 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) in txq_submit_frag_skb() argument
731 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_submit_frag_skb()
741 tx_index = txq->tx_curr_desc++; in txq_submit_frag_skb()
742 if (txq->tx_curr_desc == txq->tx_ring_size) in txq_submit_frag_skb()
743 txq->tx_curr_desc = 0; in txq_submit_frag_skb()
744 desc = &txq->tx_desc_area[tx_index]; in txq_submit_frag_skb()
772 static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) in txq_submit_skb() argument
774 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_submit_skb()
828 tx_index = txq->tx_curr_desc++; in txq_submit_skb()
829 if (txq->tx_curr_desc == txq->tx_ring_size) in txq_submit_skb()
830 txq->tx_curr_desc = 0; in txq_submit_skb()
831 desc = &txq->tx_desc_area[tx_index]; in txq_submit_skb()
834 txq_submit_frag_skb(txq, skb); in txq_submit_skb()
846 __skb_queue_tail(&txq->tx_skb, skb); in txq_submit_skb()
855 mp->work_tx_end &= ~(1 << txq->index); in txq_submit_skb()
859 txq_enable(txq); in txq_submit_skb()
861 txq->tx_desc_count += nr_frags + 1; in txq_submit_skb()
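
Lines 741-743 and 828-830 show the producer side of the descriptor ring: claim the slot at tx_curr_desc, then advance the index and wrap it back to zero at tx_ring_size. A standalone sketch of that pattern (the names are illustrative, not the driver's):

    #include <stdio.h>

    /* Claim the next slot in a circular ring and advance the producer index,
     * wrapping at the ring size - the same shape as txq_submit_skb() and
     * txq_submit_frag_skb() above. */
    static unsigned int ring_claim_slot(unsigned int *curr, unsigned int ring_size)
    {
        unsigned int slot = (*curr)++;

        if (*curr == ring_size)
            *curr = 0;
        return slot;
    }

    int main(void)
    {
        unsigned int curr = 0;

        for (int i = 0; i < 10; i++)
            printf("%u ", ring_claim_slot(&curr, 4));   /* 0 1 2 3 0 1 2 3 0 1 */
        printf("\n");
        return 0;
    }
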
870 struct tx_queue *txq; in mv643xx_eth_xmit() local
874 txq = mp->txq + queue; in mv643xx_eth_xmit()
878 txq->tx_dropped++; in mv643xx_eth_xmit()
884 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { in mv643xx_eth_xmit()
893 if (!txq_submit_skb(txq, skb)) { in mv643xx_eth_xmit()
896 txq->tx_bytes += length; in mv643xx_eth_xmit()
897 txq->tx_packets++; in mv643xx_eth_xmit()
899 entries_left = txq->tx_ring_size - txq->tx_desc_count; in mv643xx_eth_xmit()
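
The checks at lines 884 and 519 are mirror images of each other: mv643xx_eth_xmit() treats the ring as full once fewer than MAX_SKB_FRAGS + 1 descriptors are free (one per possible page fragment plus one for the linear head), and txq_maybe_wake() only wakes a stopped queue once at least that many are free again, so a worst-case fragmented skb always fits. A hedged sketch of that accounting; MAX_FRAGS here is a stand-in, since the kernel's MAX_SKB_FRAGS depends on page size and configuration:

    #include <stdbool.h>

    #define MAX_FRAGS 17    /* stand-in for MAX_SKB_FRAGS */

    /* Descriptors still unused in the ring. */
    static inline unsigned int ring_free(unsigned int ring_size, unsigned int in_use)
    {
        return ring_size - in_use;
    }

    /* Shared threshold of the stop check (line 884) and wake check (line 519):
     * enough room for a maximally fragmented packet. */
    static inline bool ring_fits_worst_case_skb(unsigned int ring_size,
                                                unsigned int in_use)
    {
        return ring_free(ring_size, in_use) >= MAX_FRAGS + 1;
    }
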
909 static void txq_kick(struct tx_queue *txq) in txq_kick() argument
911 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_kick()
912 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_kick()
918 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) in txq_kick()
921 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); in txq_kick()
922 expected_ptr = (u32)txq->tx_desc_dma + in txq_kick()
923 txq->tx_curr_desc * sizeof(struct tx_desc); in txq_kick()
926 txq_enable(txq); in txq_kick()
931 mp->work_tx_end &= ~(1 << txq->index); in txq_kick()
934 static int txq_reclaim(struct tx_queue *txq, int budget, int force) in txq_reclaim() argument
936 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reclaim()
937 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_reclaim()
943 while (reclaimed < budget && txq->tx_desc_count > 0) { in txq_reclaim()
949 tx_index = txq->tx_used_desc; in txq_reclaim()
950 desc = &txq->tx_desc_area[tx_index]; in txq_reclaim()
959 txq->tx_used_desc = tx_index + 1; in txq_reclaim()
960 if (txq->tx_used_desc == txq->tx_ring_size) in txq_reclaim()
961 txq->tx_used_desc = 0; in txq_reclaim()
964 txq->tx_desc_count--; in txq_reclaim()
968 skb = __skb_dequeue(&txq->tx_skb); in txq_reclaim()
996 mp->work_tx &= ~(1 << txq->index); in txq_reclaim()
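
Lines 943-968 show the consumer side of the same ring: txq_reclaim() walks forward from tx_used_desc, handling at most 'budget' completed descriptors per call, wrapping the index and decrementing tx_desc_count as each slot is returned, and dequeuing the matching skb from txq->tx_skb. A standalone sketch of that consumer loop (illustrative names, with the driver's ownership check, DMA unmapping and skb freeing reduced to a comment):

    /* Consume up to 'budget' completed slots from a circular ring - the same
     * shape as the txq_reclaim() loop above. */
    static int ring_reclaim(unsigned int *used, unsigned int *count,
                            unsigned int ring_size, int budget)
    {
        int reclaimed = 0;

        while (reclaimed < budget && *count > 0) {
            /* the driver additionally stops here if the hardware still owns
             * the descriptor, unmaps its buffer and frees the skb */
            (*used)++;
            if (*used == ring_size)
                *used = 0;

            (*count)--;
            reclaimed++;
        }
        return reclaimed;
    }
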
1039 static void txq_set_rate(struct tx_queue *txq, int rate, int burst) in txq_set_rate() argument
1041 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_set_rate()
1053 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); in txq_set_rate()
1054 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); in txq_set_rate()
1057 static void txq_set_fixed_prio_mode(struct tx_queue *txq) in txq_set_fixed_prio_mode() argument
1059 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_set_fixed_prio_mode()
1078 val |= 1 << txq->index; in txq_set_fixed_prio_mode()
1187 struct tx_queue *txq = mp->txq + i; in mv643xx_eth_get_stats() local
1189 tx_packets += txq->tx_packets; in mv643xx_eth_get_stats()
1190 tx_bytes += txq->tx_bytes; in mv643xx_eth_get_stats()
1191 tx_dropped += txq->tx_dropped; in mv643xx_eth_get_stats()
1960 struct tx_queue *txq = mp->txq + index; in txq_init() local
1965 txq->index = index; in txq_init()
1967 txq->tx_ring_size = mp->tx_ring_size; in txq_init()
1969 txq->tx_desc_count = 0; in txq_init()
1970 txq->tx_curr_desc = 0; in txq_init()
1971 txq->tx_used_desc = 0; in txq_init()
1973 size = txq->tx_ring_size * sizeof(struct tx_desc); in txq_init()
1976 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, in txq_init()
1978 txq->tx_desc_dma = mp->tx_desc_sram_addr; in txq_init()
1980 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, in txq_init()
1981 size, &txq->tx_desc_dma, in txq_init()
1985 if (txq->tx_desc_area == NULL) { in txq_init()
1990 memset(txq->tx_desc_area, 0, size); in txq_init()
1992 txq->tx_desc_area_size = size; in txq_init()
1994 tx_desc = (struct tx_desc *)txq->tx_desc_area; in txq_init()
1995 for (i = 0; i < txq->tx_ring_size; i++) { in txq_init()
2000 if (nexti == txq->tx_ring_size) in txq_init()
2004 txd->next_desc_ptr = txq->tx_desc_dma + in txq_init()
2008 skb_queue_head_init(&txq->tx_skb); in txq_init()
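
Lines 1994-2008 show how txq_init() turns the freshly allocated descriptor area into a hardware-visible ring: each descriptor's next_desc_ptr is set to the DMA address of the following descriptor, and the last one wraps back to index 0. A standalone sketch of that chaining; only next_desc_ptr mirrors the listing, and the rest of the real struct tx_desc (command/status, byte count, buffer pointer) is omitted:

    #include <stdint.h>

    struct tx_desc_sketch {
        uint32_t next_desc_ptr;     /* DMA address of the next descriptor */
    };

    /* Link 'n' descriptors starting at DMA address 'dma_base' into a circular
     * list, as in the txq_init() loop above (lines 1995-2004). */
    static void ring_chain(struct tx_desc_sketch *desc, uint32_t dma_base,
                           unsigned int n)
    {
        for (unsigned int i = 0; i < n; i++) {
            unsigned int nexti = i + 1;

            if (nexti == n)
                nexti = 0;          /* last descriptor points back to the first */

            desc[i].next_desc_ptr = dma_base +
                nexti * sizeof(struct tx_desc_sketch);
        }
    }
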
2013 static void txq_deinit(struct tx_queue *txq) in txq_deinit() argument
2015 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_deinit()
2017 txq_disable(txq); in txq_deinit()
2018 txq_reclaim(txq, txq->tx_ring_size, 1); in txq_deinit()
2020 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); in txq_deinit()
2022 if (txq->index == 0 && in txq_deinit()
2023 txq->tx_desc_area_size <= mp->tx_desc_sram_size) in txq_deinit()
2024 iounmap(txq->tx_desc_area); in txq_deinit()
2026 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, in txq_deinit()
2027 txq->tx_desc_area, txq->tx_desc_dma); in txq_deinit()
2097 struct tx_queue *txq = mp->txq + i; in handle_link_event() local
2099 txq_reclaim(txq, txq->tx_ring_size, 1); in handle_link_event()
2100 txq_reset_hw_ptr(txq); in handle_link_event()
2173 txq_kick(mp->txq + queue); in mv643xx_eth_poll()
2175 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); in mv643xx_eth_poll()
2176 txq_maybe_wake(mp->txq + queue); in mv643xx_eth_poll()
2254 struct tx_queue *txq = mp->txq + i; in port_start() local
2256 txq_reset_hw_ptr(txq); in port_start()
2257 txq_set_rate(txq, 1000000000, 16777216); in port_start()
2258 txq_set_fixed_prio_mode(txq); in port_start()
2367 txq_deinit(mp->txq + i); in mv643xx_eth_open()
2398 txq_disable(mp->txq + i); in port_reset()
2443 txq_deinit(mp->txq + i); in mv643xx_eth_stop()