Lines Matching refs:txq

178 #define IS_TSO_HEADER(txq, addr) \  argument
179 ((addr >= txq->tso_hdrs_dma) && \
180 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
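
    The IS_TSO_HEADER() macro above tests whether a descriptor's buffer address falls inside the
    queue's pre-allocated TSO header region; txq_reclaim() uses it (line 1090 below) to decide
    whether a buffer needs DMA-unmapping at all. A minimal userspace sketch of the same range
    check, with illustrative field names and the 128-byte header slot size used by <net/tso.h>:

        #include <stdbool.h>
        #include <stdint.h>

        #define TSO_HEADER_SIZE 128      /* per-descriptor header slot, as in <net/tso.h> */

        struct sketch_txq {
                uint64_t tso_hdrs_dma;   /* DMA base of the queue's TSO header area */
                unsigned tx_ring_size;   /* one header slot per descriptor */
        };

        /* true if addr lies in [tso_hdrs_dma, tso_hdrs_dma + ring_size * TSO_HEADER_SIZE) */
        bool is_tso_header(const struct sketch_txq *txq, uint64_t addr)
        {
                return addr >= txq->tso_hdrs_dma &&
                       addr <  txq->tso_hdrs_dma +
                               (uint64_t)txq->tx_ring_size * TSO_HEADER_SIZE;
        }
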
407 struct tx_queue txq[8]; member
445 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) in txq_to_mp() argument
447 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); in txq_to_mp()
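
    txq_to_mp() walks back from a tx_queue pointer to its containing mv643xx_eth_private by
    applying container_of() to the embedded txq[8] array (the member at line 407). A standalone
    sketch of that pattern, with hypothetical parent/queue types standing in for the driver's
    structs:

        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct queue  { int index; };
        struct parent { int id; struct queue queues[8]; };

        int main(void)
        {
                struct parent p = { .id = 42 };
                struct queue *q = &p.queues[3];

                /* subtracting the member offset recovers the parent struct; the
                 * driver indexes the array with txq->index in the same way */
                struct parent *back = container_of(q, struct parent, queues[3]);

                printf("%d\n", back->id);   /* prints 42 */
                return 0;
        }
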
466 static void txq_reset_hw_ptr(struct tx_queue *txq) in txq_reset_hw_ptr() argument
468 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reset_hw_ptr()
471 addr = (u32)txq->tx_desc_dma; in txq_reset_hw_ptr()
472 addr += txq->tx_curr_desc * sizeof(struct tx_desc); in txq_reset_hw_ptr()
473 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); in txq_reset_hw_ptr()
476 static void txq_enable(struct tx_queue *txq) in txq_enable() argument
478 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_enable()
479 wrlp(mp, TXQ_COMMAND, 1 << txq->index); in txq_enable()
482 static void txq_disable(struct tx_queue *txq) in txq_disable() argument
484 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_disable()
485 u8 mask = 1 << txq->index; in txq_disable()
492 static void txq_maybe_wake(struct tx_queue *txq) in txq_maybe_wake() argument
494 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_maybe_wake()
495 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_maybe_wake()
499 if (txq->tx_desc_count <= txq->tx_wake_threshold) in txq_maybe_wake()
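
    txq_enable(), txq_disable() and txq_maybe_wake() show the per-queue control pattern: each
    hardware queue owns one bit (1 << txq->index) in the TXQ_COMMAND register, and the stack's
    queue is only woken once reclaim has brought tx_desc_count down to tx_wake_threshold. A
    sketch of that logic with the register write replaced by a plain variable (wrlp() and
    TXQ_COMMAND are the driver's helpers; everything here is illustrative):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        static uint32_t txq_command_reg;               /* stand-in for TXQ_COMMAND */

        struct sketch_txq {
                int index;
                int tx_desc_count;
                int tx_wake_threshold;
        };

        static void txq_enable_sketch(const struct sketch_txq *txq)
        {
                txq_command_reg |= 1u << txq->index;   /* one enable bit per queue */
        }

        static bool txq_should_wake(const struct sketch_txq *txq)
        {
                /* hysteresis: restart the software queue only when enough
                 * descriptors have been reclaimed */
                return txq->tx_desc_count <= txq->tx_wake_threshold;
        }

        int main(void)
        {
                struct sketch_txq q = { .index = 2, .tx_desc_count = 100,
                                        .tx_wake_threshold = 128 };

                txq_enable_sketch(&q);
                printf("TXQ_COMMAND=0x%x wake=%d\n",
                       (unsigned)txq_command_reg, txq_should_wake(&q));
                return 0;
        }
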
726 txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, in txq_put_data_tso() argument
734 tx_index = txq->tx_curr_desc++; in txq_put_data_tso()
735 if (txq->tx_curr_desc == txq->tx_ring_size) in txq_put_data_tso()
736 txq->tx_curr_desc = 0; in txq_put_data_tso()
737 desc = &txq->tx_desc_area[tx_index]; in txq_put_data_tso()
738 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; in txq_put_data_tso()
745 memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE, in txq_put_data_tso()
747 desc->buf_ptr = txq->tso_hdrs_dma in txq_put_data_tso()
751 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; in txq_put_data_tso()
774 txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length, in txq_put_hdr_tso() argument
777 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_put_hdr_tso()
786 tx_index = txq->tx_curr_desc; in txq_put_hdr_tso()
787 desc = &txq->tx_desc_area[tx_index]; in txq_put_hdr_tso()
799 desc->buf_ptr = txq->tso_hdrs_dma + in txq_put_hdr_tso()
800 txq->tx_curr_desc * TSO_HEADER_SIZE; in txq_put_hdr_tso()
812 txq->tx_curr_desc++; in txq_put_hdr_tso()
813 if (txq->tx_curr_desc == txq->tx_ring_size) in txq_put_hdr_tso()
814 txq->tx_curr_desc = 0; in txq_put_hdr_tso()
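
    txq_put_data_tso() and txq_put_hdr_tso() both advance tx_curr_desc with a wrap back to zero
    at tx_ring_size, and anything placed in the TSO header area for slot i lives at a fixed
    offset of i * TSO_HEADER_SIZE (the memcpy at line 745, the header at line 799), so buf_ptr
    is simply tso_hdrs_dma plus the same offset and needs no per-packet mapping. A compact
    sketch of the two idioms, with assumed field names:

        #include <stdint.h>

        #define TSO_HEADER_SIZE 128

        struct sketch_txq {
                unsigned tx_curr_desc;
                unsigned tx_ring_size;
                uint8_t  *tso_hdrs;        /* CPU address of the header area */
                uint64_t tso_hdrs_dma;     /* matching DMA address */
        };

        /* take the current slot and advance the ring index, wrapping at the end */
        unsigned txq_next_slot(struct sketch_txq *txq)
        {
                unsigned i = txq->tx_curr_desc++;

                if (txq->tx_curr_desc == txq->tx_ring_size)
                        txq->tx_curr_desc = 0;
                return i;
        }

        /* CPU and device addresses of slot i's pre-allocated header space */
        uint8_t *hdr_cpu(struct sketch_txq *txq, unsigned i)
        {
                return txq->tso_hdrs + i * TSO_HEADER_SIZE;
        }

        uint64_t hdr_dma(struct sketch_txq *txq, unsigned i)
        {
                return txq->tso_hdrs_dma + (uint64_t)i * TSO_HEADER_SIZE;
        }
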
817 static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, in txq_submit_tso() argument
820 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_submit_tso()
828 if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) { in txq_submit_tso()
833 first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc]; in txq_submit_tso()
848 hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE; in txq_submit_tso()
850 txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts, in txq_submit_tso()
858 ret = txq_put_data_tso(dev, txq, skb, tso.data, size, in txq_submit_tso()
868 __skb_queue_tail(&txq->tx_skb, skb); in txq_submit_tso()
876 mp->work_tx_end &= ~(1 << txq->index); in txq_submit_tso()
880 txq_enable(txq); in txq_submit_tso()
881 txq->tx_desc_count += desc_count; in txq_submit_tso()
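
    txq_submit_tso() first checks that tso_count_descs(skb) descriptors still fit in the ring
    (line 828), then walks the packet segment by segment: each segment gets one header placed
    by txq_put_hdr_tso() followed by data descriptors from txq_put_data_tso(), and only then is
    the queue enabled and tx_desc_count bumped. The per-segment descriptor arithmetic is simple;
    a userspace sketch of it, with gso size and payload length as plain parameters rather than
    skb fields:

        #include <stdio.h>

        /* split payload_len bytes of TCP payload into mss-sized segments and
         * report how many descriptors the segments consume: one for the copied
         * header plus one per data chunk (real code may need several data
         * descriptors per segment if the payload crosses fragments) */
        int count_tso_descs_sketch(int payload_len, int mss)
        {
                int descs = 0;

                while (payload_len > 0) {
                        int seg = payload_len < mss ? payload_len : mss;

                        descs += 1    /* header descriptor for this segment */
                               + 1;   /* data descriptor */
                        payload_len -= seg;
                }
                return descs;
        }

        int main(void)
        {
                /* e.g. 64 KiB of payload at a 1448-byte MSS */
                printf("%d descriptors\n", count_tso_descs_sketch(65536, 1448));
                return 0;
        }
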
890 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) in txq_submit_frag_skb() argument
892 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_submit_frag_skb()
902 tx_index = txq->tx_curr_desc++; in txq_submit_frag_skb()
903 if (txq->tx_curr_desc == txq->tx_ring_size) in txq_submit_frag_skb()
904 txq->tx_curr_desc = 0; in txq_submit_frag_skb()
905 desc = &txq->tx_desc_area[tx_index]; in txq_submit_frag_skb()
906 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE; in txq_submit_frag_skb()
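
    txq_submit_frag_skb() tags each fragment descriptor with DESC_DMA_MAP_PAGE, while the linear
    part and the TSO paths use DESC_DMA_MAP_SINGLE (lines 738, 751, 906, 957); reclaim later
    reads this per-descriptor tag to choose the matching dma_unmap_*() call. A sketch of that
    bookkeeping with an illustrative enum and ring size:

        /* illustrative per-descriptor mapping tag, mirroring the driver's
         * tx_desc_mapping[] array of DESC_DMA_MAP_SINGLE / DESC_DMA_MAP_PAGE */
        enum sketch_map_type {
                MAP_NONE = 0,
                MAP_SINGLE,      /* linear data mapped with dma_map_single()      */
                MAP_PAGE,        /* page fragments, e.g. via skb_frag_dma_map()   */
        };

        struct sketch_ring {
                enum sketch_map_type mapping[512];   /* one tag per descriptor */
        };

        /* at reclaim time the tag selects the matching unmap call */
        const char *unmap_kind(const struct sketch_ring *r, unsigned idx)
        {
                switch (r->mapping[idx]) {
                case MAP_SINGLE: return "dma_unmap_single";
                case MAP_PAGE:   return "dma_unmap_page";
                default:         return "nothing to unmap";
                }
        }
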
928 static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb, in txq_submit_skb() argument
931 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_submit_skb()
942 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { in txq_submit_skb()
953 tx_index = txq->tx_curr_desc++; in txq_submit_skb()
954 if (txq->tx_curr_desc == txq->tx_ring_size) in txq_submit_skb()
955 txq->tx_curr_desc = 0; in txq_submit_skb()
956 desc = &txq->tx_desc_area[tx_index]; in txq_submit_skb()
957 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; in txq_submit_skb()
960 txq_submit_frag_skb(txq, skb); in txq_submit_skb()
972 __skb_queue_tail(&txq->tx_skb, skb); in txq_submit_skb()
981 mp->work_tx_end &= ~(1 << txq->index); in txq_submit_skb()
985 txq_enable(txq); in txq_submit_skb()
987 txq->tx_desc_count += nr_frags + 1; in txq_submit_skb()
996 struct tx_queue *txq; in mv643xx_eth_xmit() local
1000 txq = mp->txq + queue; in mv643xx_eth_xmit()
1012 ret = txq_submit_tso(txq, skb, dev); in mv643xx_eth_xmit()
1014 ret = txq_submit_skb(txq, skb, dev); in mv643xx_eth_xmit()
1016 txq->tx_bytes += length; in mv643xx_eth_xmit()
1017 txq->tx_packets++; in mv643xx_eth_xmit()
1019 if (txq->tx_desc_count >= txq->tx_stop_threshold) in mv643xx_eth_xmit()
1022 txq->tx_dropped++; in mv643xx_eth_xmit()
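
    mv643xx_eth_xmit() picks the queue, hands the skb to txq_submit_tso() or txq_submit_skb(),
    accounts bytes and packets, and stops the software queue once tx_desc_count reaches
    tx_stop_threshold; together with txq_maybe_wake() this gives a stop/wake hysteresis whose
    thresholds txq_init() derives from the ring size (lines 2045-2046). A sketch of that
    arithmetic; the numbers below are illustrative, not the driver's:

        #include <stdio.h>

        int main(void)
        {
                int tx_ring_size  = 512;
                int max_skb_descs = 36;    /* stand-in for MV643XX_MAX_SKB_DESCS */

                int stop = tx_ring_size - max_skb_descs;   /* stop the queue here  */
                int wake = stop / 2;                       /* ...and wake it here  */

                printf("stop at %d in-flight descriptors, wake at %d\n", stop, wake);
                return 0;
        }
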
1031 static void txq_kick(struct tx_queue *txq) in txq_kick() argument
1033 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_kick()
1034 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_kick()
1040 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) in txq_kick()
1043 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); in txq_kick()
1044 expected_ptr = (u32)txq->tx_desc_dma + in txq_kick()
1045 txq->tx_curr_desc * sizeof(struct tx_desc); in txq_kick()
1048 txq_enable(txq); in txq_kick()
1053 mp->work_tx_end &= ~(1 << txq->index); in txq_kick()
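
    txq_kick() restarts a queue the hardware stopped while descriptors were still pending: if
    the enable bit in TXQ_COMMAND is already set there is nothing to do; otherwise the hardware's
    current descriptor pointer is compared against the address software expects for tx_curr_desc
    (tx_desc_dma + index * sizeof(struct tx_desc)), and a mismatch means outstanding work, so the
    queue is enabled again at line 1048. A sketch of the comparison with the register read passed
    in as a parameter (illustrative types throughout):

        #include <stdbool.h>
        #include <stdint.h>

        struct sketch_desc { uint32_t fields[4]; };   /* stand-in for struct tx_desc */

        struct sketch_txq {
                uint32_t tx_desc_dma;     /* DMA base of the descriptor ring */
                unsigned tx_curr_desc;    /* next slot software will fill */
        };

        /* hw_desc_ptr would come from rdlp(mp, TXQ_CURRENT_DESC_PTR(index));
         * a mismatch with the expected address means descriptors are still
         * pending, so the queue should be re-enabled */
        bool txq_needs_kick(const struct sketch_txq *txq, uint32_t hw_desc_ptr)
        {
                uint32_t expected = txq->tx_desc_dma +
                        txq->tx_curr_desc * (uint32_t)sizeof(struct sketch_desc);

                return hw_desc_ptr != expected;
        }
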
1056 static int txq_reclaim(struct tx_queue *txq, int budget, int force) in txq_reclaim() argument
1058 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reclaim()
1059 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_reclaim()
1065 while (reclaimed < budget && txq->tx_desc_count > 0) { in txq_reclaim()
1071 tx_index = txq->tx_used_desc; in txq_reclaim()
1072 desc = &txq->tx_desc_area[tx_index]; in txq_reclaim()
1073 desc_dma_map = txq->tx_desc_mapping[tx_index]; in txq_reclaim()
1083 txq->tx_used_desc = tx_index + 1; in txq_reclaim()
1084 if (txq->tx_used_desc == txq->tx_ring_size) in txq_reclaim()
1085 txq->tx_used_desc = 0; in txq_reclaim()
1088 txq->tx_desc_count--; in txq_reclaim()
1090 if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { in txq_reclaim()
1105 struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); in txq_reclaim()
1121 mp->work_tx &= ~(1 << txq->index); in txq_reclaim()
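
    txq_reclaim() walks the ring from tx_used_desc, retiring at most budget descriptors: each
    step advances and wraps the used index, decrements tx_desc_count, unmaps the buffer unless
    IS_TSO_HEADER() says it points into the shared header area, and dequeues the finished skb
    from txq->tx_skb. A stripped-down sketch of that loop, with the DMA and skb handling reduced
    to comments and illustrative types:

        struct sketch_txq {
                unsigned tx_used_desc;     /* oldest descriptor not yet retired */
                unsigned tx_ring_size;
                int      tx_desc_count;    /* descriptors currently in flight */
        };

        int txq_reclaim_sketch(struct sketch_txq *txq, int budget)
        {
                int reclaimed = 0;

                while (reclaimed < budget && txq->tx_desc_count > 0) {
                        unsigned tx_index = txq->tx_used_desc;

                        /* the real loop stops here if the descriptor is still
                         * owned by the hardware (BUFFER_OWNED_BY_DMA) */

                        txq->tx_used_desc = tx_index + 1;
                        if (txq->tx_used_desc == txq->tx_ring_size)
                                txq->tx_used_desc = 0;

                        reclaimed++;
                        txq->tx_desc_count--;

                        /* real code: dma_unmap_single()/dma_unmap_page() unless
                         * IS_TSO_HEADER(txq, desc->buf_ptr), then free the skb
                         * dequeued from txq->tx_skb */
                }
                return reclaimed;
        }
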
1164 static void txq_set_rate(struct tx_queue *txq, int rate, int burst) in txq_set_rate() argument
1166 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_set_rate()
1178 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); in txq_set_rate()
1179 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); in txq_set_rate()
1182 static void txq_set_fixed_prio_mode(struct tx_queue *txq) in txq_set_fixed_prio_mode() argument
1184 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_set_fixed_prio_mode()
1203 val |= 1 << txq->index; in txq_set_fixed_prio_mode()
1261 struct tx_queue *txq = mp->txq + i; in mv643xx_eth_get_stats() local
1263 tx_packets += txq->tx_packets; in mv643xx_eth_get_stats()
1264 tx_bytes += txq->tx_bytes; in mv643xx_eth_get_stats()
1265 tx_dropped += txq->tx_dropped; in mv643xx_eth_get_stats()
2031 struct tx_queue *txq = mp->txq + index; in txq_init() local
2037 txq->index = index; in txq_init()
2039 txq->tx_ring_size = mp->tx_ring_size; in txq_init()
2045 txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS; in txq_init()
2046 txq->tx_wake_threshold = txq->tx_stop_threshold / 2; in txq_init()
2048 txq->tx_desc_count = 0; in txq_init()
2049 txq->tx_curr_desc = 0; in txq_init()
2050 txq->tx_used_desc = 0; in txq_init()
2052 size = txq->tx_ring_size * sizeof(struct tx_desc); in txq_init()
2055 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, in txq_init()
2057 txq->tx_desc_dma = mp->tx_desc_sram_addr; in txq_init()
2059 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, in txq_init()
2060 size, &txq->tx_desc_dma, in txq_init()
2064 if (txq->tx_desc_area == NULL) { in txq_init()
2069 memset(txq->tx_desc_area, 0, size); in txq_init()
2071 txq->tx_desc_area_size = size; in txq_init()
2073 tx_desc = txq->tx_desc_area; in txq_init()
2074 for (i = 0; i < txq->tx_ring_size; i++) { in txq_init()
2079 if (nexti == txq->tx_ring_size) in txq_init()
2083 txd->next_desc_ptr = txq->tx_desc_dma + in txq_init()
2087 txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char), in txq_init()
2089 if (!txq->tx_desc_mapping) { in txq_init()
2095 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, in txq_init()
2096 txq->tx_ring_size * TSO_HEADER_SIZE, in txq_init()
2097 &txq->tso_hdrs_dma, GFP_KERNEL); in txq_init()
2098 if (txq->tso_hdrs == NULL) { in txq_init()
2102 skb_queue_head_init(&txq->tx_skb); in txq_init()
2107 kfree(txq->tx_desc_mapping); in txq_init()
2110 iounmap(txq->tx_desc_area); in txq_init()
2112 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, in txq_init()
2113 txq->tx_desc_area, txq->tx_desc_dma); in txq_init()
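
    txq_init() sizes the ring, allocates the descriptor array (from SRAM via ioremap() when
    available, otherwise with dma_alloc_coherent()), links each descriptor's next_desc_ptr to
    the DMA address of its successor so the last entry wraps back to slot 0, and finally
    allocates the per-queue TSO header area. A sketch of just the chaining arithmetic, using a
    hypothetical descriptor layout:

        #include <stdint.h>

        /* illustrative descriptor: only the next pointer matters for the sketch */
        struct sketch_desc {
                uint32_t next_desc_ptr;
                uint32_t pad[3];
        };

        /* link each descriptor to the DMA address of the next one, wrapping the
         * last entry back to slot 0, as the loop in txq_init() does */
        void chain_ring(struct sketch_desc *ring, unsigned ring_size,
                        uint32_t ring_dma)
        {
                for (unsigned i = 0; i < ring_size; i++) {
                        unsigned nexti = i + 1;

                        if (nexti == ring_size)
                                nexti = 0;
                        ring[i].next_desc_ptr = ring_dma +
                                nexti * (uint32_t)sizeof(struct sketch_desc);
                }
        }
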
2117 static void txq_deinit(struct tx_queue *txq) in txq_deinit() argument
2119 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_deinit()
2121 txq_disable(txq); in txq_deinit()
2122 txq_reclaim(txq, txq->tx_ring_size, 1); in txq_deinit()
2124 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); in txq_deinit()
2126 if (txq->index == 0 && in txq_deinit()
2127 txq->tx_desc_area_size <= mp->tx_desc_sram_size) in txq_deinit()
2128 iounmap(txq->tx_desc_area); in txq_deinit()
2130 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, in txq_deinit()
2131 txq->tx_desc_area, txq->tx_desc_dma); in txq_deinit()
2132 kfree(txq->tx_desc_mapping); in txq_deinit()
2134 if (txq->tso_hdrs) in txq_deinit()
2136 txq->tx_ring_size * TSO_HEADER_SIZE, in txq_deinit()
2137 txq->tso_hdrs, txq->tso_hdrs_dma); in txq_deinit()
2207 struct tx_queue *txq = mp->txq + i; in handle_link_event() local
2209 txq_reclaim(txq, txq->tx_ring_size, 1); in handle_link_event()
2210 txq_reset_hw_ptr(txq); in handle_link_event()
2283 txq_kick(mp->txq + queue); in mv643xx_eth_poll()
2285 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); in mv643xx_eth_poll()
2286 txq_maybe_wake(mp->txq + queue); in mv643xx_eth_poll()
2350 struct tx_queue *txq = mp->txq + i; in port_start() local
2352 txq_reset_hw_ptr(txq); in port_start()
2353 txq_set_rate(txq, 1000000000, 16777216); in port_start()
2354 txq_set_fixed_prio_mode(txq); in port_start()
2461 txq_deinit(mp->txq + i); in mv643xx_eth_open()
2494 txq_disable(mp->txq + i); in port_reset()
2538 txq_deinit(mp->txq + i); in mv643xx_eth_stop()