
Lines matching "xrx200-net"

// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */
/* Remove Layer-2 header from packets from PMAC to DMA */
/* in xrx200_pmac_r32() */
        return __raw_readl(priv->pmac_reg + offset);

/* in xrx200_pmac_w32() */
        __raw_writel(val, priv->pmac_reg + offset);
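
A minimal sketch of how these one-line MMIO accessors are presumably wrapped; the signatures and parameter names are assumptions inferred from the fragments above, not part of the matched output:

static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
        return __raw_readl(priv->pmac_reg + offset);
}

static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
        __raw_writel(val, priv->pmac_reg + offset);
}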
/* in xrx200_flush_dma() */
        struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

        if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
        ...
        desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
        ...
        ch->dma.desc++;
        ch->dma.desc %= LTQ_DESC_NUM;
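
The matched lines are the body of a per-descriptor loop. A plausible reconstruction, assuming the loop walks the whole ring and that the elided length field is XRX200_DMA_DATA_LEN (both assumptions, based on the refill logic in xrx200_alloc_skb() below):

static void xrx200_flush_dma(struct xrx200_chan *ch)
{
        int i;

        for (i = 0; i < LTQ_DESC_NUM; i++) {
                struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

                /* stop at the first descriptor the hardware still owns */
                if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
                        break;

                /* hand the slot back to the DMA engine; length assumed */
                desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
                            XRX200_DMA_DATA_LEN;
                ch->dma.desc++;
                ch->dma.desc %= LTQ_DESC_NUM;
        }
}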
/* in xrx200_open() */
        napi_enable(&priv->chan_tx.napi);
        ltq_dma_open(&priv->chan_tx.dma);
        ltq_dma_enable_irq(&priv->chan_tx.dma);

        napi_enable(&priv->chan_rx.napi);
        ltq_dma_open(&priv->chan_rx.dma);
        ...
        xrx200_flush_dma(&priv->chan_rx);
        ltq_dma_enable_irq(&priv->chan_rx.dma);
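
/*
 * Note the asymmetry between the two channels: the RX interrupt is only
 * enabled after xrx200_flush_dma() has recycled any descriptors that
 * completed while the interface was down, presumably so that stale
 * packets are never handed to the stack.
 */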
/* in xrx200_close() */
        napi_disable(&priv->chan_rx.napi);
        ltq_dma_close(&priv->chan_rx.dma);

        napi_disable(&priv->chan_tx.napi);
        ltq_dma_close(&priv->chan_tx.dma);
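
/*
 * Teardown mirrors xrx200_open() in reverse: RX is quiesced before TX,
 * and each channel's NAPI context is disabled before its DMA channel
 * is closed.
 */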
/* in xrx200_alloc_skb() */
        struct sk_buff *skb = ch->skb[ch->dma.desc];
        ...
        ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
        ...
        if (!ch->skb[ch->dma.desc]) {
                ret = -ENOMEM;
        ...
        mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
        ...
        if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
                dev_kfree_skb_any(ch->skb[ch->dma.desc]);
                ch->skb[ch->dma.desc] = skb;
                ret = -ENOMEM;
        ...
        ch->dma.desc_base[ch->dma.desc].addr = mapping;
        ...
        ch->dma.desc_base[ch->dma.desc].ctl =
        ...
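
/*
 * The skb previously parked in this ring slot is saved at entry so that
 * a DMA mapping failure can put it back; the descriptor then keeps a
 * valid buffer and only the refill attempt fails with -ENOMEM.
 */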
/* in xrx200_hw_receive() */
        struct xrx200_priv *priv = ch->priv;
        struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
        struct sk_buff *skb = ch->skb[ch->dma.desc];
        int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
        struct net_device *net_dev = priv->net_dev;
        ...
        ch->dma.desc++;
        ch->dma.desc %= LTQ_DESC_NUM;
        ...
                net_dev->stats.rx_dropped++;
        ...
        skb->protocol = eth_type_trans(skb, net_dev);
        ...
        net_dev->stats.rx_packets++;
        net_dev->stats.rx_bytes += len;
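
/*
 * The payload length comes from the size field of the completed
 * descriptor, the ring index advances modulo LTQ_DESC_NUM, and the skb
 * is tagged via eth_type_trans() before the RX counters are updated;
 * the rx_dropped path presumably covers a failed ring refill.
 */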
/* in xrx200_poll_rx() */
                struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

                if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
        ...
        if (napi_complete_done(&ch->napi, rx))
                ltq_dma_enable_irq(&ch->dma);
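
/*
 * Standard NAPI completion: the channel interrupt is re-enabled only
 * when napi_complete_done() reports that polling really finished; if
 * the budget was exhausted, the interrupt stays masked and polling
 * continues.
 */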
/* in xrx200_tx_housekeeping() */
        struct net_device *net_dev = ch->priv->net_dev;
        ...
                struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

                if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
                        struct sk_buff *skb = ch->skb[ch->tx_free];
        ...
                        bytes += skb->len;
                        ch->skb[ch->tx_free] = NULL;
        ...
                        memset(&ch->dma.desc_base[ch->tx_free], 0,
        ...
                        ch->tx_free++;
                        ch->tx_free %= LTQ_DESC_NUM;
        ...
        net_dev->stats.tx_packets += pkts;
        net_dev->stats.tx_bytes += bytes;
        netdev_completed_queue(ch->priv->net_dev, pkts, bytes);
        ...
        if (napi_complete_done(&ch->napi, pkts))
                ltq_dma_enable_irq(&ch->dma);
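
/*
 * TX reclaim walks the ring from tx_free, frees each completed skb,
 * clears its descriptor, and reports the totals to byte queue limits
 * via netdev_completed_queue() so the stack can pace the TX queue.
 */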
/* in xrx200_start_xmit() */
        struct xrx200_chan *ch = &priv->chan_tx;
        struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
        ...
        skb->dev = net_dev;
        ...
                net_dev->stats.tx_dropped++;
        ...
        len = skb->len;

        if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
        ...
        ch->skb[ch->dma.desc] = skb;

        mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(priv->dev, mapping)))
        ...
        desc->addr = mapping - byte_offset;
        ...
        desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
        ...
        ch->dma.desc++;
        ch->dma.desc %= LTQ_DESC_NUM;
        if (ch->dma.desc == ch->tx_free)
        ...
        net_dev->stats.tx_dropped++;
        net_dev->stats.tx_errors++;
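
/*
 * A slot counts as busy when its descriptor is still owned by, or
 * completed for, the hardware, or when its skb has not been reclaimed
 * yet. Catching up with tx_free right after queueing presumably stops
 * the queue until housekeeping frees a slot, and the byte_offset
 * subtracted from the mapping is assumed to realign the buffer address
 * for the DMA engine; neither detail is shown in the matched lines.
 */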
/* in xrx200_dma_irq() */
        if (napi_schedule_prep(&ch->napi)) {
                ltq_dma_disable_irq(&ch->dma);
                __napi_schedule(&ch->napi);
        }

        ltq_dma_ack_irq(&ch->dma);
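
A sketch of the complete handler, assuming the conventional irqreturn_t signature and that the per-channel pointer arrives as the dev_id cookie (consistent with the &priv->chan_rx / &priv->chan_tx arguments passed to devm_request_irq() below):

static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
        struct xrx200_chan *ch = ptr;

        if (napi_schedule_prep(&ch->napi)) {
                /* mask the channel interrupt for the duration of the poll */
                ltq_dma_disable_irq(&ch->dma);
                __napi_schedule(&ch->napi);
        }

        /* acknowledge the interrupt even if NAPI was already scheduled */
        ltq_dma_ack_irq(&ch->dma);

        return IRQ_HANDLED;
}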
/* in xrx200_dma_init() */
        struct xrx200_chan *ch_rx = &priv->chan_rx;
        struct xrx200_chan *ch_tx = &priv->chan_tx;
        ...
        ch_rx->dma.nr = XRX200_DMA_RX;
        ch_rx->dma.dev = priv->dev;
        ch_rx->priv = priv;

        ltq_dma_alloc_rx(&ch_rx->dma);
        for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
             ch_rx->dma.desc++) {
        ...
        ch_rx->dma.desc = 0;
        ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
                               "xrx200_net_rx", &priv->chan_rx);
        ...
                dev_err(priv->dev, "failed to request RX irq %d\n",
                        ch_rx->dma.irq);
        ...
        ch_tx->dma.nr = XRX200_DMA_TX;
        ch_tx->dma.dev = priv->dev;
        ch_tx->priv = priv;

        ltq_dma_alloc_tx(&ch_tx->dma);
        ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
                               "xrx200_net_tx", &priv->chan_tx);
        ...
                dev_err(priv->dev, "failed to request TX irq %d\n",
                        ch_tx->dma.irq);
        ...
        ltq_dma_free(&ch_tx->dma);
        ...
                if (priv->chan_rx.skb[i])
                        dev_kfree_skb_any(priv->chan_rx.skb[i]);
        ...
        ltq_dma_free(&ch_rx->dma);
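
/*
 * The error path unwinds in reverse order of setup: free the TX
 * channel first, then every skb allocated into the RX ring, then the
 * RX channel itself. The IRQs need no explicit release because
 * devm_request_irq() ties their lifetime to the device.
 */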
/* in xrx200_hw_cleanup() */
        ltq_dma_free(&priv->chan_tx.dma);
        ltq_dma_free(&priv->chan_rx.dma);
        ...
                dev_kfree_skb_any(priv->chan_rx.skb[i]);
/* in xrx200_probe() */
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        ...
                return -ENOMEM;
        ...
        priv->net_dev = net_dev;
        priv->dev = dev;

        net_dev->netdev_ops = &xrx200_netdev_ops;
        ...
        net_dev->min_mtu = ETH_ZLEN;
        net_dev->max_mtu = XRX200_DMA_DATA_LEN;
        ...
                return -ENOENT;
        ...
        priv->pmac_reg = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->pmac_reg)) {
        ...
                return PTR_ERR(priv->pmac_reg);
        ...
        priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
        if (priv->chan_rx.dma.irq < 0)
                return -ENOENT;
        priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
        if (priv->chan_tx.dma.irq < 0)
                return -ENOENT;
        ...
        priv->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(priv->clk)) {
        ...
                return PTR_ERR(priv->clk);
        ...
        ether_addr_copy(net_dev->dev_addr, mac);
        ...
        err = clk_prepare_enable(priv->clk);
        ...
        netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
        netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
        ...
        clk_disable_unprepare(priv->clk);
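
/*
 * Probe gathers every resource up front (the PMAC register block, the
 * "rx" and "tx" interrupts, the clock, and the MAC address) before
 * enabling the clock and registering NAPI. Thanks to the devm_*
 * helpers, clk_disable_unprepare() is the main thing the error path
 * must undo by hand. The weight of 32 passed to netif_napi_add() and
 * netif_tx_napi_add() caps the work done per poll.
 */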
/* in xrx200_remove() */
        struct net_device *net_dev = priv->net_dev;
        ...
        netif_napi_del(&priv->chan_tx.napi);
        netif_napi_del(&priv->chan_rx.napi);
        ...
        clk_disable_unprepare(priv->clk);
/* OF match table entry */
        { .compatible = "lantiq,xrx200-net" },

/* platform driver name */
        .name = "lantiq,xrx200-net",

MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
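
For context, the registration boilerplate around the two matched "lantiq,xrx200-net" lines plausibly follows the standard platform-driver pattern; the xrx200_match and xrx200_driver identifiers are assumptions, not part of the matched output:

static const struct of_device_id xrx200_match[] = {
        { .compatible = "lantiq,xrx200-net" },
        {},
};
MODULE_DEVICE_TABLE(of, xrx200_match);

static struct platform_driver xrx200_driver = {
        .probe = xrx200_probe,
        .remove = xrx200_remove,
        .driver = {
                .name = "lantiq,xrx200-net",
                .of_match_table = xrx200_match,
        },
};

module_platform_driver(xrx200_driver);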