/*
 * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the ARC EMAC 10100 (hardware revision 5)
 *
 * Contributors:
 *		Amit Bhor
 *		Sameer Dhavale
 *		Vineet Gupta
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>

#include "emac.h"

/**
 * arc_emac_tx_avail - Return the number of available slots in the Tx ring.
 * @priv: Pointer to ARC EMAC private data structure.
 *
 * returns: the number of slots available for transmission in the Tx ring.
 */
static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
{
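	/* One BD is always kept unused so that a full ring (avail == 0)
	 * can be told apart from an empty one (txbd_curr == txbd_dirty).
	 */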
	return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
}

/**
 * arc_emac_adjust_link - Adjust the PHY link duplex.
 * @ndev:	Pointer to the net_device structure.
 *
 * This function is called to change the duplex setting after auto negotiation
 * is done by the PHY.
 */
static void arc_emac_adjust_link(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = priv->phy_dev;
	unsigned int reg, state_changed = 0;

	if (priv->link != phy_dev->link) {
		priv->link = phy_dev->link;
		state_changed = 1;
	}

	if (priv->speed != phy_dev->speed) {
		priv->speed = phy_dev->speed;
		state_changed = 1;
		if (priv->set_mac_speed)
			priv->set_mac_speed(priv, priv->speed);
	}

	if (priv->duplex != phy_dev->duplex) {
		reg = arc_reg_get(priv, R_CTRL);

		if (DUPLEX_FULL == phy_dev->duplex)
			reg |= ENFL_MASK;
		else
			reg &= ~ENFL_MASK;

		arc_reg_set(priv, R_CTRL, reg);
		priv->duplex = phy_dev->duplex;
		state_changed = 1;
	}

	if (state_changed)
		phy_print_status(phy_dev);
}

/**
 * arc_emac_get_settings - Get PHY settings.
 * @ndev:	Pointer to net_device structure.
 * @cmd:	Pointer to ethtool_cmd structure.
 *
 * This implements the ethtool command for getting PHY settings. If the PHY
 * could not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to get the PHY settings.
 * Issue "ethtool ethX" at the Linux prompt to execute this function.
 */
static int arc_emac_get_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	return phy_ethtool_gset(priv->phy_dev, cmd);
}

/**
 * arc_emac_set_settings - Set PHY settings as passed in the argument.
 * @ndev:	Pointer to net_device structure.
 * @cmd:	Pointer to ethtool_cmd structure.
 *
 * This implements the ethtool command for setting various PHY settings. If
 * the PHY could not be found, the function returns -ENODEV. This function
 * calls the relevant PHY ethtool API to set the PHY.
 * Issue e.g. "ethtool -s ethX speed 1000" at the Linux prompt to execute this
 * function.
 */
static int arc_emac_set_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return phy_ethtool_sset(priv->phy_dev, cmd);
}

/**
 * arc_emac_get_drvinfo - Get EMAC driver information.
 * @ndev:	Pointer to net_device structure.
 * @info:	Pointer to ethtool_drvinfo structure.
 *
 * This implements the ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" at the Linux prompt to execute this function.
 */
static void arc_emac_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
	strlcpy(info->version, priv->drv_version, sizeof(info->version));
}

static const struct ethtool_ops arc_emac_ethtool_ops = {
	.get_settings	= arc_emac_get_settings,
	.set_settings	= arc_emac_set_settings,
	.get_drvinfo	= arc_emac_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

#define FIRST_OR_LAST_MASK	(FIRST_MASK | LAST_MASK)

/**
 * arc_emac_tx_clean - clears Tx BDs already processed by the EMAC.
 * @ndev:	Pointer to the network device.
 */
static void arc_emac_tx_clean(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		unsigned int *txbd_dirty = &priv->txbd_dirty;
		struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
		struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty];
		struct sk_buff *skb = tx_buff->skb;
		unsigned int info = le32_to_cpu(txbd->info);

		if ((info & FOR_EMAC) || !txbd->data || !skb)
			break;

		if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
			stats->tx_errors++;
			stats->tx_dropped++;

			if (info & DEFR)
				stats->tx_carrier_errors++;

			if (info & LTCL)
				stats->collisions++;

			if (info & UFLO)
				stats->tx_fifo_errors++;
		} else if (likely(info & FIRST_OR_LAST_MASK)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
		}

		dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
				 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);

		/* Return the sk_buff to the system */
		dev_kfree_skb_irq(skb);

		txbd->data = 0;
		txbd->info = 0;
		tx_buff->skb = NULL;

		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
	}

	/* Ensure that txbd_dirty is visible to tx() before checking
	 * for queue stopped.
	 */
	smp_mb();

	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
		netif_wake_queue(ndev);
}

/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev:	Pointer to the network device.
 * @budget:	How many BDs to process on 1 call.
 *
 * returns:	Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packets to the upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD,
		 * so next time the driver starts from this + 1.
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		/* Prepare the BD for the next cycle: call netif_receive_skb()
		 * only if a new skb was allocated and mapped, to avoid holes
		 * in the Rx FIFO.
		 */
		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
		if (unlikely(!skb)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot allocate skb\n");
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		addr = dma_map_single(&ndev->dev, (void *)skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot map dma buffer\n");
			dev_kfree_skb(skb);
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		/* Unmap previously mapped skb */
		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb_put(rx_buff->skb, pktlen);
		rx_buff->skb->dev = ndev;
		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);

		netif_receive_skb(rx_buff->skb);

		rx_buff->skb = skb;
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}

/**
 * arc_emac_poll - NAPI poll handler.
 * @napi:	Pointer to napi_struct structure.
 * @budget:	How many BDs to process on 1 call.
 *
 * returns:	Number of processed BDs
 */
static int arc_emac_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	arc_emac_tx_clean(ndev);

	work_done = arc_emac_rx(ndev, budget);
	if (work_done < budget) {
		napi_complete(napi);
		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
	}

	return work_done;
}

/**
 * arc_emac_intr - Global interrupt handler for EMAC.
 * @irq:		irq number.
 * @dev_instance:	device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * ARC EMAC has only one interrupt line; the bits raised in the STATUS
 * register tell which event caused the interrupt to fire.
 */
static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int status;

	status = arc_reg_get(priv, R_STATUS);
	status &= ~MDIO_MASK;

	/* Reset all flags except "MDIO complete" */
	arc_reg_set(priv, R_STATUS, status);

	if (status & (RXINT_MASK | TXINT_MASK)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	if (status & ERR_MASK) {
		/* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding
		 * 8-bit error counter overrun.
		 */

		if (status & MSER_MASK) {
			stats->rx_missed_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXCR_MASK) {
			stats->rx_crc_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFR_MASK) {
			stats->rx_frame_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFL_MASK) {
			stats->rx_over_errors += 0x100;
			stats->rx_errors += 0x100;
		}
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void arc_emac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	arc_emac_intr(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 * arc_emac_open - Open the network device.
 * @ndev:	Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function sets up the Rx buffer descriptors, enables EMAC interrupts
 * and the EMAC itself, starts PHY auto-negotiation and starts the Tx queue.
 */
static int arc_emac_open(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = priv->phy_dev;
	int i;

	phy_dev->autoneg = AUTONEG_ENABLE;
	phy_dev->speed = 0;
	phy_dev->duplex = 0;
	phy_dev->advertising &= phy_dev->supported;

	priv->last_rx_bd = 0;

	/* Allocate and set buffers for Rx BD's */
	for (i = 0; i < RX_BD_NUM; i++) {
		dma_addr_t addr;
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);

		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
	}

	/* Clean Tx BD's */
	memset(priv->txbd, 0, TX_RING_SZ);

	/* Initialize logical address filter */
	arc_reg_set(priv, R_LAFL, 0);
	arc_reg_set(priv, R_LAFH, 0);

	/* Set BD ring pointers for device side */
	arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Set CONTROL */
	arc_reg_set(priv, R_CTRL,
		     (RX_BD_NUM << 24) |	/* RX BD table length */
		     (TX_BD_NUM << 16) |	/* TX BD table length */
		     TXRN_MASK | RXRN_MASK);

	napi_enable(&priv->napi);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	phy_start_aneg(priv->phy_dev);

	netif_start_queue(ndev);

	return 0;
}

/**
 * arc_emac_set_rx_mode - Change the receive filtering mode.
 * @ndev:	Pointer to the network device.
 *
 * This function enables/disables promiscuous or all-multicast mode
 * and updates the multicast filtering list of the network device.
 */
static void arc_emac_set_rx_mode(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	if (ndev->flags & IFF_PROMISC) {
		arc_reg_or(priv, R_CTRL, PROM_MASK);
	} else {
		arc_reg_clr(priv, R_CTRL, PROM_MASK);

		if (ndev->flags & IFF_ALLMULTI) {
			arc_reg_set(priv, R_LAFL, ~0);
			arc_reg_set(priv, R_LAFH, ~0);
		} else {
			struct netdev_hw_addr *ha;
			unsigned int filter[2] = { 0, 0 };
			int bit;

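			/* Hash each multicast address with CRC-32; the top
			 * 6 bits index the 64-bit logical address filter
			 * (LAFL holds bits 0-31, LAFH bits 32-63).
			 */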
			netdev_for_each_mc_addr(ha, ndev) {
				bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
				filter[bit >> 5] |= 1 << (bit & 31);
			}

			arc_reg_set(priv, R_LAFL, filter[0]);
			arc_reg_set(priv, R_LAFH, filter[1]);
		}
	}
}

/**
 * arc_emac_stop - Close the network device.
 * @ndev:	Pointer to the network device.
 *
 * This function stops the Tx queue, disables interrupts on the EMAC device
 * and disables the EMAC itself.
 */
static int arc_emac_stop(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);

	/* Disable interrupts */
	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Disable EMAC */
	arc_reg_clr(priv, R_CTRL, EN_MASK);

	return 0;
}

/**
 * arc_emac_stats - Get system network statistics.
 * @ndev:	Pointer to net_device structure.
 *
 * Returns the address of the device statistics structure.
 * Statistics are updated in interrupt handler.
 */
static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned long miss, rxerr;
	u8 rxcrc, rxfram, rxoflow;

	rxerr = arc_reg_get(priv, R_RXERR);
	miss = arc_reg_get(priv, R_MISS);

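	/* R_RXERR packs three 8-bit counters: CRC errors in bits 7:0,
	 * frame errors in bits 15:8 and Rx FIFO overflows in bits 23:16.
	 */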
	rxcrc = rxerr;
	rxfram = rxerr >> 8;
	rxoflow = rxerr >> 16;

	stats->rx_errors += miss;
	stats->rx_errors += rxcrc + rxfram + rxoflow;

	stats->rx_over_errors += rxoflow;
	stats->rx_frame_errors += rxfram;
	stats->rx_crc_errors += rxcrc;
	stats->rx_missed_errors += miss;

	return stats;
}

/**
 * arc_emac_tx - Starts the data transmission.
 * @skb:	sk_buff pointer that contains data to be transmitted.
 * @ndev:	Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *		NETDEV_TX_BUSY, if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission.
 */
static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int len, *txbd_curr = &priv->txbd_curr;
	struct net_device_stats *stats = &ndev->stats;
	__le32 *info = &priv->txbd[*txbd_curr].info;
	dma_addr_t addr;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	len = max_t(unsigned int, ETH_ZLEN, skb->len);

	if (unlikely(!arc_emac_tx_avail(priv))) {
		netif_stop_queue(ndev);
		netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
			      DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
		stats->tx_dropped++;
		stats->tx_errors++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);

	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);

	/* Make sure pointer to data buffer is set */
	wmb();

	skb_tx_timestamp(skb);

	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);

	/* Make sure info word is set */
	wmb();

	priv->tx_buff[*txbd_curr].skb = skb;

	/* Increment index to point to the next BD */
	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;

	/* Ensure that tx_clean() sees the new txbd_curr before
	 * checking the queue status. This prevents an unneeded wake
	 * of the queue in tx_clean().
	 */
	smp_mb();

	if (!arc_emac_tx_avail(priv)) {
		netif_stop_queue(ndev);
		/* Refresh tx_dirty */
		smp_mb();
		if (arc_emac_tx_avail(priv))
			netif_start_queue(ndev);
	}

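	/* Request the EMAC to poll the Tx BD ring (Tx poll demand) */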
	arc_reg_set(priv, R_STATUS, TXPL_MASK);

	return NETDEV_TX_OK;
}

static void arc_emac_set_address_internal(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int addr_low, addr_hi;

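	/* The 6-byte MAC address is split across two registers:
	 * bytes 0-3 go to ADDRL, bytes 4-5 to ADDRH.
	 */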
	addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
	addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);

	arc_reg_set(priv, R_ADDRL, addr_low);
	arc_reg_set(priv, R_ADDRH, addr_hi);
}

/**
 * arc_emac_set_address - Set the MAC address for this device.
 * @ndev:	Pointer to net_device structure.
 * @p:		6-byte address to be written as MAC address.
 *
 * This function copies the HW address from the sockaddr structure to the
 * net_device structure and updates the address in HW.
 *
 * returns:	-EBUSY if the net device is busy, -EADDRNOTAVAIL if the
 *		address is invalid, or 0 if the address is set successfully.
 */
static int arc_emac_set_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	arc_emac_set_address_internal(ndev);

	return 0;
}

static const struct net_device_ops arc_emac_netdev_ops = {
	.ndo_open		= arc_emac_open,
	.ndo_stop		= arc_emac_stop,
	.ndo_start_xmit		= arc_emac_tx,
	.ndo_set_mac_address	= arc_emac_set_address,
	.ndo_get_stats		= arc_emac_stats,
	.ndo_set_rx_mode	= arc_emac_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= arc_emac_poll_controller,
#endif
};

int arc_emac_probe(struct net_device *ndev, int interface)
{
	struct device *dev = ndev->dev.parent;
	struct resource res_regs;
	struct device_node *phy_node;
	struct arc_emac_priv *priv;
	const char *mac_addr;
	unsigned int id, clock_frequency, irq;
	int err;

	/* Get PHY from device tree */
	phy_node = of_parse_phandle(dev->of_node, "phy", 0);
	if (!phy_node) {
		dev_err(dev, "failed to retrieve phy description from device tree\n");
		return -ENODEV;
	}

	/* Get EMAC registers base address from device tree */
	err = of_address_to_resource(dev->of_node, 0, &res_regs);
	if (err) {
		dev_err(dev, "failed to retrieve registers base from device tree\n");
		return -ENODEV;
	}

	/* Get IRQ from device tree */
	irq = irq_of_parse_and_map(dev->of_node, 0);
	if (!irq) {
		dev_err(dev, "failed to retrieve <irq> value from device tree\n");
		return -ENODEV;
	}

	ndev->netdev_ops = &arc_emac_netdev_ops;
	ndev->ethtool_ops = &arc_emac_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	/* FIXME :: no multicast support yet */
	ndev->flags &= ~IFF_MULTICAST;

	priv = netdev_priv(ndev);
	priv->dev = dev;

	priv->regs = devm_ioremap_resource(dev, &res_regs);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);

	if (priv->clk) {
		err = clk_prepare_enable(priv->clk);
		if (err) {
			dev_err(dev, "failed to enable clock\n");
			return err;
		}

		clock_frequency = clk_get_rate(priv->clk);
	} else {
		/* Get CPU clock frequency from device tree */
		if (of_property_read_u32(dev->of_node, "clock-frequency",
					 &clock_frequency)) {
			dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n");
			return -EINVAL;
		}
	}

	id = arc_reg_get(priv, R_ID);

	/* Check for EMAC revision 5 or 7, magic number */
	if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
		dev_err(dev, "ARC EMAC not detected, id=0x%x\n", id);
		err = -ENODEV;
		goto out_clken;
	}
	dev_info(dev, "ARC EMAC detected with id: 0x%x\n", id);

	/* Set poll rate so that it polls every 1 ms */
	arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);

	ndev->irq = irq;
	dev_info(dev, "IRQ is %d\n", ndev->irq);

	/* Register interrupt handler for device */
	err = devm_request_irq(dev, ndev->irq, arc_emac_intr, 0,
			       ndev->name, ndev);
	if (err) {
		dev_err(dev, "could not allocate IRQ\n");
		goto out_clken;
	}

	/* Get MAC address from device tree */
	mac_addr = of_get_mac_address(dev->of_node);

	if (mac_addr)
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
	else
		eth_hw_addr_random(ndev);

	arc_emac_set_address_internal(ndev);
	dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);

	/* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
	priv->rxbd = dmam_alloc_coherent(dev, RX_RING_SZ + TX_RING_SZ,
					 &priv->rxbd_dma, GFP_KERNEL);

	if (!priv->rxbd) {
		dev_err(dev, "failed to allocate data buffers\n");
		err = -ENOMEM;
		goto out_clken;
	}

	priv->txbd = priv->rxbd + RX_BD_NUM;

	priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ;
	dev_dbg(dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring [0x%x]\n",
		(unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma);

	err = arc_mdio_probe(priv);
	if (err) {
		dev_err(dev, "failed to probe MII bus\n");
		goto out_clken;
	}

	priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
				       interface);
	if (!priv->phy_dev) {
		dev_err(dev, "of_phy_connect() failed\n");
		err = -ENODEV;
		goto out_mdio;
	}

	dev_info(dev, "connected to %s phy with id 0x%x\n",
		 priv->phy_dev->drv->name, priv->phy_dev->phy_id);

	netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);

	err = register_netdev(ndev);
	if (err) {
		dev_err(dev, "failed to register network device\n");
		goto out_netif_api;
	}

	return 0;

out_netif_api:
	netif_napi_del(&priv->napi);
	phy_disconnect(priv->phy_dev);
	priv->phy_dev = NULL;
out_mdio:
	arc_mdio_remove(priv);
out_clken:
	if (priv->clk)
		clk_disable_unprepare(priv->clk);
	return err;
}
EXPORT_SYMBOL_GPL(arc_emac_probe);

int arc_emac_remove(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	phy_disconnect(priv->phy_dev);
	priv->phy_dev = NULL;
	arc_mdio_remove(priv);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return 0;
}
EXPORT_SYMBOL_GPL(arc_emac_remove);

MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>");
MODULE_DESCRIPTION("ARC EMAC driver");
MODULE_LICENSE("GPL");