1 // SPDX-License-Identifier: GPL-2.0-only
2 /**
3  * drivers/net/ethernet/micrel/ks8851_mll.c
4  * Copyright (c) 2009 Micrel Inc.
5  */
6 
7 /* Supports:
8  * KS8851 16bit MLL chip from Micrel Inc.
9  */
10 
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 
13 #include <linux/interrupt.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/ethtool.h>
19 #include <linux/cache.h>
20 #include <linux/crc32.h>
21 #include <linux/crc32poly.h>
22 #include <linux/mii.h>
23 #include <linux/platform_device.h>
24 #include <linux/delay.h>
25 #include <linux/slab.h>
26 #include <linux/ks8851_mll.h>
27 #include <linux/of.h>
28 #include <linux/of_device.h>
29 #include <linux/of_net.h>
30 
31 #include "ks8851.h"
32 
33 #define	DRV_NAME	"ks8851_mll"
34 
35 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
36 #define MAX_RECV_FRAMES			255
37 #define MAX_BUF_SIZE			2048
38 #define TX_BUF_SIZE			2000
39 #define RX_BUF_SIZE			2000
40 
41 #define RXCR1_FILTER_MASK    		(RXCR1_RXINVF | RXCR1_RXAE | \
42 					 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
43 #define RXQCR_CMD_CNTL                	(RXQCR_RXFCTE|RXQCR_ADRFE)
44 
45 #define	ENUM_BUS_NONE			0
46 #define	ENUM_BUS_8BIT			1
47 #define	ENUM_BUS_16BIT			2
48 #define	ENUM_BUS_32BIT			3
49 
50 #define MAX_MCAST_LST			32
51 #define HW_MCAST_SIZE			8
52 
53 /**
54  * union ks_tx_hdr - tx header data
55  * @txb: The header as bytes
56  * @txw: The header as 16bit, little-endian words
57  *
58  * A dual representation of the tx header data to allow
59  * access to individual bytes, and to allow 16bit accesses
60  * with 16bit alignment.
61  */
62 union ks_tx_hdr {
63 	u8      txb[4];
64 	__le16  txw[2];
65 };
66 
67 /**
68  * struct ks_net - KS8851 driver private data
69  * @netdev	: The network device we're bound to.
70  * @hw_addr	: start address of data register.
71  * @hw_addr_cmd	: start address of command register.
72  * @txh    	: temporary buffer to save status/length.
73  * @lock	: Lock to ensure that the device is not accessed when busy.
74  * @pdev	: Pointer to platform device.
75  * @mii		: The MII state information for the mii calls.
76  * @frame_head_info   	: frame header information for multi-pkt rx.
77  * @statelock	: Lock on this structure for tx list.
78  * @msg_enable	: The message flags controlling driver output (see ethtool).
79  * @frame_cnt  	: number of frames received.
80  * @bus_width  	: i/o bus width.
81  * @rc_rxqcr	: Cached copy of KS_RXQCR.
82  * @rc_txcr	: Cached copy of KS_TXCR.
83  * @rc_ier	: Cached copy of KS_IER.
84  * @sharedbus  	: Multiplexed (addr and data bus) mode indicator.
85  * @cmd_reg_cache	: command register cached.
86  * @cmd_reg_cache_int	: command register cached. Used in the irq handler.
87  * @promiscuous	: promiscuous mode indicator.
88  * @all_mcast  	: all-multicast mode indicator.
89  * @mcast_lst_size   	: size of multicast list.
90  * @mcast_lst    	: multicast list.
91  * @mcast_bits    	: multicast hash-table enable bits.
92  * @mac_addr   		: MAC address assigned to this device.
93  * @fid    		: frame id.
94  * @extra_byte    	: number of extra bytes prepended to an rx pkt.
95  * @enabled    		: indicator that the device is enabled.
96  *
97  * The @lock ensures that the chip is protected when certain operations are
98  * in progress. When the read or write packet transfer is in progress, most
99  * of the chip registers are not accessible until the transfer is finished and
100  * the DMA has been de-asserted.
101  *
102  * The @statelock is used to protect information in the structure which may
103  * need to be accessed via several sources, such as the network driver layer
104  * or one of the work queues.
105  *
106  */
107 
108 /* Receive multiplex framer header info */
109 struct type_frame_head {
110 	u16	sts;         /* Frame status */
111 	u16	len;         /* Byte count */
112 };
113 
114 struct ks_net {
115 	struct net_device	*netdev;
116 	void __iomem    	*hw_addr;
117 	void __iomem    	*hw_addr_cmd;
118 	union ks_tx_hdr		txh ____cacheline_aligned;
119 	struct mutex      	lock; /* lock to protect chip register access */
120 	struct platform_device *pdev;
121 	struct mii_if_info	mii;
122 	struct type_frame_head	*frame_head_info;
123 	spinlock_t		statelock;
124 	u32			msg_enable;
125 	u32			frame_cnt;
126 	int			bus_width;
127 
128 	u16			rc_rxqcr;
129 	u16			rc_txcr;
130 	u16			rc_ier;
131 	u16			sharedbus;
132 	u16			cmd_reg_cache;
133 	u16			cmd_reg_cache_int;
134 	u16			promiscuous;
135 	u16			all_mcast;
136 	u16			mcast_lst_size;
137 	u8			mcast_lst[MAX_MCAST_LST][ETH_ALEN];
138 	u8			mcast_bits[HW_MCAST_SIZE];
139 	u8			mac_addr[6];
140 	u8                      fid;
141 	u8			extra_byte;
142 	u8			enabled;
143 };
144 
145 static int msg_enable;
146 
147 #define BE3             0x8000      /* Byte Enable 3 */
148 #define BE2             0x4000      /* Byte Enable 2 */
149 #define BE1             0x2000      /* Byte Enable 1 */
150 #define BE0             0x1000      /* Byte Enable 0 */
151 
152 /* register read/write calls.
153  *
154  * All these calls issue transactions to access the chip's registers. They
155  * all require that the necessary lock is held to prevent accesses when the
156  * chip is busy transferring packet data (RX/TX FIFO accesses).
157  */
158 
159 /**
160  * ks_rdreg8 - read 8 bit register from device
161  * @ks	  : The chip information
162  * @offset: The register address
163  *
164  * Read a 8bit register from the chip, returning the result
165  */
166 static u8 ks_rdreg8(struct ks_net *ks, int offset)
167 {
168 	u16 data;
169 	u8 shift_bit = offset & 0x03;
170 	u8 shift_data = (offset & 1) << 3;
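	/* Drive the byte-enable (BE0..BE3) bit that selects the byte lane
	 * holding this register, then pull the wanted byte out of the
	 * 16-bit word read back.
	 */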
171 	ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
172 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
173 	data  = ioread16(ks->hw_addr);
174 	return (u8)(data >> shift_data);
175 }
176 
177 /**
178  * ks_rdreg16 - read 16 bit register from device
179  * @ks	  : The chip information
180  * @offset: The register address
181  *
182  * Read a 16bit register from the chip, returning the result
183  */
184 
185 static u16 ks_rdreg16(struct ks_net *ks, int offset)
186 {
187 	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
188 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
189 	return ioread16(ks->hw_addr);
190 }
191 
192 /**
193  * ks_wrreg8 - write 8bit register value to chip
194  * @ks: The chip information
195  * @offset: The register address
196  * @value: The value to write
197  *
198  */
199 static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
200 {
201 	u8  shift_bit = (offset & 0x03);
202 	u16 value_write = (u16)(value << ((offset & 1) << 3));
203 	ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
204 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
205 	iowrite16(value_write, ks->hw_addr);
206 }
207 
208 /**
209  * ks_wrreg16 - write 16bit register value to chip
210  * @ks: The chip information
211  * @offset: The register address
212  * @value: The value to write
213  *
214  */
215 
216 static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
217 {
218 	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
219 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
220 	iowrite16(value, ks->hw_addr);
221 }
222 
223 /**
224  * ks_inblk - read a block of data from the QMU. This is called after pseudo-DMA mode is enabled.
225  * @ks: The chip state
226  * @wptr: buffer address to save data
227  * @len: length in bytes to read
228  *
229  */
230 static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
231 {
232 	len >>= 1;
233 	while (len--)
234 		*wptr++ = (u16)ioread16(ks->hw_addr);
235 }
236 
237 /**
238  * ks_outblk - write data to the QMU. This is called after pseudo-DMA mode is enabled.
239  * @ks: The chip information
240  * @wptr: buffer address
241  * @len: length in bytes to write
242  *
243  */
244 static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
245 {
246 	len >>= 1;
247 	while (len--)
248 		iowrite16(*wptr++, ks->hw_addr);
249 }
250 
251 static void ks_disable_int(struct ks_net *ks)
252 {
253 	ks_wrreg16(ks, KS_IER, 0x0000);
254 }  /* ks_disable_int */
255 
256 static void ks_enable_int(struct ks_net *ks)
257 {
258 	ks_wrreg16(ks, KS_IER, ks->rc_ier);
259 }  /* ks_enable_int */
260 
261 /**
262  * ks_tx_fifo_space - return the available hardware buffer size.
263  * @ks: The chip information
264  *
265  */
266 static inline u16 ks_tx_fifo_space(struct ks_net *ks)
267 {
268 	return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
269 }
270 
271 /**
272  * ks_save_cmd_reg - save the cached command register for the irq handler.
273  * @ks: The chip information
274  *
275  */
276 static inline void ks_save_cmd_reg(struct ks_net *ks)
277 {
278 	/* The KS8851-MLL has a bug reading back the command register,
279 	 * so rely on software to save its content instead.
280 	 */
281 	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
282 }
283 
284 /**
285  * ks_restore_cmd_reg - restore the command register from the cache and
286  * 	write it to the hardware register.
287  * @ks: The chip information
288  *
289  */
290 static inline void ks_restore_cmd_reg(struct ks_net *ks)
291 {
292 	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
293 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
294 }
295 
296 /**
297  * ks_set_powermode - set power mode of the device
298  * @ks: The chip information
299  * @pwrmode: The power mode value to write to KS_PMECR.
300  *
301  * Change the power mode of the chip.
302  */
303 static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
304 {
305 	unsigned pmecr;
306 
307 	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
308 
309 	ks_rdreg16(ks, KS_GRR);
310 	pmecr = ks_rdreg16(ks, KS_PMECR);
311 	pmecr &= ~PMECR_PM_MASK;
312 	pmecr |= pwrmode;
313 
314 	ks_wrreg16(ks, KS_PMECR, pmecr);
315 }
316 
317 /**
318  * ks_read_config - read chip configuration of bus width.
319  * @ks: The chip information
320  *
321  */
322 static void ks_read_config(struct ks_net *ks)
323 {
324 	u16 reg_data = 0;
325 
326 	/* Regardless of bus width, an 8 bit read should always work. */
327 	reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
328 	reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
329 
330 	/* addr/data bus are multiplexed */
331 	ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
332 
333 	/* There is garbage data when reading from the QMU, the amount
334 	 * depending on the bus width.
335 	 */
336 
337 	if (reg_data & CCR_8BIT) {
338 		ks->bus_width = ENUM_BUS_8BIT;
339 		ks->extra_byte = 1;
340 	} else if (reg_data & CCR_16BIT) {
341 		ks->bus_width = ENUM_BUS_16BIT;
342 		ks->extra_byte = 2;
343 	} else {
344 		ks->bus_width = ENUM_BUS_32BIT;
345 		ks->extra_byte = 4;
346 	}
347 }
348 
349 /**
350  * ks_soft_reset - issue one of the soft resets to the device
351  * @ks: The device state.
352  * @op: The bit(s) to set in the GRR
353  *
354  * Issue the relevant soft-reset command to the device's GRR register
355  * specified by @op.
356  *
357  * Note, the delays are in there as a caution to ensure that the reset
358  * has time to take effect and then complete. Since the datasheet does
359  * not currently specify the exact sequence, we have chosen something
360  * that seems to work with our device.
361  */
362 static void ks_soft_reset(struct ks_net *ks, unsigned op)
363 {
364 	/* Disable interrupt first */
365 	ks_wrreg16(ks, KS_IER, 0x0000);
366 	ks_wrreg16(ks, KS_GRR, op);
367 	mdelay(10);	/* wait a short time to effect reset */
368 	ks_wrreg16(ks, KS_GRR, 0);
369 	mdelay(1);	/* wait for condition to clear */
370 }
371 
372 
373 static void ks_enable_qmu(struct ks_net *ks)
374 {
375 	u16 w;
376 
377 	w = ks_rdreg16(ks, KS_TXCR);
378 	/* Enables QMU Transmit (TXCR). */
379 	ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
380 
381 	/*
382 	 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
383 	 * Enable
384 	 */
385 
386 	w = ks_rdreg16(ks, KS_RXQCR);
387 	ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
388 
389 	/* Enables QMU Receive (RXCR1). */
390 	w = ks_rdreg16(ks, KS_RXCR1);
391 	ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
392 	ks->enabled = true;
393 }  /* ks_enable_qmu */
394 
395 static void ks_disable_qmu(struct ks_net *ks)
396 {
397 	u16	w;
398 
399 	w = ks_rdreg16(ks, KS_TXCR);
400 
401 	/* Disables QMU Transmit (TXCR). */
402 	w  &= ~TXCR_TXE;
403 	ks_wrreg16(ks, KS_TXCR, w);
404 
405 	/* Disables QMU Receive (RXCR1). */
406 	w = ks_rdreg16(ks, KS_RXCR1);
407 	w &= ~RXCR1_RXE ;
408 	ks_wrreg16(ks, KS_RXCR1, w);
409 
410 	ks->enabled = false;
411 
412 }  /* ks_disable_qmu */
413 
414 /**
415  * ks_read_qmu - read 1 pkt data from the QMU.
416  * @ks: The chip information
417  * @buf: buffer address to save 1 pkt
418  * @len: Pkt length
419  * Here is the sequence to read 1 pkt:
420  *	1. set pseudo-DMA mode
421  *	2. read prepend data
422  *	3. read pkt data
423  *	4. reset pseudo-DMA mode
424  */
425 static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
426 {
427 	u32 r =  ks->extra_byte & 0x1 ;
428 	u32 w = ks->extra_byte - r;
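	/* split the prepended dummy bytes into a single odd byte (r), read
	 * with ioread8, and a 16-bit-aligned remainder (w) read via ks_inblk
	 */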
429 
430 	/* 1. set pseudo-DMA mode */
431 	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
432 	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
433 
434 	/* 2. read prepend data */
435 	/*
436 	 * read 4 + extra bytes and discard them:
437 	 * the extra bytes are dummy padding, then 2 bytes of status and 2 of length
438 	 */
439 
440 	/* use likely(r) for 8 bit access for performance */
441 	if (unlikely(r))
442 		ioread8(ks->hw_addr);
443 	ks_inblk(ks, buf, w + 2 + 2);
444 
445 	/* 3. read pkt data */
446 	ks_inblk(ks, buf, ALIGN(len, 4));
447 
448 	/* 4. reset pseudo-DMA mode */
449 	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
450 }
451 
452 /**
453  * ks_rcv - read multiple pkts from the QMU.
454  * @ks: The chip information
455  * @netdev: The network device.
456  *
457  * Read all of the header information before reading the pkt content.
458  * It is not allowed to leave only part of the pkts in the QMU after
459  * the interrupt has been acknowledged.
460  */
461 static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
462 {
463 	u32	i;
464 	struct type_frame_head *frame_hdr = ks->frame_head_info;
465 	struct sk_buff *skb;
466 
467 	ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
468 
469 	/* read all header information */
470 	for (i = 0; i < ks->frame_cnt; i++) {
471 		/* Checking Received packet status */
472 		frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
473 		/* Get packet len from hardware */
474 		frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
475 		frame_hdr++;
476 	}
477 
478 	frame_hdr = ks->frame_head_info;
479 	while (ks->frame_cnt--) {
480 		if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
481 			     frame_hdr->len >= RX_BUF_SIZE ||
482 			     frame_hdr->len <= 0)) {
483 
484 			/* discard an invalid packet */
485 			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
486 			netdev->stats.rx_dropped++;
487 			if (!(frame_hdr->sts & RXFSHR_RXFV))
488 				netdev->stats.rx_frame_errors++;
489 			else
490 				netdev->stats.rx_length_errors++;
491 			frame_hdr++;
492 			continue;
493 		}
494 
495 		skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
496 		if (likely(skb)) {
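			/* 2 bytes of headroom so the IP header following the
			 * 14-byte Ethernet header ends up 4-byte aligned
			 */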
497 			skb_reserve(skb, 2);
498 			/* read data block including CRC 4 bytes */
499 			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
500 			skb_put(skb, frame_hdr->len - 4);
501 			skb->protocol = eth_type_trans(skb, netdev);
502 			netif_rx(skb);
503 			/* exclude CRC size */
504 			netdev->stats.rx_bytes += frame_hdr->len - 4;
505 			netdev->stats.rx_packets++;
506 		} else {
507 			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
508 			netdev->stats.rx_dropped++;
509 		}
510 		frame_hdr++;
511 	}
512 }
513 
514 /**
515  * ks_update_link_status - link status update.
516  * @netdev: The network device.
517  * @ks: The chip information
518  *
519  */
520 
521 static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
522 {
523 	/* check the status of the link */
524 	u32 link_up_status;
525 	if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
526 		netif_carrier_on(netdev);
527 		link_up_status = true;
528 	} else {
529 		netif_carrier_off(netdev);
530 		link_up_status = false;
531 	}
532 	netif_dbg(ks, link, ks->netdev,
533 		  "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
534 }
535 
536 /**
537  * ks_irq - device interrupt handler
538  * @irq: Interrupt number passed to the handler.
539  * @pw: The private word passed to request_irq(), our struct net_device.
540  *
541  * This is the handler invoked to find out what happened
542  *
543  * Read the interrupt status, work out what needs to be done and then clear
544  * any of the interrupts that are not needed.
545  */
546 
547 static irqreturn_t ks_irq(int irq, void *pw)
548 {
549 	struct net_device *netdev = pw;
550 	struct ks_net *ks = netdev_priv(netdev);
551 	u16 status;
552 
553 	/* this should be the first thing done in the IRQ handler */
554 	ks_save_cmd_reg(ks);
555 
556 	status = ks_rdreg16(ks, KS_ISR);
557 	if (unlikely(!status)) {
558 		ks_restore_cmd_reg(ks);
559 		return IRQ_NONE;
560 	}
561 
562 	ks_wrreg16(ks, KS_ISR, status);
563 
564 	if (likely(status & IRQ_RXI))
565 		ks_rcv(ks, netdev);
566 
567 	if (unlikely(status & IRQ_LCI))
568 		ks_update_link_status(netdev, ks);
569 
570 	if (unlikely(status & IRQ_TXI))
571 		netif_wake_queue(netdev);
572 
573 	if (unlikely(status & IRQ_LDI)) {
574 
575 		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
576 		pmecr &= ~PMECR_WKEVT_MASK;
577 		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
578 	}
579 
580 	if (unlikely(status & IRQ_RXOI))
581 		ks->netdev->stats.rx_over_errors++;
582 	/* this should be the last thing done in the IRQ handler */
583 	ks_restore_cmd_reg(ks);
584 	return IRQ_HANDLED;
585 }
586 
587 
588 /**
589  * ks_net_open - open network device
590  * @netdev: The network device being opened.
591  *
592  * Called when the network device is marked active, such as a user executing
593  * 'ifconfig up' on the device.
594  */
595 static int ks_net_open(struct net_device *netdev)
596 {
597 	struct ks_net *ks = netdev_priv(netdev);
598 	int err;
599 
600 #define	KS_INT_FLAGS	IRQF_TRIGGER_LOW
601 	/* lock the card, even if we may not actually do anything
602 	 * else at the moment.
603 	 */
604 
605 	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
606 
607 	/* request the interrupt for the device */
608 	err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
609 
610 	if (err) {
611 		pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
612 		return err;
613 	}
614 
615 	/* wake up powermode to normal mode */
616 	ks_set_powermode(ks, PMECR_PM_NORMAL);
617 	mdelay(1);	/* wait for normal mode to take effect */
618 
619 	ks_wrreg16(ks, KS_ISR, 0xffff);
620 	ks_enable_int(ks);
621 	ks_enable_qmu(ks);
622 	netif_start_queue(ks->netdev);
623 
624 	netif_dbg(ks, ifup, ks->netdev, "network device up\n");
625 
626 	return 0;
627 }
628 
629 /**
630  * ks_net_stop - close network device
631  * @netdev: The device being closed.
632  *
633  * Called to close down a network device which has been active. Cancel any
634  * work, shut down the RX and TX processes and then place the chip into a low
635  * power state whilst it is not being used.
636  */
637 static int ks_net_stop(struct net_device *netdev)
638 {
639 	struct ks_net *ks = netdev_priv(netdev);
640 
641 	netif_info(ks, ifdown, netdev, "shutting down\n");
642 
643 	netif_stop_queue(netdev);
644 
645 	mutex_lock(&ks->lock);
646 
647 	/* turn off the IRQs and ack any outstanding */
648 	ks_wrreg16(ks, KS_IER, 0x0000);
649 	ks_wrreg16(ks, KS_ISR, 0xffff);
650 
651 	/* shutdown RX/TX QMU */
652 	ks_disable_qmu(ks);
653 
654 	/* set powermode to soft power down to save power */
655 	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
656 	free_irq(netdev->irq, netdev);
657 	mutex_unlock(&ks->lock);
658 	return 0;
659 }
660 
661 
662 /**
663  * ks_write_qmu - write 1 pkt data to the QMU.
664  * @ks: The chip information
665  * @pdata: buffer address of the pkt to write
666  * @len: Pkt length in bytes
667  * Here is the sequence to write 1 pkt:
668  *	1. set pseudo-DMA mode
669  *	2. write status/length
670  *	3. write pkt data
671  *	4. reset pseudo-DMA mode
672  *	5. trigger the transmit (enqueue the pkt into the TXQ)
673  *	6. wait until the pkt has been sent
674  */
675 static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
676 {
677 	/* start header at txb[0] to align txw entries */
678 	ks->txh.txw[0] = 0;
679 	ks->txh.txw[1] = cpu_to_le16(len);
680 
681 	/* 1. set pseudo-DMA mode */
682 	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
683 	/* 2. write status/length info */
684 	ks_outblk(ks, ks->txh.txw, 4);
685 	/* 3. write pkt data */
686 	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
687 	/* 4. reset pseudo-DMA mode */
688 	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
689 	/* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
690 	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
691 	/* 6. wait until TXQCR_METFE is auto-cleared */
692 	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
693 		;
694 }
695 
696 /**
697  * ks_start_xmit - transmit packet
698  * @skb		: The buffer to transmit
699  * @netdev	: The device used to transmit the packet.
700  *
701  * Called by the network layer to transmit the @skb.
702  * Interrupts are disabled because tx and rx must be mutually exclusive,
703  * so an IRQ cannot be serviced while a tx is in progress.
704  */
705 static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
706 {
707 	netdev_tx_t retv = NETDEV_TX_OK;
708 	struct ks_net *ks = netdev_priv(netdev);
709 
710 	disable_irq(netdev->irq);
711 	ks_disable_int(ks);
712 	spin_lock(&ks->statelock);
713 
714 	/* Extra space is required:
715 	 * 4 bytes for alignment, 4 for status/length, 4 for CRC
716 	 */
717 
718 	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
719 		ks_write_qmu(ks, skb->data, skb->len);
720 		/* add tx statistics */
721 		netdev->stats.tx_bytes += skb->len;
722 		netdev->stats.tx_packets++;
723 		dev_kfree_skb(skb);
724 	} else
725 		retv = NETDEV_TX_BUSY;
726 	spin_unlock(&ks->statelock);
727 	ks_enable_int(ks);
728 	enable_irq(netdev->irq);
729 	return retv;
730 }
731 
732 /**
733  * ks_start_rx - ready to serve pkts
734  * @ks		: The chip information
735  *
736  */
737 static void ks_start_rx(struct ks_net *ks)
738 {
739 	u16 cntl;
740 
741 	/* Enables QMU Receive (RXCR1). */
742 	cntl = ks_rdreg16(ks, KS_RXCR1);
743 	cntl |= RXCR1_RXE ;
744 	ks_wrreg16(ks, KS_RXCR1, cntl);
745 }  /* ks_start_rx */
746 
747 /**
748  * ks_stop_rx - stop to serve pkts
749  * @ks		: The chip information
750  *
751  */
752 static void ks_stop_rx(struct ks_net *ks)
753 {
754 	u16 cntl;
755 
756 	/* Disables QMU Receive (RXCR1). */
757 	cntl = ks_rdreg16(ks, KS_RXCR1);
758 	cntl &= ~RXCR1_RXE ;
759 	ks_wrreg16(ks, KS_RXCR1, cntl);
760 
761 }  /* ks_stop_rx */
762 
763 static unsigned long const ethernet_polynomial = CRC32_POLY_BE;
764 
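/* Bit-wise big-endian CRC-32 over @length bytes of @data; the result is
 * used to index the multicast hash table.
 */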
765 static unsigned long ether_gen_crc(int length, u8 *data)
766 {
767 	long crc = -1;
768 	while (--length >= 0) {
769 		u8 current_octet = *data++;
770 		int bit;
771 
772 		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
773 			crc = (crc << 1) ^
774 				((crc < 0) ^ (current_octet & 1) ?
775 			ethernet_polynomial : 0);
776 		}
777 	}
778 	return (unsigned long)crc;
779 }  /* ether_gen_crc */
780 
781 /**
782 * ks_set_grpaddr - set multicast information
783 * @ks : The chip information
784 */
785 
786 static void ks_set_grpaddr(struct ks_net *ks)
787 {
788 	u8	i;
789 	u32	index, position, value;
790 
791 	memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
792 
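	/* The top six bits of each address CRC select one of the 64 bits
	 * in the MAHTR0..MAHTR3 hash table registers.
	 */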
793 	for (i = 0; i < ks->mcast_lst_size; i++) {
794 		position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
795 		index = position >> 3;
796 		value = 1 << (position & 7);
797 		ks->mcast_bits[index] |= (u8)value;
798 	}
799 
800 	for (i  = 0; i < HW_MCAST_SIZE; i++) {
801 		if (i & 1) {
802 			ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
803 				(ks->mcast_bits[i] << 8) |
804 				ks->mcast_bits[i - 1]);
805 		}
806 	}
807 }  /* ks_set_grpaddr */
808 
809 /**
810 * ks_clear_mcast - clear multicast information
811 *
812 * @ks : The chip information
813 * This routine removes all mcast addresses set in the hardware.
814 */
815 
816 static void ks_clear_mcast(struct ks_net *ks)
817 {
818 	u16	i, mcast_size;
819 	for (i = 0; i < HW_MCAST_SIZE; i++)
820 		ks->mcast_bits[i] = 0;
821 
822 	mcast_size = HW_MCAST_SIZE >> 2;
823 	for (i = 0; i < mcast_size; i++)
824 		ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
825 }
826 
827 static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
828 {
829 	u16		cntl;
830 	ks->promiscuous = promiscuous_mode;
831 	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
832 	cntl = ks_rdreg16(ks, KS_RXCR1);
833 
834 	cntl &= ~RXCR1_FILTER_MASK;
835 	if (promiscuous_mode)
836 		/* Enable Promiscuous mode */
837 		cntl |= RXCR1_RXAE | RXCR1_RXINVF;
838 	else
839 		/* Disable Promiscuous mode (default normal mode) */
840 		cntl |= RXCR1_RXPAFMA;
841 
842 	ks_wrreg16(ks, KS_RXCR1, cntl);
843 
844 	if (ks->enabled)
845 		ks_start_rx(ks);
846 
847 }  /* ks_set_promis */
848 
849 static void ks_set_mcast(struct ks_net *ks, u16 mcast)
850 {
851 	u16	cntl;
852 
853 	ks->all_mcast = mcast;
854 	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
855 	cntl = ks_rdreg16(ks, KS_RXCR1);
856 	cntl &= ~RXCR1_FILTER_MASK;
857 	if (mcast)
858 		/* Enable "Perfect with Multicast address passed mode" */
859 		cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
860 	else
861 		/**
862 		 * Disable "Perfect with Multicast address passed
863 		 * mode" (normal mode).
864 		 */
865 		cntl |= RXCR1_RXPAFMA;
866 
867 	ks_wrreg16(ks, KS_RXCR1, cntl);
868 
869 	if (ks->enabled)
870 		ks_start_rx(ks);
871 }  /* ks_set_mcast */
872 
873 static void ks_set_rx_mode(struct net_device *netdev)
874 {
875 	struct ks_net *ks = netdev_priv(netdev);
876 	struct netdev_hw_addr *ha;
877 
878 	/* Turn on/off promiscuous mode. */
879 	if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
880 		ks_set_promis(ks,
881 			(u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
882 	/* Turn on/off all mcast mode. */
883 	else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
884 		ks_set_mcast(ks,
885 			(u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
886 	else
887 		ks_set_promis(ks, false);
888 
889 	if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
890 		if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
891 			int i = 0;
892 
893 			netdev_for_each_mc_addr(ha, netdev) {
894 				if (i >= MAX_MCAST_LST)
895 					break;
896 				memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
897 			}
898 			ks->mcast_lst_size = (u8)i;
899 			ks_set_grpaddr(ks);
900 		} else {
901 			/**
902 			 * List too big to support so
903 			 * turn on all mcast mode.
904 			 */
905 			ks->mcast_lst_size = MAX_MCAST_LST;
906 			ks_set_mcast(ks, true);
907 		}
908 	} else {
909 		ks->mcast_lst_size = 0;
910 		ks_clear_mcast(ks);
911 	}
912 } /* ks_set_rx_mode */
913 
914 static void ks_set_mac(struct ks_net *ks, u8 *data)
915 {
916 	u16 *pw = (u16 *)data;
917 	u16 w, u;
918 
919 	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
920 
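	/* Write the address high word first (MARH, MARM, MARL), swapping
	 * the bytes within each 16-bit word to match the register layout.
	 */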
921 	u = *pw++;
922 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
923 	ks_wrreg16(ks, KS_MARH, w);
924 
925 	u = *pw++;
926 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
927 	ks_wrreg16(ks, KS_MARM, w);
928 
929 	u = *pw;
930 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
931 	ks_wrreg16(ks, KS_MARL, w);
932 
933 	memcpy(ks->mac_addr, data, ETH_ALEN);
934 
935 	if (ks->enabled)
936 		ks_start_rx(ks);
937 }
938 
939 static int ks_set_mac_address(struct net_device *netdev, void *paddr)
940 {
941 	struct ks_net *ks = netdev_priv(netdev);
942 	struct sockaddr *addr = paddr;
943 	u8 *da;
944 
945 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
946 
947 	da = (u8 *)netdev->dev_addr;
948 
949 	ks_set_mac(ks, da);
950 	return 0;
951 }
952 
953 static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
954 {
955 	struct ks_net *ks = netdev_priv(netdev);
956 
957 	if (!netif_running(netdev))
958 		return -EINVAL;
959 
960 	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
961 }
962 
963 static const struct net_device_ops ks_netdev_ops = {
964 	.ndo_open		= ks_net_open,
965 	.ndo_stop		= ks_net_stop,
966 	.ndo_do_ioctl		= ks_net_ioctl,
967 	.ndo_start_xmit		= ks_start_xmit,
968 	.ndo_set_mac_address	= ks_set_mac_address,
969 	.ndo_set_rx_mode	= ks_set_rx_mode,
970 	.ndo_validate_addr	= eth_validate_addr,
971 };
972 
973 /* ethtool support */
974 
975 static void ks_get_drvinfo(struct net_device *netdev,
976 			       struct ethtool_drvinfo *di)
977 {
978 	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
979 	strlcpy(di->version, "1.00", sizeof(di->version));
980 	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
981 		sizeof(di->bus_info));
982 }
983 
984 static u32 ks_get_msglevel(struct net_device *netdev)
985 {
986 	struct ks_net *ks = netdev_priv(netdev);
987 	return ks->msg_enable;
988 }
989 
990 static void ks_set_msglevel(struct net_device *netdev, u32 to)
991 {
992 	struct ks_net *ks = netdev_priv(netdev);
993 	ks->msg_enable = to;
994 }
995 
996 static int ks_get_link_ksettings(struct net_device *netdev,
997 				 struct ethtool_link_ksettings *cmd)
998 {
999 	struct ks_net *ks = netdev_priv(netdev);
1000 
1001 	mii_ethtool_get_link_ksettings(&ks->mii, cmd);
1002 
1003 	return 0;
1004 }
1005 
1006 static int ks_set_link_ksettings(struct net_device *netdev,
1007 				 const struct ethtool_link_ksettings *cmd)
1008 {
1009 	struct ks_net *ks = netdev_priv(netdev);
1010 	return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
1011 }
1012 
1013 static u32 ks_get_link(struct net_device *netdev)
1014 {
1015 	struct ks_net *ks = netdev_priv(netdev);
1016 	return mii_link_ok(&ks->mii);
1017 }
1018 
1019 static int ks_nway_reset(struct net_device *netdev)
1020 {
1021 	struct ks_net *ks = netdev_priv(netdev);
1022 	return mii_nway_restart(&ks->mii);
1023 }
1024 
1025 static const struct ethtool_ops ks_ethtool_ops = {
1026 	.get_drvinfo	= ks_get_drvinfo,
1027 	.get_msglevel	= ks_get_msglevel,
1028 	.set_msglevel	= ks_set_msglevel,
1029 	.get_link	= ks_get_link,
1030 	.nway_reset	= ks_nway_reset,
1031 	.get_link_ksettings = ks_get_link_ksettings,
1032 	.set_link_ksettings = ks_set_link_ksettings,
1033 };
1034 
1035 /* MII interface controls */
1036 
1037 /**
1038  * ks_phy_reg - convert MII register into a KS8851 register
1039  * @reg: MII register number.
1040  *
1041  * Return the KS8851 register number for the corresponding MII PHY register
1042  * if possible. Return zero if the MII register has no direct mapping to the
1043  * KS8851 register set.
1044  */
1045 static int ks_phy_reg(int reg)
1046 {
1047 	switch (reg) {
1048 	case MII_BMCR:
1049 		return KS_P1MBCR;
1050 	case MII_BMSR:
1051 		return KS_P1MBSR;
1052 	case MII_PHYSID1:
1053 		return KS_PHY1ILR;
1054 	case MII_PHYSID2:
1055 		return KS_PHY1IHR;
1056 	case MII_ADVERTISE:
1057 		return KS_P1ANAR;
1058 	case MII_LPA:
1059 		return KS_P1ANLPR;
1060 	}
1061 
1062 	return 0x0;
1063 }
1064 
1065 /**
1066  * ks_phy_read - MII interface PHY register read.
1067  * @netdev: The network device the PHY is on.
1068  * @phy_addr: Address of PHY (ignored as we only have one)
1069  * @reg: The register to read.
1070  *
1071  * This call reads data from the PHY register specified in @reg. Since the
1072  * device does not support all the MII registers, the non-existent values
1073  * are always returned as zero.
1074  *
1075  * We return zero for unsupported registers as the MII code does not check
1076  * the value returned for any error status, and simply returns it to the
1077  * caller. The mii-tool that the driver was tested with takes any -ve error
1078  * as real PHY capabilities, thus displaying incorrect data to the user.
1079  */
1080 static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1081 {
1082 	struct ks_net *ks = netdev_priv(netdev);
1083 	int ksreg;
1084 	int result;
1085 
1086 	ksreg = ks_phy_reg(reg);
1087 	if (!ksreg)
1088 		return 0x0;	/* no error return allowed, so use zero */
1089 
1090 	mutex_lock(&ks->lock);
1091 	result = ks_rdreg16(ks, ksreg);
1092 	mutex_unlock(&ks->lock);
1093 
1094 	return result;
1095 }
1096 
1097 static void ks_phy_write(struct net_device *netdev,
1098 			     int phy, int reg, int value)
1099 {
1100 	struct ks_net *ks = netdev_priv(netdev);
1101 	int ksreg;
1102 
1103 	ksreg = ks_phy_reg(reg);
1104 	if (ksreg) {
1105 		mutex_lock(&ks->lock);
1106 		ks_wrreg16(ks, ksreg, value);
1107 		mutex_unlock(&ks->lock);
1108 	}
1109 }
1110 
1111 /**
1112  * ks_read_selftest - read the selftest memory info.
1113  * @ks: The device state
1114  *
1115  * Read and check the TX/RX memory selftest information.
1116  */
1117 static int ks_read_selftest(struct ks_net *ks)
1118 {
1119 	unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1120 	int ret = 0;
1121 	unsigned rd;
1122 
1123 	rd = ks_rdreg16(ks, KS_MBIR);
1124 
1125 	if ((rd & both_done) != both_done) {
1126 		netdev_warn(ks->netdev, "Memory selftest not finished\n");
1127 		return 0;
1128 	}
1129 
1130 	if (rd & MBIR_TXMBFA) {
1131 		netdev_err(ks->netdev, "TX memory selftest fails\n");
1132 		ret |= 1;
1133 	}
1134 
1135 	if (rd & MBIR_RXMBFA) {
1136 		netdev_err(ks->netdev, "RX memory selftest fails\n");
1137 		ret |= 2;
1138 	}
1139 
1140 	netdev_info(ks->netdev, "the selftest passes\n");
1141 	return ret;
1142 }
1143 
1144 static void ks_setup(struct ks_net *ks)
1145 {
1146 	u16	w;
1147 
1148 	/**
1149 	 * Configure QMU Transmit
1150 	 */
1151 
1152 	/* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
1153 	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
1154 
1155 	/* Setup Receive Frame Data Pointer Auto-Increment */
1156 	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
1157 
1158 	/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
1159 	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);
1160 
1161 	/* Setup RxQ Command Control (RXQCR) */
1162 	ks->rc_rxqcr = RXQCR_CMD_CNTL;
1163 	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
1164 
1165 	/**
1166 	 * set the force mode to half duplex, default is full duplex,
1167 	 * because if the auto-negotiation fails, most switches use
1168 	 * half-duplex.
1169 	 */
1170 
1171 	w = ks_rdreg16(ks, KS_P1MBCR);
1172 	w &= ~BMCR_FULLDPLX;
1173 	ks_wrreg16(ks, KS_P1MBCR, w);
1174 
1175 	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
1176 	ks_wrreg16(ks, KS_TXCR, w);
1177 
1178 	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;
1179 
1180 	if (ks->promiscuous)         /* bPromiscuous */
1181 		w |= (RXCR1_RXAE | RXCR1_RXINVF);
1182 	else if (ks->all_mcast) /* Multicast address passed mode */
1183 		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1184 	else                                   /* Normal mode */
1185 		w |= RXCR1_RXPAFMA;
1186 
1187 	ks_wrreg16(ks, KS_RXCR1, w);
1188 }  /*ks_setup */
1189 
1190 
1191 static void ks_setup_int(struct ks_net *ks)
1192 {
1193 	ks->rc_ier = 0x00;
1194 	/* Clear the interrupts status of the hardware. */
1195 	ks_wrreg16(ks, KS_ISR, 0xffff);
1196 
1197 	/* Enables the interrupts of the hardware. */
1198 	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
1199 }  /* ks_setup_int */
1200 
1201 static int ks_hw_init(struct ks_net *ks)
1202 {
1203 #define	MHEADER_SIZE	(sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
1204 	ks->promiscuous = 0;
1205 	ks->all_mcast = 0;
1206 	ks->mcast_lst_size = 0;
1207 
1208 	ks->frame_head_info = devm_kmalloc(&ks->pdev->dev, MHEADER_SIZE,
1209 					   GFP_KERNEL);
1210 	if (!ks->frame_head_info)
1211 		return false;
1212 
1213 	ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1214 	return true;
1215 }
1216 
1217 #if defined(CONFIG_OF)
1218 static const struct of_device_id ks8851_ml_dt_ids[] = {
1219 	{ .compatible = "micrel,ks8851-mll" },
1220 	{ /* sentinel */ }
1221 };
1222 MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
1223 #endif
1224 
1225 static int ks8851_probe(struct platform_device *pdev)
1226 {
1227 	int err;
1228 	struct net_device *netdev;
1229 	struct ks_net *ks;
1230 	u16 id, data;
1231 	const char *mac;
1232 
1233 	netdev = alloc_etherdev(sizeof(struct ks_net));
1234 	if (!netdev)
1235 		return -ENOMEM;
1236 
1237 	SET_NETDEV_DEV(netdev, &pdev->dev);
1238 
1239 	ks = netdev_priv(netdev);
1240 	ks->netdev = netdev;
1241 
1242 	ks->hw_addr = devm_platform_ioremap_resource(pdev, 0);
1243 	if (IS_ERR(ks->hw_addr)) {
1244 		err = PTR_ERR(ks->hw_addr);
1245 		goto err_free;
1246 	}
1247 
1248 	ks->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1);
1249 	if (IS_ERR(ks->hw_addr_cmd)) {
1250 		err = PTR_ERR(ks->hw_addr_cmd);
1251 		goto err_free;
1252 	}
1253 
1254 	netdev->irq = platform_get_irq(pdev, 0);
1255 
1256 	if ((int)netdev->irq < 0) {
1257 		err = netdev->irq;
1258 		goto err_free;
1259 	}
1260 
1261 	ks->pdev = pdev;
1262 
1263 	mutex_init(&ks->lock);
1264 	spin_lock_init(&ks->statelock);
1265 
1266 	netdev->netdev_ops = &ks_netdev_ops;
1267 	netdev->ethtool_ops = &ks_ethtool_ops;
1268 
1269 	/* setup mii state */
1270 	ks->mii.dev             = netdev;
1271 	ks->mii.phy_id          = 1;
1272 	ks->mii.phy_id_mask     = 1;
1273 	ks->mii.reg_num_mask    = 0xf;
1274 	ks->mii.mdio_read       = ks_phy_read;
1275 	ks->mii.mdio_write      = ks_phy_write;
1276 
1277 	netdev_info(netdev, "message enable is %d\n", msg_enable);
1278 	/* set the default message enable */
1279 	ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1280 						     NETIF_MSG_PROBE |
1281 						     NETIF_MSG_LINK));
1282 	ks_read_config(ks);
1283 
1284 	/* simple check for a valid chip being connected to the bus */
1285 	if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1286 		netdev_err(netdev, "failed to read device ID\n");
1287 		err = -ENODEV;
1288 		goto err_free;
1289 	}
1290 
1291 	if (ks_read_selftest(ks)) {
1292 		netdev_err(netdev, "failed memory selftest\n");
1293 		err = -ENODEV;
1294 		goto err_free;
1295 	}
1296 
1297 	err = register_netdev(netdev);
1298 	if (err)
1299 		goto err_free;
1300 
1301 	platform_set_drvdata(pdev, netdev);
1302 
1303 	ks_soft_reset(ks, GRR_GSR);
1304 	ks_hw_init(ks);
1305 	ks_disable_qmu(ks);
1306 	ks_setup(ks);
1307 	ks_setup_int(ks);
1308 
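	/* set the bus output pin drive strength to 16mA */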
1309 	data = ks_rdreg16(ks, KS_OBCR);
1310 	ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
1311 
1312 	/* overwriting the default MAC address */
1313 	if (pdev->dev.of_node) {
1314 		mac = of_get_mac_address(pdev->dev.of_node);
1315 		if (!IS_ERR(mac))
1316 			ether_addr_copy(ks->mac_addr, mac);
1317 	} else {
1318 		struct ks8851_mll_platform_data *pdata;
1319 
1320 		pdata = dev_get_platdata(&pdev->dev);
1321 		if (!pdata) {
1322 			netdev_err(netdev, "No platform data\n");
1323 			err = -ENODEV;
1324 			goto err_pdata;
1325 		}
1326 		memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN);
1327 	}
1328 	if (!is_valid_ether_addr(ks->mac_addr)) {
1329 		/* Use random MAC address if none passed */
1330 		eth_random_addr(ks->mac_addr);
1331 		netdev_info(netdev, "Using random mac address\n");
1332 	}
1333 	netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
1334 
1335 	memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
1336 
1337 	ks_set_mac(ks, netdev->dev_addr);
1338 
1339 	id = ks_rdreg16(ks, KS_CIDER);
1340 
1341 	netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1342 		    (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1343 	return 0;
1344 
1345 err_pdata:
1346 	unregister_netdev(netdev);
1347 err_free:
1348 	free_netdev(netdev);
1349 	return err;
1350 }
1351 
1352 static int ks8851_remove(struct platform_device *pdev)
1353 {
1354 	struct net_device *netdev = platform_get_drvdata(pdev);
1355 
1356 	unregister_netdev(netdev);
1357 	free_netdev(netdev);
1358 	return 0;
1359 
1360 }
1361 
1362 static struct platform_driver ks8851_platform_driver = {
1363 	.driver = {
1364 		.name = DRV_NAME,
1365 		.of_match_table	= of_match_ptr(ks8851_ml_dt_ids),
1366 	},
1367 	.probe = ks8851_probe,
1368 	.remove = ks8851_remove,
1369 };
1370 
1371 module_platform_driver(ks8851_platform_driver);
1372 
1373 MODULE_DESCRIPTION("KS8851 MLL Network driver");
1374 MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
1375 MODULE_LICENSE("GPL");
1376 module_param_named(message, msg_enable, int, 0);
1377 MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1378 
1379