1 /**
2  * drivers/net/ethernet/micrel/ks8851_mll.c
3  * Copyright (c) 2009 Micrel Inc.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17  */
18 
19 /* Supports:
20  * KS8851 16bit MLL chip from Micrel Inc.
21  */
22 
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 
25 #include <linux/interrupt.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/cache.h>
32 #include <linux/crc32.h>
33 #include <linux/mii.h>
34 #include <linux/platform_device.h>
35 #include <linux/delay.h>
36 #include <linux/slab.h>
37 #include <linux/ks8851_mll.h>
38 #include <linux/of.h>
39 #include <linux/of_device.h>
40 #include <linux/of_net.h>
41 
42 #define	DRV_NAME	"ks8851_mll"
43 
44 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
45 #define MAX_RECV_FRAMES			255
46 #define MAX_BUF_SIZE			2048
47 #define TX_BUF_SIZE			2000
48 #define RX_BUF_SIZE			2000
49 
50 #define KS_CCR				0x08
51 #define CCR_EEPROM			(1 << 9)
52 #define CCR_SPI				(1 << 8)
53 #define CCR_8BIT			(1 << 7)
54 #define CCR_16BIT			(1 << 6)
55 #define CCR_32BIT			(1 << 5)
56 #define CCR_SHARED			(1 << 4)
57 #define CCR_32PIN			(1 << 0)
58 
59 /* MAC address registers */
60 #define KS_MARL				0x10
61 #define KS_MARM				0x12
62 #define KS_MARH				0x14
63 
64 #define KS_OBCR				0x20
65 #define OBCR_ODS_16MA			(1 << 6)
66 
67 #define KS_EEPCR			0x22
68 #define EEPCR_EESA			(1 << 4)
69 #define EEPCR_EESB			(1 << 3)
70 #define EEPCR_EEDO			(1 << 2)
71 #define EEPCR_EESCK			(1 << 1)
72 #define EEPCR_EECS			(1 << 0)
73 
74 #define KS_MBIR				0x24
75 #define MBIR_TXMBF			(1 << 12)
76 #define MBIR_TXMBFA			(1 << 11)
77 #define MBIR_RXMBF			(1 << 4)
78 #define MBIR_RXMBFA			(1 << 3)
79 
80 #define KS_GRR				0x26
81 #define GRR_QMU				(1 << 1)
82 #define GRR_GSR				(1 << 0)
83 
84 #define KS_WFCR				0x2A
85 #define WFCR_MPRXE			(1 << 7)
86 #define WFCR_WF3E			(1 << 3)
87 #define WFCR_WF2E			(1 << 2)
88 #define WFCR_WF1E			(1 << 1)
89 #define WFCR_WF0E			(1 << 0)
90 
91 #define KS_WF0CRC0			0x30
92 #define KS_WF0CRC1			0x32
93 #define KS_WF0BM0			0x34
94 #define KS_WF0BM1			0x36
95 #define KS_WF0BM2			0x38
96 #define KS_WF0BM3			0x3A
97 
98 #define KS_WF1CRC0			0x40
99 #define KS_WF1CRC1			0x42
100 #define KS_WF1BM0			0x44
101 #define KS_WF1BM1			0x46
102 #define KS_WF1BM2			0x48
103 #define KS_WF1BM3			0x4A
104 
105 #define KS_WF2CRC0			0x50
106 #define KS_WF2CRC1			0x52
107 #define KS_WF2BM0			0x54
108 #define KS_WF2BM1			0x56
109 #define KS_WF2BM2			0x58
110 #define KS_WF2BM3			0x5A
111 
112 #define KS_WF3CRC0			0x60
113 #define KS_WF3CRC1			0x62
114 #define KS_WF3BM0			0x64
115 #define KS_WF3BM1			0x66
116 #define KS_WF3BM2			0x68
117 #define KS_WF3BM3			0x6A
118 
119 #define KS_TXCR				0x70
120 #define TXCR_TCGICMP			(1 << 8)
121 #define TXCR_TCGUDP			(1 << 7)
122 #define TXCR_TCGTCP			(1 << 6)
123 #define TXCR_TCGIP			(1 << 5)
124 #define TXCR_FTXQ			(1 << 4)
125 #define TXCR_TXFCE			(1 << 3)
126 #define TXCR_TXPE			(1 << 2)
127 #define TXCR_TXCRC			(1 << 1)
128 #define TXCR_TXE			(1 << 0)
129 
130 #define KS_TXSR				0x72
131 #define TXSR_TXLC			(1 << 13)
132 #define TXSR_TXMC			(1 << 12)
133 #define TXSR_TXFID_MASK			(0x3f << 0)
134 #define TXSR_TXFID_SHIFT		(0)
135 #define TXSR_TXFID_GET(_v)		(((_v) >> 0) & 0x3f)
136 
137 
138 #define KS_RXCR1			0x74
139 #define RXCR1_FRXQ			(1 << 15)
140 #define RXCR1_RXUDPFCC			(1 << 14)
141 #define RXCR1_RXTCPFCC			(1 << 13)
142 #define RXCR1_RXIPFCC			(1 << 12)
143 #define RXCR1_RXPAFMA			(1 << 11)
144 #define RXCR1_RXFCE			(1 << 10)
145 #define RXCR1_RXEFE			(1 << 9)
146 #define RXCR1_RXMAFMA			(1 << 8)
147 #define RXCR1_RXBE			(1 << 7)
148 #define RXCR1_RXME			(1 << 6)
149 #define RXCR1_RXUE			(1 << 5)
150 #define RXCR1_RXAE			(1 << 4)
151 #define RXCR1_RXINVF			(1 << 1)
152 #define RXCR1_RXE			(1 << 0)
153 #define RXCR1_FILTER_MASK    		(RXCR1_RXINVF | RXCR1_RXAE | \
154 					 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
155 
156 #define KS_RXCR2			0x76
157 #define RXCR2_SRDBL_MASK		(0x7 << 5)
158 #define RXCR2_SRDBL_SHIFT		(5)
159 #define RXCR2_SRDBL_4B			(0x0 << 5)
160 #define RXCR2_SRDBL_8B			(0x1 << 5)
161 #define RXCR2_SRDBL_16B			(0x2 << 5)
162 #define RXCR2_SRDBL_32B			(0x3 << 5)
163 /* #define RXCR2_SRDBL_FRAME		(0x4 << 5) */
164 #define RXCR2_IUFFP			(1 << 4)
165 #define RXCR2_RXIUFCEZ			(1 << 3)
166 #define RXCR2_UDPLFE			(1 << 2)
167 #define RXCR2_RXICMPFCC			(1 << 1)
168 #define RXCR2_RXSAF			(1 << 0)
169 
170 #define KS_TXMIR			0x78
171 
172 #define KS_RXFHSR			0x7C
173 #define RXFSHR_RXFV			(1 << 15)
174 #define RXFSHR_RXICMPFCS		(1 << 13)
175 #define RXFSHR_RXIPFCS			(1 << 12)
176 #define RXFSHR_RXTCPFCS			(1 << 11)
177 #define RXFSHR_RXUDPFCS			(1 << 10)
178 #define RXFSHR_RXBF			(1 << 7)
179 #define RXFSHR_RXMF			(1 << 6)
180 #define RXFSHR_RXUF			(1 << 5)
181 #define RXFSHR_RXMR			(1 << 4)
182 #define RXFSHR_RXFT			(1 << 3)
183 #define RXFSHR_RXFTL			(1 << 2)
184 #define RXFSHR_RXRF			(1 << 1)
185 #define RXFSHR_RXCE			(1 << 0)
186 #define	RXFSHR_ERR			(RXFSHR_RXCE | RXFSHR_RXRF |\
187 					RXFSHR_RXFTL | RXFSHR_RXMR |\
188 					RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
189 					RXFSHR_RXTCPFCS)
190 #define KS_RXFHBCR			0x7E
191 #define RXFHBCR_CNT_MASK		0x0FFF
192 
193 #define KS_TXQCR			0x80
194 #define TXQCR_AETFE			(1 << 2)
195 #define TXQCR_TXQMAM			(1 << 1)
196 #define TXQCR_METFE			(1 << 0)
197 
198 #define KS_RXQCR			0x82
199 #define RXQCR_RXDTTS			(1 << 12)
200 #define RXQCR_RXDBCTS			(1 << 11)
201 #define RXQCR_RXFCTS			(1 << 10)
202 #define RXQCR_RXIPHTOE			(1 << 9)
203 #define RXQCR_RXDTTE			(1 << 7)
204 #define RXQCR_RXDBCTE			(1 << 6)
205 #define RXQCR_RXFCTE			(1 << 5)
206 #define RXQCR_ADRFE			(1 << 4)
207 #define RXQCR_SDA			(1 << 3)
208 #define RXQCR_RRXEF			(1 << 0)
209 #define RXQCR_CMD_CNTL                	(RXQCR_RXFCTE|RXQCR_ADRFE)
210 
211 #define KS_TXFDPR			0x84
212 #define TXFDPR_TXFPAI			(1 << 14)
213 #define TXFDPR_TXFP_MASK		(0x7ff << 0)
214 #define TXFDPR_TXFP_SHIFT		(0)
215 
216 #define KS_RXFDPR			0x86
217 #define RXFDPR_RXFPAI			(1 << 14)
218 
219 #define KS_RXDTTR			0x8C
220 #define KS_RXDBCTR			0x8E
221 
222 #define KS_IER				0x90
223 #define KS_ISR				0x92
224 #define IRQ_LCI				(1 << 15)
225 #define IRQ_TXI				(1 << 14)
226 #define IRQ_RXI				(1 << 13)
227 #define IRQ_RXOI			(1 << 11)
228 #define IRQ_TXPSI			(1 << 9)
229 #define IRQ_RXPSI			(1 << 8)
230 #define IRQ_TXSAI			(1 << 6)
231 #define IRQ_RXWFDI			(1 << 5)
232 #define IRQ_RXMPDI			(1 << 4)
233 #define IRQ_LDI				(1 << 3)
234 #define IRQ_EDI				(1 << 2)
235 #define IRQ_SPIBEI			(1 << 1)
236 #define IRQ_DEDI			(1 << 0)
237 
238 #define KS_RXFCTR			0x9C
239 #define RXFCTR_THRESHOLD_MASK     	0x00FF
240 
241 #define KS_RXFC				0x9D
242 #define RXFCTR_RXFC_MASK		(0xff << 8)
243 #define RXFCTR_RXFC_SHIFT		(8)
244 #define RXFCTR_RXFC_GET(_v)		(((_v) >> 8) & 0xff)
245 #define RXFCTR_RXFCT_MASK		(0xff << 0)
246 #define RXFCTR_RXFCT_SHIFT		(0)
247 
248 #define KS_TXNTFSR			0x9E
249 
250 #define KS_MAHTR0			0xA0
251 #define KS_MAHTR1			0xA2
252 #define KS_MAHTR2			0xA4
253 #define KS_MAHTR3			0xA6
254 
255 #define KS_FCLWR			0xB0
256 #define KS_FCHWR			0xB2
257 #define KS_FCOWR			0xB4
258 
259 #define KS_CIDER			0xC0
260 #define CIDER_ID			0x8870
261 #define CIDER_REV_MASK			(0x7 << 1)
262 #define CIDER_REV_SHIFT			(1)
263 #define CIDER_REV_GET(_v)		(((_v) >> 1) & 0x7)
264 
265 #define KS_CGCR				0xC6
266 #define KS_IACR				0xC8
267 #define IACR_RDEN			(1 << 12)
268 #define IACR_TSEL_MASK			(0x3 << 10)
269 #define IACR_TSEL_SHIFT			(10)
270 #define IACR_TSEL_MIB			(0x3 << 10)
271 #define IACR_ADDR_MASK			(0x1f << 0)
272 #define IACR_ADDR_SHIFT			(0)
273 
274 #define KS_IADLR			0xD0
275 #define KS_IAHDR			0xD2
276 
277 #define KS_PMECR			0xD4
278 #define PMECR_PME_DELAY			(1 << 14)
279 #define PMECR_PME_POL			(1 << 12)
280 #define PMECR_WOL_WAKEUP		(1 << 11)
281 #define PMECR_WOL_MAGICPKT		(1 << 10)
282 #define PMECR_WOL_LINKUP		(1 << 9)
283 #define PMECR_WOL_ENERGY		(1 << 8)
284 #define PMECR_AUTO_WAKE_EN		(1 << 7)
285 #define PMECR_WAKEUP_NORMAL		(1 << 6)
286 #define PMECR_WKEVT_MASK		(0xf << 2)
287 #define PMECR_WKEVT_SHIFT		(2)
288 #define PMECR_WKEVT_GET(_v)		(((_v) >> 2) & 0xf)
289 #define PMECR_WKEVT_ENERGY		(0x1 << 2)
290 #define PMECR_WKEVT_LINK		(0x2 << 2)
291 #define PMECR_WKEVT_MAGICPKT		(0x4 << 2)
292 #define PMECR_WKEVT_FRAME		(0x8 << 2)
293 #define PMECR_PM_MASK			(0x3 << 0)
294 #define PMECR_PM_SHIFT			(0)
295 #define PMECR_PM_NORMAL			(0x0 << 0)
296 #define PMECR_PM_ENERGY			(0x1 << 0)
297 #define PMECR_PM_SOFTDOWN		(0x2 << 0)
298 #define PMECR_PM_POWERSAVE		(0x3 << 0)
299 
300 /* Standard MII PHY data */
301 #define KS_P1MBCR			0xE4
302 #define P1MBCR_FORCE_FDX		(1 << 8)
303 
304 #define KS_P1MBSR			0xE6
305 #define P1MBSR_AN_COMPLETE		(1 << 5)
306 #define P1MBSR_AN_CAPABLE		(1 << 3)
307 #define P1MBSR_LINK_UP			(1 << 2)
308 
309 #define KS_PHY1ILR			0xE8
310 #define KS_PHY1IHR			0xEA
311 #define KS_P1ANAR			0xEC
312 #define KS_P1ANLPR			0xEE
313 
314 #define KS_P1SCLMD			0xF4
315 #define P1SCLMD_LEDOFF			(1 << 15)
316 #define P1SCLMD_TXIDS			(1 << 14)
317 #define P1SCLMD_RESTARTAN		(1 << 13)
318 #define P1SCLMD_DISAUTOMDIX		(1 << 10)
319 #define P1SCLMD_FORCEMDIX		(1 << 9)
320 #define P1SCLMD_AUTONEGEN		(1 << 7)
321 #define P1SCLMD_FORCE100		(1 << 6)
322 #define P1SCLMD_FORCEFDX		(1 << 5)
323 #define P1SCLMD_ADV_FLOW		(1 << 4)
324 #define P1SCLMD_ADV_100BT_FDX		(1 << 3)
325 #define P1SCLMD_ADV_100BT_HDX		(1 << 2)
326 #define P1SCLMD_ADV_10BT_FDX		(1 << 1)
327 #define P1SCLMD_ADV_10BT_HDX		(1 << 0)
328 
329 #define KS_P1CR				0xF6
330 #define P1CR_HP_MDIX			(1 << 15)
331 #define P1CR_REV_POL			(1 << 13)
332 #define P1CR_OP_100M			(1 << 10)
333 #define P1CR_OP_FDX			(1 << 9)
334 #define P1CR_OP_MDI			(1 << 7)
335 #define P1CR_AN_DONE			(1 << 6)
336 #define P1CR_LINK_GOOD			(1 << 5)
337 #define P1CR_PNTR_FLOW			(1 << 4)
338 #define P1CR_PNTR_100BT_FDX		(1 << 3)
339 #define P1CR_PNTR_100BT_HDX		(1 << 2)
340 #define P1CR_PNTR_10BT_FDX		(1 << 1)
341 #define P1CR_PNTR_10BT_HDX		(1 << 0)
342 
343 /* TX Frame control */
344 
345 #define TXFR_TXIC			(1 << 15)
346 #define TXFR_TXFID_MASK			(0x3f << 0)
347 #define TXFR_TXFID_SHIFT		(0)
348 
349 #define KS_P1SR				0xF8
350 #define P1SR_HP_MDIX			(1 << 15)
351 #define P1SR_REV_POL			(1 << 13)
352 #define P1SR_OP_100M			(1 << 10)
353 #define P1SR_OP_FDX			(1 << 9)
354 #define P1SR_OP_MDI			(1 << 7)
355 #define P1SR_AN_DONE			(1 << 6)
356 #define P1SR_LINK_GOOD			(1 << 5)
357 #define P1SR_PNTR_FLOW			(1 << 4)
358 #define P1SR_PNTR_100BT_FDX		(1 << 3)
359 #define P1SR_PNTR_100BT_HDX		(1 << 2)
360 #define P1SR_PNTR_10BT_FDX		(1 << 1)
361 #define P1SR_PNTR_10BT_HDX		(1 << 0)
362 
363 #define	ENUM_BUS_NONE			0
364 #define	ENUM_BUS_8BIT			1
365 #define	ENUM_BUS_16BIT			2
366 #define	ENUM_BUS_32BIT			3
367 
368 #define MAX_MCAST_LST			32
369 #define HW_MCAST_SIZE			8
370 
371 /**
372  * union ks_tx_hdr - tx header data
373  * @txb: The header as bytes
374  * @txw: The header as 16bit, little-endian words
375  *
376  * A dual representation of the tx header data to allow
377  * access to individual bytes, and to allow 16bit accesses
378  * with 16bit alignment.
379  */
380 union ks_tx_hdr {
381 	u8      txb[4];
382 	__le16  txw[2];
383 };
384 
385 /**
386  * struct ks_net - KS8851 driver private data
387  * @netdev	: The network device we're bound to.
388  * @hw_addr	: start address of data register.
389  * @hw_addr_cmd	: start address of command register.
390  * @txh    	: temporary buffer used to save status/length.
391  * @lock	: Lock to ensure that the device is not accessed when busy.
392  * @pdev	: Pointer to platform device.
393  * @mii		: The MII state information for the mii calls.
394  * @frame_head_info   	: frame header information for multi-pkt rx.
395  * @statelock	: Lock on this structure for tx list.
396  * @msg_enable	: The message flags controlling driver output (see ethtool).
397  * @frame_cnt  	: number of frames received.
398  * @bus_width  	: i/o bus width.
399  * @rc_rxqcr	: Cached copy of KS_RXQCR.
400  * @rc_txcr	: Cached copy of KS_TXCR.
401  * @rc_ier	: Cached copy of KS_IER.
402  * @sharedbus  	: multiplexed (shared address and data bus) mode indicator.
403  * @cmd_reg_cache	: cached copy of the last command register value.
404  * @cmd_reg_cache_int	: cached command register value saved for the irq handler.
405  * @promiscuous	: promiscuous mode indicator.
406  * @all_mcast  	: all-multicast mode indicator.
407  * @mcast_lst_size   	: size of multicast list.
408  * @mcast_lst    	: multicast list.
409  * @mcast_bits    	: multicast hash bits enabled in hardware.
410  * @mac_addr   		: MAC address assigned to this device.
411  * @fid    		: frame id.
412  * @extra_byte    	: number of extra (dummy) bytes prepended to each rx pkt.
413  * @enabled    		: indicator that the device is enabled.
414  *
415  * The @lock ensures that the chip is protected when certain operations are
416  * in progress. When the read or write packet transfer is in progress, most
417  * of the chip registers are not accessible until the transfer is finished and
418  * the DMA has been de-asserted.
419  *
420  * The @statelock is used to protect information in the structure which may
421  * need to be accessed via several sources, such as the network driver layer
422  * or one of the work queues.
423  *
424  */
425 
426 /* Receive multiplex framer header info */
427 struct type_frame_head {
428 	u16	sts;         /* Frame status */
429 	u16	len;         /* Byte count */
430 };
431 
432 struct ks_net {
433 	struct net_device	*netdev;
434 	void __iomem    	*hw_addr;
435 	void __iomem    	*hw_addr_cmd;
436 	union ks_tx_hdr		txh ____cacheline_aligned;
437 	struct mutex      	lock; /* serialises access to the chip registers */
438 	struct platform_device *pdev;
439 	struct mii_if_info	mii;
440 	struct type_frame_head	*frame_head_info;
441 	spinlock_t		statelock;
442 	u32			msg_enable;
443 	u32			frame_cnt;
444 	int			bus_width;
445 
446 	u16			rc_rxqcr;
447 	u16			rc_txcr;
448 	u16			rc_ier;
449 	u16			sharedbus;
450 	u16			cmd_reg_cache;
451 	u16			cmd_reg_cache_int;
452 	u16			promiscuous;
453 	u16			all_mcast;
454 	u16			mcast_lst_size;
455 	u8			mcast_lst[MAX_MCAST_LST][ETH_ALEN];
456 	u8			mcast_bits[HW_MCAST_SIZE];
457 	u8			mac_addr[6];
458 	u8                      fid;
459 	u8			extra_byte;
460 	u8			enabled;
461 };
462 
463 static int msg_enable;
464 
465 #define BE3             0x8000      /* Byte Enable 3 */
466 #define BE2             0x4000      /* Byte Enable 2 */
467 #define BE1             0x2000      /* Byte Enable 1 */
468 #define BE0             0x1000      /* Byte Enable 0 */
469 
470 /* register read/write calls.
471  *
472  * All these calls issue transactions to access the chip's registers. They
473  * all require that the necessary lock is held to prevent accesses when the
474  * chip is busy transferring packet data (RX/TX FIFO accesses).
475  */
476 
477 /**
478  * ks_rdreg8 - read 8 bit register from device
479  * @ks	  : The chip information
480  * @offset: The register address
481  *
482  * Read a 8bit register from the chip, returning the result
483  */
484 static u8 ks_rdreg8(struct ks_net *ks, int offset)
485 {
486 	u16 data;
487 	u8 shift_bit = offset & 0x03;
488 	u8 shift_data = (offset & 1) << 3;
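	/* the command word combines the register offset with a byte-enable
	 * bit (BE0..BE3) selecting the byte lane; shift_data then picks the
	 * requested byte out of the 16-bit word read back below
	 */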
489 	ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
490 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
491 	data  = ioread16(ks->hw_addr);
492 	return (u8)(data >> shift_data);
493 }
494 
495 /**
496  * ks_rdreg16 - read 16 bit register from device
497  * @ks	  : The chip information
498  * @offset: The register address
499  *
500  * Read a 16bit register from the chip, returning the result
501  */
502 
503 static u16 ks_rdreg16(struct ks_net *ks, int offset)
504 {
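	/* a 16-bit access asserts two byte enables; (offset & 0x02) selects
	 * the low or high word lane of the 32-bit-wide register map
	 */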
505 	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
506 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
507 	return ioread16(ks->hw_addr);
508 }
509 
510 /**
511  * ks_wrreg8 - write 8bit register value to chip
512  * @ks: The chip information
513  * @offset: The register address
514  * @value: The value to write
515  *
516  */
517 static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
518 {
519 	u8  shift_bit = (offset & 0x03);
520 	u16 value_write = (u16)(value << ((offset & 1) << 3));
521 	ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
522 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
523 	iowrite16(value_write, ks->hw_addr);
524 }
525 
526 /**
527  * ks_wrreg16 - write 16bit register value to chip
528  * @ks: The chip information
529  * @offset: The register address
530  * @value: The value to write
531  *
532  */
533 
534 static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
535 {
536 	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
537 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
538 	iowrite16(value, ks->hw_addr);
539 }
540 
541 /**
542  * ks_inblk - read a block of data from QMU. This is called after pseudo-DMA mode is enabled.
543  * @ks: The chip state
544  * @wptr: buffer address to save data
545  * @len: length in bytes to read
546  *
547  */
548 static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
549 {
550 	len >>= 1;
551 	while (len--)
552 		*wptr++ = (u16)ioread16(ks->hw_addr);
553 }
554 
555 /**
556  * ks_outblk - write data to QMU. This is called after pseudo-DMA mode is enabled.
557  * @ks: The chip information
558  * @wptr: buffer address
559  * @len: length in bytes to write
560  *
561  */
562 static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
563 {
564 	len >>= 1;
565 	while (len--)
566 		iowrite16(*wptr++, ks->hw_addr);
567 }
568 
569 static void ks_disable_int(struct ks_net *ks)
570 {
571 	ks_wrreg16(ks, KS_IER, 0x0000);
572 }  /* ks_disable_int */
573 
574 static void ks_enable_int(struct ks_net *ks)
575 {
576 	ks_wrreg16(ks, KS_IER, ks->rc_ier);
577 }  /* ks_enable_int */
578 
579 /**
580  * ks_tx_fifo_space - return the available hardware buffer size.
581  * @ks: The chip information
582  *
583  */
584 static inline u16 ks_tx_fifo_space(struct ks_net *ks)
585 {
586 	return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
587 }
588 
589 /**
590  * ks_save_cmd_reg - save the cached command register so it can be restored later.
591  * @ks: The chip information
592  *
593  */
594 static inline void ks_save_cmd_reg(struct ks_net *ks)
595 {
596 	/* The KS8851 MLL cannot reliably read back the command register,
597 	* so software keeps (and saves) a cached copy instead.
598 	*/
599 	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
600 }
601 
602 /**
603  * ks_restore_cmd_reg - restore the command register from the cache and
604  * 	write to hardware register.
605  * @ks: The chip information
606  *
607  */
608 static inline void ks_restore_cmd_reg(struct ks_net *ks)
609 {
610 	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
611 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
612 }
613 
614 /**
615  * ks_set_powermode - set power mode of the device
616  * @ks: The chip information
617  * @pwrmode: The power mode value to write to KS_PMECR.
618  *
619  * Change the power mode of the chip.
620  */
621 static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
622 {
623 	unsigned pmecr;
624 
625 	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
626 
627 	ks_rdreg16(ks, KS_GRR);
628 	pmecr = ks_rdreg16(ks, KS_PMECR);
629 	pmecr &= ~PMECR_PM_MASK;
630 	pmecr |= pwrmode;
631 
632 	ks_wrreg16(ks, KS_PMECR, pmecr);
633 }
634 
635 /**
636  * ks_read_config - read chip configuration of bus width.
637  * @ks: The chip information
638  *
639  */
640 static void ks_read_config(struct ks_net *ks)
641 {
642 	u16 reg_data = 0;
643 
644 	/* Regardless of bus width, 8 bit read should always work.*/
645 	reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
646 	reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
647 
648 	/* addr/data bus are multiplexed */
649 	ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
650 
651 	/* Reads from the QMU are preceded by garbage (dummy) bytes,
652 	 * the number of which depends on the bus width.
653 	 */
654 
655 	if (reg_data & CCR_8BIT) {
656 		ks->bus_width = ENUM_BUS_8BIT;
657 		ks->extra_byte = 1;
658 	} else if (reg_data & CCR_16BIT) {
659 		ks->bus_width = ENUM_BUS_16BIT;
660 		ks->extra_byte = 2;
661 	} else {
662 		ks->bus_width = ENUM_BUS_32BIT;
663 		ks->extra_byte = 4;
664 	}
665 }
666 
667 /**
668  * ks_soft_reset - issue one of the soft resets to the device
669  * @ks: The device state.
670  * @op: The bit(s) to set in the GRR
671  *
672  * Issue the relevant soft-reset command to the device's GRR register
673  * specified by @op.
674  *
675  * Note, the delays are in there as a caution to ensure that the reset
676  * has time to take effect and then complete. Since the datasheet does
677  * not currently specify the exact sequence, we have chosen something
678  * that seems to work with our device.
679  */
680 static void ks_soft_reset(struct ks_net *ks, unsigned op)
681 {
682 	/* Disable interrupt first */
683 	ks_wrreg16(ks, KS_IER, 0x0000);
684 	ks_wrreg16(ks, KS_GRR, op);
685 	mdelay(10);	/* wait a short time to effect reset */
686 	ks_wrreg16(ks, KS_GRR, 0);
687 	mdelay(1);	/* wait for condition to clear */
688 }
689 
690 
691 static void ks_enable_qmu(struct ks_net *ks)
692 {
693 	u16 w;
694 
695 	w = ks_rdreg16(ks, KS_TXCR);
696 	/* Enables QMU Transmit (TXCR). */
697 	ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
698 
699 	/*
700 	 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
701 	 * Enable
702 	 */
703 
704 	w = ks_rdreg16(ks, KS_RXQCR);
705 	ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
706 
707 	/* Enables QMU Receive (RXCR1). */
708 	w = ks_rdreg16(ks, KS_RXCR1);
709 	ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
710 	ks->enabled = true;
711 }  /* ks_enable_qmu */
712 
713 static void ks_disable_qmu(struct ks_net *ks)
714 {
715 	u16	w;
716 
717 	w = ks_rdreg16(ks, KS_TXCR);
718 
719 	/* Disables QMU Transmit (TXCR). */
720 	w  &= ~TXCR_TXE;
721 	ks_wrreg16(ks, KS_TXCR, w);
722 
723 	/* Disables QMU Receive (RXCR1). */
724 	w = ks_rdreg16(ks, KS_RXCR1);
725 	w &= ~RXCR1_RXE ;
726 	ks_wrreg16(ks, KS_RXCR1, w);
727 
728 	ks->enabled = false;
729 
730 }  /* ks_disable_qmu */
731 
732 /**
733  * ks_read_qmu - read 1 pkt data from the QMU.
734  * @ks: The chip information
735  * @buf: buffer address to save 1 pkt
736  * @len: Pkt length
737  * Here is the sequence to read 1 pkt:
738  *	1. set pseudo-DMA mode
739  *	2. read prepend data
740  *	3. read pkt data
741  *	4. reset pseudo-DMA mode
742  */
743 static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
744 {
745 	u32 r =  ks->extra_byte & 0x1 ;
746 	u32 w = ks->extra_byte - r;
747 
748 	/* 1. set pseudo-DMA mode */
749 	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
750 	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
751 
752 	/* 2. read prepend data */
753 	/*
754 	 * read 4 + extra bytes and discard them:
755 	 * the extra bytes are dummy padding, 2 are status, 2 are length
756 	 */
757 
758 	/* r is non-zero only on an 8-bit bus: one odd dummy byte to discard */
759 	if (unlikely(r))
760 		ioread8(ks->hw_addr);
761 	ks_inblk(ks, buf, w + 2 + 2);
762 
763 	/* 3. read pkt data */
764 	ks_inblk(ks, buf, ALIGN(len, 4));
765 
766 	/* 4. reset pseudo-DMA mode */
767 	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
768 }
769 
770 /**
771  * ks_rcv - read multiple pkts data from the QMU.
772  * @ks: The chip information
773  * @netdev: The network device being opened.
774  *
775  * Read all of the header information before reading the pkt content.
776  * Once the interrupt has been acked, every pending frame in the QMU
777  * must be drained; fetching only part of them is not allowed.
778  */
779 static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
780 {
781 	u32	i;
782 	struct type_frame_head *frame_hdr = ks->frame_head_info;
783 	struct sk_buff *skb;
784 
785 	ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
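	/* RXFCTR bits [15:8] report how many frames are pending in the RXQ */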
786 
787 	/* read all header information */
788 	for (i = 0; i < ks->frame_cnt; i++) {
789 		/* Checking Received packet status */
790 		frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
791 		/* Get packet len from hardware */
792 		frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
793 		frame_hdr++;
794 	}
795 
796 	frame_hdr = ks->frame_head_info;
797 	while (ks->frame_cnt--) {
798 		if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
799 			     frame_hdr->len >= RX_BUF_SIZE ||
800 			     frame_hdr->len <= 0)) {
801 
802 			/* discard an invalid packet */
803 			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
804 			netdev->stats.rx_dropped++;
805 			if (!(frame_hdr->sts & RXFSHR_RXFV))
806 				netdev->stats.rx_frame_errors++;
807 			else
808 				netdev->stats.rx_length_errors++;
809 			frame_hdr++;
810 			continue;
811 		}
812 
813 		skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
814 		if (likely(skb)) {
815 			skb_reserve(skb, 2);
816 			/* read data block including CRC 4 bytes */
817 			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
818 			skb_put(skb, frame_hdr->len - 4);
819 			skb->protocol = eth_type_trans(skb, netdev);
820 			netif_rx(skb);
821 			/* exclude CRC size */
822 			netdev->stats.rx_bytes += frame_hdr->len - 4;
823 			netdev->stats.rx_packets++;
824 		} else {
825 			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
826 			netdev->stats.rx_dropped++;
827 		}
828 		frame_hdr++;
829 	}
830 }
831 
832 /**
833  * ks_update_link_status - link status update.
834  * @netdev: The network device being opened.
835  * @ks: The chip information
836  *
837  */
838 
839 static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
840 {
841 	/* check the status of the link */
842 	u32 link_up_status;
843 	if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
844 		netif_carrier_on(netdev);
845 		link_up_status = true;
846 	} else {
847 		netif_carrier_off(netdev);
848 		link_up_status = false;
849 	}
850 	netif_dbg(ks, link, ks->netdev,
851 		  "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
852 }
853 
854 /**
855  * ks_irq - device interrupt handler
856  * @irq: Interrupt number passed from the IRQ handler.
857  * @pw: The private word passed to register_irq(), our struct ks_net.
858  *
859  * This is the handler invoked to find out what happened
860  *
861  * Read the interrupt status, work out what needs to be done and then clear
862  * any of the interrupts that are not needed.
863  */
864 
865 static irqreturn_t ks_irq(int irq, void *pw)
866 {
867 	struct net_device *netdev = pw;
868 	struct ks_net *ks = netdev_priv(netdev);
869 	u16 status;
870 
871 	/*this should be the first in IRQ handler */
872 	ks_save_cmd_reg(ks);
873 
874 	status = ks_rdreg16(ks, KS_ISR);
875 	if (unlikely(!status)) {
876 		ks_restore_cmd_reg(ks);
877 		return IRQ_NONE;
878 	}
879 
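	/* acknowledge (clear) the latched status bits before servicing them */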
880 	ks_wrreg16(ks, KS_ISR, status);
881 
882 	if (likely(status & IRQ_RXI))
883 		ks_rcv(ks, netdev);
884 
885 	if (unlikely(status & IRQ_LCI))
886 		ks_update_link_status(netdev, ks);
887 
888 	if (unlikely(status & IRQ_TXI))
889 		netif_wake_queue(netdev);
890 
891 	if (unlikely(status & IRQ_LDI)) {
892 
893 		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
894 		pmecr &= ~PMECR_WKEVT_MASK;
895 		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
896 	}
897 
898 	if (unlikely(status & IRQ_RXOI))
899 		ks->netdev->stats.rx_over_errors++;
900 	/* this should be the last in IRQ handler*/
901 	ks_restore_cmd_reg(ks);
902 	return IRQ_HANDLED;
903 }
904 
905 
906 /**
907  * ks_net_open - open network device
908  * @netdev: The network device being opened.
909  *
910  * Called when the network device is marked active, such as a user executing
911  * 'ifconfig up' on the device.
912  */
913 static int ks_net_open(struct net_device *netdev)
914 {
915 	struct ks_net *ks = netdev_priv(netdev);
916 	int err;
917 
918 #define	KS_INT_FLAGS	IRQF_TRIGGER_LOW
919 	/* lock the card, even if we may not actually do anything
920 	 * else at the moment.
921 	 */
922 
923 	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
924 
925 	/* install the interrupt handler */
926 	err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
927 
928 	if (err) {
929 		pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
930 		return err;
931 	}
932 
933 	/* wake up powermode to normal mode */
934 	ks_set_powermode(ks, PMECR_PM_NORMAL);
935 	mdelay(1);	/* wait for normal mode to take effect */
936 
937 	ks_wrreg16(ks, KS_ISR, 0xffff);
938 	ks_enable_int(ks);
939 	ks_enable_qmu(ks);
940 	netif_start_queue(ks->netdev);
941 
942 	netif_dbg(ks, ifup, ks->netdev, "network device up\n");
943 
944 	return 0;
945 }
946 
947 /**
948  * ks_net_stop - close network device
949  * @netdev: The device being closed.
950  *
951  * Called to close down a network device which has been active. Cancel any
952  * work, shutdown the RX and TX process and then place the chip into a low
953  * power state whilst it is not being used.
954  */
955 static int ks_net_stop(struct net_device *netdev)
956 {
957 	struct ks_net *ks = netdev_priv(netdev);
958 
959 	netif_info(ks, ifdown, netdev, "shutting down\n");
960 
961 	netif_stop_queue(netdev);
962 
963 	mutex_lock(&ks->lock);
964 
965 	/* turn off the IRQs and ack any outstanding */
966 	ks_wrreg16(ks, KS_IER, 0x0000);
967 	ks_wrreg16(ks, KS_ISR, 0xffff);
968 
969 	/* shutdown RX/TX QMU */
970 	ks_disable_qmu(ks);
971 
972 	/* set powermode to soft power down to save power */
973 	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
974 	free_irq(netdev->irq, netdev);
975 	mutex_unlock(&ks->lock);
976 	return 0;
977 }
978 
979 
980 /**
981  * ks_write_qmu - write 1 pkt data to the QMU.
982  * @ks: The chip information
983  * @pdata: buffer address to save 1 pkt
984  * @len: Pkt length in bytes
985  * Here is the sequence to write 1 pkt:
986  *	1. set pseudo-DMA mode
987  *	2. write status/length
988  *	3. write pkt data
989  *	4. reset pseudo-DMA mode
990  *	5. enqueue the frame (set TXQCR_METFE)
991  *	6. wait until the pkt is out
992  */
993 static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
994 {
995 	/* start header at txb[0] to align txw entries */
996 	ks->txh.txw[0] = 0;
997 	ks->txh.txw[1] = cpu_to_le16(len);
998 
999 	/* 1. set pseudo-DMA mode */
1000 	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
1001 	/* 2. write status/length info */
1002 	ks_outblk(ks, ks->txh.txw, 4);
1003 	/* 3. write pkt data */
1004 	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
1005 	/* 4. reset pseudo-DMA mode */
1006 	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
1007 	/* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
1008 	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
1009 	/* 6. wait until TXQCR_METFE is auto-cleared */
1010 	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
1011 		;
1012 }
1013 
1014 /**
1015  * ks_start_xmit - transmit packet
1016  * @skb		: The buffer to transmit
1017  * @netdev	: The device used to transmit the packet.
1018  *
1019  * Called by the network layer to transmit the @skb.
1020  * TX and RX must be mutually exclusive, so the interrupt is disabled
1021  * and the state lock held while a transmit is in progress.
1022  */
1023 static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1024 {
1025 	int retv = NETDEV_TX_OK;
1026 	struct ks_net *ks = netdev_priv(netdev);
1027 
1028 	disable_irq(netdev->irq);
1029 	ks_disable_int(ks);
1030 	spin_lock(&ks->statelock);
1031 
1032 	/* Extra space is required:
1033 	 * 4 bytes for alignment, 4 for status/length, 4 for CRC
1034 	 */
1035 
1036 	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
1037 		ks_write_qmu(ks, skb->data, skb->len);
1038 		/* add tx statistics */
1039 		netdev->stats.tx_bytes += skb->len;
1040 		netdev->stats.tx_packets++;
1041 		dev_kfree_skb(skb);
1042 	} else
1043 		retv = NETDEV_TX_BUSY;
1044 	spin_unlock(&ks->statelock);
1045 	ks_enable_int(ks);
1046 	enable_irq(netdev->irq);
1047 	return retv;
1048 }
1049 
1050 /**
1051  * ks_start_rx - ready to serve pkts
1052  * @ks		: The chip information
1053  *
1054  */
1055 static void ks_start_rx(struct ks_net *ks)
1056 {
1057 	u16 cntl;
1058 
1059 	/* Enables QMU Receive (RXCR1). */
1060 	cntl = ks_rdreg16(ks, KS_RXCR1);
1061 	cntl |= RXCR1_RXE ;
1062 	ks_wrreg16(ks, KS_RXCR1, cntl);
1063 }  /* ks_start_rx */
1064 
1065 /**
1066  * ks_stop_rx - stop to serve pkts
1067  * @ks		: The chip information
1068  *
1069  */
1070 static void ks_stop_rx(struct ks_net *ks)
1071 {
1072 	u16 cntl;
1073 
1074 	/* Disables QMU Receive (RXCR1). */
1075 	cntl = ks_rdreg16(ks, KS_RXCR1);
1076 	cntl &= ~RXCR1_RXE ;
1077 	ks_wrreg16(ks, KS_RXCR1, cntl);
1078 
1079 }  /* ks_stop_rx */
1080 
1081 static unsigned long const ethernet_polynomial = 0x04c11db7U;
1082 
1083 static unsigned long ether_gen_crc(int length, u8 *data)
1084 {
1085 	long crc = -1;
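	/* bit-serial CRC-32 using the Ethernet polynomial, seeded with all
	 * ones; callers use only the top six bits of the result
	 */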
1086 	while (--length >= 0) {
1087 		u8 current_octet = *data++;
1088 		int bit;
1089 
1090 		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1091 			crc = (crc << 1) ^
1092 				((crc < 0) ^ (current_octet & 1) ?
1093 			ethernet_polynomial : 0);
1094 		}
1095 	}
1096 	return (unsigned long)crc;
1097 }  /* ether_gen_crc */
1098 
1099 /**
1100  * ks_set_grpaddr - set multicast information
1101  * @ks : The chip information
1102  */
1103 
1104 static void ks_set_grpaddr(struct ks_net *ks)
1105 {
1106 	u8	i;
1107 	u32	index, position, value;
1108 
1109 	memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
1110 
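	/* the upper six CRC bits select one of 64 hash-table bits; index
	 * picks the byte and value the bit within MAHTR0..MAHTR3
	 */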
1111 	for (i = 0; i < ks->mcast_lst_size; i++) {
1112 		position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
1113 		index = position >> 3;
1114 		value = 1 << (position & 7);
1115 		ks->mcast_bits[index] |= (u8)value;
1116 	}
1117 
1118 	for (i  = 0; i < HW_MCAST_SIZE; i++) {
1119 		if (i & 1) {
1120 			ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
1121 				(ks->mcast_bits[i] << 8) |
1122 				ks->mcast_bits[i - 1]);
1123 		}
1124 	}
1125 }  /* ks_set_grpaddr */
1126 
1127 /**
1128  * ks_clear_mcast - clear multicast information
1129  *
1130  * @ks : The chip information
1131  * This routine removes all mcast addresses set in the hardware.
1132  */
1133 
1134 static void ks_clear_mcast(struct ks_net *ks)
1135 {
1136 	u16	i, mcast_size;
1137 	for (i = 0; i < HW_MCAST_SIZE; i++)
1138 		ks->mcast_bits[i] = 0;
1139 
1140 	mcast_size = HW_MCAST_SIZE >> 2;
1141 	for (i = 0; i < mcast_size; i++)
1142 		ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
1143 }
1144 
1145 static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
1146 {
1147 	u16		cntl;
1148 	ks->promiscuous = promiscuous_mode;
1149 	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
1150 	cntl = ks_rdreg16(ks, KS_RXCR1);
1151 
1152 	cntl &= ~RXCR1_FILTER_MASK;
1153 	if (promiscuous_mode)
1154 		/* Enable Promiscuous mode */
1155 		cntl |= RXCR1_RXAE | RXCR1_RXINVF;
1156 	else
1157 		/* Disable Promiscuous mode (default normal mode) */
1158 		cntl |= RXCR1_RXPAFMA;
1159 
1160 	ks_wrreg16(ks, KS_RXCR1, cntl);
1161 
1162 	if (ks->enabled)
1163 		ks_start_rx(ks);
1164 
1165 }  /* ks_set_promis */
1166 
1167 static void ks_set_mcast(struct ks_net *ks, u16 mcast)
1168 {
1169 	u16	cntl;
1170 
1171 	ks->all_mcast = mcast;
1172 	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
1173 	cntl = ks_rdreg16(ks, KS_RXCR1);
1174 	cntl &= ~RXCR1_FILTER_MASK;
1175 	if (mcast)
1176 		/* Enable "Perfect with Multicast address passed mode" */
1177 		cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1178 	else
1179 		/*
1180 		 * Disable "Perfect with Multicast address passed
1181 		 * mode" (normal mode).
1182 		 */
1183 		cntl |= RXCR1_RXPAFMA;
1184 
1185 	ks_wrreg16(ks, KS_RXCR1, cntl);
1186 
1187 	if (ks->enabled)
1188 		ks_start_rx(ks);
1189 }  /* ks_set_mcast */
1190 
1191 static void ks_set_rx_mode(struct net_device *netdev)
1192 {
1193 	struct ks_net *ks = netdev_priv(netdev);
1194 	struct netdev_hw_addr *ha;
1195 
1196 	/* Turn on/off promiscuous mode. */
1197 	if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
1198 		ks_set_promis(ks,
1199 			(u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
1200 	/* Turn on/off all mcast mode. */
1201 	else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
1202 		ks_set_mcast(ks,
1203 			(u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
1204 	else
1205 		ks_set_promis(ks, false);
1206 
1207 	if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
1208 		if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
1209 			int i = 0;
1210 
1211 			netdev_for_each_mc_addr(ha, netdev) {
1212 				if (i >= MAX_MCAST_LST)
1213 					break;
1214 				memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
1215 			}
1216 			ks->mcast_lst_size = (u8)i;
1217 			ks_set_grpaddr(ks);
1218 		} else {
1219 			/*
1220 			 * List too big to support so
1221 			 * turn on all mcast mode.
1222 			 */
1223 			ks->mcast_lst_size = MAX_MCAST_LST;
1224 			ks_set_mcast(ks, true);
1225 		}
1226 	} else {
1227 		ks->mcast_lst_size = 0;
1228 		ks_clear_mcast(ks);
1229 	}
1230 } /* ks_set_rx_mode */
1231 
1232 static void ks_set_mac(struct ks_net *ks, u8 *data)
1233 {
1234 	u16 *pw = (u16 *)data;
1235 	u16 w, u;
1236 
1237 	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
1238 
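	/* the MAC address registers hold each 16-bit word in big-endian byte
	 * order, so swap the bytes of every word before writing MARH/MARM/MARL
	 */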
1239 	u = *pw++;
1240 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1241 	ks_wrreg16(ks, KS_MARH, w);
1242 
1243 	u = *pw++;
1244 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1245 	ks_wrreg16(ks, KS_MARM, w);
1246 
1247 	u = *pw;
1248 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1249 	ks_wrreg16(ks, KS_MARL, w);
1250 
1251 	memcpy(ks->mac_addr, data, ETH_ALEN);
1252 
1253 	if (ks->enabled)
1254 		ks_start_rx(ks);
1255 }
1256 
1257 static int ks_set_mac_address(struct net_device *netdev, void *paddr)
1258 {
1259 	struct ks_net *ks = netdev_priv(netdev);
1260 	struct sockaddr *addr = paddr;
1261 	u8 *da;
1262 
1263 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1264 
1265 	da = (u8 *)netdev->dev_addr;
1266 
1267 	ks_set_mac(ks, da);
1268 	return 0;
1269 }
1270 
1271 static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1272 {
1273 	struct ks_net *ks = netdev_priv(netdev);
1274 
1275 	if (!netif_running(netdev))
1276 		return -EINVAL;
1277 
1278 	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
1279 }
1280 
1281 static const struct net_device_ops ks_netdev_ops = {
1282 	.ndo_open		= ks_net_open,
1283 	.ndo_stop		= ks_net_stop,
1284 	.ndo_do_ioctl		= ks_net_ioctl,
1285 	.ndo_start_xmit		= ks_start_xmit,
1286 	.ndo_set_mac_address	= ks_set_mac_address,
1287 	.ndo_set_rx_mode	= ks_set_rx_mode,
1288 	.ndo_change_mtu		= eth_change_mtu,
1289 	.ndo_validate_addr	= eth_validate_addr,
1290 };
1291 
1292 /* ethtool support */
1293 
1294 static void ks_get_drvinfo(struct net_device *netdev,
1295 			       struct ethtool_drvinfo *di)
1296 {
1297 	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
1298 	strlcpy(di->version, "1.00", sizeof(di->version));
1299 	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
1300 		sizeof(di->bus_info));
1301 }
1302 
1303 static u32 ks_get_msglevel(struct net_device *netdev)
1304 {
1305 	struct ks_net *ks = netdev_priv(netdev);
1306 	return ks->msg_enable;
1307 }
1308 
1309 static void ks_set_msglevel(struct net_device *netdev, u32 to)
1310 {
1311 	struct ks_net *ks = netdev_priv(netdev);
1312 	ks->msg_enable = to;
1313 }
1314 
1315 static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1316 {
1317 	struct ks_net *ks = netdev_priv(netdev);
1318 	return mii_ethtool_gset(&ks->mii, cmd);
1319 }
1320 
1321 static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1322 {
1323 	struct ks_net *ks = netdev_priv(netdev);
1324 	return mii_ethtool_sset(&ks->mii, cmd);
1325 }
1326 
1327 static u32 ks_get_link(struct net_device *netdev)
1328 {
1329 	struct ks_net *ks = netdev_priv(netdev);
1330 	return mii_link_ok(&ks->mii);
1331 }
1332 
1333 static int ks_nway_reset(struct net_device *netdev)
1334 {
1335 	struct ks_net *ks = netdev_priv(netdev);
1336 	return mii_nway_restart(&ks->mii);
1337 }
1338 
1339 static const struct ethtool_ops ks_ethtool_ops = {
1340 	.get_drvinfo	= ks_get_drvinfo,
1341 	.get_msglevel	= ks_get_msglevel,
1342 	.set_msglevel	= ks_set_msglevel,
1343 	.get_settings	= ks_get_settings,
1344 	.set_settings	= ks_set_settings,
1345 	.get_link	= ks_get_link,
1346 	.nway_reset	= ks_nway_reset,
1347 };
1348 
1349 /* MII interface controls */
1350 
1351 /**
1352  * ks_phy_reg - convert MII register into a KS8851 register
1353  * @reg: MII register number.
1354  *
1355  * Return the KS8851 register number for the corresponding MII PHY register
1356  * if possible. Return zero if the MII register has no direct mapping to the
1357  * KS8851 register set.
1358  */
1359 static int ks_phy_reg(int reg)
1360 {
1361 	switch (reg) {
1362 	case MII_BMCR:
1363 		return KS_P1MBCR;
1364 	case MII_BMSR:
1365 		return KS_P1MBSR;
1366 	case MII_PHYSID1:
1367 		return KS_PHY1ILR;
1368 	case MII_PHYSID2:
1369 		return KS_PHY1IHR;
1370 	case MII_ADVERTISE:
1371 		return KS_P1ANAR;
1372 	case MII_LPA:
1373 		return KS_P1ANLPR;
1374 	}
1375 
1376 	return 0x0;
1377 }
1378 
1379 /**
1380  * ks_phy_read - MII interface PHY register read.
1381  * @netdev: The network device the PHY is on.
1382  * @phy_addr: Address of PHY (ignored as we only have one)
1383  * @reg: The register to read.
1384  *
1385  * This call reads data from the PHY register specified in @reg. Since the
1386  * device does not support all the MII registers, the non-existent values
1387  * are always returned as zero.
1388  *
1389  * We return zero for unsupported registers as the MII code does not check
1390  * the value returned for any error status, and simply returns it to the
1391  * caller. The mii-tool that the driver was tested with takes any -ve error
1392  * as real PHY capabilities, thus displaying incorrect data to the user.
1393  */
1394 static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1395 {
1396 	struct ks_net *ks = netdev_priv(netdev);
1397 	int ksreg;
1398 	int result;
1399 
1400 	ksreg = ks_phy_reg(reg);
1401 	if (!ksreg)
1402 		return 0x0;	/* no error return allowed, so use zero */
1403 
1404 	mutex_lock(&ks->lock);
1405 	result = ks_rdreg16(ks, ksreg);
1406 	mutex_unlock(&ks->lock);
1407 
1408 	return result;
1409 }
1410 
1411 static void ks_phy_write(struct net_device *netdev,
1412 			     int phy, int reg, int value)
1413 {
1414 	struct ks_net *ks = netdev_priv(netdev);
1415 	int ksreg;
1416 
1417 	ksreg = ks_phy_reg(reg);
1418 	if (ksreg) {
1419 		mutex_lock(&ks->lock);
1420 		ks_wrreg16(ks, ksreg, value);
1421 		mutex_unlock(&ks->lock);
1422 	}
1423 }
1424 
1425 /**
1426  * ks_read_selftest - read the selftest memory info.
1427  * @ks: The device state
1428  *
1429  * Read and check the TX/RX memory selftest information.
1430  */
1431 static int ks_read_selftest(struct ks_net *ks)
1432 {
1433 	unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1434 	int ret = 0;
1435 	unsigned rd;
1436 
1437 	rd = ks_rdreg16(ks, KS_MBIR);
1438 
1439 	if ((rd & both_done) != both_done) {
1440 		netdev_warn(ks->netdev, "Memory selftest not finished\n");
1441 		return 0;
1442 	}
1443 
1444 	if (rd & MBIR_TXMBFA) {
1445 		netdev_err(ks->netdev, "TX memory selftest fails\n");
1446 		ret |= 1;
1447 	}
1448 
1449 	if (rd & MBIR_RXMBFA) {
1450 		netdev_err(ks->netdev, "RX memory selftest fails\n");
1451 		ret |= 2;
1452 	}
1453 
1454 	netdev_info(ks->netdev, "the selftest passes\n");
1455 	return ret;
1456 }
1457 
1458 static void ks_setup(struct ks_net *ks)
1459 {
1460 	u16	w;
1461 
1462 	/*
1463 	 * Configure QMU Transmit
1464 	 */
1465 
1466 	/* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
1467 	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
1468 
1469 	/* Setup Receive Frame Data Pointer Auto-Increment */
1470 	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
1471 
1472 	/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
1473 	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
1474 
1475 	/* Setup RxQ Command Control (RXQCR) */
1476 	ks->rc_rxqcr = RXQCR_CMD_CNTL;
1477 	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
1478 
1479 	/*
1480 	 * set the forced mode to half duplex (the default is full duplex)
1481 	 * because most switches fall back to half duplex when
1482 	 * auto-negotiation fails.
1483 	 */
1484 
1485 	w = ks_rdreg16(ks, KS_P1MBCR);
1486 	w &= ~P1MBCR_FORCE_FDX;
1487 	ks_wrreg16(ks, KS_P1MBCR, w);
1488 
1489 	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
1490 	ks_wrreg16(ks, KS_TXCR, w);
1491 
1492 	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;
1493 
1494 	if (ks->promiscuous)         /* bPromiscuous */
1495 		w |= (RXCR1_RXAE | RXCR1_RXINVF);
1496 	else if (ks->all_mcast) /* Multicast address passed mode */
1497 		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1498 	else                                   /* Normal mode */
1499 		w |= RXCR1_RXPAFMA;
1500 
1501 	ks_wrreg16(ks, KS_RXCR1, w);
1502 }  /*ks_setup */
1503 
1504 
1505 static void ks_setup_int(struct ks_net *ks)
1506 {
1507 	ks->rc_ier = 0x00;
1508 	/* Clear the interrupts status of the hardware. */
1509 	ks_wrreg16(ks, KS_ISR, 0xffff);
1510 
1511 	/* Enables the interrupts of the hardware. */
1512 	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
1513 }  /* ks_setup_int */
1514 
1515 static int ks_hw_init(struct ks_net *ks)
1516 {
1517 #define	MHEADER_SIZE	(sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
1518 	ks->promiscuous = 0;
1519 	ks->all_mcast = 0;
1520 	ks->mcast_lst_size = 0;
1521 
1522 	ks->frame_head_info = devm_kmalloc(&ks->pdev->dev, MHEADER_SIZE,
1523 					   GFP_KERNEL);
1524 	if (!ks->frame_head_info)
1525 		return false;
1526 
1527 	ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1528 	return true;
1529 }
1530 
1531 #if defined(CONFIG_OF)
1532 static const struct of_device_id ks8851_ml_dt_ids[] = {
1533 	{ .compatible = "micrel,ks8851-mll" },
1534 	{ /* sentinel */ }
1535 };
1536 MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
1537 #endif
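/*
 * Illustrative device tree node (assumed, board-specific placeholder
 * values): the first reg range maps the data register and the second the
 * command/address register, matching platform resources 0 and 1 used in
 * ks8851_probe() below.
 *
 *	ethernet@30000000 {
 *		compatible = "micrel,ks8851-mll";
 *		reg = <0x30000000 0x2>, <0x30000004 0x2>;
 *		interrupts = <...>;
 *	};
 */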
1538 
1539 static int ks8851_probe(struct platform_device *pdev)
1540 {
1541 	int err;
1542 	struct resource *io_d, *io_c;
1543 	struct net_device *netdev;
1544 	struct ks_net *ks;
1545 	u16 id, data;
1546 	const char *mac;
1547 
1548 	netdev = alloc_etherdev(sizeof(struct ks_net));
1549 	if (!netdev)
1550 		return -ENOMEM;
1551 
1552 	SET_NETDEV_DEV(netdev, &pdev->dev);
1553 
1554 	ks = netdev_priv(netdev);
1555 	ks->netdev = netdev;
1556 
1557 	io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1558 	ks->hw_addr = devm_ioremap_resource(&pdev->dev, io_d);
1559 	if (IS_ERR(ks->hw_addr)) {
1560 		err = PTR_ERR(ks->hw_addr);
1561 		goto err_free;
1562 	}
1563 
1564 	io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1565 	ks->hw_addr_cmd = devm_ioremap_resource(&pdev->dev, io_c);
1566 	if (IS_ERR(ks->hw_addr_cmd)) {
1567 		err = PTR_ERR(ks->hw_addr_cmd);
1568 		goto err_free;
1569 	}
1570 
1571 	netdev->irq = platform_get_irq(pdev, 0);
1572 
1573 	if ((int)netdev->irq < 0) {
1574 		err = netdev->irq;
1575 		goto err_free;
1576 	}
1577 
1578 	ks->pdev = pdev;
1579 
1580 	mutex_init(&ks->lock);
1581 	spin_lock_init(&ks->statelock);
1582 
1583 	netdev->netdev_ops = &ks_netdev_ops;
1584 	netdev->ethtool_ops = &ks_ethtool_ops;
1585 
1586 	/* setup mii state */
1587 	ks->mii.dev             = netdev;
1588 	ks->mii.phy_id          = 1;
1589 	ks->mii.phy_id_mask     = 1;
1590 	ks->mii.reg_num_mask    = 0xf;
1591 	ks->mii.mdio_read       = ks_phy_read;
1592 	ks->mii.mdio_write      = ks_phy_write;
1593 
1594 	netdev_info(netdev, "message enable is %d\n", msg_enable);
1595 	/* set the default message enable */
1596 	ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1597 						     NETIF_MSG_PROBE |
1598 						     NETIF_MSG_LINK));
1599 	ks_read_config(ks);
1600 
1601 	/* simple check for a valid chip being connected to the bus */
1602 	if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1603 		netdev_err(netdev, "failed to read device ID\n");
1604 		err = -ENODEV;
1605 		goto err_free;
1606 	}
1607 
1608 	if (ks_read_selftest(ks)) {
1609 		netdev_err(netdev, "chip memory selftest failed\n");
1610 		err = -ENODEV;
1611 		goto err_free;
1612 	}
1613 
1614 	err = register_netdev(netdev);
1615 	if (err)
1616 		goto err_free;
1617 
1618 	platform_set_drvdata(pdev, netdev);
1619 
1620 	ks_soft_reset(ks, GRR_GSR);
1621 	ks_hw_init(ks);
1622 	ks_disable_qmu(ks);
1623 	ks_setup(ks);
1624 	ks_setup_int(ks);
1625 
1626 	data = ks_rdreg16(ks, KS_OBCR);
1627 	ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1628 
1629 	/* overwriting the default MAC address */
1630 	if (pdev->dev.of_node) {
1631 		mac = of_get_mac_address(pdev->dev.of_node);
1632 		if (mac)
1633 			memcpy(ks->mac_addr, mac, ETH_ALEN);
1634 	} else {
1635 		struct ks8851_mll_platform_data *pdata;
1636 
1637 		pdata = dev_get_platdata(&pdev->dev);
1638 		if (!pdata) {
1639 			netdev_err(netdev, "No platform data\n");
1640 			err = -ENODEV;
1641 			goto err_pdata;
1642 		}
1643 		memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN);
1644 	}
1645 	if (!is_valid_ether_addr(ks->mac_addr)) {
1646 		/* Use random MAC address if none passed */
1647 		eth_random_addr(ks->mac_addr);
1648 		netdev_info(netdev, "Using random mac address\n");
1649 	}
1650 	netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
1651 
1652 	memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
1653 
1654 	ks_set_mac(ks, netdev->dev_addr);
1655 
1656 	id = ks_rdreg16(ks, KS_CIDER);
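	/* CIDER layout: bits 15:8 family, 7:4 chip id, 3:1 revision */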
1657 
1658 	netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1659 		    (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1660 	return 0;
1661 
1662 err_pdata:
1663 	unregister_netdev(netdev);
1664 err_free:
1665 	free_netdev(netdev);
1666 	return err;
1667 }
1668 
1669 static int ks8851_remove(struct platform_device *pdev)
1670 {
1671 	struct net_device *netdev = platform_get_drvdata(pdev);
1672 
1673 	unregister_netdev(netdev);
1674 	free_netdev(netdev);
1675 	return 0;
1676 
1677 }
1678 
1679 static struct platform_driver ks8851_platform_driver = {
1680 	.driver = {
1681 		.name = DRV_NAME,
1682 		.owner = THIS_MODULE,
1683 		.of_match_table	= of_match_ptr(ks8851_ml_dt_ids),
1684 	},
1685 	.probe = ks8851_probe,
1686 	.remove = ks8851_remove,
1687 };
1688 
1689 module_platform_driver(ks8851_platform_driver);
1690 
1691 MODULE_DESCRIPTION("KS8851 MLL Network driver");
1692 MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
1693 MODULE_LICENSE("GPL");
1694 module_param_named(message, msg_enable, int, 0);
1695 MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1696 
1697