1 /*
2  * CPSW Ethernet Switch Driver
3  *
4  * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation version 2.
9  *
10  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11  * kind, whether express or implied; without even the implied warranty
12  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  */
15 
16 #include <common.h>
17 #include <command.h>
18 #include <net.h>
19 #include <miiphy.h>
20 #include <malloc.h>
21 #include <net.h>
22 #include <netdev.h>
23 #include <cpsw.h>
24 #include <linux/errno.h>
25 #include <asm/gpio.h>
26 #include <asm/io.h>
27 #include <phy.h>
28 #include <asm/arch/cpu.h>
29 #include <dm.h>
30 #include <fdt_support.h>
31 
32 DECLARE_GLOBAL_DATA_PTR;
33 
34 #define BITMASK(bits)		(BIT(bits) - 1)
35 #define PHY_REG_MASK		0x1f
36 #define PHY_ID_MASK		0x1f
37 #define NUM_DESCS		(PKTBUFSRX * 2)
38 #define PKT_MIN			60
39 #define PKT_MAX			(1500 + 14 + 4 + 4)	/* MTU + eth header + VLAN tag + FCS */
40 #define CLEAR_BIT		1
41 #define GIGABITEN		BIT(7)
42 #define FULLDUPLEXEN		BIT(0)
43 #define MIIEN			BIT(15)
44 
45 /* reg offset */
46 #define CPSW_HOST_PORT_OFFSET	0x108
47 #define CPSW_SLAVE0_OFFSET	0x208
48 #define CPSW_SLAVE1_OFFSET	0x308
49 #define CPSW_SLAVE_SIZE		0x100
50 #define CPSW_CPDMA_OFFSET	0x800
51 #define CPSW_HW_STATS		0x900
52 #define CPSW_STATERAM_OFFSET	0xa00
53 #define CPSW_CPTS_OFFSET	0xc00
54 #define CPSW_ALE_OFFSET		0xd00
55 #define CPSW_SLIVER0_OFFSET	0xd80
56 #define CPSW_SLIVER1_OFFSET	0xdc0
57 #define CPSW_BD_OFFSET		0x2000
58 #define CPSW_MDIO_DIV		0xff
59 
60 #define AM335X_GMII_SEL_OFFSET	0x630
61 
62 /* DMA Registers */
63 #define CPDMA_TXCONTROL		0x004
64 #define CPDMA_RXCONTROL		0x014
65 #define CPDMA_SOFTRESET		0x01c
66 #define CPDMA_RXFREE		0x0e0
67 #define CPDMA_TXHDP_VER1	0x100
68 #define CPDMA_TXHDP_VER2	0x200
69 #define CPDMA_RXHDP_VER1	0x120
70 #define CPDMA_RXHDP_VER2	0x220
71 #define CPDMA_TXCP_VER1		0x140
72 #define CPDMA_TXCP_VER2		0x240
73 #define CPDMA_RXCP_VER1		0x160
74 #define CPDMA_RXCP_VER2		0x260
75 
76 /* Descriptor mode bits */
77 #define CPDMA_DESC_SOP		BIT(31)
78 #define CPDMA_DESC_EOP		BIT(30)
79 #define CPDMA_DESC_OWNER	BIT(29)
80 #define CPDMA_DESC_EOQ		BIT(28)
81 
82 /*
83  * This timeout definition is a worst-case, ultra-defensive measure against
84  * unexpected controller lockups.  Ideally, we should never hit this
85  * scenario in practice.
86  */
87 #define MDIO_TIMEOUT            100 /* msecs */
88 #define CPDMA_TIMEOUT		100 /* msecs */
89 
90 struct cpsw_mdio_regs {
91 	u32	version;
92 	u32	control;
93 #define CONTROL_IDLE		BIT(31)
94 #define CONTROL_ENABLE		BIT(30)
95 
96 	u32	alive;
97 	u32	link;
98 	u32	linkintraw;
99 	u32	linkintmasked;
100 	u32	__reserved_0[2];
101 	u32	userintraw;
102 	u32	userintmasked;
103 	u32	userintmaskset;
104 	u32	userintmaskclr;
105 	u32	__reserved_1[20];
106 
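	/* one access/physel register pair per MDIO user channel */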
107 	struct {
108 		u32		access;
109 		u32		physel;
110 #define USERACCESS_GO		BIT(31)
111 #define USERACCESS_WRITE	BIT(30)
112 #define USERACCESS_ACK		BIT(29)
113 #define USERACCESS_READ		(0)
114 #define USERACCESS_DATA		(0xffff)
115 	} user[0];
116 };
117 
118 struct cpsw_regs {
119 	u32	id_ver;
120 	u32	control;
121 	u32	soft_reset;
122 	u32	stat_port_en;
123 	u32	ptype;
124 };
125 
126 struct cpsw_slave_regs {
127 	u32	max_blks;
128 	u32	blk_cnt;
129 	u32	flow_thresh;
130 	u32	port_vlan;
131 	u32	tx_pri_map;
132 #ifdef CONFIG_AM33XX
133 	u32	gap_thresh;
134 #elif defined(CONFIG_TI814X)
135 	u32	ts_ctl;
136 	u32	ts_seq_ltype;
137 	u32	ts_vlan;
138 #endif
139 	u32	sa_lo;
140 	u32	sa_hi;
141 };
142 
143 struct cpsw_host_regs {
144 	u32	max_blks;
145 	u32	blk_cnt;
146 	u32	flow_thresh;
147 	u32	port_vlan;
148 	u32	tx_pri_map;
149 	u32	cpdma_tx_pri_map;
150 	u32	cpdma_rx_chan_map;
151 };
152 
153 struct cpsw_sliver_regs {
154 	u32	id_ver;
155 	u32	mac_control;
156 	u32	mac_status;
157 	u32	soft_reset;
158 	u32	rx_maxlen;
159 	u32	__reserved_0;
160 	u32	rx_pause;
161 	u32	tx_pause;
162 	u32	__reserved_1;
163 	u32	rx_pri_map;
164 };
165 
166 #define ALE_ENTRY_BITS		68
167 #define ALE_ENTRY_WORDS		DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
168 
169 /* ALE Registers */
170 #define ALE_CONTROL		0x08
171 #define ALE_UNKNOWNVLAN		0x18
172 #define ALE_TABLE_CONTROL	0x20
173 #define ALE_TABLE		0x34
174 #define ALE_PORTCTL		0x40
175 
176 #define ALE_TABLE_WRITE		BIT(31)
177 
178 #define ALE_TYPE_FREE			0
179 #define ALE_TYPE_ADDR			1
180 #define ALE_TYPE_VLAN			2
181 #define ALE_TYPE_VLAN_ADDR		3
182 
183 #define ALE_UCAST_PERSISTANT		0
184 #define ALE_UCAST_UNTOUCHED		1
185 #define ALE_UCAST_OUI			2
186 #define ALE_UCAST_TOUCHED		3
187 
188 #define ALE_MCAST_FWD			0
189 #define ALE_MCAST_BLOCK_LEARN_FWD	1
190 #define ALE_MCAST_FWD_LEARN		2
191 #define ALE_MCAST_FWD_2			3
192 
193 enum cpsw_ale_port_state {
194 	ALE_PORT_STATE_DISABLE	= 0x00,
195 	ALE_PORT_STATE_BLOCK	= 0x01,
196 	ALE_PORT_STATE_LEARN	= 0x02,
197 	ALE_PORT_STATE_FORWARD	= 0x03,
198 };
199 
200 /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
201 #define ALE_SECURE	1
202 #define ALE_BLOCKED	2
203 
204 struct cpsw_slave {
205 	struct cpsw_slave_regs		*regs;
206 	struct cpsw_sliver_regs		*sliver;
207 	int				slave_num;
208 	u32				mac_control;
209 	struct cpsw_slave_data		*data;
210 };
211 
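/*
 * CPDMA buffer descriptor: the hw_* words form the descriptor as seen by
 * the DMA engine, while the sw_* fields are software bookkeeping used to
 * recover the buffer pointer and length when a completed packet is
 * processed.
 */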
212 struct cpdma_desc {
213 	/* hardware fields */
214 	u32			hw_next;
215 	u32			hw_buffer;
216 	u32			hw_len;
217 	u32			hw_mode;
218 	/* software fields */
219 	u32			sw_buffer;
220 	u32			sw_len;
221 };
222 
223 struct cpdma_chan {
224 	struct cpdma_desc	*head, *tail;
225 	void			*hdp, *cp, *rxfree;
226 };
227 
228 /* AM33xx SoC specific definitions for the CONTROL port */
229 #define AM33XX_GMII_SEL_MODE_MII	0
230 #define AM33XX_GMII_SEL_MODE_RMII	1
231 #define AM33XX_GMII_SEL_MODE_RGMII	2
232 
233 #define AM33XX_GMII_SEL_RGMII1_IDMODE	BIT(4)
234 #define AM33XX_GMII_SEL_RGMII2_IDMODE	BIT(5)
235 #define AM33XX_GMII_SEL_RMII1_IO_CLK_EN	BIT(6)
236 #define AM33XX_GMII_SEL_RMII2_IO_CLK_EN	BIT(7)
237 
238 #define GMII_SEL_MODE_MASK		0x3
239 
240 #define desc_write(desc, fld, val)	__raw_writel((u32)(val), &(desc)->fld)
241 #define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
242 #define desc_read_ptr(desc, fld)	((void *)__raw_readl(&(desc)->fld))
243 
244 #define chan_write(chan, fld, val)	__raw_writel((u32)(val), (chan)->fld)
245 #define chan_read(chan, fld)		__raw_readl((chan)->fld)
246 #define chan_read_ptr(chan, fld)	((void *)__raw_readl((chan)->fld))
247 
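/*
 * for_active_slave operates only on the slave selected by
 * data.active_slave, while for_each_slave walks every slave port.
 */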
248 #define for_active_slave(slave, priv) \
249 	slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
250 #define for_each_slave(slave, priv) \
251 	for (slave = (priv)->slaves; slave != (priv)->slaves + \
252 				(priv)->data.slaves; slave++)
253 
254 struct cpsw_priv {
255 #ifdef CONFIG_DM_ETH
256 	struct udevice			*dev;
257 #else
258 	struct eth_device		*dev;
259 #endif
260 	struct cpsw_platform_data	data;
261 	int				host_port;
262 
263 	struct cpsw_regs		*regs;
264 	void				*dma_regs;
265 	struct cpsw_host_regs		*host_port_regs;
266 	void				*ale_regs;
267 
268 	struct cpdma_desc		*descs;
269 	struct cpdma_desc		*desc_free;
270 	struct cpdma_chan		rx_chan, tx_chan;
271 
272 	struct cpsw_slave		*slaves;
273 	struct phy_device		*phydev;
274 	struct mii_dev			*bus;
275 
276 	u32				phy_mask;
277 };
278 
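/*
 * Each ALE table entry is 68 bits wide and is transferred through three
 * 32-bit TABLE words.  The helpers below convert a bit offset into a word
 * index and then flip it (2 - idx), so ale_entry[0] ends up holding the
 * most significant bits of the entry.
 */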
279 static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
280 {
281 	int idx;
282 
283 	idx    = start / 32;
284 	start -= idx * 32;
285 	idx    = 2 - idx; /* flip */
286 	return (ale_entry[idx] >> start) & BITMASK(bits);
287 }
288 
289 static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
290 				      u32 value)
291 {
292 	int idx;
293 
294 	value &= BITMASK(bits);
295 	idx    = start / 32;
296 	start -= idx * 32;
297 	idx    = 2 - idx; /* flip */
298 	ale_entry[idx] &= ~(BITMASK(bits) << start);
299 	ale_entry[idx] |=  (value << start);
300 }
301 
302 #define DEFINE_ALE_FIELD(name, start, bits)				\
303 static inline int cpsw_ale_get_##name(u32 *ale_entry)			\
304 {									\
305 	return cpsw_ale_get_field(ale_entry, start, bits);		\
306 }									\
307 static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value)	\
308 {									\
309 	cpsw_ale_set_field(ale_entry, start, bits, value);		\
310 }
311 
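/*
 * Each DEFINE_ALE_FIELD(name, start, bits) below expands into a
 * cpsw_ale_get_<name>()/cpsw_ale_set_<name>() accessor pair for the given
 * bit range, e.g. entry_type occupies bits 60..61 of the ALE entry.
 */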
312 DEFINE_ALE_FIELD(entry_type,		60,	2)
313 DEFINE_ALE_FIELD(mcast_state,		62,	2)
314 DEFINE_ALE_FIELD(port_mask,		66,	3)
315 DEFINE_ALE_FIELD(ucast_type,		62,	2)
316 DEFINE_ALE_FIELD(port_num,		66,	2)
317 DEFINE_ALE_FIELD(blocked,		65,	1)
318 DEFINE_ALE_FIELD(secure,		64,	1)
319 DEFINE_ALE_FIELD(mcast,			40,	1)
320 
321 /* The MAC address field in the ALE entry cannot be macroized as above */
322 static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
323 {
324 	int i;
325 
326 	for (i = 0; i < 6; i++)
327 		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
328 }
329 
330 static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
331 {
332 	int i;
333 
334 	for (i = 0; i < 6; i++)
335 		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
336 }
337 
338 static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
339 {
340 	int i;
341 
342 	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);
343 
344 	for (i = 0; i < ALE_ENTRY_WORDS; i++)
345 		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);
346 
347 	return idx;
348 }
349 
350 static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
351 {
352 	int i;
353 
354 	for (i = 0; i < ALE_ENTRY_WORDS; i++)
355 		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);
356 
357 	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);
358 
359 	return idx;
360 }
361 
362 static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
363 {
364 	u32 ale_entry[ALE_ENTRY_WORDS];
365 	int type, idx;
366 
367 	for (idx = 0; idx < priv->data.ale_entries; idx++) {
368 		u8 entry_addr[6];
369 
370 		cpsw_ale_read(priv, idx, ale_entry);
371 		type = cpsw_ale_get_entry_type(ale_entry);
372 		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
373 			continue;
374 		cpsw_ale_get_addr(ale_entry, entry_addr);
375 		if (memcmp(entry_addr, addr, 6) == 0)
376 			return idx;
377 	}
378 	return -ENOENT;
379 }
380 
381 static int cpsw_ale_match_free(struct cpsw_priv *priv)
382 {
383 	u32 ale_entry[ALE_ENTRY_WORDS];
384 	int type, idx;
385 
386 	for (idx = 0; idx < priv->data.ale_entries; idx++) {
387 		cpsw_ale_read(priv, idx, ale_entry);
388 		type = cpsw_ale_get_entry_type(ale_entry);
389 		if (type == ALE_TYPE_FREE)
390 			return idx;
391 	}
392 	return -ENOENT;
393 }
394 
395 static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
396 {
397 	u32 ale_entry[ALE_ENTRY_WORDS];
398 	int type, idx;
399 
400 	for (idx = 0; idx < priv->data.ale_entries; idx++) {
401 		cpsw_ale_read(priv, idx, ale_entry);
402 		type = cpsw_ale_get_entry_type(ale_entry);
403 		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
404 			continue;
405 		if (cpsw_ale_get_mcast(ale_entry))
406 			continue;
407 		type = cpsw_ale_get_ucast_type(ale_entry);
408 		if (type != ALE_UCAST_PERSISTANT &&
409 		    type != ALE_UCAST_OUI)
410 			return idx;
411 	}
412 	return -ENOENT;
413 }
414 
415 static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
416 			      int port, int flags)
417 {
418 	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
419 	int idx;
420 
421 	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
422 	cpsw_ale_set_addr(ale_entry, addr);
423 	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
424 	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
425 	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
426 	cpsw_ale_set_port_num(ale_entry, port);
427 
428 	idx = cpsw_ale_match_addr(priv, addr);
429 	if (idx < 0)
430 		idx = cpsw_ale_match_free(priv);
431 	if (idx < 0)
432 		idx = cpsw_ale_find_ageable(priv);
433 	if (idx < 0)
434 		return -ENOMEM;
435 
436 	cpsw_ale_write(priv, idx, ale_entry);
437 	return 0;
438 }
439 
440 static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
441 			      int port_mask)
442 {
443 	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
444 	int idx, mask;
445 
446 	idx = cpsw_ale_match_addr(priv, addr);
447 	if (idx >= 0)
448 		cpsw_ale_read(priv, idx, ale_entry);
449 
450 	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
451 	cpsw_ale_set_addr(ale_entry, addr);
452 	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);
453 
454 	mask = cpsw_ale_get_port_mask(ale_entry);
455 	port_mask |= mask;
456 	cpsw_ale_set_port_mask(ale_entry, port_mask);
457 
458 	if (idx < 0)
459 		idx = cpsw_ale_match_free(priv);
460 	if (idx < 0)
461 		idx = cpsw_ale_find_ageable(priv);
462 	if (idx < 0)
463 		return -ENOMEM;
464 
465 	cpsw_ale_write(priv, idx, ale_entry);
466 	return 0;
467 }
468 
469 static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
470 {
471 	u32 tmp, mask = BIT(bit);
472 
473 	tmp  = __raw_readl(priv->ale_regs + ALE_CONTROL);
474 	tmp &= ~mask;
475 	tmp |= val ? mask : 0;
476 	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
477 }
478 
479 #define cpsw_ale_enable(priv, val)	cpsw_ale_control(priv, 31, val)
480 #define cpsw_ale_clear(priv, val)	cpsw_ale_control(priv, 30, val)
481 #define cpsw_ale_vlan_aware(priv, val)	cpsw_ale_control(priv,  2, val)
482 
483 static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
484 				       int val)
485 {
486 	int offset = ALE_PORTCTL + 4 * port;
487 	u32 tmp, mask = 0x3;
488 
489 	tmp  = __raw_readl(priv->ale_regs + offset);
490 	tmp &= ~mask;
491 	tmp |= val & mask;
492 	__raw_writel(tmp, priv->ale_regs + offset);
493 }
494 
495 static struct cpsw_mdio_regs *mdio_regs;
496 
497 /* wait until hardware is ready for another user access */
498 static inline u32 wait_for_user_access(void)
499 {
500 	u32 reg = 0;
501 	int timeout = MDIO_TIMEOUT;
502 
503 	while (timeout-- &&
504 	((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
505 		udelay(10);
506 
507 	if (timeout == -1) {
508 		printf("wait_for_user_access Timeout\n");
509 		return -ETIMEDOUT;
510 	}
511 	return reg;
512 }
513 
514 /* wait until hardware state machine is idle */
515 static inline void wait_for_idle(void)
516 {
517 	int timeout = MDIO_TIMEOUT;
518 
519 	while (timeout-- &&
520 		((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
521 		udelay(10);
522 
523 	if (timeout == -1)
524 		printf("wait_for_idle Timeout\n");
525 }
526 
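/*
 * MDIO transactions are driven through the USERACCESS register: write the
 * GO bit together with the register/PHY address (plus data for writes),
 * then poll until GO clears.  On reads, the ACK bit indicates that the low
 * 16 bits contain valid data.
 */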
527 static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
528 				int dev_addr, int phy_reg)
529 {
530 	int data;
531 	u32 reg;
532 
533 	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
534 		return -EINVAL;
535 
536 	wait_for_user_access();
537 	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
538 	       (phy_id << 16));
539 	__raw_writel(reg, &mdio_regs->user[0].access);
540 	reg = wait_for_user_access();
541 
542 	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
543 	return data;
544 }
545 
546 static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
547 				int phy_reg, u16 data)
548 {
549 	u32 reg;
550 
551 	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
552 		return -EINVAL;
553 
554 	wait_for_user_access();
555 	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
556 		   (phy_id << 16) | (data & USERACCESS_DATA));
557 	__raw_writel(reg, &mdio_regs->user[0].access);
558 	wait_for_user_access();
559 
560 	return 0;
561 }
562 
563 static void cpsw_mdio_init(const char *name, u32 mdio_base, u32 div)
564 {
565 	struct mii_dev *bus = mdio_alloc();
566 
567 	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;
568 
569 	/* set enable and clock divider */
570 	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);
571 
572 	/*
573 	 * wait for scan logic to settle:
574 	 * the scan time consists of (a) a large fixed component, and (b) a
575 	 * small component that varies with the mii bus frequency.  These
576 	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
577 	 * silicon.  Since the effect of (b) was found to be largely
578 	 * negligible, we keep things simple here.
579 	 */
580 	udelay(1000);
581 
582 	bus->read = cpsw_mdio_read;
583 	bus->write = cpsw_mdio_write;
584 	strcpy(bus->name, name);
585 
586 	mdio_register(bus);
587 }
588 
589 /* Set a self-clearing bit in a register, and wait for it to clear */
590 static inline void setbit_and_wait_for_clear32(void *addr)
591 {
592 	__raw_writel(CLEAR_BIT, addr);
593 	while (__raw_readl(addr) & CLEAR_BIT)
594 		;
595 }
596 
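/*
 * Pack a 6-byte MAC address into the SA_HI (bytes 0-3) and SA_LO
 * (bytes 4-5) register layout used by the slave ports.
 */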
597 #define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
598 			 ((mac)[2] << 16) | ((mac)[3] << 24))
599 #define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
600 
601 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
602 			       struct cpsw_priv *priv)
603 {
604 #ifdef CONFIG_DM_ETH
605 	struct eth_pdata *pdata = dev_get_platdata(priv->dev);
606 
607 	writel(mac_hi(pdata->enetaddr), &slave->regs->sa_hi);
608 	writel(mac_lo(pdata->enetaddr), &slave->regs->sa_lo);
609 #else
610 	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
611 	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
612 #endif
613 }
614 
615 static int cpsw_slave_update_link(struct cpsw_slave *slave,
616 				   struct cpsw_priv *priv, int *link)
617 {
618 	struct phy_device *phy;
619 	u32 mac_control = 0;
620 	int ret = -ENODEV;
621 
622 	phy = priv->phydev;
623 	if (!phy)
624 		goto out;
625 
626 	ret = phy_startup(phy);
627 	if (ret)
628 		goto out;
629 
630 	if (link)
631 		*link = phy->link;
632 
633 	if (phy->link) { /* link up */
634 		mac_control = priv->data.mac_control;
635 		if (phy->speed == 1000)
636 			mac_control |= GIGABITEN;
637 		if (phy->duplex == DUPLEX_FULL)
638 			mac_control |= FULLDUPLEXEN;
639 		if (phy->speed == 100)
640 			mac_control |= MIIEN;
641 	}
642 
643 	if (mac_control == slave->mac_control)
644 		goto out;
645 
646 	if (mac_control) {
647 		printf("link up on port %d, speed %d, %s duplex\n",
648 				slave->slave_num, phy->speed,
649 				(phy->duplex == DUPLEX_FULL) ? "full" : "half");
650 	} else {
651 		printf("link down on port %d\n", slave->slave_num);
652 	}
653 
654 	__raw_writel(mac_control, &slave->sliver->mac_control);
655 	slave->mac_control = mac_control;
656 
657 out:
658 	return ret;
659 }
660 
661 static int cpsw_update_link(struct cpsw_priv *priv)
662 {
663 	int ret = -ENODEV;
664 	struct cpsw_slave *slave;
665 
666 	for_active_slave(slave, priv)
667 		ret = cpsw_slave_update_link(slave, priv, NULL);
668 
669 	return ret;
670 }
671 
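/* ALE port numbering: when the host occupies port 0, slave N maps to port N + 1. */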
672 static inline u32  cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
673 {
674 	if (priv->host_port == 0)
675 		return slave_num + 1;
676 	else
677 		return slave_num;
678 }
679 
680 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
681 {
682 	u32     slave_port;
683 
684 	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);
685 
686 	/* setup priority mapping */
687 	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
688 	__raw_writel(0x33221100, &slave->regs->tx_pri_map);
689 
690 	/* setup max packet size, and mac address */
691 	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
692 	cpsw_set_slave_mac(slave, priv);
693 
694 	slave->mac_control = 0;	/* no link yet */
695 
696 	/* enable forwarding */
697 	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
698 	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);
699 
700 	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);
701 
702 	priv->phy_mask |= 1 << slave->data->phy_addr;
703 }
704 
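/*
 * Free descriptors form a singly linked list threaded through the hw_next
 * field: alloc pops the head, free pushes a descriptor back on.
 */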
705 static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
706 {
707 	struct cpdma_desc *desc = priv->desc_free;
708 
709 	if (desc)
710 		priv->desc_free = desc_read_ptr(desc, hw_next);
711 	return desc;
712 }
713 
714 static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
715 {
716 	if (desc) {
717 		desc_write(desc, hw_next, priv->desc_free);
718 		priv->desc_free = desc;
719 	}
720 }
721 
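/*
 * Queue a buffer on a DMA channel: the new descriptor is appended to the
 * channel's software list, and if the hardware had already reached end of
 * queue (EOQ set on the previous tail) the head descriptor pointer is
 * rewritten to restart the DMA.
 */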
722 static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
723 			void *buffer, int len)
724 {
725 	struct cpdma_desc *desc, *prev;
726 	u32 mode;
727 
728 	desc = cpdma_desc_alloc(priv);
729 	if (!desc)
730 		return -ENOMEM;
731 
732 	if (len < PKT_MIN)
733 		len = PKT_MIN;
734 
735 	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
736 
737 	desc_write(desc, hw_next,   0);
738 	desc_write(desc, hw_buffer, buffer);
739 	desc_write(desc, hw_len,    len);
740 	desc_write(desc, hw_mode,   mode | len);
741 	desc_write(desc, sw_buffer, buffer);
742 	desc_write(desc, sw_len,    len);
743 
744 	if (!chan->head) {
745 		/* simple case - first packet enqueued */
746 		chan->head = desc;
747 		chan->tail = desc;
748 		chan_write(chan, hdp, desc);
749 		goto done;
750 	}
751 
752 	/* not the first packet - enqueue at the tail */
753 	prev = chan->tail;
754 	desc_write(prev, hw_next, desc);
755 	chan->tail = desc;
756 
757 	/* next check if EOQ has been triggered already */
758 	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
759 		chan_write(chan, hdp, desc);
760 
761 done:
762 	if (chan->rxfree)
763 		chan_write(chan, rxfree, 1);
764 	return 0;
765 }
766 
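/*
 * Reap the descriptor at the head of a channel.  Returns -EBUSY while the
 * hardware still owns it; otherwise the buffer pointer and length are
 * returned, completion is acknowledged through the CP register and the
 * descriptor is recycled onto the free list.
 */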
767 static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
768 			 void **buffer, int *len)
769 {
770 	struct cpdma_desc *desc = chan->head;
771 	u32 status;
772 
773 	if (!desc)
774 		return -ENOENT;
775 
776 	status = desc_read(desc, hw_mode);
777 
778 	if (len)
779 		*len = status & 0x7ff;
780 
781 	if (buffer)
782 		*buffer = desc_read_ptr(desc, sw_buffer);
783 
784 	if (status & CPDMA_DESC_OWNER) {
785 		if (chan_read(chan, hdp) == 0) {
786 			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
787 				chan_write(chan, hdp, desc);
788 		}
789 
790 		return -EBUSY;
791 	}
792 
793 	chan->head = desc_read_ptr(desc, hw_next);
794 	chan_write(chan, cp, desc);
795 
796 	cpdma_desc_free(priv, desc);
797 	return 0;
798 }
799 
800 static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
801 {
802 	struct cpsw_slave	*slave;
803 	int i, ret;
804 
805 	/* soft reset the controller and initialize priv */
806 	setbit_and_wait_for_clear32(&priv->regs->soft_reset);
807 
808 	/* initialize and reset the address lookup engine */
809 	cpsw_ale_enable(priv, 1);
810 	cpsw_ale_clear(priv, 1);
811 	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */
812 
813 	/* setup host port priority mapping */
814 	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
815 	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
816 
817 	/* disable priority elevation and enable statistics on all ports */
818 	__raw_writel(0, &priv->regs->ptype);
819 
820 	/* enable statistics collection only on the host port */
821 	__raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
822 	__raw_writel(0x7, &priv->regs->stat_port_en);
823 
824 	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);
825 
826 	cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
827 	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);
828 
829 	for_active_slave(slave, priv)
830 		cpsw_slave_init(slave, priv);
831 
832 	ret = cpsw_update_link(priv);
833 	if (ret)
834 		goto out;
835 
836 	/* init descriptor pool */
837 	for (i = 0; i < NUM_DESCS; i++) {
838 		desc_write(&priv->descs[i], hw_next,
839 			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
840 	}
841 	priv->desc_free = &priv->descs[0];
842 
843 	/* initialize channels */
844 	if (priv->data.version == CPSW_CTRL_VERSION_2) {
845 		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
846 		priv->rx_chan.hdp       = priv->dma_regs + CPDMA_RXHDP_VER2;
847 		priv->rx_chan.cp        = priv->dma_regs + CPDMA_RXCP_VER2;
848 		priv->rx_chan.rxfree    = priv->dma_regs + CPDMA_RXFREE;
849 
850 		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
851 		priv->tx_chan.hdp       = priv->dma_regs + CPDMA_TXHDP_VER2;
852 		priv->tx_chan.cp        = priv->dma_regs + CPDMA_TXCP_VER2;
853 	} else {
854 		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
855 		priv->rx_chan.hdp       = priv->dma_regs + CPDMA_RXHDP_VER1;
856 		priv->rx_chan.cp        = priv->dma_regs + CPDMA_RXCP_VER1;
857 		priv->rx_chan.rxfree    = priv->dma_regs + CPDMA_RXFREE;
858 
859 		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
860 		priv->tx_chan.hdp       = priv->dma_regs + CPDMA_TXHDP_VER1;
861 		priv->tx_chan.cp        = priv->dma_regs + CPDMA_TXCP_VER1;
862 	}
863 
864 	/* clear dma state */
865 	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
866 
867 	if (priv->data.version == CPSW_CTRL_VERSION_2) {
868 		for (i = 0; i < priv->data.channels; i++) {
869 			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
870 					* i);
871 			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
872 					* i);
873 			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
874 					* i);
875 			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
876 					* i);
877 			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
878 					* i);
879 		}
880 	} else {
881 		for (i = 0; i < priv->data.channels; i++) {
882 			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
883 					* i);
884 			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
885 					* i);
886 			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
887 					* i);
888 			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
889 					* i);
890 			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
891 					* i);
892 
893 		}
894 	}
895 
896 	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
897 	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);
898 
899 	/* submit rx descs */
900 	for (i = 0; i < PKTBUFSRX; i++) {
901 		ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
902 				   PKTSIZE);
903 		if (ret < 0) {
904 			printf("error %d submitting rx desc\n", ret);
905 			break;
906 		}
907 	}
908 
909 out:
910 	return ret;
911 }
912 
913 static int cpsw_reap_completed_packets(struct cpsw_priv *priv)
914 {
915 	int timeout = CPDMA_TIMEOUT;
916 
917 	/* reap completed packets */
918 	while (timeout-- &&
919 	       (cpdma_process(priv, &priv->tx_chan, NULL, NULL) >= 0))
920 		;
921 
922 	return timeout;
923 }
924 
925 static void _cpsw_halt(struct cpsw_priv *priv)
926 {
927 	cpsw_reap_completed_packets(priv);
928 
929 	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
930 	writel(0, priv->dma_regs + CPDMA_RXCONTROL);
931 
932 	/* soft reset the controller and initialize priv */
933 	setbit_and_wait_for_clear32(&priv->regs->soft_reset);
934 
935 	/* clear dma state */
936 	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
937 
938 }
939 
940 static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
941 {
942 	int timeout;
943 
944 	flush_dcache_range((unsigned long)packet,
945 			   (unsigned long)packet + ALIGN(length, PKTALIGN));
946 
947 	timeout = cpsw_reap_completed_packets(priv);
948 	if (timeout == -1) {
949 		printf("cpdma_process timeout\n");
950 		return -ETIMEDOUT;
951 	}
952 
953 	return cpdma_submit(priv, &priv->tx_chan, packet, length);
954 }
955 
956 static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
957 {
958 	void *buffer;
959 	int len;
960 	int ret;
961 
962 	ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);
963 	if (ret < 0)
964 		return ret;
965 
966 	invalidate_dcache_range((unsigned long)buffer,
967 				(unsigned long)buffer + PKTSIZE_ALIGN);
968 	*pkt = buffer;
969 
970 	return len;
971 }
972 
973 static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
974 			    struct cpsw_priv *priv)
975 {
976 	void			*regs = priv->regs;
977 	struct cpsw_slave_data	*data = priv->data.slave_data + slave_num;
978 	slave->slave_num = slave_num;
979 	slave->data	= data;
980 	slave->regs	= regs + data->slave_reg_ofs;
981 	slave->sliver	= regs + data->sliver_reg_ofs;
982 }
983 
984 static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
985 {
986 	struct phy_device *phydev;
987 	u32 supported = PHY_GBIT_FEATURES;
988 
989 	phydev = phy_connect(priv->bus,
990 			slave->data->phy_addr,
991 			priv->dev,
992 			slave->data->phy_if);
993 
994 	if (!phydev)
995 		return -1;
996 
997 	phydev->supported &= supported;
998 	phydev->advertising = phydev->supported;
999 
1000 #ifdef CONFIG_DM_ETH
1001 	if (slave->data->phy_of_handle)
1002 		dev_set_of_offset(phydev->dev, slave->data->phy_of_handle);
1003 #endif
1004 
1005 	priv->phydev = phydev;
1006 	phy_config(phydev);
1007 
1008 	return 1;
1009 }
1010 
1011 int _cpsw_register(struct cpsw_priv *priv)
1012 {
1013 	struct cpsw_slave	*slave;
1014 	struct cpsw_platform_data *data = &priv->data;
1015 	void			*regs = (void *)data->cpsw_base;
1016 
1017 	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
1018 	if (!priv->slaves) {
1019 		return -ENOMEM;
1020 	}
1021 
1022 	priv->host_port		= data->host_port_num;
1023 	priv->regs		= regs;
1024 	priv->host_port_regs	= regs + data->host_port_reg_ofs;
1025 	priv->dma_regs		= regs + data->cpdma_reg_ofs;
1026 	priv->ale_regs		= regs + data->ale_reg_ofs;
1027 	priv->descs		= (void *)regs + data->bd_ram_ofs;
1028 
1029 	int idx = 0;
1030 
1031 	for_each_slave(slave, priv) {
1032 		cpsw_slave_setup(slave, idx, priv);
1033 		idx = idx + 1;
1034 	}
1035 
1036 	cpsw_mdio_init(priv->dev->name, data->mdio_base, data->mdio_div);
1037 	priv->bus = miiphy_get_dev_by_name(priv->dev->name);
1038 	for_active_slave(slave, priv)
1039 		cpsw_phy_init(priv, slave);
1040 
1041 	return 0;
1042 }
1043 
1044 #ifndef CONFIG_DM_ETH
1045 static int cpsw_init(struct eth_device *dev, bd_t *bis)
1046 {
1047 	struct cpsw_priv	*priv = dev->priv;
1048 
1049 	return _cpsw_init(priv, dev->enetaddr);
1050 }
1051 
1052 static void cpsw_halt(struct eth_device *dev)
1053 {
1054 	struct cpsw_priv *priv = dev->priv;
1055 
1056 	return _cpsw_halt(priv);
1057 }
1058 
1059 static int cpsw_send(struct eth_device *dev, void *packet, int length)
1060 {
1061 	struct cpsw_priv	*priv = dev->priv;
1062 
1063 	return _cpsw_send(priv, packet, length);
1064 }
1065 
1066 static int cpsw_recv(struct eth_device *dev)
1067 {
1068 	struct cpsw_priv *priv = dev->priv;
1069 	uchar *pkt = NULL;
1070 	int len;
1071 
1072 	len = _cpsw_recv(priv, &pkt);
1073 
1074 	if (len > 0) {
1075 		net_process_received_packet(pkt, len);
1076 		cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);
1077 	}
1078 
1079 	return len;
1080 }
1081 
1082 int cpsw_register(struct cpsw_platform_data *data)
1083 {
1084 	struct cpsw_priv	*priv;
1085 	struct eth_device	*dev;
1086 	int ret;
1087 
1088 	dev = calloc(sizeof(*dev), 1);
1089 	if (!dev)
1090 		return -ENOMEM;
1091 
1092 	priv = calloc(sizeof(*priv), 1);
1093 	if (!priv) {
1094 		free(dev);
1095 		return -ENOMEM;
1096 	}
1097 
1098 	priv->dev = dev;
1099 	priv->data = *data;
1100 
1101 	strcpy(dev->name, "cpsw");
1102 	dev->iobase	= 0;
1103 	dev->init	= cpsw_init;
1104 	dev->halt	= cpsw_halt;
1105 	dev->send	= cpsw_send;
1106 	dev->recv	= cpsw_recv;
1107 	dev->priv	= priv;
1108 
1109 	eth_register(dev);
1110 
1111 	ret = _cpsw_register(priv);
1112 	if (ret < 0) {
1113 		eth_unregister(dev);
1114 		free(dev);
1115 		free(priv);
1116 		return ret;
1117 	}
1118 
1119 	return 1;
1120 }
1121 #else
1122 static int cpsw_eth_start(struct udevice *dev)
1123 {
1124 	struct eth_pdata *pdata = dev_get_platdata(dev);
1125 	struct cpsw_priv *priv = dev_get_priv(dev);
1126 
1127 	return _cpsw_init(priv, pdata->enetaddr);
1128 }
1129 
1130 static int cpsw_eth_send(struct udevice *dev, void *packet, int length)
1131 {
1132 	struct cpsw_priv *priv = dev_get_priv(dev);
1133 
1134 	return _cpsw_send(priv, packet, length);
1135 }
1136 
1137 static int cpsw_eth_recv(struct udevice *dev, int flags, uchar **packetp)
1138 {
1139 	struct cpsw_priv *priv = dev_get_priv(dev);
1140 
1141 	return _cpsw_recv(priv, packetp);
1142 }
1143 
1144 static int cpsw_eth_free_pkt(struct udevice *dev, uchar *packet,
1145 				   int length)
1146 {
1147 	struct cpsw_priv *priv = dev_get_priv(dev);
1148 
1149 	return cpdma_submit(priv, &priv->rx_chan, packet, PKTSIZE);
1150 }
1151 
1152 static void cpsw_eth_stop(struct udevice *dev)
1153 {
1154 	struct cpsw_priv *priv = dev_get_priv(dev);
1155 
1156 	return _cpsw_halt(priv);
1157 }
1158 
1159 
1160 static int cpsw_eth_probe(struct udevice *dev)
1161 {
1162 	struct cpsw_priv *priv = dev_get_priv(dev);
1163 
1164 	priv->dev = dev;
1165 
1166 	return _cpsw_register(priv);
1167 }
1168 
1169 static const struct eth_ops cpsw_eth_ops = {
1170 	.start		= cpsw_eth_start,
1171 	.send		= cpsw_eth_send,
1172 	.recv		= cpsw_eth_recv,
1173 	.free_pkt	= cpsw_eth_free_pkt,
1174 	.stop		= cpsw_eth_stop,
1175 };
1176 
1177 static inline fdt_addr_t cpsw_get_addr_by_node(const void *fdt, int node)
1178 {
1179 	return fdtdec_get_addr_size_auto_noparent(fdt, node, "reg", 0, NULL,
1180 						  false);
1181 }
1182 
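/*
 * Program the control module GMII_SEL register for AM335x-class SoCs: each
 * slave has a 2-bit interface mode field, plus per-slave RGMII ID-mode and
 * RMII external-clock enable bits.
 */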
1183 static void cpsw_gmii_sel_am3352(struct cpsw_priv *priv,
1184 				 phy_interface_t phy_mode)
1185 {
1186 	u32 reg;
1187 	u32 mask;
1188 	u32 mode = 0;
1189 	bool rgmii_id = false;
1190 	int slave = priv->data.active_slave;
1191 
1192 	reg = readl(priv->data.gmii_sel);
1193 
1194 	switch (phy_mode) {
1195 	case PHY_INTERFACE_MODE_RMII:
1196 		mode = AM33XX_GMII_SEL_MODE_RMII;
1197 		break;
1198 
1199 	case PHY_INTERFACE_MODE_RGMII:
1200 		mode = AM33XX_GMII_SEL_MODE_RGMII;
1201 		break;
1202 	case PHY_INTERFACE_MODE_RGMII_ID:
1203 	case PHY_INTERFACE_MODE_RGMII_RXID:
1204 	case PHY_INTERFACE_MODE_RGMII_TXID:
1205 		mode = AM33XX_GMII_SEL_MODE_RGMII;
1206 		rgmii_id = true;
1207 		break;
1208 
1209 	case PHY_INTERFACE_MODE_MII:
1210 	default:
1211 		mode = AM33XX_GMII_SEL_MODE_MII;
1212 		break;
1213 	};
1214 
1215 	mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
1216 	mode <<= slave * 2;
1217 
1218 	if (priv->data.rmii_clock_external) {
1219 		if (slave == 0)
1220 			mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
1221 		else
1222 			mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
1223 	}
1224 
1225 	if (rgmii_id) {
1226 		if (slave == 0)
1227 			mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
1228 		else
1229 			mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
1230 	}
1231 
1232 	reg &= ~mask;
1233 	reg |= mode;
1234 
1235 	writel(reg, priv->data.gmii_sel);
1236 }
1237 
1238 static void cpsw_gmii_sel_dra7xx(struct cpsw_priv *priv,
1239 				 phy_interface_t phy_mode)
1240 {
1241 	u32 reg;
1242 	u32 mask;
1243 	u32 mode = 0;
1244 	int slave = priv->data.active_slave;
1245 
1246 	reg = readl(priv->data.gmii_sel);
1247 
1248 	switch (phy_mode) {
1249 	case PHY_INTERFACE_MODE_RMII:
1250 		mode = AM33XX_GMII_SEL_MODE_RMII;
1251 		break;
1252 
1253 	case PHY_INTERFACE_MODE_RGMII:
1254 	case PHY_INTERFACE_MODE_RGMII_ID:
1255 	case PHY_INTERFACE_MODE_RGMII_RXID:
1256 	case PHY_INTERFACE_MODE_RGMII_TXID:
1257 		mode = AM33XX_GMII_SEL_MODE_RGMII;
1258 		break;
1259 
1260 	case PHY_INTERFACE_MODE_MII:
1261 	default:
1262 		mode = AM33XX_GMII_SEL_MODE_MII;
1263 		break;
1264 	};
1265 
1266 	switch (slave) {
1267 	case 0:
1268 		mask = GMII_SEL_MODE_MASK;
1269 		break;
1270 	case 1:
1271 		mask = GMII_SEL_MODE_MASK << 4;
1272 		mode <<= 4;
1273 		break;
1274 	default:
1275 		dev_err(priv->dev, "invalid slave number...\n");
1276 		return;
1277 	}
1278 
1279 	if (priv->data.rmii_clock_external)
1280 		dev_err(priv->dev, "RMII External clock is not supported\n");
1281 
1282 	reg &= ~mask;
1283 	reg |= mode;
1284 
1285 	writel(reg, priv->data.gmii_sel);
1286 }
1287 
1288 static void cpsw_phy_sel(struct cpsw_priv *priv, const char *compat,
1289 			 phy_interface_t phy_mode)
1290 {
1291 	if (!strcmp(compat, "ti,am3352-cpsw-phy-sel"))
1292 		cpsw_gmii_sel_am3352(priv, phy_mode);
1293 	else if (!strcmp(compat, "ti,am43xx-cpsw-phy-sel"))
1294 		cpsw_gmii_sel_am3352(priv, phy_mode);
1295 	else if (!strcmp(compat, "ti,dra7xx-cpsw-phy-sel"))
1296 		cpsw_gmii_sel_dra7xx(priv, phy_mode);
1297 }
1298 
1299 static int cpsw_eth_ofdata_to_platdata(struct udevice *dev)
1300 {
1301 	struct eth_pdata *pdata = dev_get_platdata(dev);
1302 	struct cpsw_priv *priv = dev_get_priv(dev);
1303 	struct gpio_desc *mode_gpios;
1304 	const char *phy_mode;
1305 	const char *phy_sel_compat = NULL;
1306 	const void *fdt = gd->fdt_blob;
1307 	int node = dev_of_offset(dev);
1308 	int subnode;
1309 	int slave_index = 0;
1310 	int active_slave;
1311 	int num_mode_gpios;
1312 	int ret;
1313 
1314 	pdata->iobase = devfdt_get_addr(dev);
1315 	priv->data.version = CPSW_CTRL_VERSION_2;
1316 	priv->data.bd_ram_ofs = CPSW_BD_OFFSET;
1317 	priv->data.ale_reg_ofs = CPSW_ALE_OFFSET;
1318 	priv->data.cpdma_reg_ofs = CPSW_CPDMA_OFFSET;
1319 	priv->data.mdio_div = CPSW_MDIO_DIV;
1320 	priv->data.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET;
1321 
1322 	pdata->phy_interface = -1;
1323 
1324 	priv->data.cpsw_base = pdata->iobase;
1325 	priv->data.channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1);
1326 	if (priv->data.channels <= 0) {
1327 		printf("error: cpdma_channels not found in dt\n");
1328 		return -ENOENT;
1329 	}
1330 
1331 	priv->data.slaves = fdtdec_get_int(fdt, node, "slaves", -1);
1332 	if (priv->data.slaves <= 0) {
1333 		printf("error: slaves not found in dt\n");
1334 		return -ENOENT;
1335 	}
1336 	priv->data.slave_data = malloc(sizeof(struct cpsw_slave_data) *
1337 				       priv->data.slaves);
1338 
1339 	priv->data.ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1);
1340 	if (priv->data.ale_entries <= 0) {
1341 		printf("error: ale_entries not found in dt\n");
1342 		return -ENOENT;
1343 	}
1344 
1345 	priv->data.bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1);
1346 	if (priv->data.bd_ram_ofs <= 0) {
1347 		printf("error: bd_ram_size not found in dt\n");
1348 		return -ENOENT;
1349 	}
1350 
1351 	priv->data.mac_control = fdtdec_get_int(fdt, node, "mac_control", -1);
1352 	if (priv->data.mac_control <= 0) {
1353 		printf("error: ale_entries not found in dt\n");
1354 		return -ENOENT;
1355 	}
1356 
1357 	num_mode_gpios = gpio_get_list_count(dev, "mode-gpios");
1358 	if (num_mode_gpios > 0) {
1359 		mode_gpios = malloc(sizeof(struct gpio_desc) *
1360 				    num_mode_gpios);
1361 		gpio_request_list_by_name(dev, "mode-gpios", mode_gpios,
1362 					  num_mode_gpios, GPIOD_IS_OUT);
1363 		free(mode_gpios);
1364 	}
1365 
1366 	active_slave = fdtdec_get_int(fdt, node, "active_slave", 0);
1367 	priv->data.active_slave = active_slave;
1368 
1369 	fdt_for_each_subnode(subnode, fdt, node) {
1370 		int len;
1371 		const char *name;
1372 
1373 		name = fdt_get_name(fdt, subnode, &len);
1374 		if (!strncmp(name, "mdio", 4)) {
1375 			u32 mdio_base;
1376 
1377 			mdio_base = cpsw_get_addr_by_node(fdt, subnode);
1378 			if (mdio_base == FDT_ADDR_T_NONE) {
1379 				pr_err("Not able to get MDIO address space\n");
1380 				return -ENOENT;
1381 			}
1382 			priv->data.mdio_base = mdio_base;
1383 		}
1384 
1385 		if (!strncmp(name, "slave", 5)) {
1386 			u32 phy_id[2];
1387 
1388 			if (slave_index >= priv->data.slaves)
1389 				continue;
1390 			phy_mode = fdt_getprop(fdt, subnode, "phy-mode", NULL);
1391 			if (phy_mode)
1392 				priv->data.slave_data[slave_index].phy_if =
1393 					phy_get_interface_by_name(phy_mode);
1394 
1395 			priv->data.slave_data[slave_index].phy_of_handle =
1396 				fdtdec_lookup_phandle(fdt, subnode,
1397 						      "phy-handle");
1398 
1399 			if (priv->data.slave_data[slave_index].phy_of_handle >= 0) {
1400 				priv->data.slave_data[slave_index].phy_addr =
1401 						fdtdec_get_int(gd->fdt_blob,
1402 							       priv->data.slave_data[slave_index].phy_of_handle,
1403 							       "reg", -1);
1404 			} else {
1405 				fdtdec_get_int_array(fdt, subnode, "phy_id",
1406 						     phy_id, 2);
1407 				priv->data.slave_data[slave_index].phy_addr =
1408 						phy_id[1];
1409 			}
1410 			slave_index++;
1411 		}
1412 
1413 		if (!strncmp(name, "cpsw-phy-sel", 12)) {
1414 			priv->data.gmii_sel = cpsw_get_addr_by_node(fdt,
1415 								    subnode);
1416 
1417 			if (priv->data.gmii_sel == FDT_ADDR_T_NONE) {
1418 				pr_err("Not able to get gmii_sel reg address\n");
1419 				return -ENOENT;
1420 			}
1421 
1422 			if (fdt_get_property(fdt, subnode, "rmii-clock-ext",
1423 					     NULL))
1424 				priv->data.rmii_clock_external = true;
1425 
1426 			phy_sel_compat = fdt_getprop(fdt, subnode, "compatible",
1427 						     NULL);
1428 			if (!phy_sel_compat) {
1429 				pr_err("Not able to get gmii_sel compatible\n");
1430 				return -ENOENT;
1431 			}
1432 		}
1433 	}
1434 
1435 	priv->data.slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET;
1436 	priv->data.slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET;
1437 
1438 	if (priv->data.slaves == 2) {
1439 		priv->data.slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET;
1440 		priv->data.slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET;
1441 	}
1442 
1443 	ret = ti_cm_get_macid(dev, active_slave, pdata->enetaddr);
1444 	if (ret < 0) {
1445 		pr_err("cpsw read efuse mac failed\n");
1446 		return ret;
1447 	}
1448 
1449 	pdata->phy_interface = priv->data.slave_data[active_slave].phy_if;
1450 	if (pdata->phy_interface == -1) {
1451 		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
1452 		return -EINVAL;
1453 	}
1454 
1455 	/* Select phy interface in control module */
1456 	cpsw_phy_sel(priv, phy_sel_compat, pdata->phy_interface);
1457 
1458 	return 0;
1459 }
1460 
1461 
1462 static const struct udevice_id cpsw_eth_ids[] = {
1463 	{ .compatible = "ti,cpsw" },
1464 	{ .compatible = "ti,am335x-cpsw" },
1465 	{ }
1466 };
1467 
1468 U_BOOT_DRIVER(eth_cpsw) = {
1469 	.name	= "eth_cpsw",
1470 	.id	= UCLASS_ETH,
1471 	.of_match = cpsw_eth_ids,
1472 	.ofdata_to_platdata = cpsw_eth_ofdata_to_platdata,
1473 	.probe	= cpsw_eth_probe,
1474 	.ops	= &cpsw_eth_ops,
1475 	.priv_auto_alloc_size = sizeof(struct cpsw_priv),
1476 	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
1477 	.flags = DM_FLAG_ALLOC_PRIV_DMA,
1478 };
1479 #endif /* CONFIG_DM_ETH */
1480