/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
#define      MVNETA_RXQ_SHORT_POOL_ID_SHIFT	4
#define      MVNETA_RXQ_SHORT_POOL_ID_MASK	0x30
#define      MVNETA_RXQ_LONG_POOL_ID_SHIFT	6
#define      MVNETA_RXQ_LONG_POOL_ID_MASK	0xc0
#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
#define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
#define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
#define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
#define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
#define      MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
#define      MVNETA_PORT_POOL_BUFFER_SZ_MASK	0xfff8
#define MVNETA_PORT_RX_RESET                    0x1cc0
#define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
#define MVNETA_PHY_ADDR                         0x2000
#define      MVNETA_PHY_ADDR_MASK               0x1f
#define MVNETA_MBUS_RETRY                       0x2010
#define MVNETA_UNIT_INTR_CAUSE                  0x2080
#define MVNETA_UNIT_CONTROL                     0x20B0
#define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
#define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE                 0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
#define MVNETA_PORT_CONFIG                      0x2400
#define      MVNETA_UNI_PROMISC_MODE            BIT(0)
#define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
#define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
#define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
#define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
#define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
#define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND                0x2404
#define MVNETA_MAC_ADDR_LOW                      0x2414
#define MVNETA_MAC_ADDR_HIGH                     0x2418
#define MVNETA_SDMA_CONFIG                       0x241c
#define      MVNETA_SDMA_BRST_SIZE_16            4
#define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
#define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
#define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
#define      MVNETA_DESC_SWAP                    BIT(6)
#define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
#define MVNETA_PORT_STATUS                       0x2444
#define      MVNETA_TX_IN_PRGRS                  BIT(1)
#define      MVNETA_TX_FIFO_EMPTY                BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
#define MVNETA_SERDES_CFG			 0x24A0
#define      MVNETA_SGMII_SERDES_PROTO		 0x0cc7
#define      MVNETA_QSGMII_SERDES_PROTO		 0x0667
#define MVNETA_TYPE_PRIO                         0x24bc
#define      MVNETA_FORCE_UNI                    BIT(21)
#define MVNETA_TXQ_CMD_1                         0x24e4
#define MVNETA_TXQ_CMD                           0x2448
#define      MVNETA_TXQ_DISABLE_SHIFT            8
#define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT		 0x2484
#define MVNETA_OVERRUN_FRAME_COUNT		 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
#define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
#define MVNETA_ACC_MODE                          0x2500
#define MVNETA_BM_ADDRESS                        0x2504
#define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
#define      MVNETA_CPU_RXQ_ACCESS(rxq)		 BIT(rxq)
#define      MVNETA_CPU_TXQ_ACCESS(txq)		 BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is
 * not set, then a read of the register from this CPU will always
 * return 0 and a write won't do anything.
 */

#define MVNETA_INTR_NEW_CAUSE                    0x25a0
#define MVNETA_INTR_NEW_MASK                     0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see the old cause register?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
#define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
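
/* Worked example (illustrative sketch, not part of the driver): for a
 * port using two TX queues and four RX queues, the NEW mask enabling
 * those queues plus the miscellaneous summary bit would be built as
 * below, assuming a valid struct mvneta_port *pp and the mvreg_write()
 * helper defined further down in this file:
 *
 *	u32 mask = MVNETA_TX_INTR_MASK(2) |	(0x00000003)
 *		   MVNETA_RX_INTR_MASK(4) |	(0x00000f00)
 *		   MVNETA_MISCINTR_INTR_MASK;	(0x80000000)
 *	mvreg_write(pp, MVNETA_INTR_NEW_MASK, mask);
 */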

#define MVNETA_INTR_OLD_CAUSE                    0x25a8
#define MVNETA_INTR_OLD_MASK                     0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE                   0x25b0
#define MVNETA_INTR_MISC_MASK                    0x25b4

#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
#define      MVNETA_CAUSE_PTP                    BIT(4)

#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)

#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE                       0x25b8
#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff

#define MVNETA_RXQ_CMD                           0x2680
#define      MVNETA_RXQ_DISABLE_SHIFT            8
#define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0                       0x2c00
#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
#define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
#define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
#define MVNETA_GMAC_CTRL_2                       0x2c08
#define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
#define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
#define      MVNETA_GMAC2_PORT_RESET             BIT(6)
#define MVNETA_GMAC_STATUS                       0x2c10
#define      MVNETA_GMAC_LINK_UP                 BIT(0)
#define      MVNETA_GMAC_SPEED_1000              BIT(1)
#define      MVNETA_GMAC_SPEED_100               BIT(2)
#define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
#define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
#define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
#define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
#define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
#define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
#define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
#define MVNETA_MIB_COUNTERS_BASE                 0x3000
#define      MVNETA_MIB_LATE_COLLISION           0x7c
#define MVNETA_DA_FILT_SPEC_MCAST                0x3400
#define MVNETA_DA_FILT_OTH_MCAST                 0x3500
#define MVNETA_DA_FILT_UCAST_BASE                0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
#define      MVNETA_TXQ_DEC_SENT_SHIFT           16
#define      MVNETA_TXQ_DEC_SENT_MASK            0xff
#define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
#define      MVNETA_TXQ_SENT_DESC_SHIFT          16
#define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
#define MVNETA_PORT_TX_RESET                     0x3cf0
#define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
#define MVNETA_TX_MTU                            0x3e0c
#define MVNETA_TX_TOKEN_SIZE                     0x3e14
#define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
#define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
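
/* Illustrative sketch (not part of the driver): advancing a software
 * index around a descriptor ring with MVNETA_QUEUE_NEXT_DESC. For a
 * ring of size 4 (last_desc == 3) the index walks 0, 1, 2, 3, 0, ...
 * so it never overruns the descriptor array. Assuming rxq points to an
 * initialized struct mvneta_rx_queue:
 *
 *	int idx = rxq->next_desc_to_proc;
 *	idx = MVNETA_QUEUE_NEXT_DESC(rxq, idx);	(wraps 3 -> 0)
 */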

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled with zeroes automatically on
 * the RX side. Since these two bytes sit in front of the Ethernet
 * header, they cause the IP header to be aligned on a 4-byte boundary
 * automatically: the hardware skips them on its own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN             4

#define MVNETA_TX_CSUM_DEF_SIZE		1600
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT1		1
#define MVNETA_ACC_MODE_EXT2		2

#define MVNETA_MAX_DECODE_WIN		6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* The RSS lookup table actually has 256 entries, but we do not use
 * them yet.
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100

#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

/* Number of bytes to be taken into account by HW when putting incoming data
 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION		64

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			     \
	      cache_line_size())
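
/* Worked example (illustrative): for a standard 1500-byte MTU and a
 * 64-byte cache line,
 *
 *	MVNETA_RX_PKT_SIZE(1500)
 *		= ALIGN(1500 + 2 + 4 + 14 + 4, 64)
 *		= ALIGN(1524, 64) = 1536
 *
 * i.e. the buffer reserves room for the Marvell header, a VLAN tag,
 * the Ethernet header and the FCS, rounded up to a cache line.
 */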

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
};

struct mvneta_pcpu_stats {
	struct	u64_stats_sync syncp;
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port	*pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct	napi;

	/* Cause of the previous interrupt */
	u32			cause_rx_tx;
};

struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu	*ports;
	struct mvneta_pcpu_stats __percpu	*stats;

	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct hlist_node node_online;
	struct hlist_node node_dead;
	int rxq_def;
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
	bool is_stopped;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Core clock */
	struct clk *clk;
	/* AXI clock */
	struct clk *clk_bus;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	struct mii_bus *mii_bus;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
	unsigned int tx_csum_limit;
	unsigned int use_inband_status:1;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];

	/* Flags for special SoC configurations */
	bool neta_armada3700;
	u16 rx_offset_correction;
	const struct mbus_dram_target_info *dram_target_info;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_BM_POOL_SHIFT	13
#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserverd1;	/* csum_l4 (for future use)		*/
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet		*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u16  data_size;		/* Size of received packet in bytes	*/

	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserverd1;	/* csum_l4 (for future use)		*/
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes	*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u32  status;		/* Info about received packet		*/

	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/

	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#endif
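
/* Illustrative sketch (not part of the driver): composing the command
 * word of a single-buffer TCP/IPv4 frame that requests full checksum
 * offload. l3_offset and ip_hdr_len are hypothetical local values
 * taken from the packet headers:
 *
 *	u32 cmd = MVNETA_TXD_FLZ_DESC;		(first + last + zero pad)
 *	cmd |= l3_offset << MVNETA_TX_L3_OFF_SHIFT;
 *	cmd |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
 *	cmd |= MVNETA_TXD_IP_CSUM;		(IPv4 header checksum)
 *	cmd |= MVNETA_TX_L4_CSUM_FULL;		(full L4 checksum)
 *	tx_desc->command = cmd;
 *
 * For IPv6 one would set MVNETA_TX_L3_IP6 instead of MVNETA_TXD_IP_CSUM,
 * and MVNETA_TX_L4_UDP for UDP instead of TCP.
 */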

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;
	int pending;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs */
	cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX buffer */
	void  **buf_virt_addr;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

static enum cpuhp_state online_hpstate;
/* The hardware supports eight (8) rx queues and eight (8) tx queues;
 * allocate all of them, even though the default configuration only
 * steers traffic to the rxq_def queue.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

/* HW BM requires that each port be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}
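
/* Illustrative sketch (not part of the driver): most register updates
 * in this file follow a read-modify-write pattern built on these two
 * helpers, e.g. setting the port-enable bit, assuming a valid
 * struct mvneta_port *pp:
 *
 *	u32 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 *	val |= MVNETA_GMAC0_PORT_ENABLE;
 *	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 */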

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;

	stats->tx_dropped	= dev->stats.tx_dropped;
}
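
/* Illustrative sketch (not part of the driver): the reader loop above
 * pairs with writer-side updates in the hot path, which bump the
 * per-CPU counters under the same syncp, e.g.:
 *
 *	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->rx_packets += rcvd_pkts;
 *	stats->rx_bytes   += rcvd_bytes;
 *	u64_stats_update_end(&stats->syncp);
 *
 * rcvd_pkts and rcvd_bytes are hypothetical local totals.
 */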

/* Rx descriptors helper methods */

/* Check whether the RX descriptor with this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is
 * currently received through a single RX descriptor, so an RX
 * descriptor that does not have both its first and last bits set is
 * an error.
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}
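
/* Worked example (illustrative): with rx_done = 300 and rx_filled = 300
 * the function above issues two writes, (255 | 255 << 16) followed by
 * (45 | 45 << 16), because each field of the status-update register is
 * only eight bits wide.
 */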

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
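
/* Worked example (illustrative): the field holds the size in units of
 * two bytes, less the Marvell header, so for max_rx_size = 1518 the
 * value written is (1518 - 2) / 2 = 758.
 */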


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	pend_desc += txq->pending;

	/* Only 255 Tx descriptors can be added at once */
	do {
		val = min(pend_desc, 255);
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		pend_desc -= val;
	} while (pend_desc > 0);
	txq->pending = 0;
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}

/* Configure an MBUS window to enable access to the BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}

static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
	u32 wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}
	return 0;
}

/* Assign and initialize pools for port. In case of failure, the
 * buffer manager will remain disabled for the current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 long_pool_id, short_pool_id;

	if (!pp->neta_armada3700) {
		int ret;

		ret = mvneta_bm_port_mbus_init(pp);
		if (ret)
			return ret;
	}

	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}

	return 0;
}

/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{
	struct mvneta_bm_pool *bm_pool = pp->pool_long;
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	int num;

	/* Release all buffers from long pool */
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
	if (hwbm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
		goto bm_mtu_err;
	}

	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));

	/* Fill entire long pool */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
	if (num != hwbm_pool->size) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     bm_pool->id, num, hwbm_pool->size);
		goto bm_mtu_err;
	}
	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

	return;

bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		if (txq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	q_map = 0;
	/* Enable all initialized RXQs. */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & MVNETA_TXQ_ENABLE_MASK);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	pp->link = 0;
	pp->duplex = -1;
	pp->speed = 0;

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
{
	u32 val;

	if (enable) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
			 MVNETA_GMAC_FORCE_LINK_DOWN |
			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
		       MVNETA_GMAC_AN_SPEED_EN |
		       MVNETA_GMAC_AN_DUPLEX_EN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	} else {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN);
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	}
}

static void mvneta_percpu_unmask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue.
	 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;

		if (!pp->neta_armada3700) {
			for (rxq = 0; rxq < rxq_number; rxq++)
				if ((rxq % max_cpu) == cpu)
					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

			for (txq = 0; txq < txq_number; txq++)
				if ((txq % max_cpu) == cpu)
					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

			/* With only one TX queue we configure a special case
			 * which allows getting all the irqs on a single
			 * CPU
			 */
			if (txq_number == 1)
				txq_map = (cpu == pp->rxq_def) ?
					MVNETA_CPU_TXQ_ACCESS(1) : 0;

		} else {
			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
		}

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update the portCfg register according to all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_autoneg(pp, pp->use_inband_status);
	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}
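
/* Worked example (illustrative): with two present CPUs and eight RX/TX
 * queues, the modulo assignment above gives CPU0 queues 0,2,4,6 and
 * CPU1 queues 1,3,5,7, i.e.
 *
 *	MVNETA_CPU_MAP(0) = 0x55 (rxq_map) | 0x5500 (txq_map)
 *	MVNETA_CPU_MAP(1) = 0xaa (rxq_map) | 0xaa00 (txq_map)
 *
 * leaving aside the single-TX-queue and Armada 3700 special cases.
 */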

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* The TX token size and all TXQ token sizes must be larger
	 * than the MTU.
	 */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
1553 
1554 /* Set mac address */
1555 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1556 				int queue)
1557 {
1558 	unsigned int mac_h;
1559 	unsigned int mac_l;
1560 
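	/* The station address is split across two registers: the four high
	 * bytes go into MVNETA_MAC_ADDR_HIGH, the two low bytes into
	 * MVNETA_MAC_ADDR_LOW.
	 */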
1561 	if (queue != -1) {
1562 		mac_l = (addr[4] << 8) | (addr[5]);
1563 		mac_h = (addr[0] << 24) | (addr[1] << 16) |
1564 			(addr[2] << 8) | (addr[3] << 0);
1565 
1566 		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1567 		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1568 	}
1569 
1570 	/* Accept frames of this address */
1571 	mvneta_set_ucast_addr(pp, addr[5], queue);
1572 }
1573 
1574 /* Set the number of packets that will be received before an RX
1575  * interrupt is generated by the HW.
1576  */
1577 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1578 				    struct mvneta_rx_queue *rxq, u32 value)
1579 {
1580 	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1581 		    value | MVNETA_RXQ_NON_OCCUPIED(0));
1582 	rxq->pkts_coal = value;
1583 }
1584 
1585 /* Set the time delay in usec before an RX interrupt is generated by
1586  * the HW.
1587  */
1588 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1589 				    struct mvneta_rx_queue *rxq, u32 value)
1590 {
1591 	u32 val;
1592 	unsigned long clk_rate;
1593 
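	/* The register is programmed in port-clock cycles, so convert the
	 * requested delay in usec using the core clock rate.
	 */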
1594 	clk_rate = clk_get_rate(pp->clk);
1595 	val = (clk_rate / 1000000) * value;
1596 
1597 	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1598 	rxq->time_coal = value;
1599 }
1600 
1601 /* Set threshold for TX_DONE pkts coalescing */
1602 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1603 					 struct mvneta_tx_queue *txq, u32 value)
1604 {
1605 	u32 val;
1606 
1607 	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1608 
1609 	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1610 	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1611 
1612 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1613 
1614 	txq->done_pkts_coal = value;
1615 }
1616 
1617 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1618 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1619 				u32 phys_addr, void *virt_addr,
1620 				struct mvneta_rx_queue *rxq)
1621 {
1622 	int i;
1623 
1624 	rx_desc->buf_phys_addr = phys_addr;
1625 	i = rx_desc - rxq->descs;
1626 	rxq->buf_virt_addr[i] = virt_addr;
1627 }
1628 
1629 /* Decrement sent descriptors counter */
1630 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1631 				     struct mvneta_tx_queue *txq,
1632 				     int sent_desc)
1633 {
1634 	u32 val;
1635 
1636 	/* Only 255 TX descriptors can be updated at once */
1637 	while (sent_desc > 0xff) {
1638 		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1639 		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1640 		sent_desc = sent_desc - 0xff;
1641 	}
1642 
1643 	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1644 	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1645 }
1646 
1647 /* Get number of TX descriptors already sent by HW */
1648 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1649 					struct mvneta_tx_queue *txq)
1650 {
1651 	u32 val;
1652 	int sent_desc;
1653 
1654 	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1655 	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1656 		MVNETA_TXQ_SENT_DESC_SHIFT;
1657 
1658 	return sent_desc;
1659 }
1660 
1661 /* Get number of sent descriptors and decrement counter.
1662  *  The number of sent descriptors is returned.
1663  */
1664 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1665 				     struct mvneta_tx_queue *txq)
1666 {
1667 	int sent_desc;
1668 
1669 	/* Get number of sent descriptors */
1670 	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1671 
1672 	/* Decrement sent descriptors counter */
1673 	if (sent_desc)
1674 		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1675 
1676 	return sent_desc;
1677 }
1678 
1679 /* Set TXQ descriptors fields relevant for CSUM calculation */
1680 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1681 				int ip_hdr_len, int l4_proto)
1682 {
1683 	u32 command;
1684 
1685 	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1686 	 * G_L4_chk, L4_type; required only for checksum
1687 	 * calculation
1688 	 */
1689 	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
1690 	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1691 
1692 	if (l3_proto == htons(ETH_P_IP))
1693 		command |= MVNETA_TXD_IP_CSUM;
1694 	else
1695 		command |= MVNETA_TX_L3_IP6;
1696 
1697 	if (l4_proto == IPPROTO_TCP)
1698 		command |=  MVNETA_TX_L4_CSUM_FULL;
1699 	else if (l4_proto == IPPROTO_UDP)
1700 		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1701 	else
1702 		command |= MVNETA_TX_L4_CSUM_NOT;
1703 
1704 	return command;
1705 }
1706 
1707 
1708 /* Display more error info */
1709 static void mvneta_rx_error(struct mvneta_port *pp,
1710 			    struct mvneta_rx_desc *rx_desc)
1711 {
1712 	u32 status = rx_desc->status;
1713 
1714 	if (!mvneta_rxq_desc_is_first_last(status)) {
1715 		netdev_err(pp->dev,
1716 			   "bad rx status %08x (buffer oversize), size=%d\n",
1717 			   status, rx_desc->data_size);
1718 		return;
1719 	}
1720 
1721 	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1722 	case MVNETA_RXD_ERR_CRC:
1723 		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1724 			   status, rx_desc->data_size);
1725 		break;
1726 	case MVNETA_RXD_ERR_OVERRUN:
1727 		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1728 			   status, rx_desc->data_size);
1729 		break;
1730 	case MVNETA_RXD_ERR_LEN:
1731 		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1732 			   status, rx_desc->data_size);
1733 		break;
1734 	case MVNETA_RXD_ERR_RESOURCE:
1735 		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1736 			   status, rx_desc->data_size);
1737 		break;
1738 	}
1739 }
1740 
1741 /* Handle RX checksum offload based on the descriptor's status */
1742 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1743 			   struct sk_buff *skb)
1744 {
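	/* Only frames the HW fully validated (IPv4 with a good L4 checksum)
	 * may skip software verification; everything else falls back to
	 * CHECKSUM_NONE.
	 */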
1745 	if ((status & MVNETA_RXD_L3_IP4) &&
1746 	    (status & MVNETA_RXD_L4_CSUM_OK)) {
1747 		skb->csum = 0;
1748 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1749 		return;
1750 	}
1751 
1752 	skb->ip_summed = CHECKSUM_NONE;
1753 }
1754 
1755 /* Return tx queue pointer (find last set bit) according to <cause> returned
1756  * from tx_done reg. <cause> must not be null. The return value is always a
1757  * valid queue, matching the highest set bit in <cause>.
1758  */
1759 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1760 						     u32 cause)
1761 {
1762 	int queue = fls(cause) - 1;
1763 
1764 	return &pp->txqs[queue];
1765 }
1766 
1767 /* Free tx queue skbuffs */
1768 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1769 				 struct mvneta_tx_queue *txq, int num,
1770 				 struct netdev_queue *nq)
1771 {
1772 	unsigned int bytes_compl = 0, pkts_compl = 0;
1773 	int i;
1774 
1775 	for (i = 0; i < num; i++) {
1776 		struct mvneta_tx_desc *tx_desc = txq->descs +
1777 			txq->txq_get_index;
1778 		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1779 
1780 		if (skb) {
1781 			bytes_compl += skb->len;
1782 			pkts_compl++;
1783 		}
1784 
1785 		mvneta_txq_inc_get(txq);
1786 
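		/* TSO headers live in the queue's coherent tso_hdrs area and
		 * were never streaming-mapped, so they must not be unmapped.
		 */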
1787 		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1788 			dma_unmap_single(pp->dev->dev.parent,
1789 					 tx_desc->buf_phys_addr,
1790 					 tx_desc->data_size, DMA_TO_DEVICE);
1791 		if (!skb)
1792 			continue;
1793 		dev_kfree_skb_any(skb);
1794 	}
1795 
1796 	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1797 }
1798 
1799 /* Handle end of transmission */
1800 static void mvneta_txq_done(struct mvneta_port *pp,
1801 			   struct mvneta_tx_queue *txq)
1802 {
1803 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1804 	int tx_done;
1805 
1806 	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1807 	if (!tx_done)
1808 		return;
1809 
1810 	mvneta_txq_bufs_free(pp, txq, tx_done, nq);
1811 
1812 	txq->count -= tx_done;
1813 
1814 	if (netif_tx_queue_stopped(nq)) {
1815 		if (txq->count <= txq->tx_wake_threshold)
1816 			netif_tx_wake_queue(nq);
1817 	}
1818 }
1819 
1820 void *mvneta_frag_alloc(unsigned int frag_size)
1821 {
1822 	if (likely(frag_size <= PAGE_SIZE))
1823 		return netdev_alloc_frag(frag_size);
1824 	else
1825 		return kmalloc(frag_size, GFP_ATOMIC);
1826 }
1827 EXPORT_SYMBOL_GPL(mvneta_frag_alloc);
1828 
1829 void mvneta_frag_free(unsigned int frag_size, void *data)
1830 {
1831 	if (likely(frag_size <= PAGE_SIZE))
1832 		skb_free_frag(data);
1833 	else
1834 		kfree(data);
1835 }
1836 EXPORT_SYMBOL_GPL(mvneta_frag_free);
1837 
1838 /* Refill processing for SW buffer management */
1839 static int mvneta_rx_refill(struct mvneta_port *pp,
1840 			    struct mvneta_rx_desc *rx_desc,
1841 			    struct mvneta_rx_queue *rxq)
1842 
1843 {
1844 	dma_addr_t phys_addr;
1845 	void *data;
1846 
1847 	data = mvneta_frag_alloc(pp->frag_size);
1848 	if (!data)
1849 		return -ENOMEM;
1850 
1851 	phys_addr = dma_map_single(pp->dev->dev.parent, data,
1852 				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
1853 				   DMA_FROM_DEVICE);
1854 	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1855 		mvneta_frag_free(pp->frag_size, data);
1856 		return -ENOMEM;
1857 	}
1858 
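	/* The offset correction is added here and subtracted again on
	 * receive; it shifts where the HW starts writing packet data
	 * relative to the mapped buffer (see mvneta_rxq_offset_set()).
	 */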
1859 	phys_addr += pp->rx_offset_correction;
1860 	mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
1861 	return 0;
1862 }
1863 
1864 /* Handle tx checksum */
1865 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1866 {
1867 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1868 		int ip_hdr_len = 0;
1869 		__be16 l3_proto = vlan_get_protocol(skb);
1870 		u8 l4_proto;
1871 
1872 		if (l3_proto == htons(ETH_P_IP)) {
1873 			struct iphdr *ip4h = ip_hdr(skb);
1874 
1875 			/* Calculate IPv4 checksum and L4 checksum */
1876 			ip_hdr_len = ip4h->ihl;
1877 			l4_proto = ip4h->protocol;
1878 		} else if (l3_proto == htons(ETH_P_IPV6)) {
1879 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
1880 
1881 			/* Read l4_protocol from one of IPv6 extra headers */
1882 			if (skb_network_header_len(skb) > 0)
1883 				ip_hdr_len = (skb_network_header_len(skb) >> 2);
1884 			l4_proto = ip6h->nexthdr;
1885 		} else
1886 			return MVNETA_TX_L4_CSUM_NOT;
1887 
1888 		return mvneta_txq_desc_csum(skb_network_offset(skb),
1889 					    l3_proto, ip_hdr_len, l4_proto);
1890 	}
1891 
1892 	return MVNETA_TX_L4_CSUM_NOT;
1893 }
1894 
1895 /* Drop packets received by the RXQ and free buffers */
1896 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1897 				 struct mvneta_rx_queue *rxq)
1898 {
1899 	int rx_done, i;
1900 
1901 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1902 	if (rx_done)
1903 		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1904 
1905 	if (pp->bm_priv) {
1906 		for (i = 0; i < rx_done; i++) {
1907 			struct mvneta_rx_desc *rx_desc =
1908 						  mvneta_rxq_next_desc_get(rxq);
1909 			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
1910 			struct mvneta_bm_pool *bm_pool;
1911 
1912 			bm_pool = &pp->bm_priv->bm_pools[pool_id];
1913 			/* Return dropped buffer to the pool */
1914 			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
1915 					      rx_desc->buf_phys_addr);
1916 		}
1917 		return;
1918 	}
1919 
1920 	for (i = 0; i < rxq->size; i++) {
1921 		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1922 		void *data = rxq->buf_virt_addr[i];
1923 
1924 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1925 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1926 		mvneta_frag_free(pp->frag_size, data);
1927 	}
1928 }
1929 
1930 /* Main rx processing when using software buffer management */
1931 static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
1932 			  struct mvneta_rx_queue *rxq)
1933 {
1934 	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
1935 	struct net_device *dev = pp->dev;
1936 	int rx_done;
1937 	u32 rcvd_pkts = 0;
1938 	u32 rcvd_bytes = 0;
1939 
1940 	/* Get number of received packets */
1941 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1942 
1943 	if (rx_todo > rx_done)
1944 		rx_todo = rx_done;
1945 
1946 	rx_done = 0;
1947 
1948 	/* Fairness NAPI loop */
1949 	while (rx_done < rx_todo) {
1950 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1951 		struct sk_buff *skb;
1952 		unsigned char *data;
1953 		dma_addr_t phys_addr;
1954 		u32 rx_status, frag_size;
1955 		int rx_bytes, err, index;
1956 
1957 		rx_done++;
1958 		rx_status = rx_desc->status;
1959 		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1960 		index = rx_desc - rxq->descs;
1961 		data = rxq->buf_virt_addr[index];
1962 		phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
1963 
1964 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1965 		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1966 			mvneta_rx_error(pp, rx_desc);
1967 err_drop_frame:
1968 			dev->stats.rx_errors++;
1969 			/* leave the descriptor untouched */
1970 			continue;
1971 		}
1972 
1973 		if (rx_bytes <= rx_copybreak) {
1974 			/* better copy a small frame and not unmap the DMA region */
1975 			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
1976 			if (unlikely(!skb))
1977 				goto err_drop_frame;
1978 
1979 			dma_sync_single_range_for_cpu(dev->dev.parent,
1980 						      phys_addr,
1981 						      MVNETA_MH_SIZE + NET_SKB_PAD,
1982 						      rx_bytes,
1983 						      DMA_FROM_DEVICE);
1984 			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
1985 				     rx_bytes);
1986 
1987 			skb->protocol = eth_type_trans(skb, dev);
1988 			mvneta_rx_csum(pp, rx_status, skb);
1989 			napi_gro_receive(&port->napi, skb);
1990 
1991 			rcvd_pkts++;
1992 			rcvd_bytes += rx_bytes;
1993 
1994 			/* leave the descriptor and buffer untouched */
1995 			continue;
1996 		}
1997 
1998 		/* Refill processing */
1999 		err = mvneta_rx_refill(pp, rx_desc, rxq);
2000 		if (err) {
2001 			netdev_err(dev, "Linux processing - Can't refill\n");
2002 			rxq->missed++;
2003 			goto err_drop_frame;
2004 		}
2005 
2006 		frag_size = pp->frag_size;
2007 
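		/* A frag_size of 0 tells build_skb() the buffer came from
		 * kmalloc() rather than a page-fragment allocator, matching
		 * mvneta_frag_alloc() above.
		 */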
2008 		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2009 
2010 		/* After refill, the old buffer has to be unmapped regardless
2011 		 * of whether the skb was successfully built.
2012 		 */
2013 		dma_unmap_single(dev->dev.parent, phys_addr,
2014 				 MVNETA_RX_BUF_SIZE(pp->pkt_size),
2015 				 DMA_FROM_DEVICE);
2016 
2017 		if (!skb)
2018 			goto err_drop_frame;
2019 
2020 		rcvd_pkts++;
2021 		rcvd_bytes += rx_bytes;
2022 
2023 		/* Linux processing */
2024 		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2025 		skb_put(skb, rx_bytes);
2026 
2027 		skb->protocol = eth_type_trans(skb, dev);
2028 
2029 		mvneta_rx_csum(pp, rx_status, skb);
2030 
2031 		napi_gro_receive(&port->napi, skb);
2032 	}
2033 
2034 	if (rcvd_pkts) {
2035 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2036 
2037 		u64_stats_update_begin(&stats->syncp);
2038 		stats->rx_packets += rcvd_pkts;
2039 		stats->rx_bytes   += rcvd_bytes;
2040 		u64_stats_update_end(&stats->syncp);
2041 	}
2042 
2043 	/* Update rxq management counters */
2044 	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2045 
2046 	return rx_done;
2047 }
2048 
2049 /* Main rx processing when using hardware buffer management */
2050 static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
2051 			  struct mvneta_rx_queue *rxq)
2052 {
2053 	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2054 	struct net_device *dev = pp->dev;
2055 	int rx_done;
2056 	u32 rcvd_pkts = 0;
2057 	u32 rcvd_bytes = 0;
2058 
2059 	/* Get number of received packets */
2060 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2061 
2062 	if (rx_todo > rx_done)
2063 		rx_todo = rx_done;
2064 
2065 	rx_done = 0;
2066 
2067 	/* Fairness NAPI loop */
2068 	while (rx_done < rx_todo) {
2069 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2070 		struct mvneta_bm_pool *bm_pool = NULL;
2071 		struct sk_buff *skb;
2072 		unsigned char *data;
2073 		dma_addr_t phys_addr;
2074 		u32 rx_status, frag_size;
2075 		int rx_bytes, err;
2076 		u8 pool_id;
2077 
2078 		rx_done++;
2079 		rx_status = rx_desc->status;
2080 		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2081 		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2082 		phys_addr = rx_desc->buf_phys_addr;
2083 		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2084 		bm_pool = &pp->bm_priv->bm_pools[pool_id];
2085 
2086 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2087 		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2088 err_drop_frame_ret_pool:
2089 			/* Return the buffer to the pool */
2090 			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2091 					      rx_desc->buf_phys_addr);
2092 err_drop_frame:
2093 			dev->stats.rx_errors++;
2094 			mvneta_rx_error(pp, rx_desc);
2095 			/* leave the descriptor untouched */
2096 			continue;
2097 		}
2098 
2099 		if (rx_bytes <= rx_copybreak) {
2100 			/* better copy a small frame and not unmap the DMA region */
2101 			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2102 			if (unlikely(!skb))
2103 				goto err_drop_frame_ret_pool;
2104 
2105 			dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2106 			                              rx_desc->buf_phys_addr,
2107 			                              MVNETA_MH_SIZE + NET_SKB_PAD,
2108 			                              rx_bytes,
2109 			                              DMA_FROM_DEVICE);
2110 			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
2111 				     rx_bytes);
2112 
2113 			skb->protocol = eth_type_trans(skb, dev);
2114 			mvneta_rx_csum(pp, rx_status, skb);
2115 			napi_gro_receive(&port->napi, skb);
2116 
2117 			rcvd_pkts++;
2118 			rcvd_bytes += rx_bytes;
2119 
2120 			/* Return the buffer to the pool */
2121 			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2122 					      rx_desc->buf_phys_addr);
2123 
2124 			/* leave the descriptor and buffer untouched */
2125 			continue;
2126 		}
2127 
2128 		/* Refill processing */
2129 		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2130 		if (err) {
2131 			netdev_err(dev, "Linux processing - Can't refill\n");
2132 			rxq->missed++;
2133 			goto err_drop_frame_ret_pool;
2134 		}
2135 
2136 		frag_size = bm_pool->hwbm_pool.frag_size;
2137 
2138 		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2139 
2140 		/* After refill, the old buffer has to be unmapped regardless
2141 		 * of whether the skb was successfully built.
2142 		 */
2143 		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2144 				 bm_pool->buf_size, DMA_FROM_DEVICE);
2145 		if (!skb)
2146 			goto err_drop_frame;
2147 
2148 		rcvd_pkts++;
2149 		rcvd_bytes += rx_bytes;
2150 
2151 		/* Linux processing */
2152 		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2153 		skb_put(skb, rx_bytes);
2154 
2155 		skb->protocol = eth_type_trans(skb, dev);
2156 
2157 		mvneta_rx_csum(pp, rx_status, skb);
2158 
2159 		napi_gro_receive(&port->napi, skb);
2160 	}
2161 
2162 	if (rcvd_pkts) {
2163 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2164 
2165 		u64_stats_update_begin(&stats->syncp);
2166 		stats->rx_packets += rcvd_pkts;
2167 		stats->rx_bytes   += rcvd_bytes;
2168 		u64_stats_update_end(&stats->syncp);
2169 	}
2170 
2171 	/* Update rxq management counters */
2172 	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2173 
2174 	return rx_done;
2175 }
2176 
2177 static inline void
2178 mvneta_tso_put_hdr(struct sk_buff *skb,
2179 		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2180 {
2181 	struct mvneta_tx_desc *tx_desc;
2182 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2183 
2184 	txq->tx_skb[txq->txq_put_index] = NULL;
2185 	tx_desc = mvneta_txq_next_desc_get(txq);
2186 	tx_desc->data_size = hdr_len;
2187 	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2188 	tx_desc->command |= MVNETA_TXD_F_DESC;
2189 	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
2190 				 txq->txq_put_index * TSO_HEADER_SIZE;
2191 	mvneta_txq_inc_put(txq);
2192 }
2193 
2194 static inline int
2195 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2196 		    struct sk_buff *skb, char *data, int size,
2197 		    bool last_tcp, bool is_last)
2198 {
2199 	struct mvneta_tx_desc *tx_desc;
2200 
2201 	tx_desc = mvneta_txq_next_desc_get(txq);
2202 	tx_desc->data_size = size;
2203 	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2204 						size, DMA_TO_DEVICE);
2205 	if (unlikely(dma_mapping_error(dev->dev.parent,
2206 		     tx_desc->buf_phys_addr))) {
2207 		mvneta_txq_desc_put(txq);
2208 		return -ENOMEM;
2209 	}
2210 
2211 	tx_desc->command = 0;
2212 	txq->tx_skb[txq->txq_put_index] = NULL;
2213 
2214 	if (last_tcp) {
2215 		/* last descriptor in the TCP packet */
2216 		tx_desc->command = MVNETA_TXD_L_DESC;
2217 
2218 		/* last descriptor in SKB */
2219 		if (is_last)
2220 			txq->tx_skb[txq->txq_put_index] = skb;
2221 	}
2222 	mvneta_txq_inc_put(txq);
2223 	return 0;
2224 }
2225 
2226 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2227 			 struct mvneta_tx_queue *txq)
2228 {
2229 	int total_len, data_left;
2230 	int desc_count = 0;
2231 	struct mvneta_port *pp = netdev_priv(dev);
2232 	struct tso_t tso;
2233 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2234 	int i;
2235 
2236 	/* Count needed descriptors */
2237 	if ((txq->count + tso_count_descs(skb)) >= txq->size)
2238 		return 0;
2239 
2240 	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2241 		pr_info("*** Is this even possible???!?!?\n");
2242 		return 0;
2243 	}
2244 
2245 	/* Initialize the TSO handler, and prepare the first payload */
2246 	tso_start(skb, &tso);
2247 
2248 	total_len = skb->len - hdr_len;
2249 	while (total_len > 0) {
2250 		char *hdr;
2251 
2252 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2253 		total_len -= data_left;
2254 		desc_count++;
2255 
2256 		/* prepare packet headers: MAC + IP + TCP */
2257 		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
2258 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
2259 
2260 		mvneta_tso_put_hdr(skb, pp, txq);
2261 
2262 		while (data_left > 0) {
2263 			int size;
2264 			desc_count++;
2265 
2266 			size = min_t(int, tso.size, data_left);
2267 
2268 			if (mvneta_tso_put_data(dev, txq, skb,
2269 						 tso.data, size,
2270 						 size == data_left,
2271 						 total_len == 0))
2272 				goto err_release;
2273 			data_left -= size;
2274 
2275 			tso_build_data(skb, &tso, size);
2276 		}
2277 	}
2278 
2279 	return desc_count;
2280 
2281 err_release:
2282 	/* Release all used data descriptors; header descriptors must not
2283 	 * be DMA-unmapped.
2284 	 */
2285 	for (i = desc_count - 1; i >= 0; i--) {
2286 		struct mvneta_tx_desc *tx_desc = txq->descs + i;
2287 		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
2288 			dma_unmap_single(pp->dev->dev.parent,
2289 					 tx_desc->buf_phys_addr,
2290 					 tx_desc->data_size,
2291 					 DMA_TO_DEVICE);
2292 		mvneta_txq_desc_put(txq);
2293 	}
2294 	return 0;
2295 }
2296 
2297 /* Handle tx fragmentation processing */
2298 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2299 				  struct mvneta_tx_queue *txq)
2300 {
2301 	struct mvneta_tx_desc *tx_desc;
2302 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
2303 
2304 	for (i = 0; i < nr_frags; i++) {
2305 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2306 		void *addr = page_address(frag->page.p) + frag->page_offset;
2307 
2308 		tx_desc = mvneta_txq_next_desc_get(txq);
2309 		tx_desc->data_size = frag->size;
2310 
2311 		tx_desc->buf_phys_addr =
2312 			dma_map_single(pp->dev->dev.parent, addr,
2313 				       tx_desc->data_size, DMA_TO_DEVICE);
2314 
2315 		if (dma_mapping_error(pp->dev->dev.parent,
2316 				      tx_desc->buf_phys_addr)) {
2317 			mvneta_txq_desc_put(txq);
2318 			goto error;
2319 		}
2320 
2321 		if (i == nr_frags - 1) {
2322 			/* Last descriptor */
2323 			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2324 			txq->tx_skb[txq->txq_put_index] = skb;
2325 		} else {
2326 			/* Descriptor in the middle: Not First, Not Last */
2327 			tx_desc->command = 0;
2328 			txq->tx_skb[txq->txq_put_index] = NULL;
2329 		}
2330 		mvneta_txq_inc_put(txq);
2331 	}
2332 
2333 	return 0;
2334 
2335 error:
2336 	/* Release all descriptors that were used to map fragments of
2337 	 * this packet, as well as the corresponding DMA mappings
2338 	 */
2339 	for (i = i - 1; i >= 0; i--) {
2340 		tx_desc = txq->descs + i;
2341 		dma_unmap_single(pp->dev->dev.parent,
2342 				 tx_desc->buf_phys_addr,
2343 				 tx_desc->data_size,
2344 				 DMA_TO_DEVICE);
2345 		mvneta_txq_desc_put(txq);
2346 	}
2347 
2348 	return -ENOMEM;
2349 }
2350 
2351 /* Main tx processing */
2352 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2353 {
2354 	struct mvneta_port *pp = netdev_priv(dev);
2355 	u16 txq_id = skb_get_queue_mapping(skb);
2356 	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2357 	struct mvneta_tx_desc *tx_desc;
2358 	int len = skb->len;
2359 	int frags = 0;
2360 	u32 tx_cmd;
2361 
2362 	if (!netif_running(dev))
2363 		goto out;
2364 
2365 	if (skb_is_gso(skb)) {
2366 		frags = mvneta_tx_tso(skb, dev, txq);
2367 		goto out;
2368 	}
2369 
2370 	frags = skb_shinfo(skb)->nr_frags + 1;
2371 
2372 	/* Get a descriptor for the first part of the packet */
2373 	tx_desc = mvneta_txq_next_desc_get(txq);
2374 
2375 	tx_cmd = mvneta_skb_tx_csum(pp, skb);
2376 
2377 	tx_desc->data_size = skb_headlen(skb);
2378 
2379 	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2380 						tx_desc->data_size,
2381 						DMA_TO_DEVICE);
2382 	if (unlikely(dma_mapping_error(dev->dev.parent,
2383 				       tx_desc->buf_phys_addr))) {
2384 		mvneta_txq_desc_put(txq);
2385 		frags = 0;
2386 		goto out;
2387 	}
2388 
2389 	if (frags == 1) {
2390 		/* First and Last descriptor */
2391 		tx_cmd |= MVNETA_TXD_FLZ_DESC;
2392 		tx_desc->command = tx_cmd;
2393 		txq->tx_skb[txq->txq_put_index] = skb;
2394 		mvneta_txq_inc_put(txq);
2395 	} else {
2396 		/* First but not Last */
2397 		tx_cmd |= MVNETA_TXD_F_DESC;
2398 		txq->tx_skb[txq->txq_put_index] = NULL;
2399 		mvneta_txq_inc_put(txq);
2400 		tx_desc->command = tx_cmd;
2401 		/* Continue with other skb fragments */
2402 		if (mvneta_tx_frag_process(pp, skb, txq)) {
2403 			dma_unmap_single(dev->dev.parent,
2404 					 tx_desc->buf_phys_addr,
2405 					 tx_desc->data_size,
2406 					 DMA_TO_DEVICE);
2407 			mvneta_txq_desc_put(txq);
2408 			frags = 0;
2409 			goto out;
2410 		}
2411 	}
2412 
2413 out:
2414 	if (frags > 0) {
2415 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2416 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2417 
2418 		netdev_tx_sent_queue(nq, len);
2419 
2420 		txq->count += frags;
2421 		if (txq->count >= txq->tx_stop_threshold)
2422 			netif_tx_stop_queue(nq);
2423 
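		/* Only ring the doorbell (pending descriptor add) when the
		 * stack has no more frames queued, the queue was stopped, or
		 * the pending count would exceed the largest burst the HW
		 * update register accepts; otherwise keep accumulating so
		 * register writes are batched.
		 */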
2424 		if (!skb->xmit_more || netif_xmit_stopped(nq) ||
2425 		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2426 			mvneta_txq_pend_desc_add(pp, txq, frags);
2427 		else
2428 			txq->pending += frags;
2429 
2430 		u64_stats_update_begin(&stats->syncp);
2431 		stats->tx_packets++;
2432 		stats->tx_bytes  += len;
2433 		u64_stats_update_end(&stats->syncp);
2434 	} else {
2435 		dev->stats.tx_dropped++;
2436 		dev_kfree_skb_any(skb);
2437 	}
2438 
2439 	return NETDEV_TX_OK;
2440 }
2441 
2442 
2443 /* Free tx resources, when resetting a port */
2444 static void mvneta_txq_done_force(struct mvneta_port *pp,
2445 				  struct mvneta_tx_queue *txq)
2446 
2447 {
2448 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2449 	int tx_done = txq->count;
2450 
2451 	mvneta_txq_bufs_free(pp, txq, tx_done, nq);
2452 
2453 	/* reset txq */
2454 	txq->count = 0;
2455 	txq->txq_put_index = 0;
2456 	txq->txq_get_index = 0;
2457 }
2458 
2459 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
2460  * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
2461  */
2462 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2463 {
2464 	struct mvneta_tx_queue *txq;
2465 	struct netdev_queue *nq;
2466 
2467 	while (cause_tx_done) {
2468 		txq = mvneta_tx_done_policy(pp, cause_tx_done);
2469 
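		/* Take the queue's xmit lock so completion processing can't
		 * race with a concurrent mvneta_tx() on the same txq.
		 */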
2470 		nq = netdev_get_tx_queue(pp->dev, txq->id);
2471 		__netif_tx_lock(nq, smp_processor_id());
2472 
2473 		if (txq->count)
2474 			mvneta_txq_done(pp, txq);
2475 
2476 		__netif_tx_unlock(nq);
2477 		cause_tx_done &= ~((1 << txq->id));
2478 	}
2479 }
2480 
2481 /* Compute the crc8 of the specified address, using a unique algorithm
2482  * per the hw spec, different from the generic crc8 algorithm.
2483  */
2484 static int mvneta_addr_crc(unsigned char *addr)
2485 {
2486 	int crc = 0;
2487 	int i;
2488 
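	/* Bitwise CRC over the six address bytes, MSB first, using
	 * polynomial 0x107 (x^8 + x^2 + x + 1).
	 */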
2489 	for (i = 0; i < ETH_ALEN; i++) {
2490 		int j;
2491 
2492 		crc = (crc ^ addr[i]) << 8;
2493 		for (j = 7; j >= 0; j--) {
2494 			if (crc & (0x100 << j))
2495 				crc ^= 0x107 << j;
2496 		}
2497 	}
2498 
2499 	return crc;
2500 }
2501 
2502 /* This method controls the net device special MAC multicast support.
2503  * The Special Multicast Table for MAC addresses supports MAC of the form
2504  * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2505  * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2506  * Table entries in the DA-Filter table. This method sets the
2507  * appropriate Special Multicast Table entry.
2508  */
2509 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2510 					  unsigned char last_byte,
2511 					  int queue)
2512 {
2513 	unsigned int smc_table_reg;
2514 	unsigned int tbl_offset;
2515 	unsigned int reg_offset;
2516 
2517 	/* Register offset from SMC table base    */
2518 	tbl_offset = (last_byte / 4);
2519 	/* Entry offset within the above reg */
2520 	reg_offset = last_byte % 4;
2521 
2522 	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2523 					+ tbl_offset * 4));
2524 
2525 	if (queue == -1)
2526 		smc_table_reg &= ~(0xff << (8 * reg_offset));
2527 	else {
2528 		smc_table_reg &= ~(0xff << (8 * reg_offset));
2529 		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2530 	}
2531 
2532 	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2533 		    smc_table_reg);
2534 }
2535 
2536 /* This method controls the network device Other MAC multicast support.
2537  * The Other Multicast Table is used for multicast of another type.
2538  * A CRC-8 is used as an index to the Other Multicast Table entries
2539  * in the DA-Filter table.
2540  * The method gets the CRC-8 value from the calling routine and
2541  * sets the appropriate Other Multicast Table entry according to the
2542  * specified CRC-8.
2543  */
2544 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2545 					unsigned char crc8,
2546 					int queue)
2547 {
2548 	unsigned int omc_table_reg;
2549 	unsigned int tbl_offset;
2550 	unsigned int reg_offset;
2551 
2552 	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2553 	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */
2554 
2555 	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2556 
2557 	if (queue == -1) {
2558 		/* Clear accepts frame bit at specified Other DA table entry */
2559 		omc_table_reg &= ~(0xff << (8 * reg_offset));
2560 	} else {
2561 		omc_table_reg &= ~(0xff << (8 * reg_offset));
2562 		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2563 	}
2564 
2565 	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2566 }
2567 
2568 /* The network device supports multicast using two tables:
2569  *    1) Special Multicast Table for MAC addresses of the form
2570  *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2571  *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2572  *       Table entries in the DA-Filter table.
2573  *    2) Other Multicast Table for multicast of another type. A CRC-8 value
2574  *       is used as an index to the Other Multicast Table entries in the
2575  *       DA-Filter table.
2576  */
2577 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2578 				 int queue)
2579 {
2580 	unsigned char crc_result = 0;
2581 
2582 	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2583 		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2584 		return 0;
2585 	}
2586 
2587 	crc_result = mvneta_addr_crc(p_addr);
2588 	if (queue == -1) {
2589 		if (pp->mcast_count[crc_result] == 0) {
2590 			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2591 				    crc_result);
2592 			return -EINVAL;
2593 		}
2594 
2595 		pp->mcast_count[crc_result]--;
2596 		if (pp->mcast_count[crc_result] != 0) {
2597 			netdev_info(pp->dev,
2598 				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
2599 				    pp->mcast_count[crc_result], crc_result);
2600 			return -EINVAL;
2601 		}
2602 	} else
2603 		pp->mcast_count[crc_result]++;
2604 
2605 	mvneta_set_other_mcast_addr(pp, crc_result, queue);
2606 
2607 	return 0;
2608 }
2609 
2610 /* Configure the filtering mode of the Ethernet port */
2611 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2612 					  int is_promisc)
2613 {
2614 	u32 port_cfg_reg, val;
2615 
2616 	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2617 
2618 	val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2619 
2620 	/* Set / Clear UPM bit in port configuration register */
2621 	if (is_promisc) {
2622 		/* Accept all Unicast addresses */
2623 		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2624 		val |= MVNETA_FORCE_UNI;
2625 		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2626 		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2627 	} else {
2628 		/* Reject all Unicast addresses */
2629 		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2630 		val &= ~MVNETA_FORCE_UNI;
2631 	}
2632 
2633 	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2634 	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2635 }
2636 
2637 /* register unicast and multicast addresses */
2638 static void mvneta_set_rx_mode(struct net_device *dev)
2639 {
2640 	struct mvneta_port *pp = netdev_priv(dev);
2641 	struct netdev_hw_addr *ha;
2642 
2643 	if (dev->flags & IFF_PROMISC) {
2644 		/* Accept all: Multicast + Unicast */
2645 		mvneta_rx_unicast_promisc_set(pp, 1);
2646 		mvneta_set_ucast_table(pp, pp->rxq_def);
2647 		mvneta_set_special_mcast_table(pp, pp->rxq_def);
2648 		mvneta_set_other_mcast_table(pp, pp->rxq_def);
2649 	} else {
2650 		/* Accept single Unicast */
2651 		mvneta_rx_unicast_promisc_set(pp, 0);
2652 		mvneta_set_ucast_table(pp, -1);
2653 		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
2654 
2655 		if (dev->flags & IFF_ALLMULTI) {
2656 			/* Accept all multicast */
2657 			mvneta_set_special_mcast_table(pp, pp->rxq_def);
2658 			mvneta_set_other_mcast_table(pp, pp->rxq_def);
2659 		} else {
2660 			/* Accept only initialized multicast */
2661 			mvneta_set_special_mcast_table(pp, -1);
2662 			mvneta_set_other_mcast_table(pp, -1);
2663 
2664 			if (!netdev_mc_empty(dev)) {
2665 				netdev_for_each_mc_addr(ha, dev) {
2666 					mvneta_mcast_addr_set(pp, ha->addr,
2667 							      pp->rxq_def);
2668 				}
2669 			}
2670 		}
2671 	}
2672 }
2673 
2674 /* Interrupt handling - the callback for request_irq() */
2675 static irqreturn_t mvneta_isr(int irq, void *dev_id)
2676 {
2677 	struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2678 
2679 	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2680 	napi_schedule(&pp->napi);
2681 
2682 	return IRQ_HANDLED;
2683 }
2684 
2685 /* Interrupt handling - the callback for request_percpu_irq() */
2686 static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
2687 {
2688 	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
2689 
2690 	disable_percpu_irq(port->pp->dev->irq);
2691 	napi_schedule(&port->napi);
2692 
2693 	return IRQ_HANDLED;
2694 }
2695 
2696 static int mvneta_fixed_link_update(struct mvneta_port *pp,
2697 				    struct phy_device *phy)
2698 {
2699 	struct fixed_phy_status status;
2700 	struct fixed_phy_status changed = {};
2701 	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2702 
2703 	status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
2704 	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
2705 		status.speed = SPEED_1000;
2706 	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
2707 		status.speed = SPEED_100;
2708 	else
2709 		status.speed = SPEED_10;
2710 	status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
2711 	changed.link = 1;
2712 	changed.speed = 1;
2713 	changed.duplex = 1;
2714 	fixed_phy_update_state(phy, &status, &changed);
2715 	return 0;
2716 }
2717 
2718 /* NAPI handler
2719  * Bits 0 - 7 of the causeRxTx register indicate that packets were
2720  * transmitted on the corresponding TXQ (bit 0 is for TX queue 1).
2721  * Bits 8 - 15 of the causeRxTx register indicate that packets were
2722  * received on the corresponding RXQ (bit 8 is for RX queue 0).
2723  * Each CPU has its own causeRxTx register.
2724  */
2725 static int mvneta_poll(struct napi_struct *napi, int budget)
2726 {
2727 	int rx_done = 0;
2728 	u32 cause_rx_tx;
2729 	int rx_queue;
2730 	struct mvneta_port *pp = netdev_priv(napi->dev);
2731 	struct net_device *ndev = pp->dev;
2732 	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2733 
2734 	if (!netif_running(pp->dev)) {
2735 		napi_complete(napi);
2736 		return rx_done;
2737 	}
2738 
2739 	/* Read cause register */
2740 	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2741 	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2742 		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2743 
2744 		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2745 		if (pp->use_inband_status && (cause_misc &
2746 				(MVNETA_CAUSE_PHY_STATUS_CHANGE |
2747 				 MVNETA_CAUSE_LINK_CHANGE |
2748 				 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
2749 			mvneta_fixed_link_update(pp, ndev->phydev);
2750 		}
2751 	}
2752 
2753 	/* Release Tx descriptors */
2754 	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
2755 		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
2756 		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2757 	}
2758 
2759 	/* For the case where the last mvneta_poll did not process all
2760 	 * RX packets
2761 	 */
2762 	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
2763 		port->cause_rx_tx;
2764 
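	/* fls() picks the highest-numbered RX queue with work pending, so
	 * higher queues are served first.
	 */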
2765 	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2766 	if (rx_queue) {
2767 		rx_queue = rx_queue - 1;
2768 		if (pp->bm_priv)
2769 			rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
2770 		else
2771 			rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
2772 	}
2773 
2774 	if (rx_done < budget) {
2775 		cause_rx_tx = 0;
2776 		napi_complete_done(napi, rx_done);
2777 
2778 		if (pp->neta_armada3700) {
2779 			unsigned long flags;
2780 
2781 			local_irq_save(flags);
2782 			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2783 				    MVNETA_RX_INTR_MASK(rxq_number) |
2784 				    MVNETA_TX_INTR_MASK(txq_number) |
2785 				    MVNETA_MISCINTR_INTR_MASK);
2786 			local_irq_restore(flags);
2787 		} else {
2788 			enable_percpu_irq(pp->dev->irq, 0);
2789 		}
2790 	}
2791 
2792 	if (pp->neta_armada3700)
2793 		pp->cause_rx_tx = cause_rx_tx;
2794 	else
2795 		port->cause_rx_tx = cause_rx_tx;
2796 
2797 	return rx_done;
2798 }
2799 
2800 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2801 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2802 			   int num)
2803 {
2804 	int i;
2805 
2806 	for (i = 0; i < num; i++) {
2807 		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2808 		if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
2809 			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
2810 				__func__, rxq->id, i, num);
2811 			break;
2812 		}
2813 	}
2814 
2815 	/* Add this number of RX descriptors as non occupied (ready to
2816 	 * get packets)
2817 	 */
2818 	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2819 
2820 	return i;
2821 }
2822 
2823 /* Free all packets pending transmit from all TXQs and reset TX port */
2824 static void mvneta_tx_reset(struct mvneta_port *pp)
2825 {
2826 	int queue;
2827 
2828 	/* free the skb's in the tx ring */
2829 	for (queue = 0; queue < txq_number; queue++)
2830 		mvneta_txq_done_force(pp, &pp->txqs[queue]);
2831 
2832 	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2833 	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2834 }
2835 
2836 static void mvneta_rx_reset(struct mvneta_port *pp)
2837 {
2838 	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2839 	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2840 }
2841 
2842 /* Rx/Tx queue initialization/cleanup methods */
2843 
2844 /* Create a specified RX queue */
2845 static int mvneta_rxq_init(struct mvneta_port *pp,
2846 			   struct mvneta_rx_queue *rxq)
2847 
2848 {
2849 	rxq->size = pp->rx_ring_size;
2850 
2851 	/* Allocate memory for RX descriptors */
2852 	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2853 					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2854 					&rxq->descs_phys, GFP_KERNEL);
2855 	if (!rxq->descs)
2856 		return -ENOMEM;
2857 
2858 	rxq->last_desc = rxq->size - 1;
2859 
2860 	/* Set Rx descriptors queue starting address */
2861 	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2862 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2863 
2864 	/* Set Offset */
2865 	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);
2866 
2867 	/* Set coalescing pkts and time */
2868 	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2869 	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2870 
2871 	if (!pp->bm_priv) {
2872 		/* Fill RXQ with buffers from RX pool */
2873 		mvneta_rxq_buf_size_set(pp, rxq,
2874 					MVNETA_RX_BUF_SIZE(pp->pkt_size));
2875 		mvneta_rxq_bm_disable(pp, rxq);
2876 		mvneta_rxq_fill(pp, rxq, rxq->size);
2877 	} else {
2878 		mvneta_rxq_bm_enable(pp, rxq);
2879 		mvneta_rxq_long_pool_set(pp, rxq);
2880 		mvneta_rxq_short_pool_set(pp, rxq);
2881 		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
2882 	}
2883 
2884 	return 0;
2885 }
2886 
2887 /* Cleanup Rx queue */
2888 static void mvneta_rxq_deinit(struct mvneta_port *pp,
2889 			      struct mvneta_rx_queue *rxq)
2890 {
2891 	mvneta_rxq_drop_pkts(pp, rxq);
2892 
2893 	if (rxq->descs)
2894 		dma_free_coherent(pp->dev->dev.parent,
2895 				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2896 				  rxq->descs,
2897 				  rxq->descs_phys);
2898 
2899 	rxq->descs             = NULL;
2900 	rxq->last_desc         = 0;
2901 	rxq->next_desc_to_proc = 0;
2902 	rxq->descs_phys        = 0;
2903 }
2904 
2905 /* Create and initialize a tx queue */
2906 static int mvneta_txq_init(struct mvneta_port *pp,
2907 			   struct mvneta_tx_queue *txq)
2908 {
2909 	int cpu;
2910 
2911 	txq->size = pp->tx_ring_size;
2912 
2913 	/* A queue must always have room for at least one skb.
2914 	 * Therefore, stop the queue when the number of free entries
2915 	 * reaches the maximum number of descriptors per skb.
2916 	 */
2917 	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2918 	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2919 
2920 
2921 	/* Allocate memory for TX descriptors */
2922 	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2923 					txq->size * MVNETA_DESC_ALIGNED_SIZE,
2924 					&txq->descs_phys, GFP_KERNEL);
2925 	if (!txq->descs)
2926 		return -ENOMEM;
2927 
2928 	txq->last_desc = txq->size - 1;
2929 
2930 	/* Set maximum bandwidth for enabled TXQs */
2931 	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2932 	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2933 
2934 	/* Set Tx descriptors queue starting address */
2935 	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2936 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2937 
2938 	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
2939 				    GFP_KERNEL);
2940 	if (!txq->tx_skb) {
2941 		dma_free_coherent(pp->dev->dev.parent,
2942 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
2943 				  txq->descs, txq->descs_phys);
2944 		return -ENOMEM;
2945 	}
2946 
2947 	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2948 	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2949 					   txq->size * TSO_HEADER_SIZE,
2950 					   &txq->tso_hdrs_phys, GFP_KERNEL);
2951 	if (!txq->tso_hdrs) {
2952 		kfree(txq->tx_skb);
2953 		dma_free_coherent(pp->dev->dev.parent,
2954 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
2955 				  txq->descs, txq->descs_phys);
2956 		return -ENOMEM;
2957 	}
2958 	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2959 
2960 	/* Setup XPS mapping */
2961 	if (txq_number > 1)
2962 		cpu = txq->id % num_present_cpus();
2963 	else
2964 		cpu = pp->rxq_def % num_present_cpus();
2965 	cpumask_set_cpu(cpu, &txq->affinity_mask);
2966 	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
2967 
2968 	return 0;
2969 }
2970 
2971 /* Free the resources allocated for a tx queue */
2972 static void mvneta_txq_deinit(struct mvneta_port *pp,
2973 			      struct mvneta_tx_queue *txq)
2974 {
2975 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2976 
2977 	kfree(txq->tx_skb);
2978 
2979 	if (txq->tso_hdrs)
2980 		dma_free_coherent(pp->dev->dev.parent,
2981 				  txq->size * TSO_HEADER_SIZE,
2982 				  txq->tso_hdrs, txq->tso_hdrs_phys);
2983 	if (txq->descs)
2984 		dma_free_coherent(pp->dev->dev.parent,
2985 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
2986 				  txq->descs, txq->descs_phys);
2987 
2988 	netdev_tx_reset_queue(nq);
2989 
2990 	txq->descs             = NULL;
2991 	txq->last_desc         = 0;
2992 	txq->next_desc_to_proc = 0;
2993 	txq->descs_phys        = 0;
2994 
2995 	/* Set minimum bandwidth for disabled TXQs */
2996 	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2997 	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2998 
2999 	/* Set Tx descriptors queue starting address and size */
3000 	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3001 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3002 }
3003 
3004 /* Cleanup all Tx queues */
3005 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3006 {
3007 	int queue;
3008 
3009 	for (queue = 0; queue < txq_number; queue++)
3010 		mvneta_txq_deinit(pp, &pp->txqs[queue]);
3011 }
3012 
3013 /* Cleanup all Rx queues */
3014 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3015 {
3016 	int queue;
3017 
3018 	for (queue = 0; queue < rxq_number; queue++)
3019 		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3020 }
3021 
3022 
3023 /* Init all Rx queues */
3024 static int mvneta_setup_rxqs(struct mvneta_port *pp)
3025 {
3026 	int queue;
3027 
3028 	for (queue = 0; queue < rxq_number; queue++) {
3029 		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3030 
3031 		if (err) {
3032 			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3033 				   __func__, queue);
3034 			mvneta_cleanup_rxqs(pp);
3035 			return err;
3036 		}
3037 	}
3038 
3039 	return 0;
3040 }
3041 
3042 /* Init all tx queues */
3043 static int mvneta_setup_txqs(struct mvneta_port *pp)
3044 {
3045 	int queue;
3046 
3047 	for (queue = 0; queue < txq_number; queue++) {
3048 		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3049 		if (err) {
3050 			netdev_err(pp->dev, "%s: can't create txq=%d\n",
3051 				   __func__, queue);
3052 			mvneta_cleanup_txqs(pp);
3053 			return err;
3054 		}
3055 	}
3056 
3057 	return 0;
3058 }
3059 
3060 static void mvneta_start_dev(struct mvneta_port *pp)
3061 {
3062 	int cpu;
3063 	struct net_device *ndev = pp->dev;
3064 
3065 	mvneta_max_rx_size_set(pp, pp->pkt_size);
3066 	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3067 
3068 	/* start the Rx/Tx activity */
3069 	mvneta_port_enable(pp);
3070 
3071 	if (!pp->neta_armada3700) {
3072 		/* Enable polling on the port */
3073 		for_each_online_cpu(cpu) {
3074 			struct mvneta_pcpu_port *port =
3075 				per_cpu_ptr(pp->ports, cpu);
3076 
3077 			napi_enable(&port->napi);
3078 		}
3079 	} else {
3080 		napi_enable(&pp->napi);
3081 	}
3082 
3083 	/* Unmask interrupts. It has to be done from each CPU */
3084 	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3085 
3086 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3087 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3088 		    MVNETA_CAUSE_LINK_CHANGE |
3089 		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
3090 
3091 	phy_start(ndev->phydev);
3092 	netif_tx_start_all_queues(pp->dev);
3093 }
3094 
3095 static void mvneta_stop_dev(struct mvneta_port *pp)
3096 {
3097 	unsigned int cpu;
3098 	struct net_device *ndev = pp->dev;
3099 
3100 	phy_stop(ndev->phydev);
3101 
3102 	if (!pp->neta_armada3700) {
3103 		for_each_online_cpu(cpu) {
3104 			struct mvneta_pcpu_port *port =
3105 				per_cpu_ptr(pp->ports, cpu);
3106 
3107 			napi_disable(&port->napi);
3108 		}
3109 	} else {
3110 		napi_disable(&pp->napi);
3111 	}
3112 
3113 	netif_carrier_off(pp->dev);
3114 
3115 	mvneta_port_down(pp);
3116 	netif_tx_stop_all_queues(pp->dev);
3117 
3118 	/* Stop the port activity */
3119 	mvneta_port_disable(pp);
3120 
3121 	/* Clear all ethernet port interrupts */
3122 	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3123 
3124 	/* Mask all ethernet port interrupts */
3125 	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3126 
3127 	mvneta_tx_reset(pp);
3128 	mvneta_rx_reset(pp);
3129 }
3130 
3131 static void mvneta_percpu_enable(void *arg)
3132 {
3133 	struct mvneta_port *pp = arg;
3134 
3135 	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3136 }
3137 
3138 static void mvneta_percpu_disable(void *arg)
3139 {
3140 	struct mvneta_port *pp = arg;
3141 
3142 	disable_percpu_irq(pp->dev->irq);
3143 }
3144 
3145 /* Change the device mtu */
3146 static int mvneta_change_mtu(struct net_device *dev, int mtu)
3147 {
3148 	struct mvneta_port *pp = netdev_priv(dev);
3149 	int ret;
3150 
3151 	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3152 		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3153 			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3154 		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3155 	}
3156 
3157 	dev->mtu = mtu;
3158 
3159 	if (!netif_running(dev)) {
3160 		if (pp->bm_priv)
3161 			mvneta_bm_update_mtu(pp, mtu);
3162 
3163 		netdev_update_features(dev);
3164 		return 0;
3165 	}
3166 
3167 	/* The interface is running, so we have to force a
3168 	 * reallocation of the queues
3169 	 */
3170 	mvneta_stop_dev(pp);
3171 	on_each_cpu(mvneta_percpu_disable, pp, true);
3172 
3173 	mvneta_cleanup_txqs(pp);
3174 	mvneta_cleanup_rxqs(pp);
3175 
3176 	if (pp->bm_priv)
3177 		mvneta_bm_update_mtu(pp, mtu);
3178 
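	/* Recompute the buffer geometry: frag_size must leave room for the
	 * skb_shared_info that build_skb() places at the end of the buffer.
	 */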
3179 	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3180 	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
3181 	                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3182 
3183 	ret = mvneta_setup_rxqs(pp);
3184 	if (ret) {
3185 		netdev_err(dev, "unable to setup rxqs after MTU change\n");
3186 		return ret;
3187 	}
3188 
3189 	ret = mvneta_setup_txqs(pp);
3190 	if (ret) {
3191 		netdev_err(dev, "unable to setup txqs after MTU change\n");
3192 		return ret;
3193 	}
3194 
3195 	on_each_cpu(mvneta_percpu_enable, pp, true);
3196 	mvneta_start_dev(pp);
3197 
3198 	netdev_update_features(dev);
3199 
3200 	return 0;
3201 }
3202 
3203 static netdev_features_t mvneta_fix_features(struct net_device *dev,
3204 					     netdev_features_t features)
3205 {
3206 	struct mvneta_port *pp = netdev_priv(dev);
3207 
3208 	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3209 		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3210 		netdev_info(dev,
3211 			    "Disable IP checksum for MTU greater than %dB\n",
3212 			    pp->tx_csum_limit);
3213 	}
3214 
3215 	return features;
3216 }
3217 
3218 /* Get mac address */
3219 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3220 {
3221 	u32 mac_addr_l, mac_addr_h;
3222 
3223 	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3224 	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3225 	addr[0] = (mac_addr_h >> 24) & 0xFF;
3226 	addr[1] = (mac_addr_h >> 16) & 0xFF;
3227 	addr[2] = (mac_addr_h >> 8) & 0xFF;
3228 	addr[3] = mac_addr_h & 0xFF;
3229 	addr[4] = (mac_addr_l >> 8) & 0xFF;
3230 	addr[5] = mac_addr_l & 0xFF;
3231 }
3232 
3233 /* Handle setting mac address */
3234 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3235 {
3236 	struct mvneta_port *pp = netdev_priv(dev);
3237 	struct sockaddr *sockaddr = addr;
3238 	int ret;
3239 
3240 	ret = eth_prepare_mac_addr_change(dev, addr);
3241 	if (ret < 0)
3242 		return ret;
3243 	/* Remove previous address table entry */
3244 	mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3245 
3246 	/* Set new addr in hw */
3247 	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3248 
3249 	eth_commit_mac_addr_change(dev, addr);
3250 	return 0;
3251 }
3252 
3253 static void mvneta_adjust_link(struct net_device *ndev)
3254 {
3255 	struct mvneta_port *pp = netdev_priv(ndev);
3256 	struct phy_device *phydev = ndev->phydev;
3257 	int status_change = 0;
3258 
3259 	if (phydev->link) {
3260 		if ((pp->speed != phydev->speed) ||
3261 		    (pp->duplex != phydev->duplex)) {
3262 			u32 val;
3263 
3264 			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3265 			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
3266 				 MVNETA_GMAC_CONFIG_GMII_SPEED |
3267 				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
3268 
3269 			if (phydev->duplex)
3270 				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3271 
3272 			if (phydev->speed == SPEED_1000)
3273 				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3274 			else if (phydev->speed == SPEED_100)
3275 				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
3276 
3277 			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3278 
3279 			pp->duplex = phydev->duplex;
3280 			pp->speed  = phydev->speed;
3281 		}
3282 	}
3283 
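	/* On link loss, reset speed/duplex to sentinel values so that the
	 * comparison above reprograms the MAC on the next link-up.
	 */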
3284 	if (phydev->link != pp->link) {
3285 		if (!phydev->link) {
3286 			pp->duplex = -1;
3287 			pp->speed = 0;
3288 		}
3289 
3290 		pp->link = phydev->link;
3291 		status_change = 1;
3292 	}
3293 
3294 	if (status_change) {
3295 		if (phydev->link) {
3296 			if (!pp->use_inband_status) {
3297 				u32 val = mvreg_read(pp,
3298 						  MVNETA_GMAC_AUTONEG_CONFIG);
3299 				val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
3300 				val |= MVNETA_GMAC_FORCE_LINK_PASS;
3301 				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3302 					    val);
3303 			}
3304 			mvneta_port_up(pp);
3305 		} else {
3306 			if (!pp->use_inband_status) {
3307 				u32 val = mvreg_read(pp,
3308 						  MVNETA_GMAC_AUTONEG_CONFIG);
3309 				val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
3310 				val |= MVNETA_GMAC_FORCE_LINK_DOWN;
3311 				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3312 					    val);
3313 			}
3314 			mvneta_port_down(pp);
3315 		}
3316 		phy_print_status(phydev);
3317 	}
3318 }
3319 
3320 static int mvneta_mdio_probe(struct mvneta_port *pp)
3321 {
3322 	struct phy_device *phy_dev;
3323 	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
3324 
3325 	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
3326 				 pp->phy_interface);
3327 	if (!phy_dev) {
3328 		netdev_err(pp->dev, "could not find the PHY\n");
3329 		return -ENODEV;
3330 	}
3331 
3332 	phy_ethtool_get_wol(phy_dev, &wol);
3333 	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
3334 
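	/* The port supports at most gigabit speeds, so mask out any
	 * faster modes the PHY might otherwise advertise.
	 */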
3335 	phy_dev->supported &= PHY_GBIT_FEATURES;
3336 	phy_dev->advertising = phy_dev->supported;
3337 
3338 	pp->link    = 0;
3339 	pp->duplex  = 0;
3340 	pp->speed   = 0;
3341 
3342 	return 0;
3343 }
3344 
3345 static void mvneta_mdio_remove(struct mvneta_port *pp)
3346 {
3347 	struct net_device *ndev = pp->dev;
3348 
3349 	phy_disconnect(ndev->phydev);
3350 }
3351 
3352 /* Electing a CPU must be done in an atomic way: it should be done
3353  * either before or after the removal/insertion of a CPU, and this
3354  * function is not reentrant.
3355  */
3356 static void mvneta_percpu_elect(struct mvneta_port *pp)
3357 {
3358 	int elected_cpu = 0, max_cpu, cpu;
3359 
3360 	/* Use the CPU associated with the default rxq when it is online;
3361 	 * in all other cases, use CPU 0, which can't be offline.
3362 	 */
3363 	if (cpu_online(pp->rxq_def))
3364 		elected_cpu = pp->rxq_def;
3365 
3366 	max_cpu = num_present_cpus();
3367 
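	/* Distribute the RX queues round-robin across the online CPUs:
	 * queue N is mapped to CPU (N % max_cpu), and the default queue
	 * is additionally mapped to the elected CPU.
	 */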
3368 	for_each_online_cpu(cpu) {
3369 		int rxq_map = 0, txq_map = 0;
3370 		int rxq;
3371 
3372 		for (rxq = 0; rxq < rxq_number; rxq++)
3373 			if ((rxq % max_cpu) == cpu)
3374 				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
3375 
3376 		if (cpu == elected_cpu)
3377 			/* Map the default receive queue to the
3378 			 * elected CPU
3379 			 */
3380 			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
3381 
3382 		/* We update the TX queue map only if we have one
3383 		 * queue. In this case we associate the TX queue to
3384 		 * the CPU bound to the default RX queue
3385 		 */
3386 		if (txq_number == 1)
3387 			txq_map = (cpu == elected_cpu) ?
3388 				MVNETA_CPU_TXQ_ACCESS(1) : 0;
3389 		else
3390 			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
3391 				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
3392 
3393 		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
3394 
3395 		/* Update the interrupt mask on each CPU according the
3396 		 * new mapping
3397 		 */
3398 		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
3399 					 pp, true);
3402 	}
3403 }
3404 
3405 static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
3406 {
3407 	int other_cpu;
3408 	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3409 						  node_online);
3410 	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3411 
3412 
3413 	spin_lock(&pp->lock);
3414 	/*
3415 	 * Configuring the driver for a new CPU while the driver is
3416 	 * stopping is racy, so just avoid it.
3417 	 */
3418 	if (pp->is_stopped) {
3419 		spin_unlock(&pp->lock);
3420 		return 0;
3421 	}
3422 	netif_tx_stop_all_queues(pp->dev);
3423 
3424 	/*
3425 	 * We have to synchronise on the napi of each CPU except the one
3426 	 * just being woken up.
3427 	 */
3428 	for_each_online_cpu(other_cpu) {
3429 		if (other_cpu != cpu) {
3430 			struct mvneta_pcpu_port *other_port =
3431 				per_cpu_ptr(pp->ports, other_cpu);
3432 
3433 			napi_synchronize(&other_port->napi);
3434 		}
3435 	}
3436 
3437 	/* Mask all ethernet port interrupts */
3438 	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3439 	napi_enable(&port->napi);
3440 
3441 	/*
3442 	 * Enable per-CPU interrupts on the CPU that is
3443 	 * brought up.
3444 	 */
3445 	mvneta_percpu_enable(pp);
3446 
3447 	/*
3448 	 * Elect the CPU handling the default RX queue and update
3449 	 * the queue-to-CPU mappings accordingly.
3450 	 */
3451 	mvneta_percpu_elect(pp);
3452 
3453 	/* Unmask all ethernet port interrupts */
3454 	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3455 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3456 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3457 		    MVNETA_CAUSE_LINK_CHANGE |
3458 		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
3459 	netif_tx_start_all_queues(pp->dev);
3460 	spin_unlock(&pp->lock);
3461 	return 0;
3462 }
3463 
3464 static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
3465 {
3466 	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3467 						  node_online);
3468 	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3469 
3470 	/*
3471 	 * Thanks to this lock we are sure that any pending cpu election is
3472 	 * done.
3473 	 */
3474 	spin_lock(&pp->lock);
3475 	/* Mask all ethernet port interrupts */
3476 	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3477 	spin_unlock(&pp->lock);
3478 
3479 	napi_synchronize(&port->napi);
3480 	napi_disable(&port->napi);
3481 	/* Disable per-CPU interrupts on the CPU that is brought down. */
3482 	mvneta_percpu_disable(pp);
3483 	return 0;
3484 }
3485 
3486 static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
3487 {
3488 	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3489 						  node_dead);
3490 
3491 	/* Check if a new CPU must be elected now that this one is down */
3492 	spin_lock(&pp->lock);
3493 	mvneta_percpu_elect(pp);
3494 	spin_unlock(&pp->lock);
3495 	/* Unmask all ethernet port interrupts */
3496 	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3497 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3498 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3499 		    MVNETA_CAUSE_LINK_CHANGE |
3500 		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
3501 	netif_tx_start_all_queues(pp->dev);
3502 	return 0;
3503 }
3504 
3505 static int mvneta_open(struct net_device *dev)
3506 {
3507 	struct mvneta_port *pp = netdev_priv(dev);
3508 	int ret;
3509 
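	/* pkt_size is the maximum RX frame size derived from the MTU;
	 * frag_size additionally reserves room for the skb_shared_info
	 * that build_skb() expects at the end of the buffer.
	 */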
3510 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
3511 	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
3512 	                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3513 
3514 	ret = mvneta_setup_rxqs(pp);
3515 	if (ret)
3516 		return ret;
3517 
3518 	ret = mvneta_setup_txqs(pp);
3519 	if (ret)
3520 		goto err_cleanup_rxqs;
3521 
3522 	/* Connect to port interrupt line */
3523 	if (pp->neta_armada3700)
3524 		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
3525 				  dev->name, pp);
3526 	else
3527 		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
3528 					 dev->name, pp->ports);
3529 	if (ret) {
3530 		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
3531 		goto err_cleanup_txqs;
3532 	}
3533 
3534 	if (!pp->neta_armada3700) {
3535 		/* Enable per-CPU interrupt on all the CPU to handle our RX
3536 		 * queue interrupts
3537 		 */
3538 		on_each_cpu(mvneta_percpu_enable, pp, true);
3539 
3540 		pp->is_stopped = false;
3541 		/* Register a CPU notifier to handle the case where our CPU
3542 		 * might be taken offline.
3543 		 */
3544 		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
3545 						       &pp->node_online);
3546 		if (ret)
3547 			goto err_free_irq;
3548 
3549 		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3550 						       &pp->node_dead);
3551 		if (ret)
3552 			goto err_free_online_hp;
3553 	}
3554 
3555 	/* By default, the link is down */
3556 	netif_carrier_off(pp->dev);
3557 
3558 	ret = mvneta_mdio_probe(pp);
3559 	if (ret < 0) {
3560 		netdev_err(dev, "cannot probe MDIO bus\n");
3561 		goto err_free_dead_hp;
3562 	}
3563 
3564 	mvneta_start_dev(pp);
3565 
3566 	return 0;
3567 
3568 err_free_dead_hp:
3569 	if (!pp->neta_armada3700)
3570 		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3571 						    &pp->node_dead);
3572 err_free_online_hp:
3573 	if (!pp->neta_armada3700)
3574 		cpuhp_state_remove_instance_nocalls(online_hpstate,
3575 						    &pp->node_online);
3576 err_free_irq:
3577 	if (pp->neta_armada3700) {
3578 		free_irq(pp->dev->irq, pp);
3579 	} else {
3580 		on_each_cpu(mvneta_percpu_disable, pp, true);
3581 		free_percpu_irq(pp->dev->irq, pp->ports);
3582 	}
3583 err_cleanup_txqs:
3584 	mvneta_cleanup_txqs(pp);
3585 err_cleanup_rxqs:
3586 	mvneta_cleanup_rxqs(pp);
3587 	return ret;
3588 }
3589 
3590 /* Stop the port, free port interrupt line */
3591 static int mvneta_stop(struct net_device *dev)
3592 {
3593 	struct mvneta_port *pp = netdev_priv(dev);
3594 
3595 	if (!pp->neta_armada3700) {
3596 		/* Inform the notifiers that we are stopping, so the driver
3597 		 * is not set up for new CPUs. The CPU-online notifier code is
3598 		 * protected by the same spinlock, so once we hold the lock,
3599 		 * any pending notifier work is done.
3600 		 */
3601 		spin_lock(&pp->lock);
3602 		pp->is_stopped = true;
3603 		spin_unlock(&pp->lock);
3604 
3605 		mvneta_stop_dev(pp);
3606 		mvneta_mdio_remove(pp);
3607 
3608 		cpuhp_state_remove_instance_nocalls(online_hpstate,
3609 						    &pp->node_online);
3610 		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3611 						    &pp->node_dead);
3612 		on_each_cpu(mvneta_percpu_disable, pp, true);
3613 		free_percpu_irq(dev->irq, pp->ports);
3614 	} else {
3615 		mvneta_stop_dev(pp);
3616 		mvneta_mdio_remove(pp);
3617 		free_irq(dev->irq, pp);
3618 	}
3619 
3620 	mvneta_cleanup_rxqs(pp);
3621 	mvneta_cleanup_txqs(pp);
3622 
3623 	return 0;
3624 }
3625 
3626 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3627 {
3628 	if (!dev->phydev)
3629 		return -ENOTSUPP;
3630 
3631 	return phy_mii_ioctl(dev->phydev, ifr, cmd);
3632 }
3633 
3634 /* Ethtool methods */
3635 
3636 /* Set link ksettings (phy address, speed) for ethtools */
3637 static int
3638 mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
3639 				  const struct ethtool_link_ksettings *cmd)
3640 {
3641 	struct mvneta_port *pp = netdev_priv(ndev);
3642 	struct phy_device *phydev = ndev->phydev;
3643 
3644 	if (!phydev)
3645 		return -ENODEV;
3646 
3647 	if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
3648 		u32 val;
3649 
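		/* Toggling autoneg switches the MAC between in-band and
		 * forced mode; when disabling, the PHY's current speed and
		 * duplex are programmed into the MAC below.
		 */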
3650 		mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
3651 
3652 		if (cmd->base.autoneg == AUTONEG_DISABLE) {
3653 			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3654 			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
3655 				 MVNETA_GMAC_CONFIG_GMII_SPEED |
3656 				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
3657 
3658 			if (phydev->duplex)
3659 				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3660 
3661 			if (phydev->speed == SPEED_1000)
3662 				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3663 			else if (phydev->speed == SPEED_100)
3664 				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
3665 
3666 			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3667 		}
3668 
3669 		pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
3670 		netdev_info(pp->dev, "autoneg status set to %i\n",
3671 			    pp->use_inband_status);
3672 
3673 		if (netif_running(ndev)) {
3674 			mvneta_port_down(pp);
3675 			mvneta_port_up(pp);
3676 		}
3677 	}
3678 
3679 	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
3680 }
3681 
3682 /* Set interrupt coalescing for ethtools */
3683 static int mvneta_ethtool_set_coalesce(struct net_device *dev,
3684 				       struct ethtool_coalesce *c)
3685 {
3686 	struct mvneta_port *pp = netdev_priv(dev);
3687 	int queue;
3688 
3689 	for (queue = 0; queue < rxq_number; queue++) {
3690 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3691 		rxq->time_coal = c->rx_coalesce_usecs;
3692 		rxq->pkts_coal = c->rx_max_coalesced_frames;
3693 		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3694 		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3695 	}
3696 
3697 	for (queue = 0; queue < txq_number; queue++) {
3698 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
3699 		txq->done_pkts_coal = c->tx_max_coalesced_frames;
3700 		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3701 	}
3702 
3703 	return 0;
3704 }
3705 
3706 /* get coalescing for ethtools */
3707 static int mvneta_ethtool_get_coalesce(struct net_device *dev,
3708 				       struct ethtool_coalesce *c)
3709 {
3710 	struct mvneta_port *pp = netdev_priv(dev);
3711 
3712 	c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
3713 	c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;
3714 
3715 	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
3716 	return 0;
3717 }
3718 
3719 
3720 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
3721 				    struct ethtool_drvinfo *drvinfo)
3722 {
3723 	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
3724 		sizeof(drvinfo->driver));
3725 	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
3726 		sizeof(drvinfo->version));
3727 	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
3728 		sizeof(drvinfo->bus_info));
3729 }
3730 
3731 
3732 static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
3733 					 struct ethtool_ringparam *ring)
3734 {
3735 	struct mvneta_port *pp = netdev_priv(netdev);
3736 
3737 	ring->rx_max_pending = MVNETA_MAX_RXD;
3738 	ring->tx_max_pending = MVNETA_MAX_TXD;
3739 	ring->rx_pending = pp->rx_ring_size;
3740 	ring->tx_pending = pp->tx_ring_size;
3741 }
3742 
3743 static int mvneta_ethtool_set_ringparam(struct net_device *dev,
3744 					struct ethtool_ringparam *ring)
3745 {
3746 	struct mvneta_port *pp = netdev_priv(dev);
3747 
3748 	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
3749 		return -EINVAL;
3750 	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
3751 		ring->rx_pending : MVNETA_MAX_RXD;
3752 
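	/* Clamp the TX ring between MVNETA_MAX_SKB_DESCS * 2 (room for at
	 * least two maximally fragmented skbs) and MVNETA_MAX_TXD.
	 */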
3753 	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
3754 				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
3755 	if (pp->tx_ring_size != ring->tx_pending)
3756 		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
3757 			    pp->tx_ring_size, ring->tx_pending);
3758 
3759 	if (netif_running(dev)) {
3760 		mvneta_stop(dev);
3761 		if (mvneta_open(dev)) {
3762 			netdev_err(dev,
3763 				   "error on opening device after ring param change\n");
3764 			return -ENOMEM;
3765 		}
3766 	}
3767 
3768 	return 0;
3769 }
3770 
3771 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
3772 				       u8 *data)
3773 {
3774 	if (sset == ETH_SS_STATS) {
3775 		int i;
3776 
3777 		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3778 			memcpy(data + i * ETH_GSTRING_LEN,
3779 			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
3780 	}
3781 }
3782 
3783 static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
3784 {
3785 	const struct mvneta_statistic *s;
3786 	void __iomem *base = pp->base;
3787 	u32 high, low, val;
3788 	u64 val64;
3789 	int i;
3790 
3791 	for (i = 0, s = mvneta_statistics;
3792 	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
3793 	     s++, i++) {
3794 		switch (s->type) {
3795 		case T_REG_32:
3796 			val = readl_relaxed(base + s->offset);
3797 			pp->ethtool_stats[i] += val;
3798 			break;
3799 		case T_REG_64:
3800 			/* Docs say to read low 32-bit then high */
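			/* (Reading the low word first presumably latches
			 * the high word, so the 64-bit counter is sampled
			 * consistently.)
			 */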
3801 			low = readl_relaxed(base + s->offset);
3802 			high = readl_relaxed(base + s->offset + 4);
3803 			val64 = (u64)high << 32 | low;
3804 			pp->ethtool_stats[i] += val64;
3805 			break;
3806 		}
3807 	}
3808 }
3809 
3810 static void mvneta_ethtool_get_stats(struct net_device *dev,
3811 				     struct ethtool_stats *stats, u64 *data)
3812 {
3813 	struct mvneta_port *pp = netdev_priv(dev);
3814 	int i;
3815 
3816 	mvneta_ethtool_update_stats(pp);
3817 
3818 	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3819 		*data++ = pp->ethtool_stats[i];
3820 }
3821 
3822 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
3823 {
3824 	if (sset == ETH_SS_STATS)
3825 		return ARRAY_SIZE(mvneta_statistics);
3826 	return -EOPNOTSUPP;
3827 }
3828 
3829 static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
3830 {
3831 	return MVNETA_RSS_LU_TABLE_SIZE;
3832 }
3833 
3834 static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
3835 				    struct ethtool_rxnfc *info,
3836 				    u32 *rules __always_unused)
3837 {
3838 	switch (info->cmd) {
3839 	case ETHTOOL_GRXRINGS:
3840 		info->data = rxq_number;
3841 		return 0;
3842 	case ETHTOOL_GRXFH:
3843 		return -EOPNOTSUPP;
3844 	default:
3845 		return -EOPNOTSUPP;
3846 	}
3847 }
3848 
3849 static int mvneta_config_rss(struct mvneta_port *pp)
3850 {
3851 	int cpu;
3852 	u32 val;
3853 
3854 	netif_tx_stop_all_queues(pp->dev);
3855 
3856 	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3857 
3858 	/* We have to synchronise on the napi of each CPU */
3859 	for_each_online_cpu(cpu) {
3860 		struct mvneta_pcpu_port *pcpu_port =
3861 			per_cpu_ptr(pp->ports, cpu);
3862 
3863 		napi_synchronize(&pcpu_port->napi);
3864 		napi_disable(&pcpu_port->napi);
3865 	}
3866 
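	/* Only the first indirection-table entry is consumed here: it
	 * selects the new default RX queue used for unicast steering.
	 */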
3867 	pp->rxq_def = pp->indir[0];
3868 
3869 	/* Update unicast mapping */
3870 	mvneta_set_rx_mode(pp->dev);
3871 
3872 	/* Update the port configuration register with the new default RXQ */
3873 	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
3874 	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
3875 
3876 	/* Update the elected CPU matching the new rxq_def */
3877 	spin_lock(&pp->lock);
3878 	mvneta_percpu_elect(pp);
3879 	spin_unlock(&pp->lock);
3880 
3881 	/* We have to synchronise on the napi of each CPU */
3882 	for_each_online_cpu(cpu) {
3883 		struct mvneta_pcpu_port *pcpu_port =
3884 			per_cpu_ptr(pp->ports, cpu);
3885 
3886 		napi_enable(&pcpu_port->napi);
3887 	}
3888 
3889 	netif_tx_start_all_queues(pp->dev);
3890 
3891 	return 0;
3892 }
3893 
3894 static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
3895 				   const u8 *key, const u8 hfunc)
3896 {
3897 	struct mvneta_port *pp = netdev_priv(dev);
3898 
3899 	/* Current code for Armada 3700 doesn't support RSS features yet */
3900 	if (pp->neta_armada3700)
3901 		return -EOPNOTSUPP;
3902 
3903 	/* Reject any change to the unsupported parameters: a hash key or
3904 	 * a hash function other than the default Toeplitz one.
3905 	 */
3906 	if (key ||
3907 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
3908 		return -EOPNOTSUPP;
3909 
3910 	if (!indir)
3911 		return 0;
3912 
3913 	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
3914 
3915 	return mvneta_config_rss(pp);
3916 }
3917 
3918 static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
3919 				   u8 *hfunc)
3920 {
3921 	struct mvneta_port *pp = netdev_priv(dev);
3922 
3923 	/* Current code for Armada 3700 doesn't support RSS features yet */
3924 	if (pp->neta_armada3700)
3925 		return -EOPNOTSUPP;
3926 
3927 	if (hfunc)
3928 		*hfunc = ETH_RSS_HASH_TOP;
3929 
3930 	if (!indir)
3931 		return 0;
3932 
3933 	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
3934 
3935 	return 0;
3936 }
3937 
3938 static void mvneta_ethtool_get_wol(struct net_device *dev,
3939 				   struct ethtool_wolinfo *wol)
3940 {
3941 	wol->supported = 0;
3942 	wol->wolopts = 0;
3943 
3944 	if (dev->phydev)
3945 		phy_ethtool_get_wol(dev->phydev, wol);
3946 }
3947 
3948 static int mvneta_ethtool_set_wol(struct net_device *dev,
3949 				  struct ethtool_wolinfo *wol)
3950 {
3951 	int ret;
3952 
3953 	if (!dev->phydev)
3954 		return -EOPNOTSUPP;
3955 
3956 	ret = phy_ethtool_set_wol(dev->phydev, wol);
3957 	if (!ret)
3958 		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
3959 
3960 	return ret;
3961 }
3962 
3963 static const struct net_device_ops mvneta_netdev_ops = {
3964 	.ndo_open            = mvneta_open,
3965 	.ndo_stop            = mvneta_stop,
3966 	.ndo_start_xmit      = mvneta_tx,
3967 	.ndo_set_rx_mode     = mvneta_set_rx_mode,
3968 	.ndo_set_mac_address = mvneta_set_mac_addr,
3969 	.ndo_change_mtu      = mvneta_change_mtu,
3970 	.ndo_fix_features    = mvneta_fix_features,
3971 	.ndo_get_stats64     = mvneta_get_stats64,
3972 	.ndo_do_ioctl        = mvneta_ioctl,
3973 };
3974 
3975 static const struct ethtool_ops mvneta_eth_tool_ops = {
3976 	.nway_reset	= phy_ethtool_nway_reset,
3977 	.get_link       = ethtool_op_get_link,
3978 	.set_coalesce   = mvneta_ethtool_set_coalesce,
3979 	.get_coalesce   = mvneta_ethtool_get_coalesce,
3980 	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
3981 	.get_ringparam  = mvneta_ethtool_get_ringparam,
3982 	.set_ringparam	= mvneta_ethtool_set_ringparam,
3983 	.get_strings	= mvneta_ethtool_get_strings,
3984 	.get_ethtool_stats = mvneta_ethtool_get_stats,
3985 	.get_sset_count	= mvneta_ethtool_get_sset_count,
3986 	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
3987 	.get_rxnfc	= mvneta_ethtool_get_rxnfc,
3988 	.get_rxfh	= mvneta_ethtool_get_rxfh,
3989 	.set_rxfh	= mvneta_ethtool_set_rxfh,
3990 	.get_link_ksettings = phy_ethtool_get_link_ksettings,
3991 	.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
3992 	.get_wol        = mvneta_ethtool_get_wol,
3993 	.set_wol        = mvneta_ethtool_set_wol,
3994 };
3995 
3996 /* Initialize hw */
3997 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
3998 {
3999 	int queue;
4000 
4001 	/* Disable port */
4002 	mvneta_port_disable(pp);
4003 
4004 	/* Set port default values */
4005 	mvneta_defaults_set(pp);
4006 
4007 	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
4008 	if (!pp->txqs)
4009 		return -ENOMEM;
4010 
4011 	/* Initialize TX descriptor rings */
4012 	for (queue = 0; queue < txq_number; queue++) {
4013 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
4014 		txq->id = queue;
4015 		txq->size = pp->tx_ring_size;
4016 		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
4017 	}
4018 
4019 	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
4020 	if (!pp->rxqs)
4021 		return -ENOMEM;
4022 
4023 	/* Create Rx descriptor rings */
4024 	for (queue = 0; queue < rxq_number; queue++) {
4025 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4026 		rxq->id = queue;
4027 		rxq->size = pp->rx_ring_size;
4028 		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
4029 		rxq->time_coal = MVNETA_RX_COAL_USEC;
4030 		rxq->buf_virt_addr
4031 			= devm_kmalloc_array(pp->dev->dev.parent,
4032 					     rxq->size,
4033 					     sizeof(*rxq->buf_virt_addr),
4034 					     GFP_KERNEL);
4035 		if (!rxq->buf_virt_addr)
4036 			return -ENOMEM;
4037 	}
4038 
4039 	return 0;
4040 }
4041 
4042 /* platform glue : initialize decoding windows */
4043 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
4044 				     const struct mbus_dram_target_info *dram)
4045 {
4046 	u32 win_enable;
4047 	u32 win_protect;
4048 	int i;
4049 
4050 	for (i = 0; i < 6; i++) {
4051 		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
4052 		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
4053 
4054 		if (i < 4)
4055 			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
4056 	}
4057 
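	/* MVNETA_BASE_ADDR_ENABLE appears to use one disable bit per
	 * window: start with all six windows disabled (0x3f) and clear
	 * a bit for each window configured below.
	 */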
4058 	win_enable = 0x3f;
4059 	win_protect = 0;
4060 
4061 	if (dram) {
4062 		for (i = 0; i < dram->num_cs; i++) {
4063 			const struct mbus_dram_window *cs = dram->cs + i;
4064 
4065 			mvreg_write(pp, MVNETA_WIN_BASE(i),
4066 				    (cs->base & 0xffff0000) |
4067 				    (cs->mbus_attr << 8) |
4068 				    dram->mbus_dram_target_id);
4069 
4070 			mvreg_write(pp, MVNETA_WIN_SIZE(i),
4071 				    (cs->size - 1) & 0xffff0000);
4072 
4073 			win_enable &= ~(1 << i);
4074 			win_protect |= 3 << (2 * i);
4075 		}
4076 	} else {
4077 		/* For Armada3700 open default 4GB Mbus window, leaving
4078 		 * arbitration of target/attribute to a different layer
4079 		 * of configuration.
4080 		 */
4081 		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
4082 		win_enable &= ~BIT(0);
4083 		win_protect = 3;
4084 	}
4085 
4086 	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
4087 	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
4088 }
4089 
4090 /* Power up the port */
4091 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
4092 {
4093 	u32 ctrl;
4094 
4095 	/* MAC Cause register should be cleared */
4096 	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
4097 
4098 	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
4099 
4100 	/* Even though it might look weird, when we're configured in
4101 	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
4102 	 */
4103 	switch (phy_mode) {
4104 	case PHY_INTERFACE_MODE_QSGMII:
4105 		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
4106 		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
4107 		break;
4108 	case PHY_INTERFACE_MODE_SGMII:
4109 		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
4110 		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
4111 		break;
4112 	case PHY_INTERFACE_MODE_RGMII:
4113 	case PHY_INTERFACE_MODE_RGMII_ID:
4114 	case PHY_INTERFACE_MODE_RGMII_RXID:
4115 	case PHY_INTERFACE_MODE_RGMII_TXID:
4116 		ctrl |= MVNETA_GMAC2_PORT_RGMII;
4117 		break;
4118 	default:
4119 		return -EINVAL;
4120 	}
4121 
4122 	/* Cancel Port Reset */
4123 	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
4124 	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
4125 
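	/* Busy-wait, without a timeout, until the port-reset bit
	 * self-clears.
	 */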
4126 	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
4127 		MVNETA_GMAC2_PORT_RESET) != 0)
4128 		continue;
4129 
4130 	return 0;
4131 }
4132 
4133 /* Device initialization routine */
4134 static int mvneta_probe(struct platform_device *pdev)
4135 {
4136 	struct resource *res;
4137 	struct device_node *dn = pdev->dev.of_node;
4138 	struct device_node *phy_node;
4139 	struct device_node *bm_node;
4140 	struct mvneta_port *pp;
4141 	struct net_device *dev;
4142 	const char *dt_mac_addr;
4143 	char hw_mac_addr[ETH_ALEN];
4144 	const char *mac_from;
4145 	const char *managed;
4146 	int tx_csum_limit;
4147 	int phy_mode;
4148 	int err;
4149 	int cpu;
4150 
4151 	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
4152 	if (!dev)
4153 		return -ENOMEM;
4154 
4155 	dev->irq = irq_of_parse_and_map(dn, 0);
4156 	if (dev->irq == 0) {
4157 		err = -EINVAL;
4158 		goto err_free_netdev;
4159 	}
4160 
4161 	phy_node = of_parse_phandle(dn, "phy", 0);
4162 	if (!phy_node) {
4163 		if (!of_phy_is_fixed_link(dn)) {
4164 			dev_err(&pdev->dev, "no PHY specified\n");
4165 			err = -ENODEV;
4166 			goto err_free_irq;
4167 		}
4168 
4169 		err = of_phy_register_fixed_link(dn);
4170 		if (err < 0) {
4171 			dev_err(&pdev->dev, "cannot register fixed PHY\n");
4172 			goto err_free_irq;
4173 		}
4174 
4175 		/* In the case of a fixed PHY, the DT node associated
4176 		 * to the PHY is the Ethernet MAC DT node.
4177 		 */
4178 		phy_node = of_node_get(dn);
4179 	}
4180 
4181 	phy_mode = of_get_phy_mode(dn);
4182 	if (phy_mode < 0) {
4183 		dev_err(&pdev->dev, "incorrect phy-mode\n");
4184 		err = -EINVAL;
4185 		goto err_put_phy_node;
4186 	}
4187 
4188 	dev->tx_queue_len = MVNETA_MAX_TXD;
4189 	dev->watchdog_timeo = 5 * HZ;
4190 	dev->netdev_ops = &mvneta_netdev_ops;
4191 
4192 	dev->ethtool_ops = &mvneta_eth_tool_ops;
4193 
4194 	pp = netdev_priv(dev);
4195 	spin_lock_init(&pp->lock);
4196 	pp->phy_node = phy_node;
4197 	pp->phy_interface = phy_mode;
4198 
4199 	err = of_property_read_string(dn, "managed", &managed);
4200 	pp->use_inband_status = (err == 0 &&
4201 				 strcmp(managed, "in-band-status") == 0);
4202 
4203 	pp->rxq_def = rxq_def;
4204 
4205 	/* Set the RX packet offset correction for platforms whose
4206 	 * NET_SKB_PAD exceeds 64B. The correction should be 64B for
4207 	 * 64-bit platforms and 0B for 32-bit ones.
4208 	 */
4209 	pp->rx_offset_correction =
4210 		max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);
4211 
4212 	pp->indir[0] = rxq_def;
4213 
4214 	/* Get special SoC configurations */
4215 	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
4216 		pp->neta_armada3700 = true;
4217 
4218 	pp->clk = devm_clk_get(&pdev->dev, "core");
4219 	if (IS_ERR(pp->clk))
4220 		pp->clk = devm_clk_get(&pdev->dev, NULL);
4221 	if (IS_ERR(pp->clk)) {
4222 		err = PTR_ERR(pp->clk);
4223 		goto err_put_phy_node;
4224 	}
4225 
4226 	clk_prepare_enable(pp->clk);
4227 
4228 	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
4229 	if (!IS_ERR(pp->clk_bus))
4230 		clk_prepare_enable(pp->clk_bus);
4231 
4232 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4233 	pp->base = devm_ioremap_resource(&pdev->dev, res);
4234 	if (IS_ERR(pp->base)) {
4235 		err = PTR_ERR(pp->base);
4236 		goto err_clk;
4237 	}
4238 
4239 	/* Alloc per-cpu port structure */
4240 	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
4241 	if (!pp->ports) {
4242 		err = -ENOMEM;
4243 		goto err_clk;
4244 	}
4245 
4246 	/* Alloc per-cpu stats */
4247 	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
4248 	if (!pp->stats) {
4249 		err = -ENOMEM;
4250 		goto err_free_ports;
4251 	}
4252 
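	/* MAC address selection order: device tree property first, then
	 * whatever address is already programmed in the hardware, and
	 * finally a random one.
	 */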
4253 	dt_mac_addr = of_get_mac_address(dn);
4254 	if (dt_mac_addr) {
4255 		mac_from = "device tree";
4256 		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
4257 	} else {
4258 		mvneta_get_mac_addr(pp, hw_mac_addr);
4259 		if (is_valid_ether_addr(hw_mac_addr)) {
4260 			mac_from = "hardware";
4261 			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
4262 		} else {
4263 			mac_from = "random";
4264 			eth_hw_addr_random(dev);
4265 		}
4266 	}
4267 
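	/* Pick the TX checksum offload limit: from DT when present and
	 * sane, otherwise a conservative default on armada-370 (which
	 * apparently cannot offload checksums for larger frames) and the
	 * hardware maximum elsewhere.
	 */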
4268 	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
4269 		if (tx_csum_limit < 0 ||
4270 		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
4271 			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4272 			dev_info(&pdev->dev,
4273 				 "Wrong TX csum limit in DT, set to %dB\n",
4274 				 MVNETA_TX_CSUM_DEF_SIZE);
4275 		}
4276 	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
4277 		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4278 	} else {
4279 		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
4280 	}
4281 
4282 	pp->tx_csum_limit = tx_csum_limit;
4283 
4284 	pp->dram_target_info = mv_mbus_dram_info();
4285 	/* Armada3700 requires setting a default configuration for the
4286 	 * Mbus windows, however without using a filled
4287 	 * mbus_dram_target_info structure.
4288 	 */
4289 	if (pp->dram_target_info || pp->neta_armada3700)
4290 		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4291 
4292 	pp->tx_ring_size = MVNETA_MAX_TXD;
4293 	pp->rx_ring_size = MVNETA_MAX_RXD;
4294 
4295 	pp->dev = dev;
4296 	SET_NETDEV_DEV(dev, &pdev->dev);
4297 
4298 	pp->id = global_port_id++;
4299 
4300 	/* Obtain access to BM resources if enabled and already initialized */
4301 	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
4302 	if (bm_node && bm_node->data) {
4303 		pp->bm_priv = bm_node->data;
4304 		err = mvneta_bm_port_init(pdev, pp);
4305 		if (err < 0) {
4306 			dev_info(&pdev->dev, "use SW buffer management\n");
4307 			pp->bm_priv = NULL;
4308 		}
4309 	}
4310 	of_node_put(bm_node);
4311 
4312 	err = mvneta_init(&pdev->dev, pp);
4313 	if (err < 0)
4314 		goto err_netdev;
4315 
4316 	err = mvneta_port_power_up(pp, phy_mode);
4317 	if (err < 0) {
4318 		dev_err(&pdev->dev, "can't power up port\n");
4319 		goto err_netdev;
4320 	}
4321 
4322 	/* Armada3700 network controller does not support per-cpu
4323 	 * operation, so only single NAPI should be initialized.
4324 	 */
4325 	if (pp->neta_armada3700) {
4326 		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
4327 	} else {
4328 		for_each_present_cpu(cpu) {
4329 			struct mvneta_pcpu_port *port =
4330 				per_cpu_ptr(pp->ports, cpu);
4331 
4332 			netif_napi_add(dev, &port->napi, mvneta_poll,
4333 				       NAPI_POLL_WEIGHT);
4334 			port->pp = pp;
4335 		}
4336 	}
4337 
4338 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO;
4339 	dev->hw_features |= dev->features;
4340 	dev->vlan_features |= dev->features;
4341 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
4342 	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
4343 
4344 	/* MTU range: 68 - 9676 */
4345 	dev->min_mtu = ETH_MIN_MTU;
4346 	/* 9676 == 9700 - 20 and rounding to 8 */
4347 	dev->max_mtu = 9676;
4348 
4349 	err = register_netdev(dev);
4350 	if (err < 0) {
4351 		dev_err(&pdev->dev, "failed to register\n");
4352 		goto err_netdev;
4353 	}
4354 
4355 	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
4356 		    dev->dev_addr);
4357 
4358 	platform_set_drvdata(pdev, pp->dev);
4359 
4360 	if (pp->use_inband_status) {
4361 		struct phy_device *phy = of_phy_find_device(dn);
4362 
4363 		mvneta_fixed_link_update(pp, phy);
4364 
4365 		put_device(&phy->mdio.dev);
4366 	}
4367 
4368 	return 0;
4369 
4370 err_netdev:
4371 	if (pp->bm_priv) {
4372 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4373 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4374 				       1 << pp->id);
4375 	}
4376 	free_percpu(pp->stats);
4377 err_free_ports:
4378 	free_percpu(pp->ports);
4379 err_clk:
4380 	clk_disable_unprepare(pp->clk_bus);
4381 	clk_disable_unprepare(pp->clk);
4382 err_put_phy_node:
4383 	of_node_put(phy_node);
4384 	if (of_phy_is_fixed_link(dn))
4385 		of_phy_deregister_fixed_link(dn);
4386 err_free_irq:
4387 	irq_dispose_mapping(dev->irq);
4388 err_free_netdev:
4389 	free_netdev(dev);
4390 	return err;
4391 }
4392 
4393 /* Device removal routine */
4394 static int mvneta_remove(struct platform_device *pdev)
4395 {
4396 	struct net_device  *dev = platform_get_drvdata(pdev);
4397 	struct device_node *dn = pdev->dev.of_node;
4398 	struct mvneta_port *pp = netdev_priv(dev);
4399 
4400 	unregister_netdev(dev);
4401 	clk_disable_unprepare(pp->clk_bus);
4402 	clk_disable_unprepare(pp->clk);
4403 	free_percpu(pp->ports);
4404 	free_percpu(pp->stats);
4405 	if (of_phy_is_fixed_link(dn))
4406 		of_phy_deregister_fixed_link(dn);
4407 	irq_dispose_mapping(dev->irq);
4408 	of_node_put(pp->phy_node);
4409 	free_netdev(dev);
4410 
4411 	if (pp->bm_priv) {
4412 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4413 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4414 				       1 << pp->id);
4415 	}
4416 
4417 	return 0;
4418 }
4419 
4420 #ifdef CONFIG_PM_SLEEP
4421 static int mvneta_suspend(struct device *device)
4422 {
4423 	struct net_device *dev = dev_get_drvdata(device);
4424 	struct mvneta_port *pp = netdev_priv(dev);
4425 
4426 	if (netif_running(dev))
4427 		mvneta_stop(dev);
4428 	netif_device_detach(dev);
4429 	clk_disable_unprepare(pp->clk_bus);
4430 	clk_disable_unprepare(pp->clk);
4431 	return 0;
4432 }
4433 
4434 static int mvneta_resume(struct device *device)
4435 {
4436 	struct platform_device *pdev = to_platform_device(device);
4437 	struct net_device *dev = dev_get_drvdata(device);
4438 	struct mvneta_port *pp = netdev_priv(dev);
4439 	int err;
4440 
4441 	clk_prepare_enable(pp->clk);
4442 	if (!IS_ERR(pp->clk_bus))
4443 		clk_prepare_enable(pp->clk_bus);
4444 	if (pp->dram_target_info || pp->neta_armada3700)
4445 		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4446 	if (pp->bm_priv) {
4447 		err = mvneta_bm_port_init(pdev, pp);
4448 		if (err < 0) {
4449 			dev_info(&pdev->dev, "use SW buffer management\n");
4450 			pp->bm_priv = NULL;
4451 		}
4452 	}
4453 	mvneta_defaults_set(pp);
4454 	err = mvneta_port_power_up(pp, pp->phy_interface);
4455 	if (err < 0) {
4456 		dev_err(device, "can't power up port\n");
4457 		return err;
4458 	}
4459 
4460 	if (pp->use_inband_status)
4461 		mvneta_fixed_link_update(pp, dev->phydev);
4462 
4463 	netif_device_attach(dev);
4464 	if (netif_running(dev)) {
4465 		mvneta_open(dev);
4466 		mvneta_set_rx_mode(dev);
4467 	}
4468 
4469 	return 0;
4470 }
4471 #endif
4472 
4473 static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
4474 
4475 static const struct of_device_id mvneta_match[] = {
4476 	{ .compatible = "marvell,armada-370-neta" },
4477 	{ .compatible = "marvell,armada-xp-neta" },
4478 	{ .compatible = "marvell,armada-3700-neta" },
4479 	{ }
4480 };
4481 MODULE_DEVICE_TABLE(of, mvneta_match);
4482 
4483 static struct platform_driver mvneta_driver = {
4484 	.probe = mvneta_probe,
4485 	.remove = mvneta_remove,
4486 	.driver = {
4487 		.name = MVNETA_DRIVER_NAME,
4488 		.of_match_table = mvneta_match,
4489 		.pm = &mvneta_pm_ops,
4490 	},
4491 };
4492 
4493 static int __init mvneta_driver_init(void)
4494 {
4495 	int ret;
4496 
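	/* CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state; the
	 * returned state number is kept in online_hpstate so per-port
	 * instances can be added and removed later.
	 */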
4497 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
4498 				      mvneta_cpu_online,
4499 				      mvneta_cpu_down_prepare);
4500 	if (ret < 0)
4501 		goto out;
4502 	online_hpstate = ret;
4503 	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
4504 				      NULL, mvneta_cpu_dead);
4505 	if (ret)
4506 		goto err_dead;
4507 
4508 	ret = platform_driver_register(&mvneta_driver);
4509 	if (ret)
4510 		goto err;
4511 	return 0;
4512 
4513 err:
4514 	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4515 err_dead:
4516 	cpuhp_remove_multi_state(online_hpstate);
4517 out:
4518 	return ret;
4519 }
4520 module_init(mvneta_driver_init);
4521 
4522 static void __exit mvneta_driver_exit(void)
4523 {
4524 	platform_driver_unregister(&mvneta_driver);
4525 	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4526 	cpuhp_remove_multi_state(online_hpstate);
4527 }
4528 module_exit(mvneta_driver_exit);
4529 
4530 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
4531 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
4532 MODULE_LICENSE("GPL");
4533 
4534 module_param(rxq_number, int, S_IRUGO);
4535 module_param(txq_number, int, S_IRUGO);
4536 
4537 module_param(rxq_def, int, S_IRUGO);
4538 module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
4539