/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
#include <fdt_support.h>
#include <linux/mdio.h>

DECLARE_GLOBAL_DATA_PTR;

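/*
 * Minimal stand-ins for the Linux per-CPU and SMP helpers used by the
 * original kernel driver: U-Boot runs single-threaded on one CPU, so
 * the pointer checks below compile away and all CPU iteration
 * collapses to CPU 0.
 */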
#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()	0
#define num_present_cpus()	1
#define for_each_present_cpu(cpu)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++)

#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS		1

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP			(2 + ETH_HLEN + 4 + 32)
#define MTU			1500
#define RX_BUFFER_SIZE		(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
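/*
 * Example: with ETH_HLEN = 14, WRAP works out to 52 bytes, so
 * RX_BUFFER_SIZE rounds 1500 + 52 = 1552 bytes up to the DMA alignment
 * (1600 bytes on a platform with a 64-byte ARCH_DMA_MINALIGN).
 */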

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define     MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define     MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define     MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define     MVPP2_RXQ_POOL_SHORT_OFFS		20
#define     MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define     MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define     MVPP2_RXQ_POOL_LONG_OFFS		24
#define     MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define     MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define     MVPP2_RXQ_PACKET_OFFSET_OFFS	28
#define     MVPP2_RXQ_PACKET_OFFSET_MASK	0x70000000
#define     MVPP2_RXQ_DISABLE_MASK		BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define     MVPP2_PRS_PORT_LU_MAX		0xf
#define     MVPP2_PRS_PORT_LU_MASK(port)	(0xff << ((port) * 4))
#define     MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define     MVPP2_PRS_INIT_OFF_MASK(port)	(0x3f << (((port) % 4) * 8))
#define     MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define     MVPP2_PRS_MAX_LOOP_MASK(port)	(0xff << (((port) % 4) * 8))
#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define     MVPP2_PRS_TCAM_INV_MASK		BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define     MVPP2_PRS_TCAM_EN_MASK		BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define     MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define     MVPP2_CLS_PORT_WAY_MASK(port)	(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS	6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define     MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS	3
#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK	0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define     MVPP22_DESC_ADDR_OFFS		8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define     MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET	0
#define     MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define     MVPP2_RXQ_OCCUPIED_MASK		0x3fff
#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET	16
#define     MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define     MVPP2_OCCUPIED_THRESH_OFFSET	0
#define     MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define     MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define     MVPP2_TRANSMITTED_THRESH_OFFSET	16
#define     MVPP2_TRANSMITTED_THRESH_MASK	0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define     MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define     MVPP2_PREF_BUF_SIZE_4		(BIT(12) | BIT(13))
#define     MVPP2_PREF_BUF_SIZE_16		(BIT(12) | BIT(14))
#define     MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define     MVPP2_TXQ_DRAIN_EN_MASK		BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define     MVPP2_TXQ_PENDING_MASK		0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define     MVPP2_TRANSMITTED_COUNT_OFFSET	16
#define     MVPP2_TRANSMITTED_COUNT_MASK	0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define     MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define     MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define     MVPP22_AGGR_TXQ_DESC_ADDR_OFFS	8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define     MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG          0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK   0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7

#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG     0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK    0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK      0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET    8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define     MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define     MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define     MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define     MVPP2_BM_POOL_BASE_ADDR_MASK	0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define     MVPP2_BM_POOL_SIZE_MASK		0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define     MVPP2_BM_POOL_GET_READ_PTR_MASK	0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define     MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define     MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define     MVPP2_BM_START_MASK			BIT(0)
#define     MVPP2_BM_STOP_MASK			BIT(1)
#define     MVPP2_BM_STATE_MASK			BIT(4)
#define     MVPP2_BM_LOW_THRESH_OFFS		8
#define     MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define     MVPP2_BM_LOW_THRESH_VALUE(val)	((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define     MVPP2_BM_HIGH_THRESH_OFFS		16
#define     MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define     MVPP2_BM_HIGH_THRESH_VALUE(val)	((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define     MVPP2_BM_RELEASED_DELAY_MASK	BIT(0)
#define     MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define     MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define     MVPP2_BM_BPPE_FULL_MASK		BIT(3)
#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK	BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK	BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC		0x6444
#define     MVPP2_BM_ADDR_HIGH_PHYS_MASK	0xff
#define     MVPP2_BM_ADDR_HIGH_VIRT_MASK	0xff00
#define     MVPP2_BM_ADDR_HIGH_VIRT_SHIFT	8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK	BIT(0)
#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK	BIT(1)
#define     MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP21_BM_MC_RLS_REG			0x64c4
#define     MVPP2_BM_MC_ID_MASK			0xfff
#define     MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define     MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define	    MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
#define MVPP22_BM_MC_RLS_REG			0x64d4
#define MVPP22_BM_POOL_BASE_HIGH_REG		0x6310
#define MVPP22_BM_POOL_BASE_HIGH_MASK		0xff

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define     MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define     MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define     MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define     MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define     MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define     MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define     MVPP2_TX_PORT_FLUSH_MASK(port)	(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define     MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define     MVPP2_EXT_GLOBAL_CTRL_DEFAULT	0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define      MVPP2_GMAC_PORT_EN_MASK		BIT(0)
#define      MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define      MVPP2_GMAC_MAX_RX_SIZE_OFFS	2
#define      MVPP2_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define      MVPP2_GMAC_MIB_CNTR_EN_MASK	BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(1)
#define      MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define      MVPP2_GMAC_PCS_LB_EN_BIT		6
#define      MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define      MVPP2_GMAC_SA_LOW_OFFS		7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define      MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define      MVPP2_GMAC_SGMII_MODE_MASK		BIT(0)
#define      MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define      MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define      MVPP2_GMAC_PORT_DIS_PADING_MASK	BIT(5)
#define      MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define      MVPP2_GMAC_CLK_125_BYPS_EN_MASK	BIT(9)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define      MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define      MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define      MVPP2_GMAC_EN_PCS_AN		BIT(2)
#define      MVPP2_GMAC_AN_BYPASS_EN		BIT(3)
#define      MVPP2_GMAC_CONFIG_MII_SPEED	BIT(5)
#define      MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define      MVPP2_GMAC_AN_SPEED_EN		BIT(7)
#define      MVPP2_GMAC_FC_ADV_EN		BIT(9)
#define      MVPP2_GMAC_EN_FC_AN		BIT(11)
#define      MVPP2_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define      MVPP2_GMAC_AN_DUPLEX_EN		BIT(13)
#define      MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG	BIT(15)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define      MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS	6
#define      MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define      MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_GMAC_CTRL_4_REG			0x90
#define      MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK	BIT(0)
#define      MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK	BIT(5)
#define      MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK	BIT(6)
#define      MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK	BIT(7)

/*
 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */

/* Port Mac Control0 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define      MVPP22_XLG_PORT_EN			BIT(0)
#define      MVPP22_XLG_MAC_RESETN		BIT(1)
#define      MVPP22_XLG_RX_FC_EN		BIT(7)
#define      MVPP22_XLG_MIBCNT_DIS		BIT(13)
/* Port Mac Control1 */
#define MVPP22_XLG_CTRL1_REG			0x104
#define      MVPP22_XLG_MAX_RX_SIZE_OFFS	0
#define      MVPP22_XLG_MAX_RX_SIZE_MASK	0x1fff
/* Port Interrupt Mask */
#define MVPP22_XLG_INTERRUPT_MASK_REG		0x118
#define      MVPP22_XLG_INTERRUPT_LINK_CHANGE	BIT(1)
/* Port Mac Control3 */
#define MVPP22_XLG_CTRL3_REG			0x11c
#define      MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define      MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define      MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC	(1 << 13)
/* Port Mac Control4 */
#define MVPP22_XLG_CTRL4_REG			0x184
#define      MVPP22_XLG_FORWARD_802_3X_FC_EN	BIT(5)
#define      MVPP22_XLG_FORWARD_PFC_EN		BIT(6)
#define      MVPP22_XLG_MODE_DMA_1G		BIT(12)
#define      MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK	BIT(14)

/* XPCS registers */

/* Global Configuration 0 */
#define MVPP22_XPCS_GLOBAL_CFG_0_REG		0x0
#define      MVPP22_XPCS_PCSRESET		BIT(0)
#define      MVPP22_XPCS_PCSMODE_OFFS		3
#define      MVPP22_XPCS_PCSMODE_MASK		(0x3 << \
						 MVPP22_XPCS_PCSMODE_OFFS)
#define      MVPP22_XPCS_LANEACTIVE_OFFS	5
#define      MVPP22_XPCS_LANEACTIVE_MASK	(0x3 << \
						 MVPP22_XPCS_LANEACTIVE_OFFS)

/* MPCS registers */

#define PCS40G_COMMON_CONTROL			0x14
#define      FORWARD_ERROR_CORRECTION_MASK	BIT(10)

#define PCS_CLOCK_RESET				0x14c
#define      TX_SD_CLK_RESET_MASK		BIT(0)
#define      RX_SD_CLK_RESET_MASK		BIT(1)
#define      MAC_CLK_RESET_MASK			BIT(2)
#define      CLK_DIVISION_RATIO_OFFS		4
#define      CLK_DIVISION_RATIO_MASK		(0x7 << CLK_DIVISION_RATIO_OFFS)
#define      CLK_DIV_PHASE_SET_MASK		BIT(11)

/* System Soft Reset 1 */
#define GOP_SOFT_RESET_1_REG			0x108
#define     NETC_GOP_SOFT_RESET_OFFS		6
#define     NETC_GOP_SOFT_RESET_MASK		(0x1 << \
						 NETC_GOP_SOFT_RESET_OFFS)

/* Ports Control 0 */
#define NETCOMP_PORTS_CONTROL_0_REG		0x110
#define     NETC_BUS_WIDTH_SELECT_OFFS		1
#define     NETC_BUS_WIDTH_SELECT_MASK		(0x1 << \
						 NETC_BUS_WIDTH_SELECT_OFFS)
#define     NETC_GIG_RX_DATA_SAMPLE_OFFS	29
#define     NETC_GIG_RX_DATA_SAMPLE_MASK	(0x1 << \
						 NETC_GIG_RX_DATA_SAMPLE_OFFS)
#define     NETC_CLK_DIV_PHASE_OFFS		31
#define     NETC_CLK_DIV_PHASE_MASK		(0x1 << NETC_CLK_DIV_PHASE_OFFS)
/* Ports Control 1 */
#define NETCOMP_PORTS_CONTROL_1_REG		0x114
#define     NETC_PORTS_ACTIVE_OFFSET(p)		(0 + p)
#define     NETC_PORTS_ACTIVE_MASK(p)		(0x1 << \
						 NETC_PORTS_ACTIVE_OFFSET(p))
#define     NETC_PORT_GIG_RF_RESET_OFFS(p)	(28 + p)
#define     NETC_PORT_GIG_RF_RESET_MASK(p)	(0x1 << \
						 NETC_PORT_GIG_RF_RESET_OFFS(p))
#define NETCOMP_CONTROL_0_REG			0x120
#define     NETC_GBE_PORT0_SGMII_MODE_OFFS	0
#define     NETC_GBE_PORT0_SGMII_MODE_MASK	(0x1 << \
						 NETC_GBE_PORT0_SGMII_MODE_OFFS)
#define     NETC_GBE_PORT1_SGMII_MODE_OFFS	1
#define     NETC_GBE_PORT1_SGMII_MODE_MASK	(0x1 << \
						 NETC_GBE_PORT1_SGMII_MODE_OFFS)
#define     NETC_GBE_PORT1_MII_MODE_OFFS	2
#define     NETC_GBE_PORT1_MII_MODE_MASK	(0x1 << \
						 NETC_GBE_PORT1_MII_MODE_OFFS)

#define MVPP22_SMI_MISC_CFG_REG			(MVPP22_SMI + 0x04)
#define      MVPP22_SMI_POLLING_EN		BIT(10)

#define MVPP22_SMI_PHY_ADDR_REG(port)		(MVPP22_SMI + 0x04 + \
						 (0x4 * (port)))

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
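/*
 * Typical use: idx = MVPP2_QUEUE_NEXT_DESC(rxq, idx); advances a ring
 * index by one and wraps back to 0 once last_desc is reached.
 */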

/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
#define MVPP22_SMI				0x1200

/* Additional PPv2.2 offsets */
#define MVPP22_MPCS				0x007000
#define MVPP22_XPCS				0x007400
#define MVPP22_PORT_BASE			0x007e00
#define MVPP22_PORT_OFFSET			0x001000
#define MVPP22_RFU1				0x318000

/* Maximum number of ports */
#define MVPP22_GOP_MAC_NUM			4

/* GMAC TX FIFO minimum threshold values, per interface mode */
#define MVPP2_RGMII_TX_FIFO_MIN_TH		0x41
#define MVPP2_SGMII_TX_FIFO_MIN_TH		0x5
#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH		0xb

/* Net Complex */
enum mv_netc_topology {
	MV_NETC_GE_MAC2_SGMII		=	BIT(0),
	MV_NETC_GE_MAC3_SGMII		=	BIT(1),
	MV_NETC_GE_MAC3_RGMII		=	BIT(2),
};

enum mv_netc_phase {
	MV_NETC_FIRST_PHASE,
	MV_NETC_SECOND_PHASE,
};

enum mv_netc_sgmii_xmi_mode {
	MV_NETC_GBE_SGMII,
	MV_NETC_GBE_XMII,
};

enum mv_netc_mii_mode {
	MV_NETC_GBE_RGMII,
	MV_NETC_GBE_MII,
};

enum mv_netc_lanes {
	MV_NETC_LANE_23,
	MV_NETC_LANE_45,
};

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled with zeroes automatically on
 * the RX side. Since these two bytes sit in front of the Ethernet
 * header, they leave the IP header aligned on a 4-byte boundary: the
 * hardware skips them on its own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ		1

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		1
#define CONFIG_MV_ETH_RXQ		8	/* increment by 8 */

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		16

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		16

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP21_RX_FIFO_PORT_DATA_SIZE		0x2000
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE		0x80
#define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE	0x8000
#define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE	0x2000
#define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE	0x1000
#define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE	0x200
#define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE	0x80
#define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE	0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80

/* TX general registers */
#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port)	(0x8860 + ((eth_tx_port) << 2))
#define MVPP22_TX_FIFO_SIZE_MASK		0xf

/* TX FIFO constants */
#define MVPP2_TX_FIFO_DATA_SIZE_10KB		0xa
#define MVPP2_TX_FIFO_DATA_SIZE_3KB		0x3

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	0

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
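/*
 * Example: MVPP2_RX_PKT_SIZE(1500) adds 2 + 4 + 14 + 4 = 24 bytes of
 * Marvell header, VLAN tag, Ethernet header and FCS, then aligns
 * 1524 up to the 32-byte cache line, giving 1536.
 */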

#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH   = 1,
	MVPP2_TAG_TYPE_DSA  = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
				    (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
					      (((offs) * 2) - ((offs) % 2)  + 2)
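/*
 * Byte mapping example for the two macros above: data bytes 0,1,2,3 map
 * to TCAM bytes 0,1,4,5 and their enable bytes to 2,3,6,7, i.e. each
 * 32-bit TCAM word holds two data bytes in its low half and the two
 * matching enable (mask) bytes in its high half.
 */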
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
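/*
 * With NET_SKB_PAD = 32 and MVPP2_SKB_SHINFO_SIZE = 0 in this U-Boot
 * port, this works out to 512 - 32 = 480 bytes of packet data per
 * short buffer.
 */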

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;

	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	int probe_done;
	u8 num_ports;
};

struct mvpp2_pcpu_stats {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phyaddr;
	struct udevice *mdio_dev;
#ifdef CONFIG_DM_GPIO
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	unsigned int phy_speed;		/* SGMII 1Gbps vs 2.5Gbps */

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting. */
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};
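/*
 * Each of the four descriptor layouts above is exactly 32 bytes,
 * matching MVPP2_DESC_ALIGNED_SIZE, so the opaque unions have the same
 * size on both hardware versions.
 */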

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses only a few buffer descriptors,
 * so 1MB of descriptor space is sufficient.
 */
#define BD_SPACE	(1 << 20)

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}
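/*
 * On PPv2.2 the buffer DMA address shares a 64-bit descriptor field
 * with PTP/misc data; only bits 40:0 hold the address, hence the
 * GENMASK_ULL(40, 0) masking above and in the RX getters below.
 */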

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
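/*
 * Physical queue numbering example: the first 16 egress "ports" are the
 * PON T-CONTs, so Ethernet port 0 is physical egress port 16 and its
 * txq 0 maps to physical TXQ (16 + 0) * 8 + 0 = 128.
 */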

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}
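/*
 * Note: the port-enable byte stores the complement of the port map, so
 * a cleared bit means "match this port"; mvpp2_prs_tcam_port_map_get()
 * below undoes the inversion.
 */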
1494 
1495 /* Obtain port map from tcam sw entry */
mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry * pe)1496 static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1497 {
1498 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1499 
1500 	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1501 }
1502 
1503 /* Set byte of data and its enable bits in tcam sw entry */
mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry * pe,unsigned int offs,unsigned char byte,unsigned char enable)1504 static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1505 					 unsigned int offs, unsigned char byte,
1506 					 unsigned char enable)
1507 {
1508 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1509 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1510 }
1511 
1512 /* Get byte of data and its enable bits from tcam sw entry */
mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry * pe,unsigned int offs,unsigned char * byte,unsigned char * enable)1513 static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1514 					 unsigned int offs, unsigned char *byte,
1515 					 unsigned char *enable)
1516 {
1517 	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1518 	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1519 }
1520 
1521 /* Set ethertype in tcam sw entry */
mvpp2_prs_match_etype(struct mvpp2_prs_entry * pe,int offset,unsigned short ethertype)1522 static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1523 				  unsigned short ethertype)
1524 {
1525 	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1526 	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1527 }
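
/*
 * The ethertype is matched big-endian, one byte at a time, each with a
 * full 0xff enable mask: e.g. matching IPv4 (0x0800) at offset 0
 * compares data byte 0 against 0x08 and data byte 1 against 0x00.
 */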
1528 
1529 /* Set bits in sram sw entry */
1530 static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1531 				    int val)
1532 {
1533 	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1534 }
1535 
1536 /* Clear bits in sram sw entry */
1537 static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1538 				      int val)
1539 {
1540 	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1541 }
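
/*
 * Both helpers treat the SRAM as a plain byte array: assuming
 * MVPP2_BIT_TO_BYTE(b) expands to ((b) / 8), bit 'bit_num' lives in byte
 * bit_num / 8 at position bit_num % 8. 'val' is shifted as-is, so a
 * multi-bit value must fit inside the addressed byte. For instance,
 * bit 19 lands in byte 2, bit position 3:
 *
 *	mvpp2_prs_sram_bits_set(pe, 19, 1);	sets byte[2] |= 0x08
 *	mvpp2_prs_sram_bits_clear(pe, 19, 1);	clears byte[2] &= ~0x08
 */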
1542 
1543 /* Update ri bits in sram sw entry */
1544 static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1545 				     unsigned int bits, unsigned int mask)
1546 {
1547 	unsigned int i;
1548 
1549 	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1550 		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1551 
1552 		if (!(mask & BIT(i)))
1553 			continue;
1554 
1555 		if (bits & BIT(i))
1556 			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1557 		else
1558 			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1559 
1560 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1561 	}
1562 }
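
/*
 * For every bit selected by 'mask' the helper above writes the data bit
 * and also raises the matching RI_CTRL bit, which flags that result-info
 * bit as meaningful for this entry; bits outside 'mask' are untouched.
 * mvpp2_prs_sram_ai_update() below follows the same data/ctrl pattern
 * for the AI field.
 */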
1563 
1564 /* Update ai bits in sram sw entry */
1565 static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1566 				     unsigned int bits, unsigned int mask)
1567 {
1568 	unsigned int i;
1569 	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1570 
1571 	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1572 
1573 		if (!(mask & BIT(i)))
1574 			continue;
1575 
1576 		if (bits & BIT(i))
1577 			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1578 		else
1579 			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1580 
1581 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1582 	}
1583 }
1584 
1585 /* Read ai bits from sram sw entry */
1586 static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1587 {
1588 	u8 bits;
1589 	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1590 	int ai_en_off = ai_off + 1;
1591 	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1592 
1593 	bits = (pe->sram.byte[ai_off] >> ai_shift) |
1594 	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1595 
1596 	return bits;
1597 }
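
/*
 * The AI field need not be byte aligned, so the read stitches two
 * adjacent bytes together. As a worked example (the offset value is
 * purely illustrative): if MVPP2_PRS_SRAM_AI_OFFS were 90, then
 * ai_off = 11 and ai_shift = 2, so the low 6 bits come from byte 11
 * shifted right by 2 and the top 2 bits from byte 12 shifted left by 6.
 */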
1598 
1599 /* In sram sw entry set lookup ID field of the tcam key to be used in the next
1600  * lookup iteration
1601  */
1602 static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1603 				       unsigned int lu)
1604 {
1605 	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1606 
1607 	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1608 				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
1609 	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1610 }
1611 
1612 /* In the sram sw entry set sign and value of the next lookup offset
1613  * and the offset value generated to the classifier
1614  */
1615 static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1616 				     unsigned int op)
1617 {
1618 	/* Set sign */
1619 	if (shift < 0) {
1620 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1621 		shift = 0 - shift;
1622 	} else {
1623 		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1624 	}
1625 
1626 	/* Set value */
1627 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1628 							   (unsigned char)shift;
1629 
1630 	/* Reset and set operation */
1631 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1632 				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1633 	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1634 
1635 	/* Set base offset as current */
1636 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1637 }
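
/*
 * The shift is encoded sign-magnitude: a sign bit plus an unsigned value
 * byte. E.g. shifting back over both Ethernet addresses would set the
 * sign bit and store the magnitude:
 *
 *	mvpp2_prs_sram_shift_set(pe, -(2 * ETH_ALEN),
 *				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 */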
1638 
1639 /* In the sram sw entry set sign and value of the user defined offset
1640  * generated to the classifier
1641  */
1642 static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1643 				      unsigned int type, int offset,
1644 				      unsigned int op)
1645 {
1646 	/* Set sign */
1647 	if (offset < 0) {
1648 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1649 		offset = 0 - offset;
1650 	} else {
1651 		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1652 	}
1653 
1654 	/* Set value */
1655 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1656 				  MVPP2_PRS_SRAM_UDF_MASK);
1657 	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1658 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1659 					MVPP2_PRS_SRAM_UDF_BITS)] &=
1660 	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1661 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1662 					MVPP2_PRS_SRAM_UDF_BITS)] |=
1663 				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1664 
1665 	/* Set offset type */
1666 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1667 				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1668 	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1669 
1670 	/* Set offset operation */
1671 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1672 				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1673 	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1674 
1675 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1676 					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1677 					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1678 				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1679 
1680 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1681 					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1682 			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1683 
1684 	/* Set base offset as current */
1685 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1686 }
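
/*
 * Unlike the shift value above, the UDF offset and op-select fields are
 * not byte aligned, which is why the tail bits spilling into the
 * following byte are cleared and rewritten by hand with the
 * complementary (8 - offs % 8) shift.
 */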
1687 
1688 /* Find parser flow entry */
1689 static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1690 {
1691 	struct mvpp2_prs_entry *pe;
1692 	int tid;
1693 
1694 	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1695 	if (!pe)
1696 		return NULL;
1697 	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1698 
1699 	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
1700 	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1701 		u8 bits;
1702 
1703 		if (!priv->prs_shadow[tid].valid ||
1704 		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1705 			continue;
1706 
1707 		pe->index = tid;
1708 		mvpp2_prs_hw_read(priv, pe);
1709 		bits = mvpp2_prs_sram_ai_get(pe);
1710 
1711 		/* SRAM stores the classification lookup ID in AI bits [5:0] */
1712 		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1713 			return pe;
1714 	}
1715 	kfree(pe);
1716 
1717 	return NULL;
1718 }
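
/*
 * Ownership note: on a match the heap-allocated entry is returned and
 * the caller must kfree() it once done, as mvpp2_prs_def_flow() does
 * after writing the entry back to hardware.
 */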
1719 
1720 /* Return first free tcam index, seeking from start to end */
1721 static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1722 				     unsigned char end)
1723 {
1724 	int tid;
1725 
1726 	if (start > end)
1727 		swap(start, end);
1728 
1729 	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1730 		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1731 
1732 	for (tid = start; tid <= end; tid++) {
1733 		if (!priv->prs_shadow[tid].valid)
1734 			return tid;
1735 	}
1736 
1737 	return -EINVAL;
1738 }
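
/*
 * Since start/end are swapped when start > end and the scan then runs
 * low-to-high, the argument order only defines the range, not the search
 * direction; both of these calls return the same lowest free index:
 *
 *	mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 *				  MVPP2_PE_LAST_FREE_TID);
 *	mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
 *				  MVPP2_PE_FIRST_FREE_TID);
 */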
1739 
1740 /* Enable/disable dropping all mac da's */
1741 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1742 {
1743 	struct mvpp2_prs_entry pe;
1744 
1745 	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1746 		/* Entry exists - update port only */
1747 		pe.index = MVPP2_PE_DROP_ALL;
1748 		mvpp2_prs_hw_read(priv, &pe);
1749 	} else {
1750 		/* Entry doesn't exist - create new */
1751 		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1752 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1753 		pe.index = MVPP2_PE_DROP_ALL;
1754 
1755 		/* Non-promiscuous mode for all ports - DROP unknown packets */
1756 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1757 					 MVPP2_PRS_RI_DROP_MASK);
1758 
1759 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1760 		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1761 
1762 		/* Update shadow table */
1763 		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1764 
1765 		/* Mask all ports */
1766 		mvpp2_prs_tcam_port_map_set(&pe, 0);
1767 	}
1768 
1769 	/* Update port mask */
1770 	mvpp2_prs_tcam_port_set(&pe, port, add);
1771 
1772 	mvpp2_prs_hw_write(priv, &pe);
1773 }
1774 
1775 /* Set port to promiscuous mode */
1776 static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1777 {
1778 	struct mvpp2_prs_entry pe;
1779 
1780 	/* Promiscuous mode - Accept unknown packets */
1781 
1782 	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1783 		/* Entry exists - update port only */
1784 		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1785 		mvpp2_prs_hw_read(priv, &pe);
1786 	} else {
1787 		/* Entry doesn't exist - create new */
1788 		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1789 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1790 		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1791 
1792 		/* Continue - set next lookup */
1793 		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1794 
1795 		/* Set result info bits */
1796 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1797 					 MVPP2_PRS_RI_L2_CAST_MASK);
1798 
1799 		/* Shift to ethertype */
1800 		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1801 					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1802 
1803 		/* Mask all ports */
1804 		mvpp2_prs_tcam_port_map_set(&pe, 0);
1805 
1806 		/* Update shadow table */
1807 		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1808 	}
1809 
1810 	/* Update port mask */
1811 	mvpp2_prs_tcam_port_set(&pe, port, add);
1812 
1813 	mvpp2_prs_hw_write(priv, &pe);
1814 }
1815 
1816 /* Accept multicast */
1817 static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1818 				    bool add)
1819 {
1820 	struct mvpp2_prs_entry pe;
1821 	unsigned char da_mc;
1822 
1823 	/* Ethernet multicast address first byte is
1824 	 * 0x01 for IPv4 and 0x33 for IPv6
1825 	 */
1826 	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1827 
1828 	if (priv->prs_shadow[index].valid) {
1829 		/* Entry exists - update port only */
1830 		pe.index = index;
1831 		mvpp2_prs_hw_read(priv, &pe);
1832 	} else {
1833 		/* Entry doesn't exist - create new */
1834 		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1835 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1836 		pe.index = index;
1837 
1838 		/* Continue - set next lookup */
1839 		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1840 
1841 		/* Set result info bits */
1842 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1843 					 MVPP2_PRS_RI_L2_CAST_MASK);
1844 
1845 		/* Update tcam entry data first byte */
1846 		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1847 
1848 		/* Shift to ethertype */
1849 		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1850 					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1851 
1852 		/* Mask all ports */
1853 		mvpp2_prs_tcam_port_map_set(&pe, 0);
1854 
1855 		/* Update shadow table */
1856 		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1857 	}
1858 
1859 	/* Update port mask */
1860 	mvpp2_prs_tcam_port_set(&pe, port, add);
1861 
1862 	mvpp2_prs_hw_write(priv, &pe);
1863 }
1864 
1865 /* Parser per-port initialization */
1866 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1867 				   int lu_max, int offset)
1868 {
1869 	u32 val;
1870 
1871 	/* Set lookup ID */
1872 	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1873 	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1874 	val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1875 	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1876 
1877 	/* Set maximum number of loops for packet received from port */
1878 	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1879 	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1880 	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1881 	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1882 
1883 	/* Set initial offset for packet header extraction for the first
1884 	 * searching loop
1885 	 */
1886 	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1887 	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1888 	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1889 	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1890 }
1891 
1892 /* Default flow entries initialization for all ports */
1893 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1894 {
1895 	struct mvpp2_prs_entry pe;
1896 	int port;
1897 
1898 	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1899 		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1900 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1901 		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1902 
1903 		/* Mask all ports */
1904 		mvpp2_prs_tcam_port_map_set(&pe, 0);
1905 
1906 		/* Set flow ID */
1907 		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1908 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1909 
1910 		/* Update shadow table and hw entry */
1911 		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1912 		mvpp2_prs_hw_write(priv, &pe);
1913 	}
1914 }
1915 
1916 /* Set default entry for Marvell Header field */
1917 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1918 {
1919 	struct mvpp2_prs_entry pe;
1920 
1921 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1922 
1923 	pe.index = MVPP2_PE_MH_DEFAULT;
1924 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1925 	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1926 				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1927 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1928 
1929 	/* Unmask all ports */
1930 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1931 
1932 	/* Update shadow table and hw entry */
1933 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1934 	mvpp2_prs_hw_write(priv, &pe);
1935 }
1936 
1937 /* Set default entries (placeholders) for promiscuous, non-promiscuous and
1938  * multicast MAC addresses
1939  */
1940 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1941 {
1942 	struct mvpp2_prs_entry pe;
1943 
1944 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1945 
1946 	/* Non-promiscuous mode for all ports - DROP unknown packets */
1947 	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1948 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1949 
1950 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1951 				 MVPP2_PRS_RI_DROP_MASK);
1952 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1953 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1954 
1955 	/* Unmask all ports */
1956 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1957 
1958 	/* Update shadow table and hw entry */
1959 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1960 	mvpp2_prs_hw_write(priv, &pe);
1961 
1962 	/* placeholders only - no ports */
1963 	mvpp2_prs_mac_drop_all_set(priv, 0, false);
1964 	mvpp2_prs_mac_promisc_set(priv, 0, false);
1965 	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
1966 	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
1967 }
1968 
1969 /* Match basic ethertypes */
1970 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1971 {
1972 	struct mvpp2_prs_entry pe;
1973 	int tid;
1974 
1975 	/* Ethertype: PPPoE */
1976 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1977 					MVPP2_PE_LAST_FREE_TID);
1978 	if (tid < 0)
1979 		return tid;
1980 
1981 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1982 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1983 	pe.index = tid;
1984 
1985 	mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);
1986 
1987 	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1988 				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1989 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1990 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1991 				 MVPP2_PRS_RI_PPPOE_MASK);
1992 
1993 	/* Update shadow table and hw entry */
1994 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1995 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1996 	priv->prs_shadow[pe.index].finish = false;
1997 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
1998 				MVPP2_PRS_RI_PPPOE_MASK);
1999 	mvpp2_prs_hw_write(priv, &pe);
2000 
2001 	/* Ethertype: ARP */
2002 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2003 					MVPP2_PE_LAST_FREE_TID);
2004 	if (tid < 0)
2005 		return tid;
2006 
2007 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2008 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2009 	pe.index = tid;
2010 
2011 	mvpp2_prs_match_etype(&pe, 0, PROT_ARP);
2012 
2013 	/* Generate flow in the next iteration */
2014 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2015 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2016 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2017 				 MVPP2_PRS_RI_L3_PROTO_MASK);
2018 	/* Set L3 offset */
2019 	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2020 				  MVPP2_ETH_TYPE_LEN,
2021 				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2022 
2023 	/* Update shadow table and hw entry */
2024 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2025 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2026 	priv->prs_shadow[pe.index].finish = true;
2027 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2028 				MVPP2_PRS_RI_L3_PROTO_MASK);
2029 	mvpp2_prs_hw_write(priv, &pe);
2030 
2031 	/* Ethertype: LBTD */
2032 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2033 					MVPP2_PE_LAST_FREE_TID);
2034 	if (tid < 0)
2035 		return tid;
2036 
2037 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2038 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2039 	pe.index = tid;
2040 
2041 	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2042 
2043 	/* Generate flow in the next iteration */
2044 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2045 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2046 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2047 				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2048 				 MVPP2_PRS_RI_CPU_CODE_MASK |
2049 				 MVPP2_PRS_RI_UDF3_MASK);
2050 	/* Set L3 offset */
2051 	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2052 				  MVPP2_ETH_TYPE_LEN,
2053 				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2054 
2055 	/* Update shadow table and hw entry */
2056 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2057 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2058 	priv->prs_shadow[pe.index].finish = true;
2059 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2060 				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2061 				MVPP2_PRS_RI_CPU_CODE_MASK |
2062 				MVPP2_PRS_RI_UDF3_MASK);
2063 	mvpp2_prs_hw_write(priv, &pe);
2064 
2065 	/* Ethertype: IPv4 without options */
2066 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2067 					MVPP2_PE_LAST_FREE_TID);
2068 	if (tid < 0)
2069 		return tid;
2070 
2071 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2072 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2073 	pe.index = tid;
2074 
2075 	mvpp2_prs_match_etype(&pe, 0, PROT_IP);
2076 	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2077 				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2078 				     MVPP2_PRS_IPV4_HEAD_MASK |
2079 				     MVPP2_PRS_IPV4_IHL_MASK);
2080 
2081 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2082 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2083 				 MVPP2_PRS_RI_L3_PROTO_MASK);
2084 	/* Skip eth_type + 4 bytes of IP header */
2085 	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2086 				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2087 	/* Set L3 offset */
2088 	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2089 				  MVPP2_ETH_TYPE_LEN,
2090 				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2091 
2092 	/* Update shadow table and hw entry */
2093 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2094 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2095 	priv->prs_shadow[pe.index].finish = false;
2096 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2097 				MVPP2_PRS_RI_L3_PROTO_MASK);
2098 	mvpp2_prs_hw_write(priv, &pe);
2099 
2100 	/* Ethertype: IPv4 with options */
2101 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2102 					MVPP2_PE_LAST_FREE_TID);
2103 	if (tid < 0)
2104 		return tid;
2105 
2106 	pe.index = tid;
2107 
2108 	/* Clear tcam data before updating */
2109 	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2110 	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2111 
2112 	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2113 				     MVPP2_PRS_IPV4_HEAD,
2114 				     MVPP2_PRS_IPV4_HEAD_MASK);
2115 
2116 	/* Clear ri before updating */
2117 	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2118 	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2119 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2120 				 MVPP2_PRS_RI_L3_PROTO_MASK);
2121 
2122 	/* Update shadow table and hw entry */
2123 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2124 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2125 	priv->prs_shadow[pe.index].finish = false;
2126 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2127 				MVPP2_PRS_RI_L3_PROTO_MASK);
2128 	mvpp2_prs_hw_write(priv, &pe);
2129 
2130 	/* Ethertype: IPv6 without options */
2131 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2132 					MVPP2_PE_LAST_FREE_TID);
2133 	if (tid < 0)
2134 		return tid;
2135 
2136 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2137 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2138 	pe.index = tid;
2139 
2140 	mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);
2141 
2142 	/* Skip DIP of IPV6 header */
2143 	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2144 				 MVPP2_MAX_L3_ADDR_SIZE,
2145 				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2146 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2147 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2148 				 MVPP2_PRS_RI_L3_PROTO_MASK);
2149 	/* Set L3 offset */
2150 	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2151 				  MVPP2_ETH_TYPE_LEN,
2152 				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2153 
2154 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2155 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2156 	priv->prs_shadow[pe.index].finish = false;
2157 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2158 				MVPP2_PRS_RI_L3_PROTO_MASK);
2159 	mvpp2_prs_hw_write(priv, &pe);
2160 
2161 	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2162 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2163 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2164 	pe.index = MVPP2_PE_ETH_TYPE_UN;
2165 
2166 	/* Unmask all ports */
2167 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2168 
2169 	/* Generate flow in the next iteration */
2170 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2171 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2172 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2173 				 MVPP2_PRS_RI_L3_PROTO_MASK);
2174 	/* Set L3 offset even though the L3 protocol is unknown */
2175 	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2176 				  MVPP2_ETH_TYPE_LEN,
2177 				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2178 
2179 	/* Update shadow table and hw entry */
2180 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2181 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2182 	priv->prs_shadow[pe.index].finish = true;
2183 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2184 				MVPP2_PRS_RI_L3_PROTO_MASK);
2185 	mvpp2_prs_hw_write(priv, &pe);
2186 
2187 	return 0;
2188 }
2189 
2190 /* Parser default initialization */
2191 static int mvpp2_prs_default_init(struct udevice *dev,
2192 				  struct mvpp2 *priv)
2193 {
2194 	int err, index, i;
2195 
2196 	/* Enable tcam table */
2197 	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2198 
2199 	/* Clear all tcam and sram entries */
2200 	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2201 		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2202 		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2203 			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2204 
2205 		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2206 		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2207 			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2208 	}
2209 
2210 	/* Invalidate all tcam entries */
2211 	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2212 		mvpp2_prs_hw_inv(priv, index);
2213 
2214 	priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2215 					sizeof(struct mvpp2_prs_shadow),
2216 					GFP_KERNEL);
2217 	if (!priv->prs_shadow)
2218 		return -ENOMEM;
2219 
2220 	/* Always start from lookup = 0 */
2221 	for (index = 0; index < MVPP2_MAX_PORTS; index++)
2222 		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2223 				       MVPP2_PRS_PORT_LU_MAX, 0);
2224 
2225 	mvpp2_prs_def_flow_init(priv);
2226 
2227 	mvpp2_prs_mh_init(priv);
2228 
2229 	mvpp2_prs_mac_init(priv);
2230 
2231 	err = mvpp2_prs_etype_init(priv);
2232 	if (err)
2233 		return err;
2234 
2235 	return 0;
2236 }
2237 
2238 /* Compare MAC DA with tcam entry data */
2239 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2240 				       const u8 *da, unsigned char *mask)
2241 {
2242 	unsigned char tcam_byte, tcam_mask;
2243 	int index;
2244 
2245 	for (index = 0; index < ETH_ALEN; index++) {
2246 		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2247 		if (tcam_mask != mask[index])
2248 			return false;
2249 
2250 		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2251 			return false;
2252 	}
2253 
2254 	return true;
2255 }
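
/*
 * Equality here is a masked compare: the per-byte masks must be
 * identical and the DA bytes must agree wherever the mask has bits set,
 * so e.g. a fully-masked (0xff) byte matches exactly one value while a
 * zero mask byte matches any value in that position.
 */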
2256 
2257 /* Find tcam entry with matched pair <MAC DA, port> */
2258 static struct mvpp2_prs_entry *
2259 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2260 			    unsigned char *mask, int udf_type)
2261 {
2262 	struct mvpp2_prs_entry *pe;
2263 	int tid;
2264 
2265 	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2266 	if (!pe)
2267 		return NULL;
2268 	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2269 
2270 	/* Go through all the entries with MVPP2_PRS_LU_MAC */
2271 	for (tid = MVPP2_PE_FIRST_FREE_TID;
2272 	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2273 		unsigned int entry_pmap;
2274 
2275 		if (!priv->prs_shadow[tid].valid ||
2276 		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2277 		    (priv->prs_shadow[tid].udf != udf_type))
2278 			continue;
2279 
2280 		pe->index = tid;
2281 		mvpp2_prs_hw_read(priv, pe);
2282 		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
2283 
2284 		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
2285 		    entry_pmap == pmap)
2286 			return pe;
2287 	}
2288 	kfree(pe);
2289 
2290 	return NULL;
2291 }
2292 
2293 /* Update parser's mac da entry */
2294 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
2295 				   const u8 *da, bool add)
2296 {
2297 	struct mvpp2_prs_entry *pe;
2298 	unsigned int pmap, len, ri;
2299 	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2300 	int tid;
2301 
2302 	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
2303 	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
2304 					 MVPP2_PRS_UDF_MAC_DEF);
2305 
2306 	/* No such entry */
2307 	if (!pe) {
2308 		if (!add)
2309 			return 0;
2310 
2311 		/* Create new TCAM entry */
2312 		/* Find the first range MAC entry */
2313 		for (tid = MVPP2_PE_FIRST_FREE_TID;
2314 		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
2315 			if (priv->prs_shadow[tid].valid &&
2316 			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
2317 			    (priv->prs_shadow[tid].udf ==
2318 						       MVPP2_PRS_UDF_MAC_RANGE))
2319 				break;
2320 
2321 		/* Go through all the entries from first to last */
2322 		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2323 						tid - 1);
2324 		if (tid < 0)
2325 			return tid;
2326 
2327 		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2328 		if (!pe)
2329 			return -ENOMEM;
2330 		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2331 		pe->index = tid;
2332 
2333 		/* Mask all ports */
2334 		mvpp2_prs_tcam_port_map_set(pe, 0);
2335 	}
2336 
2337 	/* Update port mask */
2338 	mvpp2_prs_tcam_port_set(pe, port, add);
2339 
2340 	/* Invalidate the entry if no ports are left enabled */
2341 	pmap = mvpp2_prs_tcam_port_map_get(pe);
2342 	if (pmap == 0) {
2343 		if (add) {
2344 			kfree(pe);
2345 			return -1;
2346 		}
2347 		mvpp2_prs_hw_inv(priv, pe->index);
2348 		priv->prs_shadow[pe->index].valid = false;
2349 		kfree(pe);
2350 		return 0;
2351 	}
2352 
2353 	/* Continue - set next lookup */
2354 	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
2355 
2356 	/* Set match on DA */
2357 	len = ETH_ALEN;
2358 	while (len--)
2359 		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
2360 
2361 	/* Set result info bits */
2362 	ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
2363 
2364 	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2365 				 MVPP2_PRS_RI_MAC_ME_MASK);
2366 	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2367 				MVPP2_PRS_RI_MAC_ME_MASK);
2368 
2369 	/* Shift to ethertype */
2370 	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
2371 				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2372 
2373 	/* Update shadow table and hw entry */
2374 	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
2375 	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
2376 	mvpp2_prs_hw_write(priv, pe);
2377 
2378 	kfree(pe);
2379 
2380 	return 0;
2381 }
2382 
2383 static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
2384 {
2385 	int err;
2386 
2387 	/* Remove old parser entry */
2388 	err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
2389 				      false);
2390 	if (err)
2391 		return err;
2392 
2393 	/* Add new parser entry */
2394 	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
2395 	if (err)
2396 		return err;
2397 
2398 	/* Set addr in the device */
2399 	memcpy(port->dev_addr, da, ETH_ALEN);
2400 
2401 	return 0;
2402 }
2403 
2404 /* Set prs flow for the port */
2405 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
2406 {
2407 	struct mvpp2_prs_entry *pe;
2408 	int tid;
2409 
2410 	pe = mvpp2_prs_flow_find(port->priv, port->id);
2411 
2412 	/* No such entry exists */
2413 	if (!pe) {
2414 		/* Go through all the entries from last to first */
2415 		tid = mvpp2_prs_tcam_first_free(port->priv,
2416 						MVPP2_PE_LAST_FREE_TID,
2417 					       MVPP2_PE_FIRST_FREE_TID);
2418 		if (tid < 0)
2419 			return tid;
2420 
2421 		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2422 		if (!pe)
2423 			return -ENOMEM;
2424 
2425 		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
2426 		pe->index = tid;
2427 
2428 		/* Set flow ID */
2429 		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2430 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2431 
2432 		/* Update shadow table */
2433 		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
2434 	}
2435 
2436 	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
2437 	mvpp2_prs_hw_write(port->priv, pe);
2438 	kfree(pe);
2439 
2440 	return 0;
2441 }
2442 
2443 /* Classifier configuration routines */
2444 
2445 /* Update classification flow table registers */
2446 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
2447 				 struct mvpp2_cls_flow_entry *fe)
2448 {
2449 	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
2450 	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
2451 	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
2452 	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
2453 }
2454 
2455 /* Update classification lookup table register */
2456 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
2457 				   struct mvpp2_cls_lookup_entry *le)
2458 {
2459 	u32 val;
2460 
2461 	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
2462 	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
2463 	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
2464 }
2465 
2466 /* Classifier default initialization */
2467 static void mvpp2_cls_init(struct mvpp2 *priv)
2468 {
2469 	struct mvpp2_cls_lookup_entry le;
2470 	struct mvpp2_cls_flow_entry fe;
2471 	int index;
2472 
2473 	/* Enable classifier */
2474 	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
2475 
2476 	/* Clear classifier flow table */
2477 	memset(&fe.data, 0, sizeof(fe.data));	/* size in bytes, not words */
2478 	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
2479 		fe.index = index;
2480 		mvpp2_cls_flow_write(priv, &fe);
2481 	}
2482 
2483 	/* Clear classifier lookup table */
2484 	le.data = 0;
2485 	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
2486 		le.lkpid = index;
2487 		le.way = 0;
2488 		mvpp2_cls_lookup_write(priv, &le);
2489 
2490 		le.way = 1;
2491 		mvpp2_cls_lookup_write(priv, &le);
2492 	}
2493 }
2494 
2495 static void mvpp2_cls_port_config(struct mvpp2_port *port)
2496 {
2497 	struct mvpp2_cls_lookup_entry le;
2498 	u32 val;
2499 
2500 	/* Set way for the port */
2501 	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
2502 	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
2503 	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
2504 
2505 	/* Pick the entry to be accessed in lookup ID decoding table
2506 	 * according to the way and lkpid.
2507 	 */
2508 	le.lkpid = port->id;
2509 	le.way = 0;
2510 	le.data = 0;
2511 
2512 	/* Set initial CPU queue for receiving packets */
2513 	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
2514 	le.data |= port->first_rxq;
2515 
2516 	/* Disable classification engines */
2517 	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
2518 
2519 	/* Update lookup ID table entry */
2520 	mvpp2_cls_lookup_write(port->priv, &le);
2521 }
2522 
2523 /* Set CPU queue number for oversize packets */
2524 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
2525 {
2526 	u32 val;
2527 
2528 	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
2529 		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
2530 
2531 	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
2532 		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
2533 
2534 	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
2535 	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
2536 	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
2537 }
2538 
2539 /* Buffer Manager configuration routines */
2540 
2541 /* Create pool */
2542 static int mvpp2_bm_pool_create(struct udevice *dev,
2543 				struct mvpp2 *priv,
2544 				struct mvpp2_bm_pool *bm_pool, int size)
2545 {
2546 	u32 val;
2547 
2548 	/* Number of buffer pointers must be a multiple of 16, as per
2549 	 * hardware constraints
2550 	 */
2551 	if (!IS_ALIGNED(size, 16))
2552 		return -EINVAL;
2553 
2554 	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
2555 	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
2556 	if (!bm_pool->virt_addr)
2557 		return -ENOMEM;
2558 
2559 	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
2560 			MVPP2_BM_POOL_PTR_ALIGN)) {
2561 		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
2562 			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
2563 		return -ENOMEM;
2564 	}
2565 
2566 	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
2567 		    lower_32_bits(bm_pool->dma_addr));
2568 	if (priv->hw_version == MVPP22)
2569 		mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG,
2570 			    (upper_32_bits(bm_pool->dma_addr) &
2571 			    MVPP22_BM_POOL_BASE_HIGH_MASK));
2572 	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
2573 
2574 	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2575 	val |= MVPP2_BM_START_MASK;
2576 	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2577 
2578 	bm_pool->type = MVPP2_BM_FREE;
2579 	bm_pool->size = size;
2580 	bm_pool->pkt_size = 0;
2581 	bm_pool->buf_num = 0;
2582 
2583 	return 0;
2584 }
2585 
2586 /* Set pool buffer size */
2587 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
2588 				      struct mvpp2_bm_pool *bm_pool,
2589 				      int buf_size)
2590 {
2591 	u32 val;
2592 
2593 	bm_pool->buf_size = buf_size;
2594 
2595 	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
2596 	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
2597 }
2598 
2599 /* Free all buffers from the pool */
2600 static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
2601 			       struct mvpp2_bm_pool *bm_pool)
2602 {
2603 	int i;
2604 
2605 	for (i = 0; i < bm_pool->buf_num; i++) {
2606 		/* Allocate buffer back from the buffer manager */
2607 		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
2608 	}
2609 
2610 	bm_pool->buf_num = 0;
2611 }
2612 
2613 /* Cleanup pool */
2614 static int mvpp2_bm_pool_destroy(struct udevice *dev,
2615 				 struct mvpp2 *priv,
2616 				 struct mvpp2_bm_pool *bm_pool)
2617 {
2618 	u32 val;
2619 
2620 	mvpp2_bm_bufs_free(dev, priv, bm_pool);
2621 	if (bm_pool->buf_num) {
2622 		dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id);
2623 		return 0;
2624 	}
2625 
2626 	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2627 	val |= MVPP2_BM_STOP_MASK;
2628 	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2629 
2630 	return 0;
2631 }
2632 
2633 static int mvpp2_bm_pools_init(struct udevice *dev,
2634 			       struct mvpp2 *priv)
2635 {
2636 	int i, err, size;
2637 	struct mvpp2_bm_pool *bm_pool;
2638 
2639 	/* Create all pools with maximum size */
2640 	size = MVPP2_BM_POOL_SIZE_MAX;
2641 	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2642 		bm_pool = &priv->bm_pools[i];
2643 		bm_pool->id = i;
2644 		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
2645 		if (err)
2646 			goto err_unroll_pools;
2647 		mvpp2_bm_pool_bufsize_set(priv, bm_pool, RX_BUFFER_SIZE);
2648 	}
2649 	return 0;
2650 
2651 err_unroll_pools:
2652 	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
2653 	for (i = i - 1; i >= 0; i--)
2654 		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
2655 	return err;
2656 }
2657 
2658 static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
2659 {
2660 	int i, err;
2661 
2662 	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2663 		/* Mask BM all interrupts */
2664 		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
2665 		/* Clear BM cause register */
2666 		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
2667 	}
2668 
2669 	/* Allocate and initialize BM pools */
2670 	priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
2671 				     sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
2672 	if (!priv->bm_pools)
2673 		return -ENOMEM;
2674 
2675 	err = mvpp2_bm_pools_init(dev, priv);
2676 	if (err < 0)
2677 		return err;
2678 	return 0;
2679 }
2680 
2681 /* Attach long pool to rxq */
2682 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
2683 				    int lrxq, int long_pool)
2684 {
2685 	u32 val, mask;
2686 	int prxq;
2687 
2688 	/* Get queue physical ID */
2689 	prxq = port->rxqs[lrxq]->id;
2690 
2691 	if (port->priv->hw_version == MVPP21)
2692 		mask = MVPP21_RXQ_POOL_LONG_MASK;
2693 	else
2694 		mask = MVPP22_RXQ_POOL_LONG_MASK;
2695 
2696 	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2697 	val &= ~mask;
2698 	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
2699 	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2700 }
2701 
2702 /* Set pool number in a BM cookie */
2703 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
2704 {
2705 	u32 bm;
2706 
2707 	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
2708 	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
2709 
2710 	return bm;
2711 }
2712 
2713 /* Get pool number from a BM cookie */
2714 static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
2715 {
2716 	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
2717 }
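
/*
 * The cookie carries the pool number in an 8-bit field at
 * MVPP2_BM_COOKIE_POOL_OFFS, and the set/get pair is symmetric. A
 * minimal round-trip sketch (example_bm_cookie_round_trip() is a
 * hypothetical helper for illustration, not part of the driver):
 */
static inline int example_bm_cookie_round_trip(u32 cookie)
{
	/* whatever the other cookie bits hold, this always yields 3 */
	return mvpp2_bm_cookie_pool_get(mvpp2_bm_cookie_pool_set(cookie, 3));
}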
2718 
2719 /* Release buffer to BM */
2720 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
2721 				     dma_addr_t buf_dma_addr,
2722 				     unsigned long buf_phys_addr)
2723 {
2724 	if (port->priv->hw_version == MVPP22) {
2725 		u32 val = 0;
2726 
2727 		if (sizeof(dma_addr_t) == 8)
2728 			val |= upper_32_bits(buf_dma_addr) &
2729 				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
2730 
2731 		if (sizeof(phys_addr_t) == 8)
2732 			val |= (upper_32_bits(buf_phys_addr)
2733 				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
2734 				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
2735 
2736 		mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
2737 	}
2738 
2739 	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
2740 	 * returned in the "cookie" field of the RX
2741 	 * descriptor. Instead of storing the virtual address, we
2742 	 * store the physical address
2743 	 */
2744 	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
2745 	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
2746 }
2747 
2748 /* Refill BM pool */
2749 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
2750 			      dma_addr_t dma_addr,
2751 			      phys_addr_t phys_addr)
2752 {
2753 	int pool = mvpp2_bm_cookie_pool_get(bm);
2754 
2755 	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2756 }
2757 
2758 /* Allocate buffers for the pool */
2759 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
2760 			     struct mvpp2_bm_pool *bm_pool, int buf_num)
2761 {
2762 	int i;
2763 
2764 	if (buf_num < 0 ||
2765 	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
2766 		netdev_err(port->dev,
2767 			   "cannot allocate %d buffers for pool %d\n",
2768 			   buf_num, bm_pool->id);
2769 		return 0;
2770 	}
2771 
2772 	for (i = 0; i < buf_num; i++) {
2773 		mvpp2_bm_pool_put(port, bm_pool->id,
2774 				  (dma_addr_t)buffer_loc.rx_buffer[i],
2775 				  (unsigned long)buffer_loc.rx_buffer[i]);
2776 
2777 	}
2778 
2779 	/* Update BM driver with number of buffers added to pool */
2780 	bm_pool->buf_num += i;
2781 
2782 	return i;
2783 }
2784 
2785 /* Notify the driver that BM pool is being used as specific type and return the
2786  * pool pointer on success
2787  */
2788 static struct mvpp2_bm_pool *
2789 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
2790 		  int pkt_size)
2791 {
2792 	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
2793 	int num;
2794 
2795 	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
2796 		netdev_err(port->dev, "mixing pool types is forbidden\n");
2797 		return NULL;
2798 	}
2799 
2800 	if (new_pool->type == MVPP2_BM_FREE)
2801 		new_pool->type = type;
2802 
2803 	/* Allocate buffers in case BM pool is used as long pool, but packet
2804 	 * size doesn't match MTU or BM pool hasn't been used yet
2805 	 */
2806 	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
2807 	    (new_pool->pkt_size == 0)) {
2808 		int pkts_num;
2809 
2810 		/* Set default buffer number or free all the buffers in case
2811 		 * the pool is not empty
2812 		 */
2813 		pkts_num = new_pool->buf_num;
2814 		if (pkts_num == 0)
2815 			pkts_num = type == MVPP2_BM_SWF_LONG ?
2816 				   MVPP2_BM_LONG_BUF_NUM :
2817 				   MVPP2_BM_SHORT_BUF_NUM;
2818 		else
2819 			mvpp2_bm_bufs_free(NULL,
2820 					   port->priv, new_pool);
2821 
2822 		new_pool->pkt_size = pkt_size;
2823 
2824 		/* Allocate buffers for this pool */
2825 		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
2826 		if (num != pkts_num) {
2827 			netdev_err(port->dev, "pool %d: %d of %d allocated\n",
2828 				new_pool->id, num, pkts_num);
2829 			return NULL;
2830 		}
2831 	}
2832 
2833 	return new_pool;
2834 }
2835 
2836 /* Initialize pools for swf */
2837 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
2838 {
2839 	int rxq;
2840 
2841 	if (!port->pool_long) {
2842 		port->pool_long =
2843 		       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
2844 					 MVPP2_BM_SWF_LONG,
2845 					 port->pkt_size);
2846 		if (!port->pool_long)
2847 			return -ENOMEM;
2848 
2849 		port->pool_long->port_map |= (1 << port->id);
2850 
2851 		for (rxq = 0; rxq < rxq_number; rxq++)
2852 			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
2853 	}
2854 
2855 	return 0;
2856 }
2857 
2858 /* Port configuration routines */
2859 
2860 static void mvpp2_port_mii_set(struct mvpp2_port *port)
2861 {
2862 	u32 val;
2863 
2864 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2865 
2866 	switch (port->phy_interface) {
2867 	case PHY_INTERFACE_MODE_SGMII:
2868 		val |= MVPP2_GMAC_INBAND_AN_MASK;
2869 		break;
2870 	case PHY_INTERFACE_MODE_RGMII:
2871 	case PHY_INTERFACE_MODE_RGMII_ID:
2872 		val |= MVPP2_GMAC_PORT_RGMII_MASK;	/* fall through */
2873 	default:
2874 		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2875 	}
2876 
2877 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2878 }
2879 
2880 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
2881 {
2882 	u32 val;
2883 
2884 	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2885 	val |= MVPP2_GMAC_FC_ADV_EN;
2886 	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2887 }
2888 
2889 static void mvpp2_port_enable(struct mvpp2_port *port)
2890 {
2891 	u32 val;
2892 
2893 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2894 	val |= MVPP2_GMAC_PORT_EN_MASK;
2895 	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2896 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2897 }
2898 
2899 static void mvpp2_port_disable(struct mvpp2_port *port)
2900 {
2901 	u32 val;
2902 
2903 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2904 	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
2905 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2906 }
2907 
2908 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
2909 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
2910 {
2911 	u32 val;
2912 
2913 	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
2914 		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
2915 	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2916 }
2917 
2918 /* Configure loopback port */
2919 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
2920 {
2921 	u32 val;
2922 
2923 	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
2924 
2925 	if (port->speed == 1000)
2926 		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
2927 	else
2928 		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
2929 
2930 	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
2931 		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
2932 	else
2933 		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
2934 
2935 	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2936 }
2937 
2938 static void mvpp2_port_reset(struct mvpp2_port *port)
2939 {
2940 	u32 val;
2941 
2942 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2943 		    ~MVPP2_GMAC_PORT_RESET_MASK;
2944 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2945 
2946 	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2947 	       MVPP2_GMAC_PORT_RESET_MASK)
2948 		continue;
2949 }
2950 
2951 /* Change maximum receive size of the port */
2952 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2953 {
2954 	u32 val;
2955 
2956 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2957 	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2958 	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2959 		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2960 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2961 }
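
/*
 * The GMAC encodes the maximum RX size in 2-byte units, measured after
 * the Marvell header. As a worked example, assuming MVPP2_MH_SIZE is 2:
 * with pkt_size = 1518 the field becomes (1518 - 2) / 2 = 758.
 */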
2962 
2963 /* PPv2.2 GoP/GMAC config */
2964 
2965 /* Set the MAC to reset or exit from reset */
2966 static int gop_gmac_reset(struct mvpp2_port *port, int reset)
2967 {
2968 	u32 val;
2969 
2970 	/* read - modify - write */
2971 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2972 	if (reset)
2973 		val |= MVPP2_GMAC_PORT_RESET_MASK;
2974 	else
2975 		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
2976 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2977 
2978 	return 0;
2979 }
2980 
2981 /*
2982  * gop_gpcs_mode_cfg
2983  *
2984  * Configure the port to work with the Gig PCS or not.
2985  */
2986 static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
2987 {
2988 	u32 val;
2989 
2990 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2991 	if (en)
2992 		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
2993 	else
2994 		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2995 	/* enable / disable PCS on this port */
2996 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2997 
2998 	return 0;
2999 }
3000 
3001 static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
3002 {
3003 	u32 val;
3004 
3005 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3006 	if (en)
3007 		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
3008 	else
3009 		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
3010 	/* enable / disable the 125MHz clock bypass on this port */
3011 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3012 
3013 	return 0;
3014 }
3015 
3016 static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port)
3017 {
3018 	u32 val, thresh;
3019 
3020 	/*
3021 	 * Configure minimal level of the Tx FIFO before the lower part
3022 	 * starts to read a packet
3023 	 */
3024 	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
3025 	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3026 	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3027 	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3028 	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3029 
3030 	/* Disable bypass of sync module */
3031 	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3032 	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3033 	/* configure DP clock select according to mode */
3034 	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3035 	/* configure QSGMII bypass according to mode */
3036 	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3037 	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3038 
3039 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3040 	/*
3041 	 * Configure GIG MAC to 1000Base-X mode connected to a fiber
3042 	 * transceiver
3043 	 */
3044 	val |= MVPP2_GMAC_PORT_TYPE_MASK;
3045 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3046 
3047 	/* configure AN 0x9268 */
3048 	val = MVPP2_GMAC_EN_PCS_AN |
3049 		MVPP2_GMAC_AN_BYPASS_EN |
3050 		MVPP2_GMAC_CONFIG_MII_SPEED  |
3051 		MVPP2_GMAC_CONFIG_GMII_SPEED     |
3052 		MVPP2_GMAC_FC_ADV_EN    |
3053 		MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3054 		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3055 	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3056 }
3057 
3058 static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
3059 {
3060 	u32 val, thresh;
3061 
3062 	/*
3063 	 * Configure minimal level of the Tx FIFO before the lower part
3064 	 * starts to read a packet
3065 	 */
3066 	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
3067 	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3068 	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3069 	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3070 	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3071 
3072 	/* Disable bypass of sync module */
3073 	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3074 	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3075 	/* configure DP clock select according to mode */
3076 	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3077 	/* configure QSGMII bypass according to mode */
3078 	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3079 	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3080 
3081 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3082 	/* configure GIG MAC to SGMII mode */
3083 	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
3084 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3085 
3086 	/* configure AN */
3087 	val = MVPP2_GMAC_EN_PCS_AN |
3088 		MVPP2_GMAC_AN_BYPASS_EN |
3089 		MVPP2_GMAC_AN_SPEED_EN  |
3090 		MVPP2_GMAC_EN_FC_AN     |
3091 		MVPP2_GMAC_AN_DUPLEX_EN |
3092 		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3093 	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3094 }
3095 
3096 static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
3097 {
3098 	u32 val, thresh;
3099 
3100 	/*
3101 	 * Configure minimal level of the Tx FIFO before the lower part
3102 	 * starts to read a packet
3103 	 */
3104 	thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
3105 	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3106 	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3107 	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3108 	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3109 
3110 	/* Disable bypass of sync module */
3111 	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3112 	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3113 	/* configure DP clock select according to mode */
3114 	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3115 	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3116 	val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
3117 	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3118 
3119 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3120 	/* configure GIG MAC to SGMII mode */
3121 	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
3122 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3123 
3124 	/* configure AN 0xb8e8 */
3125 	val = MVPP2_GMAC_AN_BYPASS_EN |
3126 		MVPP2_GMAC_AN_SPEED_EN   |
3127 		MVPP2_GMAC_EN_FC_AN      |
3128 		MVPP2_GMAC_AN_DUPLEX_EN  |
3129 		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3130 	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3131 }
3132 
3133 /* Set the internal mux's to the required MAC in the GOP */
3134 static int gop_gmac_mode_cfg(struct mvpp2_port *port)
3135 {
3136 	u32 val;
3137 
3138 	/* Set TX FIFO thresholds */
3139 	switch (port->phy_interface) {
3140 	case PHY_INTERFACE_MODE_SGMII:
3141 		if (port->phy_speed == 2500)
3142 			gop_gmac_sgmii2_5_cfg(port);
3143 		else
3144 			gop_gmac_sgmii_cfg(port);
3145 		break;
3146 
3147 	case PHY_INTERFACE_MODE_RGMII:
3148 	case PHY_INTERFACE_MODE_RGMII_ID:
3149 		gop_gmac_rgmii_cfg(port);
3150 		break;
3151 
3152 	default:
3153 		return -1;
3154 	}
3155 
3156 	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
3157 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3158 	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3159 	val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS;
3160 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3161 
3162 	/* PeriodicXonEn disable */
3163 	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3164 	val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3165 	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3166 
3167 	return 0;
3168 }
3169 
3170 static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port)
3171 {
3172 	u32 val;
3173 
3174 	/* relevant only for MAC0 (XLG0 and GMAC0) */
3175 	if (port->gop_id > 0)
3176 		return;
3177 
3178 	/* configure 1Gig MAC mode */
3179 	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
3180 	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
3181 	val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
3182 	writel(val, port->base + MVPP22_XLG_CTRL3_REG);
3183 }
3184 
3185 static int gop_gpcs_reset(struct mvpp2_port *port, int reset)
3186 {
3187 	u32 val;
3188 
3189 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3190 	if (reset)
3191 		val &= ~MVPP2_GMAC_SGMII_MODE_MASK;
3192 	else
3193 		val |= MVPP2_GMAC_SGMII_MODE_MASK;
3194 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3195 
3196 	return 0;
3197 }
3198 
3199 /* Set the internal mux's to the required PCS in the PI */
3200 static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes)
3201 {
3202 	u32 val;
3203 	int lane;
3204 
3205 	switch (num_of_lanes) {
3206 	case 1:
3207 		lane = 0;
3208 		break;
3209 	case 2:
3210 		lane = 1;
3211 		break;
3212 	case 4:
3213 		lane = 2;
3214 		break;
3215 	default:
3216 		return -1;
3217 	}
3218 
3219 	/* configure XG MAC mode */
3220 	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
3221 	val &= ~MVPP22_XPCS_PCSMODE_MASK;
3222 	val &= ~MVPP22_XPCS_LANEACTIVE_MASK;
3223 	val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS;
3224 	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
3225 
3226 	return 0;
3227 }
3228 
3229 static int gop_mpcs_mode(struct mvpp2_port *port)
3230 {
3231 	u32 val;
3232 
3233 	/* configure PCS40G COMMON CONTROL */
3234 	val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL);
3235 	val &= ~FORWARD_ERROR_CORRECTION_MASK;
3236 	writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL);
3237 
3238 	/* configure PCS CLOCK RESET */
3239 	val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET);
3240 	val &= ~CLK_DIVISION_RATIO_MASK;
3241 	val |= 1 << CLK_DIVISION_RATIO_OFFS;
3242 	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);
3243 
3244 	val &= ~CLK_DIV_PHASE_SET_MASK;
3245 	val |= MAC_CLK_RESET_MASK;
3246 	val |= RX_SD_CLK_RESET_MASK;
3247 	val |= TX_SD_CLK_RESET_MASK;
3248 	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);
3249 
3250 	return 0;
3251 }
3252 
3253 /* Set the internal mux's to the required MAC in the GOP */
3254 static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes)
3255 {
3256 	u32 val;
3257 
3258 	/* configure 10G MAC mode */
3259 	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
3260 	val |= MVPP22_XLG_RX_FC_EN;
3261 	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
3262 
3263 	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
3264 	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
3265 	val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
3266 	writel(val, port->base + MVPP22_XLG_CTRL3_REG);
3267 
3268 	/* read - modify - write */
3269 	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
3270 	val &= ~MVPP22_XLG_MODE_DMA_1G;
3271 	val |= MVPP22_XLG_FORWARD_PFC_EN;
3272 	val |= MVPP22_XLG_FORWARD_802_3X_FC_EN;
3273 	val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK;
3274 	writel(val, port->base + MVPP22_XLG_CTRL4_REG);
3275 
3276 	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
3277 	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
3278 	val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK;
3279 	val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS;
3280 	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
3281 
3282 	/* unmask link change interrupt */
3283 	val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
3284 	val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE;
3285 	val |= 1; /* unmask summary bit */
3286 	writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
3287 
3288 	return 0;
3289 }
3290 
3291 /* Set PCS to reset or exit from reset */
3292 static int gop_xpcs_reset(struct mvpp2_port *port, int reset)
3293 {
3294 	u32 val;
3295 
3296 	/* read - modify - write */
3297 	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
3298 	if (reset)
3299 		val &= ~MVPP22_XPCS_PCSRESET;
3300 	else
3301 		val |= MVPP22_XPCS_PCSRESET;
3302 	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
3303 
3304 	return 0;
3305 }
3306 
3307 /* Set the MAC to reset or exit from reset */
3308 static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset)
3309 {
3310 	u32 val;
3311 
3312 	/* read - modify - write */
3313 	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
3314 	if (reset)
3315 		val &= ~MVPP22_XLG_MAC_RESETN;
3316 	else
3317 		val |= MVPP22_XLG_MAC_RESETN;
3318 	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
3319 
3320 	return 0;
3321 }
3322 
3323 /*
3324  * gop_port_init
3325  *
3326  * Init physical port. Configures the port mode and all its elements
3327  * accordingly.
3328  * Does not verify that the selected mode/port number is valid at the
3329  * core level.
3330  */
3331 static int gop_port_init(struct mvpp2_port *port)
3332 {
3333 	int mac_num = port->gop_id;
3334 	int num_of_act_lanes;
3335 
3336 	if (mac_num >= MVPP22_GOP_MAC_NUM) {
3337 		netdev_err(NULL, "%s: illegal port number %d\n", __func__,
3338 			   mac_num);
3339 		return -1;
3340 	}
3341 
3342 	switch (port->phy_interface) {
3343 	case PHY_INTERFACE_MODE_RGMII:
3344 	case PHY_INTERFACE_MODE_RGMII_ID:
3345 		gop_gmac_reset(port, 1);
3346 
3347 		/* configure PCS */
3348 		gop_gpcs_mode_cfg(port, 0);
3349 		gop_bypass_clk_cfg(port, 1);
3350 
3351 		/* configure MAC */
3352 		gop_gmac_mode_cfg(port);
3353 		/* pcs unreset */
3354 		gop_gpcs_reset(port, 0);
3355 
3356 		/* mac unreset */
3357 		gop_gmac_reset(port, 0);
3358 		break;
3359 
3360 	case PHY_INTERFACE_MODE_SGMII:
3361 		/* configure PCS */
3362 		gop_gpcs_mode_cfg(port, 1);
3363 
3364 		/* configure MAC */
3365 		gop_gmac_mode_cfg(port);
3366 		/* select proper Mac mode */
3367 		gop_xlg_2_gig_mac_cfg(port);
3368 
3369 		/* pcs unreset */
3370 		gop_gpcs_reset(port, 0);
3371 		/* mac unreset */
3372 		gop_gmac_reset(port, 0);
3373 		break;
3374 
3375 	case PHY_INTERFACE_MODE_SFI:
3376 		num_of_act_lanes = 2;
3377 		mac_num = 0;
3378 		/* configure PCS */
3379 		gop_xpcs_mode(port, num_of_act_lanes);
3380 		gop_mpcs_mode(port);
3381 		/* configure MAC */
3382 		gop_xlg_mac_mode_cfg(port, num_of_act_lanes);
3383 
3384 		/* pcs unreset */
3385 		gop_xpcs_reset(port, 0);
3386 
3387 		/* mac unreset */
3388 		gop_xlg_mac_reset(port, 0);
3389 		break;
3390 
3391 	default:
3392 		netdev_err(NULL, "%s: Requested port mode (%d) not supported\n",
3393 			   __func__, port->phy_interface);
3394 		return -1;
3395 	}
3396 
3397 	return 0;
3398 }
3399 
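/*
 * Example (illustrative sketch, not a verbatim copy of the probe path):
 * the bring-up order implied by the functions above for a PPv2.2 SGMII
 * port -- accumulate the net complex config, run both NETC phases, then
 * init and enable the per-port GOP:
 *
 *	priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
 *						   port->phy_interface);
 *	gop_netc_init(priv, MV_NETC_FIRST_PHASE);
 *	gop_netc_init(priv, MV_NETC_SECOND_PHASE);
 *	gop_port_init(port);
 *	gop_port_enable(port, 1);
 */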
3400 static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable)
3401 {
3402 	u32 val;
3403 
3404 	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
3405 	if (enable) {
3406 		/* Enable port and MIB counters update */
3407 		val |= MVPP22_XLG_PORT_EN;
3408 		val &= ~MVPP22_XLG_MIBCNT_DIS;
3409 	} else {
3410 		/* Disable port */
3411 		val &= ~MVPP22_XLG_PORT_EN;
3412 	}
3413 	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
3414 }
3415 
3416 static void gop_port_enable(struct mvpp2_port *port, int enable)
3417 {
3418 	switch (port->phy_interface) {
3419 	case PHY_INTERFACE_MODE_RGMII:
3420 	case PHY_INTERFACE_MODE_RGMII_ID:
3421 	case PHY_INTERFACE_MODE_SGMII:
3422 		if (enable)
3423 			mvpp2_port_enable(port);
3424 		else
3425 			mvpp2_port_disable(port);
3426 		break;
3427 
3428 	case PHY_INTERFACE_MODE_SFI:
3429 		gop_xlg_mac_port_enable(port, enable);
3430 
3431 		break;
3432 	default:
3433 		netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__,
3434 			   port->phy_interface);
3435 		return;
3436 	}
3437 }
3438 
3439 /* RFU1 functions */
3440 static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset)
3441 {
3442 	return readl(priv->rfu1_base + offset);
3443 }
3444 
3445 static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data)
3446 {
3447 	writel(data, priv->rfu1_base + offset);
3448 }
3449 
3450 static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type)
3451 {
3452 	u32 val = 0;
3453 
3454 	if (gop_id == 2) {
3455 		if (phy_type == PHY_INTERFACE_MODE_SGMII)
3456 			val |= MV_NETC_GE_MAC2_SGMII;
3457 	}
3458 
3459 	if (gop_id == 3) {
3460 		if (phy_type == PHY_INTERFACE_MODE_SGMII)
3461 			val |= MV_NETC_GE_MAC3_SGMII;
3462 		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
3463 			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
3464 			val |= MV_NETC_GE_MAC3_RGMII;
3465 	}
3466 
3467 	return val;
3468 }
3469 
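/*
 * Example: for a board with GoP port 2 in SGMII and GoP port 3 in RGMII,
 * the accumulated config word is MV_NETC_GE_MAC2_SGMII |
 * MV_NETC_GE_MAC3_RGMII:
 *
 *	u32 cfg = 0;
 *
 *	cfg |= mvpp2_netc_cfg_create(2, PHY_INTERFACE_MODE_SGMII);
 *	cfg |= mvpp2_netc_cfg_create(3, PHY_INTERFACE_MODE_RGMII);
 */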
3470 static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val)
3471 {
3472 	u32 reg;
3473 
3474 	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
3475 	reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id));
3476 
3477 	val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id);
3478 	val &= NETC_PORTS_ACTIVE_MASK(gop_id);
3479 
3480 	reg |= val;
3481 
3482 	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
3483 }
3484 
3485 static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val)
3486 {
3487 	u32 reg;
3488 
3489 	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
3490 	reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;
3491 
3492 	val <<= NETC_GBE_PORT1_MII_MODE_OFFS;
3493 	val &= NETC_GBE_PORT1_MII_MODE_MASK;
3494 
3495 	reg |= val;
3496 
3497 	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
3498 }
3499 
3500 static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val)
3501 {
3502 	u32 reg;
3503 
3504 	reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG);
3505 	reg &= ~NETC_GOP_SOFT_RESET_MASK;
3506 
3507 	val <<= NETC_GOP_SOFT_RESET_OFFS;
3508 	val &= NETC_GOP_SOFT_RESET_MASK;
3509 
3510 	reg |= val;
3511 
3512 	gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg);
3513 }
3514 
3515 static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val)
3516 {
3517 	u32 reg;
3518 
3519 	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
3520 	reg &= ~NETC_CLK_DIV_PHASE_MASK;
3521 
3522 	val <<= NETC_CLK_DIV_PHASE_OFFS;
3523 	val &= NETC_CLK_DIV_PHASE_MASK;
3524 
3525 	reg |= val;
3526 
3527 	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
3528 }
3529 
3530 static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val)
3531 {
3532 	u32 reg;
3533 
3534 	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
3535 	reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id));
3536 
3537 	val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id);
3538 	val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id);
3539 
3540 	reg |= val;
3541 
3542 	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
3543 }
3544 
3545 static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id,
3546 					   u32 val)
3547 {
3548 	u32 reg, mask, offset;
3549 
3550 	if (gop_id == 2) {
3551 		mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
3552 		offset = NETC_GBE_PORT0_SGMII_MODE_OFFS;
3553 	} else {
3554 		mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
3555 		offset = NETC_GBE_PORT1_SGMII_MODE_OFFS;
3556 	}
3557 	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
3558 	reg &= ~mask;
3559 
3560 	val <<= offset;
3561 	val &= mask;
3562 
3563 	reg |= val;
3564 
3565 	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
3566 }
3567 
3568 static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val)
3569 {
3570 	u32 reg;
3571 
3572 	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
3573 	reg &= ~NETC_BUS_WIDTH_SELECT_MASK;
3574 
3575 	val <<= NETC_BUS_WIDTH_SELECT_OFFS;
3576 	val &= NETC_BUS_WIDTH_SELECT_MASK;
3577 
3578 	reg |= val;
3579 
3580 	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
3581 }
3582 
3583 static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val)
3584 {
3585 	u32 reg;
3586 
3587 	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
3588 	reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;
3589 
3590 	val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS;
3591 	val &= NETC_GIG_RX_DATA_SAMPLE_MASK;
3592 
3593 	reg |= val;
3594 
3595 	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
3596 }
3597 
3598 static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id,
3599 				  enum mv_netc_phase phase)
3600 {
3601 	switch (phase) {
3602 	case MV_NETC_FIRST_PHASE:
3603 		/* Set Bus Width to HB mode = 1 */
3604 		gop_netc_bus_width_select(priv, 1);
3605 		/* Select RGMII mode */
3606 		gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII);
3607 		break;
3608 
3609 	case MV_NETC_SECOND_PHASE:
3610 		/* De-assert the relevant port HB reset */
3611 		gop_netc_port_rf_reset(priv, gop_id, 1);
3612 		break;
3613 	}
3614 }
3615 
3616 static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id,
3617 				  enum mv_netc_phase phase)
3618 {
3619 	switch (phase) {
3620 	case MV_NETC_FIRST_PHASE:
3621 		/* Set Bus Width to HB mode = 1 */
3622 		gop_netc_bus_width_select(priv, 1);
3623 		/* Select SGMII mode */
3624 		if (gop_id >= 1) {
3625 			gop_netc_gbe_sgmii_mode_select(priv, gop_id,
3626 						       MV_NETC_GBE_SGMII);
3627 		}
3628 
3629 		/* Configure the sample stages */
3630 		gop_netc_sample_stages_timing(priv, 0);
3631 		/* Configure the ComPhy Selector */
3632 		/* gop_netc_com_phy_selector_config(netComplex); */
3633 		break;
3634 
3635 	case MV_NETC_SECOND_PHASE:
3636 		/* De-assert the relevant port HB reset */
3637 		gop_netc_port_rf_reset(priv, gop_id, 1);
3638 		break;
3639 	}
3640 }
3641 
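/*
 * Note: gop_netc_init() is meant to run twice -- MV_NETC_FIRST_PHASE
 * selects the per-port bus width and MII/SGMII muxing, while
 * MV_NETC_SECOND_PHASE de-asserts the port and GOP resets and enables
 * the clock logic (see the phase handling below).
 */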
3642 static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase)
3643 {
3644 	u32 c = priv->netc_config;
3645 
3646 	if (c & MV_NETC_GE_MAC2_SGMII)
3647 		gop_netc_mac_to_sgmii(priv, 2, phase);
3648 	else
3649 		gop_netc_mac_to_xgmii(priv, 2, phase);
3650 
3651 	if (c & MV_NETC_GE_MAC3_SGMII) {
3652 		gop_netc_mac_to_sgmii(priv, 3, phase);
3653 	} else {
3654 		gop_netc_mac_to_xgmii(priv, 3, phase);
3655 		if (c & MV_NETC_GE_MAC3_RGMII)
3656 			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII);
3657 		else
3658 			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII);
3659 	}
3660 
3661 	/* Activate gop ports 0, 2, 3 */
3662 	gop_netc_active_port(priv, 0, 1);
3663 	gop_netc_active_port(priv, 2, 1);
3664 	gop_netc_active_port(priv, 3, 1);
3665 
3666 	if (phase == MV_NETC_SECOND_PHASE) {
3667 		/* Enable the GOP internal clock logic */
3668 		gop_netc_gop_clock_logic_set(priv, 1);
3669 		/* De-assert GOP unit reset */
3670 		gop_netc_gop_reset(priv, 1);
3671 	}
3672 
3673 	return 0;
3674 }
3675 
3676 /* Set defaults to the MVPP2 port */
3677 static void mvpp2_defaults_set(struct mvpp2_port *port)
3678 {
3679 	int tx_port_num, val, queue, ptxq, lrxq;
3680 
3681 	if (port->priv->hw_version == MVPP21) {
3682 		/* Configure port to loopback if needed */
3683 		if (port->flags & MVPP2_F_LOOPBACK)
3684 			mvpp2_port_loopback_set(port);
3685 
3686 		/* Update TX FIFO MIN Threshold */
3687 		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3688 		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3689 		/* Min. TX threshold must be less than minimal packet length */
3690 		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3691 		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3692 	}
3693 
3694 	/* Disable Legacy WRR, Disable EJP, Release from reset */
3695 	tx_port_num = mvpp2_egress_port(port);
3696 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3697 		    tx_port_num);
3698 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3699 
3700 	/* Close bandwidth for all queues */
3701 	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
3702 		ptxq = mvpp2_txq_phys(port->id, queue);
3703 		mvpp2_write(port->priv,
3704 			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
3705 	}
3706 
3707 	/* Set refill period to 1 usec, refill tokens
3708 	 * and bucket size to maximum
3709 	 */
3710 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
3711 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3712 	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3713 	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3714 	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3715 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3716 	val = MVPP2_TXP_TOKEN_SIZE_MAX;
3717 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3718 
3719 	/* Set MaximumLowLatencyPacketSize value to 256 */
3720 	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3721 		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3722 		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3723 
3724 	/* Enable Rx cache snoop */
3725 	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3726 		queue = port->rxqs[lrxq]->id;
3727 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3728 		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
3729 			   MVPP2_SNOOP_BUF_HDR_MASK;
3730 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3731 	}
3732 }
3733 
3734 /* Enable/disable receiving packets */
3735 static void mvpp2_ingress_enable(struct mvpp2_port *port)
3736 {
3737 	u32 val;
3738 	int lrxq, queue;
3739 
3740 	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3741 		queue = port->rxqs[lrxq]->id;
3742 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3743 		val &= ~MVPP2_RXQ_DISABLE_MASK;
3744 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3745 	}
3746 }
3747 
3748 static void mvpp2_ingress_disable(struct mvpp2_port *port)
3749 {
3750 	u32 val;
3751 	int lrxq, queue;
3752 
3753 	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3754 		queue = port->rxqs[lrxq]->id;
3755 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3756 		val |= MVPP2_RXQ_DISABLE_MASK;
3757 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3758 	}
3759 }
3760 
3761 /* Enable transmit via physical egress queue
3762  * - HW starts to take descriptors from DRAM
3763  */
3764 static void mvpp2_egress_enable(struct mvpp2_port *port)
3765 {
3766 	u32 qmap;
3767 	int queue;
3768 	int tx_port_num = mvpp2_egress_port(port);
3769 
3770 	/* Enable all initialized TXs. */
3771 	qmap = 0;
3772 	for (queue = 0; queue < txq_number; queue++) {
3773 		struct mvpp2_tx_queue *txq = port->txqs[queue];
3774 
3775 		if (txq->descs != NULL)
3776 			qmap |= (1 << queue);
3777 	}
3778 
3779 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3780 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
3781 }
3782 
3783 /* Disable transmit via physical egress queue
3784  * - HW doesn't take descriptors from DRAM
3785  */
3786 static void mvpp2_egress_disable(struct mvpp2_port *port)
3787 {
3788 	u32 reg_data;
3789 	int delay;
3790 	int tx_port_num = mvpp2_egress_port(port);
3791 
3792 	/* Issue stop command for active channels only */
3793 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3794 	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
3795 		    MVPP2_TXP_SCHED_ENQ_MASK;
3796 	if (reg_data != 0)
3797 		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
3798 			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
3799 
3800 	/* Wait for all Tx activity to terminate. */
3801 	delay = 0;
3802 	do {
3803 		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
3804 			netdev_warn(port->dev,
3805 				    "Tx stop timed out, status=0x%08x\n",
3806 				    reg_data);
3807 			break;
3808 		}
3809 		mdelay(1);
3810 		delay++;
3811 
3812 		/* Check the port TX Command register to verify that
3813 		 * all Tx queues are stopped
3814 		 */
3815 		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
3816 	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
3817 }
3818 
3819 /* Rx descriptors helper methods */
3820 
3821 /* Get number of Rx descriptors occupied by received packets */
3822 static inline int
3823 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
3824 {
3825 	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
3826 
3827 	return val & MVPP2_RXQ_OCCUPIED_MASK;
3828 }
3829 
3830 /* Update Rx queue status with the number of occupied and available
3831  * Rx descriptor slots.
3832  */
3833 static inline void
3834 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
3835 			int used_count, int free_count)
3836 {
3837 	/* Decrement the number of used descriptors and increment the
3838 	 * number of free descriptors.
3839 	 */
3840 	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
3841 
3842 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
3843 }
3844 
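/*
 * Example (sketch of the receive path; descriptor handling elided):
 * read how many descriptors the HW filled, process each one, then hand
 * the same number of now-free slots back to the HW in a single write:
 *
 *	int i, rx_received = mvpp2_rxq_received(port, rxq->id);
 *
 *	for (i = 0; i < rx_received; i++) {
 *		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
 *		... process rx_desc ...
 *	}
 *	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
 *
 * mvpp2_rxq_drop_pkts() below follows exactly this pattern.
 */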
3845 /* Get pointer to next RX descriptor to be processed by SW */
3846 static inline struct mvpp2_rx_desc *
3847 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
3848 {
3849 	int rx_desc = rxq->next_desc_to_proc;
3850 
3851 	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
3852 	prefetch(rxq->descs + rxq->next_desc_to_proc);
3853 	return rxq->descs + rx_desc;
3854 }
3855 
3856 /* Set rx queue offset */
3857 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
3858 				 int prxq, int offset)
3859 {
3860 	u32 val;
3861 
3862 	/* Convert offset from bytes to units of 32 bytes */
3863 	offset = offset >> 5;
3864 
3865 	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3866 	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
3867 
3868 	/* Offset is in units of 32 bytes */
3869 	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
3870 		    MVPP2_RXQ_PACKET_OFFSET_MASK);
3871 
3872 	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3873 }
3874 
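/*
 * Example: the HW field holds the offset in 32-byte units, so a 64-byte
 * packet offset is programmed as 64 >> 5 = 2.
 */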
3875 /* Obtain BM cookie information from descriptor */
3876 static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
3877 				 struct mvpp2_rx_desc *rx_desc)
3878 {
3879 	int cpu = smp_processor_id();
3880 	int pool;
3881 
3882 	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
3883 		MVPP2_RXD_BM_POOL_ID_MASK) >>
3884 		MVPP2_RXD_BM_POOL_ID_OFFS;
3885 
3886 	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
3887 	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
3888 }
3889 
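/*
 * Example: the pool id is recovered from such a cookie with the matching
 * shift and mask (illustrative):
 *
 *	int pool = (bm >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
 */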
3890 /* Tx descriptors helper methods */
3891 
3892 /* Get number of Tx descriptors waiting to be transmitted by HW */
3893 static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
3894 				       struct mvpp2_tx_queue *txq)
3895 {
3896 	u32 val;
3897 
3898 	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3899 	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
3900 
3901 	return val & MVPP2_TXQ_PENDING_MASK;
3902 }
3903 
3904 /* Get pointer to next Tx descriptor to be processed (send) by HW */
3905 static struct mvpp2_tx_desc *
3906 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
3907 {
3908 	int tx_desc = txq->next_desc_to_proc;
3909 
3910 	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
3911 	return txq->descs + tx_desc;
3912 }
3913 
3914 /* Update HW with number of aggregated Tx descriptors to be sent */
3915 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
3916 {
3917 	/* aggregated access - relevant TXQ number is written in TX desc */
3918 	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
3919 }
3920 
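/*
 * Example (sketch of the transmit path; descriptor fill and cache
 * maintenance elided): take the next aggregated Tx descriptor, fill it,
 * then tell the HW that one more descriptor is pending:
 *
 *	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
 *
 *	... fill buffer address, size and command bits in tx_desc ...
 *	mvpp2_aggr_txq_pend_desc_add(port, 1);
 */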
3921 /* Get number of sent descriptors and decrement counter.
3922  * The number of sent descriptors is returned.
3923  * Per-CPU access
3924  */
3925 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
3926 					   struct mvpp2_tx_queue *txq)
3927 {
3928 	u32 val;
3929 
3930 	/* Reading status reg resets transmitted descriptor counter */
3931 	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
3932 
3933 	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
3934 		MVPP2_TRANSMITTED_COUNT_OFFSET;
3935 }
3936 
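/*
 * Example: a send routine can poll for completion like this (sketch,
 * timeout handling elided); the read both returns and clears the count:
 *
 *	int sent;
 *
 *	do {
 *		sent = mvpp2_txq_sent_desc_proc(port, txq);
 *	} while (!sent);
 */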
3937 static void mvpp2_txq_sent_counter_clear(void *arg)
3938 {
3939 	struct mvpp2_port *port = arg;
3940 	int queue;
3941 
3942 	for (queue = 0; queue < txq_number; queue++) {
3943 		int id = port->txqs[queue]->id;
3944 
3945 		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
3946 	}
3947 }
3948 
3949 /* Set max sizes for Tx queues */
3950 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
3951 {
3952 	u32	val, size, mtu;
3953 	int	txq, tx_port_num;
3954 
3955 	mtu = port->pkt_size * 8;
3956 	if (mtu > MVPP2_TXP_MTU_MAX)
3957 		mtu = MVPP2_TXP_MTU_MAX;
3958 
3959 	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
3960 	mtu = 3 * mtu;
3961 
3962 	/* Indirect access to registers */
3963 	tx_port_num = mvpp2_egress_port(port);
3964 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3965 
3966 	/* Set MTU */
3967 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
3968 	val &= ~MVPP2_TXP_MTU_MAX;
3969 	val |= mtu;
3970 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
3971 
3972 	/* TXP token size and all TXQs token size must be larger than MTU */
3973 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
3974 	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
3975 	if (size < mtu) {
3976 		size = mtu;
3977 		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
3978 		val |= size;
3979 		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3980 	}
3981 
3982 	for (txq = 0; txq < txq_number; txq++) {
3983 		val = mvpp2_read(port->priv,
3984 				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
3985 		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
3986 
3987 		if (size < mtu) {
3988 			size = mtu;
3989 			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
3990 			val |= size;
3991 			mvpp2_write(port->priv,
3992 				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
3993 				    val);
3994 		}
3995 	}
3996 }
3997 
3998 /* Free Tx queue skbuffs */
3999 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4000 				struct mvpp2_tx_queue *txq,
4001 				struct mvpp2_txq_pcpu *txq_pcpu, int num)
4002 {
4003 	int i;
4004 
4005 	for (i = 0; i < num; i++)
4006 		mvpp2_txq_inc_get(txq_pcpu);
4007 }
4008 
4009 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4010 							u32 cause)
4011 {
4012 	int queue = fls(cause) - 1;
4013 
4014 	return port->rxqs[queue];
4015 }
4016 
4017 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4018 							u32 cause)
4019 {
4020 	int queue = fls(cause) - 1;
4021 
4022 	return port->txqs[queue];
4023 }
4024 
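/*
 * Example: fls() maps a cause bitmap to its highest set bit, so a cause
 * of 0x1 selects queue 0 and a cause of 0x4 selects queue 2.
 */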
4025 /* Rx/Tx queue initialization/cleanup methods */
4026 
4027 /* Allocate and initialize descriptors for aggr TXQ */
4028 static int mvpp2_aggr_txq_init(struct udevice *dev,
4029 			       struct mvpp2_tx_queue *aggr_txq,
4030 			       int desc_num, int cpu,
4031 			       struct mvpp2 *priv)
4032 {
4033 	u32 txq_dma;
4034 
4035 	/* Allocate memory for TX descriptors */
4036 	aggr_txq->descs = buffer_loc.aggr_tx_descs;
4037 	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
4038 	if (!aggr_txq->descs)
4039 		return -ENOMEM;
4040 
4041 	/* Make sure descriptor address is cache line size aligned  */
4042 	BUG_ON(aggr_txq->descs !=
4043 	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4044 
4045 	aggr_txq->last_desc = aggr_txq->size - 1;
4046 
4047 	/* Aggr TXQ no reset WA */
4048 	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4049 						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4050 
4051 	/* Set Tx descriptors queue starting address indirect
4052 	 * access
4053 	 */
4054 	if (priv->hw_version == MVPP21)
4055 		txq_dma = aggr_txq->descs_dma;
4056 	else
4057 		txq_dma = aggr_txq->descs_dma >>
4058 			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
4059 
4060 	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
4061 	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4062 
4063 	return 0;
4064 }
4065 
4066 /* Create a specified Rx queue */
4067 static int mvpp2_rxq_init(struct mvpp2_port *port,
4068 			  struct mvpp2_rx_queue *rxq)
4069 
4070 {
4071 	u32 rxq_dma;
4072 
4073 	rxq->size = port->rx_ring_size;
4074 
4075 	/* Allocate memory for RX descriptors */
4076 	rxq->descs = buffer_loc.rx_descs;
4077 	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
4078 	if (!rxq->descs)
4079 		return -ENOMEM;
4080 
4081 	BUG_ON(rxq->descs !=
4082 	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4083 
4084 	rxq->last_desc = rxq->size - 1;
4085 
4086 	/* Zero occupied and non-occupied counters - direct access */
4087 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4088 
4089 	/* Set Rx descriptors queue starting address - indirect access */
4090 	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4091 	if (port->priv->hw_version == MVPP21)
4092 		rxq_dma = rxq->descs_dma;
4093 	else
4094 		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
4095 	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
4096 	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4097 	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
4098 
4099 	/* Set Offset */
4100 	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4101 
4102 	/* Add number of descriptors ready for receiving packets */
4103 	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4104 
4105 	return 0;
4106 }
4107 
4108 /* Push packets received by the RXQ to BM pool */
4109 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4110 				struct mvpp2_rx_queue *rxq)
4111 {
4112 	int rx_received, i;
4113 
4114 	rx_received = mvpp2_rxq_received(port, rxq->id);
4115 	if (!rx_received)
4116 		return;
4117 
4118 	for (i = 0; i < rx_received; i++) {
4119 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4120 		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
4121 
4122 		mvpp2_pool_refill(port, bm,
4123 				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
4124 				  mvpp2_rxdesc_cookie_get(port, rx_desc));
4125 	}
4126 	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4127 }
4128 
4129 /* Cleanup Rx queue */
4130 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4131 			     struct mvpp2_rx_queue *rxq)
4132 {
4133 	mvpp2_rxq_drop_pkts(port, rxq);
4134 
4135 	rxq->descs             = NULL;
4136 	rxq->last_desc         = 0;
4137 	rxq->next_desc_to_proc = 0;
4138 	rxq->descs_dma         = 0;
4139 
4140 	/* Clear Rx descriptors queue starting address and size;
4141 	 * free descriptor number
4142 	 */
4143 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4144 	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4145 	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
4146 	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
4147 }
4148 
4149 /* Create and initialize a Tx queue */
4150 static int mvpp2_txq_init(struct mvpp2_port *port,
4151 			  struct mvpp2_tx_queue *txq)
4152 {
4153 	u32 val;
4154 	int cpu, desc, desc_per_txq, tx_port_num;
4155 	struct mvpp2_txq_pcpu *txq_pcpu;
4156 
4157 	txq->size = port->tx_ring_size;
4158 
4159 	/* Allocate memory for Tx descriptors */
4160 	txq->descs = buffer_loc.tx_descs;
4161 	txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
4162 	if (!txq->descs)
4163 		return -ENOMEM;
4164 
4165 	/* Make sure descriptor address is cache line size aligned  */
4166 	BUG_ON(txq->descs !=
4167 	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4168 
4169 	txq->last_desc = txq->size - 1;
4170 
4171 	/* Set Tx descriptors queue starting address - indirect access */
4172 	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4173 	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
4174 	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
4175 					     MVPP2_TXQ_DESC_SIZE_MASK);
4176 	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
4177 	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
4178 		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
4179 	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4180 	val &= ~MVPP2_TXQ_PENDING_MASK;
4181 	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
4182 
4183 	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
4184 	 * for each existing TXQ.
4185 	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
4186 	 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
4187 	 */
4188 	desc_per_txq = 16;
4189 	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4190 	       (txq->log_id * desc_per_txq);
4191 
4192 	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
4193 		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
4194 		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
4195 
4196 	/* WRR / EJP configuration - indirect access */
4197 	tx_port_num = mvpp2_egress_port(port);
4198 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4199 
4200 	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
4201 	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
4202 	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
4203 	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
4204 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
4205 
4206 	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
4207 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
4208 		    val);
4209 
4210 	for_each_present_cpu(cpu) {
4211 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4212 		txq_pcpu->size = txq->size;
4213 	}
4214 
4215 	return 0;
4216 }
4217 
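/*
 * Example: with desc_per_txq = 16, port 1 / logical queue 2 gets the
 * prefetch window starting at descriptor
 * 1 * MVPP2_MAX_TXQ * 16 + 2 * 16.
 */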
4218 /* Free allocated TXQ resources */
4219 static void mvpp2_txq_deinit(struct mvpp2_port *port,
4220 			     struct mvpp2_tx_queue *txq)
4221 {
4222 	txq->descs             = NULL;
4223 	txq->last_desc         = 0;
4224 	txq->next_desc_to_proc = 0;
4225 	txq->descs_dma         = 0;
4226 
4227 	/* Set minimum bandwidth for disabled TXQs */
4228 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4229 
4230 	/* Set Tx descriptors queue starting address and size */
4231 	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4232 	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
4233 	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
4234 }
4235 
4236 /* Cleanup Tx ports */
4237 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4238 {
4239 	struct mvpp2_txq_pcpu *txq_pcpu;
4240 	int delay, pending, cpu;
4241 	u32 val;
4242 
4243 	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4244 	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4245 	val |= MVPP2_TXQ_DRAIN_EN_MASK;
4246 	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4247 
4248 	/* The napi queue has been stopped so wait for all packets
4249 	 * to be transmitted.
4250 	 */
4251 	delay = 0;
4252 	do {
4253 		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
4254 			netdev_warn(port->dev,
4255 				    "port %d: cleaning queue %d timed out\n",
4256 				    port->id, txq->log_id);
4257 			break;
4258 		}
4259 		mdelay(1);
4260 		delay++;
4261 
4262 		pending = mvpp2_txq_pend_desc_num_get(port, txq);
4263 	} while (pending);
4264 
4265 	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4266 	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4267 
4268 	for_each_present_cpu(cpu) {
4269 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4270 
4271 		/* Release all packets */
4272 		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
4273 
4274 		/* Reset queue */
4275 		txq_pcpu->count = 0;
4276 		txq_pcpu->txq_put_index = 0;
4277 		txq_pcpu->txq_get_index = 0;
4278 	}
4279 }
4280 
4281 /* Cleanup all Tx queues */
4282 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
4283 {
4284 	struct mvpp2_tx_queue *txq;
4285 	int queue;
4286 	u32 val;
4287 
4288 	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
4289 
4290 	/* Reset Tx ports and delete Tx queues */
4291 	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
4292 	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4293 
4294 	for (queue = 0; queue < txq_number; queue++) {
4295 		txq = port->txqs[queue];
4296 		mvpp2_txq_clean(port, txq);
4297 		mvpp2_txq_deinit(port, txq);
4298 	}
4299 
4300 	mvpp2_txq_sent_counter_clear(port);
4301 
4302 	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
4303 	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4304 }
4305 
4306 /* Cleanup all Rx queues */
4307 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
4308 {
4309 	int queue;
4310 
4311 	for (queue = 0; queue < rxq_number; queue++)
4312 		mvpp2_rxq_deinit(port, port->rxqs[queue]);
4313 }
4314 
4315 /* Init all Rx queues for port */
4316 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
4317 {
4318 	int queue, err;
4319 
4320 	for (queue = 0; queue < rxq_number; queue++) {
4321 		err = mvpp2_rxq_init(port, port->rxqs[queue]);
4322 		if (err)
4323 			goto err_cleanup;
4324 	}
4325 	return 0;
4326 
4327 err_cleanup:
4328 	mvpp2_cleanup_rxqs(port);
4329 	return err;
4330 }
4331 
4332 /* Init all tx queues for port */
4333 static int mvpp2_setup_txqs(struct mvpp2_port *port)
4334 {
4335 	struct mvpp2_tx_queue *txq;
4336 	int queue, err;
4337 
4338 	for (queue = 0; queue < txq_number; queue++) {
4339 		txq = port->txqs[queue];
4340 		err = mvpp2_txq_init(port, txq);
4341 		if (err)
4342 			goto err_cleanup;
4343 	}
4344 
4345 	mvpp2_txq_sent_counter_clear(port);
4346 	return 0;
4347 
4348 err_cleanup:
4349 	mvpp2_cleanup_txqs(port);
4350 	return err;
4351 }
4352 
4353 /* Adjust link */
4354 static void mvpp2_link_event(struct mvpp2_port *port)
4355 {
4356 	struct phy_device *phydev = port->phy_dev;
4357 	int status_change = 0;
4358 	u32 val;
4359 
4360 	if (phydev->link) {
4361 		if ((port->speed != phydev->speed) ||
4362 		    (port->duplex != phydev->duplex)) {
4363 			u32 val;
4364 
4365 			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4366 			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
4367 				 MVPP2_GMAC_CONFIG_GMII_SPEED |
4368 				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
4369 				 MVPP2_GMAC_AN_SPEED_EN |
4370 				 MVPP2_GMAC_AN_DUPLEX_EN);
4371 
4372 			if (phydev->duplex)
4373 				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4374 
4375 			if (phydev->speed == SPEED_1000)
4376 				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
4377 			else if (phydev->speed == SPEED_100)
4378 				val |= MVPP2_GMAC_CONFIG_MII_SPEED;
4379 
4380 			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4381 
4382 			port->duplex = phydev->duplex;
4383 			port->speed  = phydev->speed;
4384 		}
4385 	}
4386 
4387 	if (phydev->link != port->link) {
4388 		if (!phydev->link) {
4389 			port->duplex = -1;
4390 			port->speed = 0;
4391 		}
4392 
4393 		port->link = phydev->link;
4394 		status_change = 1;
4395 	}
4396 
4397 	if (status_change) {
4398 		if (phydev->link) {
4399 			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4400 			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
4401 				MVPP2_GMAC_FORCE_LINK_DOWN);
4402 			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4403 			mvpp2_egress_enable(port);
4404 			mvpp2_ingress_enable(port);
4405 		} else {
4406 			mvpp2_ingress_disable(port);
4407 			mvpp2_egress_disable(port);
4408 		}
4409 	}
4410 }
4411 
4412 /* Main RX/TX processing routines */
4413 
4414 /* Display more error info */
4415 static void mvpp2_rx_error(struct mvpp2_port *port,
4416 			   struct mvpp2_rx_desc *rx_desc)
4417 {
4418 	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
4419 	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
4420 
4421 	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
4422 	case MVPP2_RXD_ERR_CRC:
4423 		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
4424 			   status, sz);
4425 		break;
4426 	case MVPP2_RXD_ERR_OVERRUN:
4427 		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
4428 			   status, sz);
4429 		break;
4430 	case MVPP2_RXD_ERR_RESOURCE:
4431 		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
4432 			   status, sz);
4433 		break;
4434 	}
4435 }
4436 
4437 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
4438 static int mvpp2_rx_refill(struct mvpp2_port *port,
4439 			   struct mvpp2_bm_pool *bm_pool,
4440 			   u32 bm, dma_addr_t dma_addr)
4441 {
4442 	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
4443 	return 0;
4444 }
4445 
4446 /* Set hw internals when starting port */
4447 static void mvpp2_start_dev(struct mvpp2_port *port)
4448 {
4449 	switch (port->phy_interface) {
4450 	case PHY_INTERFACE_MODE_RGMII:
4451 	case PHY_INTERFACE_MODE_RGMII_ID:
4452 	case PHY_INTERFACE_MODE_SGMII:
4453 		mvpp2_gmac_max_rx_size_set(port);
4454 	default:
4455 		break;
4456 	}
4457 
4458 	mvpp2_txp_max_tx_size_set(port);
4459 
4460 	if (port->priv->hw_version == MVPP21)
4461 		mvpp2_port_enable(port);
4462 	else
4463 		gop_port_enable(port, 1);
4464 }
4465 
4466 /* Set hw internals when stopping port */
4467 static void mvpp2_stop_dev(struct mvpp2_port *port)
4468 {
4469 	/* Stop new packets from arriving to RXQs */
4470 	mvpp2_ingress_disable(port);
4471 
4472 	mvpp2_egress_disable(port);
4473 
4474 	if (port->priv->hw_version == MVPP21)
4475 		mvpp2_port_disable(port);
4476 	else
4477 		gop_port_enable(port, 0);
4478 }
4479 
4480 static void mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
4481 {
4482 	struct phy_device *phy_dev;
4483 
4484 	if (!port->init || port->link == 0) {
4485 		phy_dev = dm_mdio_phy_connect(port->mdio_dev, port->phyaddr,
4486 					      dev, port->phy_interface);
4487 
4488 		/*
4489 		 * If the phy doesn't match any existing U-Boot driver, the
4490 		 * phy framework will connect it to the generic one, whose
4491 		 * uid == 0xffffffff. In that case act as if the phy were not
4492 		 * declared in the dts. Otherwise, for the 3310 (for which no
4493 		 * driver exists) the link would not be detected correctly.
4494 		 * Removing the phy entry from the dts in case of the 3310 is
4495 		 * not an option because it is required for the phy_fw_down
4496 		 * procedure.
4497 		 */
4498 		if (phy_dev &&
4499 		    phy_dev->drv->uid == 0xffffffff) {/* Generic phy */
4500 			netdev_warn(port->dev,
4501 				    "Marking phy as invalid, link will not be checked\n");
4502 			/* set phy_addr to invalid value */
4503 			port->phyaddr = PHY_MAX_ADDR;
4504 			mvpp2_egress_enable(port);
4505 			mvpp2_ingress_enable(port);
4506 
4507 			return;
4508 		}
4509 
4510 		port->phy_dev = phy_dev;
4511 		if (!phy_dev) {
4512 			netdev_err(port->dev, "cannot connect to phy\n");
4513 			return;
4514 		}
4515 		phy_dev->supported &= PHY_GBIT_FEATURES;
4516 		phy_dev->advertising = phy_dev->supported;
4517 
4518 		port->phy_dev = phy_dev;
4519 		port->link    = 0;
4520 		port->duplex  = 0;
4521 		port->speed   = 0;
4522 
4523 		phy_config(phy_dev);
4524 		phy_startup(phy_dev);
4525 		if (!phy_dev->link)
4526 			printf("%s: No link\n", phy_dev->dev->name);
4527 		else
4528 			port->init = 1;
4529 	} else {
4530 		mvpp2_egress_enable(port);
4531 		mvpp2_ingress_enable(port);
4532 	}
4533 }
4534 
4535 static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
4536 {
4537 	unsigned char mac_bcast[ETH_ALEN] = {
4538 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
4539 	int err;
4540 
4541 	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
4542 	if (err) {
4543 		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
4544 		return err;
4545 	}
4546 	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
4547 				      port->dev_addr, true);
4548 	if (err) {
4549 		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
4550 		return err;
4551 	}
4552 	err = mvpp2_prs_def_flow(port);
4553 	if (err) {
4554 		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
4555 		return err;
4556 	}
4557 
4558 	/* Allocate the Rx/Tx queues */
4559 	err = mvpp2_setup_rxqs(port);
4560 	if (err) {
4561 		netdev_err(port->dev, "cannot allocate Rx queues\n");
4562 		return err;
4563 	}
4564 
4565 	err = mvpp2_setup_txqs(port);
4566 	if (err) {
4567 		netdev_err(port->dev, "cannot allocate Tx queues\n");
4568 		return err;
4569 	}
4570 
4571 	if (port->phyaddr < PHY_MAX_ADDR) {
4572 		mvpp2_phy_connect(dev, port);
4573 		mvpp2_link_event(port);
4574 	} else {
4575 		mvpp2_egress_enable(port);
4576 		mvpp2_ingress_enable(port);
4577 	}
4578 
4579 	mvpp2_start_dev(port);
4580 
4581 	return 0;
4582 }
4583 
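/*
 * Example (illustrative): a DM start callback would fetch the port
 * private data and run mvpp2_open(); the actual wiring into U-Boot's
 * eth_ops happens later in this file:
 *
 *	struct mvpp2_port *port = dev_get_priv(dev);
 *
 *	return mvpp2_open(dev, port);
 */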
4584 /* No Device ops here in U-Boot */
4585 
4586 /* Driver initialization */
4587 
4588 static void mvpp2_port_power_up(struct mvpp2_port *port)
4589 {
4590 	struct mvpp2 *priv = port->priv;
4591 
4592 	/* On PPv2.2 the GoP / interface configuration has already been done */
4593 	if (priv->hw_version == MVPP21)
4594 		mvpp2_port_mii_set(port);
4595 	mvpp2_port_periodic_xon_disable(port);
4596 	if (priv->hw_version == MVPP21)
4597 		mvpp2_port_fc_adv_enable(port);
4598 	mvpp2_port_reset(port);
4599 }
4600 
4601 /* Initialize port HW */
4602 static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
4603 {
4604 	struct mvpp2 *priv = port->priv;
4605 	struct mvpp2_txq_pcpu *txq_pcpu;
4606 	int queue, cpu, err;
4607 
4608 	if (port->first_rxq + rxq_number >
4609 	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
4610 		return -EINVAL;
4611 
4612 	/* Disable port */
4613 	mvpp2_egress_disable(port);
4614 	if (priv->hw_version == MVPP21)
4615 		mvpp2_port_disable(port);
4616 	else
4617 		gop_port_enable(port, 0);
4618 
4619 	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
4620 				  GFP_KERNEL);
4621 	if (!port->txqs)
4622 		return -ENOMEM;
4623 
4624 	/* Associate physical Tx queues to this port and initialize.
4625 	 * The mapping is predefined.
4626 	 */
4627 	for (queue = 0; queue < txq_number; queue++) {
4628 		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
4629 		struct mvpp2_tx_queue *txq;
4630 
4631 		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
4632 		if (!txq)
4633 			return -ENOMEM;
4634 
4635 		txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
4636 					 GFP_KERNEL);
4637 		if (!txq->pcpu)
4638 			return -ENOMEM;
4639 
4640 		txq->id = queue_phy_id;
4641 		txq->log_id = queue;
4642 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
4643 		for_each_present_cpu(cpu) {
4644 			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4645 			txq_pcpu->cpu = cpu;
4646 		}
4647 
4648 		port->txqs[queue] = txq;
4649 	}
4650 
4651 	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
4652 				  GFP_KERNEL);
4653 	if (!port->rxqs)
4654 		return -ENOMEM;
4655 
4656 	/* Allocate and initialize Rx queue for this port */
4657 	for (queue = 0; queue < rxq_number; queue++) {
4658 		struct mvpp2_rx_queue *rxq;
4659 
4660 		/* Map physical Rx queue to port's logical Rx queue */
4661 		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
4662 		if (!rxq)
4663 			return -ENOMEM;
4664 		/* Map this Rx queue to a physical queue */
4665 		rxq->id = port->first_rxq + queue;
4666 		rxq->port = port->id;
4667 		rxq->logic_rxq = queue;
4668 
4669 		port->rxqs[queue] = rxq;
4670 	}
4671 
4672 
4673 	/* Create Rx descriptor rings */
4674 	for (queue = 0; queue < rxq_number; queue++) {
4675 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
4676 
4677 		rxq->size = port->rx_ring_size;
4678 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
4679 		rxq->time_coal = MVPP2_RX_COAL_USEC;
4680 	}
4681 
4682 	mvpp2_ingress_disable(port);
4683 
4684 	/* Port default configuration */
4685 	mvpp2_defaults_set(port);
4686 
4687 	/* Port's classifier configuration */
4688 	mvpp2_cls_oversize_rxq_set(port);
4689 	mvpp2_cls_port_config(port);
4690 
4691 	/* Provide an initial Rx packet size */
4692 	port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);
4693 
4694 	/* Initialize pools for swf */
4695 	err = mvpp2_swf_bm_pool_init(port);
4696 	if (err)
4697 		return err;
4698 
4699 	return 0;
4700 }
4701 
4702 static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port)
4703 {
4704 	int port_node = dev_of_offset(dev);
4705 	const char *phy_mode_str;
4706 	int phy_node;
4707 	u32 id;
4708 	int phyaddr = 0;	/* signed, so the "reg" error check below works */
4709 	int phy_mode = -1;
4710 	int ret;
4711 
4712 	phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");
4713 
4714 	if (phy_node > 0) {
4715 		int parent;
4716 		phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
4717 		if (phyaddr < 0) {
4718 			dev_err(dev, "could not find phy address\n");
4719 			return -1;
4720 		}
4721 		parent = fdt_parent_offset(gd->fdt_blob, phy_node);
4722 		ret = uclass_get_device_by_of_offset(UCLASS_MDIO, parent,
4723 						     &port->mdio_dev);
4724 		if (ret)
4725 			return ret;
4726 	} else {
4727 		/* phy_addr is set to invalid value */
4728 		phyaddr = PHY_MAX_ADDR;
4729 	}
4730 
4731 	phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
4732 	if (phy_mode_str)
4733 		phy_mode = phy_get_interface_by_name(phy_mode_str);
4734 	if (phy_mode == -1) {
4735 		dev_err(dev, "incorrect phy mode\n");
4736 		return -EINVAL;
4737 	}
4738 
4739 	id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
4740 	if (id == -1) {
4741 		dev_err(dev, "missing port-id value\n");
4742 		return -EINVAL;
4743 	}
4744 
4745 #ifdef CONFIG_DM_GPIO
4746 	gpio_request_by_name(dev, "phy-reset-gpios", 0,
4747 			     &port->phy_reset_gpio, GPIOD_IS_OUT);
4748 	gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0,
4749 			     &port->phy_tx_disable_gpio, GPIOD_IS_OUT);
4750 #endif
4751 
4752 	/*
4753 	 * ToDo:
4754 	 * Not sure if this DT property "phy-speed" will get accepted, so
4755 	 * this might change later
4756 	 */
4757 	/* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */
4758 	port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node,
4759 					 "phy-speed", 1000);
4760 
4761 	port->id = id;
4762 	if (port->priv->hw_version == MVPP21)
4763 		port->first_rxq = port->id * rxq_number;
4764 	else
4765 		port->first_rxq = port->id * port->priv->max_port_rxqs;
4766 	port->phy_interface = phy_mode;
4767 	port->phyaddr = phyaddr;
4768 
4769 	return 0;
4770 }
4771 
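/*
 * Example port node consumed by phy_info_parse() (property names from the
 * code above; node name and values illustrative):
 *
 *	eth0 {
 *		port-id = <0>;
 *		phy-mode = "sgmii";
 *		phy-speed = <1000>;
 *		phy = <&phy0>;
 *	};
 */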
4772 #ifdef CONFIG_DM_GPIO
4773 /* Port GPIO initialization */
4774 static void mvpp2_gpio_init(struct mvpp2_port *port)
4775 {
4776 	if (dm_gpio_is_valid(&port->phy_reset_gpio)) {
4777 		dm_gpio_set_value(&port->phy_reset_gpio, 1);
4778 		mdelay(10);
4779 		dm_gpio_set_value(&port->phy_reset_gpio, 0);
4780 	}
4781 
4782 	if (dm_gpio_is_valid(&port->phy_tx_disable_gpio))
4783 		dm_gpio_set_value(&port->phy_tx_disable_gpio, 0);
4784 }
4785 #endif
4786 
4787 /* Ports initialization */
4788 static int mvpp2_port_probe(struct udevice *dev,
4789 			    struct mvpp2_port *port,
4790 			    int port_node,
4791 			    struct mvpp2 *priv)
4792 {
4793 	int err;
4794 
4795 	port->tx_ring_size = MVPP2_MAX_TXD;
4796 	port->rx_ring_size = MVPP2_MAX_RXD;
4797 
4798 	err = mvpp2_port_init(dev, port);
4799 	if (err < 0) {
4800 		dev_err(dev, "failed to init port %d\n", port->id);
4801 		return err;
4802 	}
4803 	mvpp2_port_power_up(port);
4804 
4805 #ifdef CONFIG_DM_GPIO
4806 	mvpp2_gpio_init(port);
4807 #endif
4808 
4809 	priv->port_list[port->id] = port;
4810 	priv->num_ports++;
4811 	return 0;
4812 }
4813 
4814 /* Initialize decoding windows */
4815 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
4816 				    struct mvpp2 *priv)
4817 {
4818 	u32 win_enable;
4819 	int i;
4820 
4821 	for (i = 0; i < 6; i++) {
4822 		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
4823 		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
4824 
4825 		if (i < 4)
4826 			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
4827 	}
4828 
4829 	win_enable = 0;
4830 
4831 	for (i = 0; i < dram->num_cs; i++) {
4832 		const struct mbus_dram_window *cs = dram->cs + i;
4833 
4834 		mvpp2_write(priv, MVPP2_WIN_BASE(i),
4835 			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
4836 			    dram->mbus_dram_target_id);
4837 
4838 		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
4839 			    (cs->size - 1) & 0xffff0000);
4840 
4841 		win_enable |= (1 << i);
4842 	}
4843 
4844 	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
4845 }
4846 
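/*
 * Example: a single 1 GiB DRAM chip select based at 0x0 is programmed as
 * WIN_BASE = target id and attributes, WIN_SIZE = (0x40000000 - 1) &
 * 0xffff0000 = 0x3fff0000, with bit 0 set in the window-enable mask.
 */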
4847 /* Initialize Rx FIFO's */
4848 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
4849 {
4850 	int port;
4851 
4852 	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
4853 		if (priv->hw_version == MVPP22) {
4854 			if (port == 0) {
4855 				mvpp2_write(priv,
4856 					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
4857 					    MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE);
4858 				mvpp2_write(priv,
4859 					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
4860 					    MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE);
4861 			} else if (port == 1) {
4862 				mvpp2_write(priv,
4863 					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
4864 					    MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE);
4865 				mvpp2_write(priv,
4866 					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
4867 					    MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE);
4868 			} else {
4869 				mvpp2_write(priv,
4870 					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
4871 					    MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE);
4872 				mvpp2_write(priv,
4873 					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
4874 					    MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE);
4875 			}
4876 		} else {
4877 			mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
4878 				    MVPP21_RX_FIFO_PORT_DATA_SIZE);
4879 			mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
4880 				    MVPP21_RX_FIFO_PORT_ATTR_SIZE);
4881 		}
4882 	}
4883 
4884 	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
4885 		    MVPP2_RX_FIFO_PORT_MIN_PKT);
4886 	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
4887 }
4888 
4889 /* Initialize Tx FIFOs */
4890 static void mvpp2_tx_fifo_init(struct mvpp2 *priv)
4891 {
4892 	int port, val;
4893 
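	/*
	 * Only PPv2.2 has a configurable TX FIFO (the caller gates on
	 * hw_version): port 0 gets 10 KiB, all other ports 3 KiB.
	 */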
4894 	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
4895 		/* Port 0 supports 10KB TX FIFO */
4896 		if (port == 0) {
4897 			val = MVPP2_TX_FIFO_DATA_SIZE_10KB &
4898 				MVPP22_TX_FIFO_SIZE_MASK;
4899 		} else {
4900 			val = MVPP2_TX_FIFO_DATA_SIZE_3KB &
4901 				MVPP22_TX_FIFO_SIZE_MASK;
4902 		}
4903 		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val);
4904 	}
4905 }
4906 
4907 static void mvpp2_axi_init(struct mvpp2 *priv)
4908 {
4909 	u32 val, rdval, wrval;
4910 
4911 	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
4912 
4913 	/* AXI Bridge Configuration */
4914 
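	/*
	 * Use cacheable, outer-domain attributes for descriptor and
	 * buffer accesses so they can be snooped by the CPU caches.
	 */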
4915 	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
4916 		<< MVPP22_AXI_ATTR_CACHE_OFFS;
4917 	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
4918 		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
4919 
4920 	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
4921 		<< MVPP22_AXI_ATTR_CACHE_OFFS;
4922 	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
4923 		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
4924 
4925 	/* BM */
4926 	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
4927 	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
4928 
4929 	/* Descriptors */
4930 	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
4931 	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
4932 	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
4933 	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
4934 
4935 	/* Buffer Data */
4936 	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
4937 	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
4938 
4939 	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
4940 		<< MVPP22_AXI_CODE_CACHE_OFFS;
4941 	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
4942 		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
4943 	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
4944 	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
4945 
4946 	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
4947 		<< MVPP22_AXI_CODE_CACHE_OFFS;
4948 	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
4949 		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
4950 
4951 	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
4952 
4953 	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
4954 		<< MVPP22_AXI_CODE_CACHE_OFFS;
4955 	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
4956 		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
4957 
4958 	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
4959 }
4960 
4961 /* Initialize network controller common part HW */
4962 static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
4963 {
4964 	const struct mbus_dram_target_info *dram_target_info;
4965 	int err, i;
4966 	u32 val;
4967 
4968 	/* Checks for hardware constraints (U-Boot uses only one rxq) */
4969 	if ((rxq_number > priv->max_port_rxqs) ||
4970 	    (txq_number > MVPP2_MAX_TXQ)) {
4971 		dev_err(dev, "invalid queue size parameter\n");
4972 		return -EINVAL;
4973 	}
4974 
4975 	if (priv->hw_version == MVPP22)
4976 		mvpp2_axi_init(priv);
4977 	else {
4978 		/* MBUS windows configuration */
4979 		dram_target_info = mvebu_mbus_dram_info();
4980 		if (dram_target_info)
4981 			mvpp2_conf_mbus_windows(dram_target_info, priv);
4982 	}
4983 
4984 	if (priv->hw_version == MVPP21) {
4985 		/* Disable HW PHY polling */
4986 		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
4987 		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
4988 		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
4989 	} else {
4990 		/* Enable HW PHY polling */
4991 		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
4992 		val |= MVPP22_SMI_POLLING_EN;
4993 		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
4994 	}
4995 
4996 	/* Allocate and initialize aggregated TXQs */
4997 	priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
4998 				       sizeof(struct mvpp2_tx_queue),
4999 				       GFP_KERNEL);
5000 	if (!priv->aggr_txqs)
5001 		return -ENOMEM;
5002 
5003 	for_each_present_cpu(i) {
5004 		priv->aggr_txqs[i].id = i;
5005 		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
5006 		err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
5007 					  MVPP2_AGGR_TXQ_SIZE, i, priv);
5008 		if (err < 0)
5009 			return err;
5010 	}
5011 
5012 	/* Rx Fifo Init */
5013 	mvpp2_rx_fifo_init(priv);
5014 
5015 	/* Tx Fifo Init */
5016 	if (priv->hw_version == MVPP22)
5017 		mvpp2_tx_fifo_init(priv);
5018 
5019 	if (priv->hw_version == MVPP21)
5020 		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
5021 		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
5022 
5023 	/* Allow cache snoop when transmitting packets */
5024 	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
5025 
5026 	/* Buffer Manager initialization */
5027 	err = mvpp2_bm_init(dev, priv);
5028 	if (err < 0)
5029 		return err;
5030 
5031 	/* Parser default initialization */
5032 	err = mvpp2_prs_default_init(dev, priv);
5033 	if (err < 0)
5034 		return err;
5035 
5036 	/* Classifier default initialization */
5037 	mvpp2_cls_init(priv);
5038 
5039 	return 0;
5040 }
5041 
5042 static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
5043 {
5044 	struct mvpp2_port *port = dev_get_priv(dev);
5045 	struct mvpp2_rx_desc *rx_desc;
5046 	struct mvpp2_bm_pool *bm_pool;
5047 	dma_addr_t dma_addr;
5048 	u32 bm, rx_status;
5049 	int pool, rx_bytes, err;
5050 	int rx_received;
5051 	struct mvpp2_rx_queue *rxq;
5052 	u8 *data;
5053 
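	/* If a PHY is attached and reports no link, there is nothing to do */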
5054 	if (port->phyaddr < PHY_MAX_ADDR)
5055 		if (!port->phy_dev->link)
5056 			return 0;
5057 
5058 	/* Process RX packets */
5059 	rxq = port->rxqs[0];
5060 
5061 	/* Get the number of received packets */
5062 	rx_received = mvpp2_rxq_received(port, rxq->id);
5063 
5064 	/* Return if no packets are received */
5065 	if (!rx_received)
5066 		return 0;
5067 
5068 	rx_desc = mvpp2_rxq_next_desc_get(rxq);
5069 	rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
5070 	rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
5071 	rx_bytes -= MVPP2_MH_SIZE;
5072 	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
5073 
5074 	bm = mvpp2_bm_cookie_build(port, rx_desc);
5075 	pool = mvpp2_bm_cookie_pool_get(bm);
5076 	bm_pool = &port->priv->bm_pools[pool];
5077 
5078 	/* In case of an error, release the requested buffer pointer
5079 	 * to the Buffer Manager. This release is controlled by the
5080 	 * hardware, and the information about the buffer is taken
5081 	 * from the RX descriptor.
5082 	 */
5083 	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5084 		mvpp2_rx_error(port, rx_desc);
5085 		/* Return the buffer to the pool */
5086 		mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
5087 		return 0;
5088 	}
5089 
5090 	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
5091 	if (err) {
5092 		netdev_err(port->dev, "failed to refill BM pools\n");
5093 		return 0;
5094 	}
5095 
5096 	/* Update Rx queue management counters */
5097 	mb();
5098 	mvpp2_rxq_status_update(port, rxq->id, 1, 1);
5099 
5100 	/* Give the packet to the stack, skipping the 2-byte HW header and the 32-byte prefetch pad */
5101 	data = (u8 *)dma_addr + 2 + 32;
5102 
5103 	if (rx_bytes <= 0)
5104 		return 0;
5105 
5106 	/*
5107 	 * No cache invalidation is needed here, since the rx_buffers are
5108 	 * located in an uncached memory region
5109 	 */
5110 	*packetp = data;
5111 
5112 	return rx_bytes;
5113 }
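/*
 * Note on the recv() contract: U-Boot's ethernet uclass calls .recv in a
 * loop; a positive return value is the frame length with *packetp pointing
 * at the data, while 0 means there is currently nothing to process.
 */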
5114 
5115 static int mvpp2_send(struct udevice *dev, void *packet, int length)
5116 {
5117 	struct mvpp2_port *port = dev_get_priv(dev);
5118 	struct mvpp2_tx_queue *txq, *aggr_txq;
5119 	struct mvpp2_tx_desc *tx_desc;
5120 	int tx_done;
5121 	int timeout;
5122 
5123 	if (port->phyaddr < PHY_MAX_ADDR)
5124 		if (!port->phy_dev->link)
5125 			return 0;
5126 
5127 	txq = port->txqs[0];
5128 	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5129 
5130 	/* Get a descriptor for the first part of the packet */
5131 	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5132 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5133 	mvpp2_txdesc_size_set(port, tx_desc, length);
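	/*
	 * The descriptor takes a cache-line-aligned DMA address plus a
	 * separate byte offset, so split the buffer address accordingly.
	 */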
5134 	mvpp2_txdesc_offset_set(port, tx_desc,
5135 				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
5136 	mvpp2_txdesc_dma_addr_set(port, tx_desc,
5137 				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
5138 	/* First and Last descriptor */
5139 	mvpp2_txdesc_cmd_set(port, tx_desc,
5140 			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
5141 			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);
5142 
5143 	/* Flush tx data */
5144 	flush_dcache_range((unsigned long)packet,
5145 			   (unsigned long)packet + ALIGN(length, PKTALIGN));
5146 
5147 	/* Enable transmit */
5148 	mb();
5149 	mvpp2_aggr_txq_pend_desc_add(port, 1);
5150 
5151 	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
5152 
5153 	timeout = 0;
5154 	do {
5155 		if (timeout++ > 10000) {
5156 			printf("timeout: packet not sent from aggregated to phys TXQ\n");
5157 			return 0;
5158 		}
5159 		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
5160 	} while (tx_done);
5161 
5162 	timeout = 0;
5163 	do {
5164 		if (timeout++ > 10000) {
5165 			printf("timeout: packet not sent\n");
5166 			return 0;
5167 		}
5168 		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
5169 	} while (!tx_done);
5170 
5171 	return 0;
5172 }
5173 
5174 static int mvpp2_start(struct udevice *dev)
5175 {
5176 	struct eth_pdata *pdata = dev_get_platdata(dev);
5177 	struct mvpp2_port *port = dev_get_priv(dev);
5178 
5179 	/* Load current MAC address */
5180 	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);
5181 
5182 	/* Reconfigure parser to accept the original MAC address */
5183 	mvpp2_prs_update_mac_da(port, port->dev_addr);
5184 
5185 	switch (port->phy_interface) {
5186 	case PHY_INTERFACE_MODE_RGMII:
5187 	case PHY_INTERFACE_MODE_RGMII_ID:
5188 	case PHY_INTERFACE_MODE_SGMII:
5189 		mvpp2_port_power_up(port);
		break;
5190 	default:
5191 		break;
5192 	}
5193 
5194 	mvpp2_open(dev, port);
5195 
5196 	return 0;
5197 }
5198 
5199 static void mvpp2_stop(struct udevice *dev)
5200 {
5201 	struct mvpp2_port *port = dev_get_priv(dev);
5202 
5203 	mvpp2_stop_dev(port);
5204 	mvpp2_cleanup_rxqs(port);
5205 	mvpp2_cleanup_txqs(port);
5206 }
5207 
5208 static int mvpp2_write_hwaddr(struct udevice *dev)
5209 {
5210 	struct mvpp2_port *port = dev_get_priv(dev);
5211 
5212 	return mvpp2_prs_update_mac_da(port, port->dev_addr);
5213 }
5214 
5215 static int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port)
5216 {
5217 	writel(port->phyaddr, port->priv->iface_base +
5218 	       MVPP22_SMI_PHY_ADDR_REG(port->gop_id));
5219 
5220 	return 0;
5221 }
5222 
5223 static int mvpp2_base_probe(struct udevice *dev)
5224 {
5225 	struct mvpp2 *priv = dev_get_priv(dev);
5226 	void *bd_space;
5227 	u32 size = 0;
5228 	int i;
5229 
5230 	/* Save hw-version */
5231 	priv->hw_version = dev_get_driver_data(dev);
5232 
5233 	/*
5234 	 * U-Boot special buffer handling:
5235 	 *
5236 	 * Allocate buffer area for descs and rx_buffers. This is done only
5237 	 * once for all interfaces, as only one interface can be active at a
5238 	 * time. Make this area DMA-safe by disabling the D-cache.
5239 	 */
5240 
5241 	/* Align buffer area for descs and rx_buffers to 1MiB */
5242 	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	if (!bd_space)
		return -ENOMEM;
5243 	mmu_set_region_dcache_behaviour((unsigned long)bd_space,
5244 					BD_SPACE, DCACHE_OFF);
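	/*
	 * The area is carved up in this order: aggregated TX descriptors,
	 * TX descriptors, RX descriptors, BM pool pointer arrays and
	 * finally the RX buffers themselves.
	 */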
5245 
5246 	buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
5247 	size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;
5248 
5249 	buffer_loc.tx_descs =
5250 		(struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
5251 	size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;
5252 
5253 	buffer_loc.rx_descs =
5254 		(struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
5255 	size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;
5256 
5257 	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
5258 		buffer_loc.bm_pool[i] =
5259 			(unsigned long *)((unsigned long)bd_space + size);
5260 		if (priv->hw_version == MVPP21)
5261 			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
5262 		else
5263 			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
5264 	}
5265 
5266 	for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
5267 		buffer_loc.rx_buffer[i] =
5268 			(unsigned long *)((unsigned long)bd_space + size);
5269 		size += RX_BUFFER_SIZE;
5270 	}
5271 
5272 	/* Clear the complete area so that all descriptors are cleared */
5273 	memset(bd_space, 0, size);
5274 
5275 	/* Save base addresses for later use */
5276 	priv->base = (void *)devfdt_get_addr_index(dev, 0);
5277 	if (IS_ERR(priv->base))
5278 		return PTR_ERR(priv->base);
5279 
5280 	if (priv->hw_version == MVPP21) {
5281 		priv->lms_base = (void *)devfdt_get_addr_index(dev, 1);
5282 		if (IS_ERR(priv->lms_base))
5283 			return PTR_ERR(priv->lms_base);
5284 	} else {
5285 		priv->iface_base = (void *)devfdt_get_addr_index(dev, 1);
5286 		if (IS_ERR(priv->iface_base))
5287 			return PTR_ERR(priv->iface_base);
5288 
5289 		/* Store common base addresses for all ports */
5290 		priv->mpcs_base = priv->iface_base + MVPP22_MPCS;
5291 		priv->xpcs_base = priv->iface_base + MVPP22_XPCS;
5292 		priv->rfu1_base = priv->iface_base + MVPP22_RFU1;
5293 	}
5294 
5295 	if (priv->hw_version == MVPP21)
5296 		priv->max_port_rxqs = 8;
5297 	else
5298 		priv->max_port_rxqs = 32;
5299 
5300 	return 0;
5301 }
5302 
5303 static int mvpp2_probe(struct udevice *dev)
5304 {
5305 	struct mvpp2_port *port = dev_get_priv(dev);
5306 	struct mvpp2 *priv = dev_get_priv(dev->parent);
5307 	int err;
5308 
5309 	/* Only call the probe function for the parent once */
5310 	if (!priv->probe_done) {
5311 		err = mvpp2_base_probe(dev->parent);
		if (err)
			return err;
	}
5312 
5313 	port->priv = priv;
5314 
5315 	err = phy_info_parse(dev, port);
5316 	if (err)
5317 		return err;
5318 
5319 	/*
5320 	 * We need the port specific io base addresses at this stage, since
5321 	 * gop_port_init() accesses these registers
5322 	 */
5323 	if (priv->hw_version == MVPP21) {
5324 		int priv_common_regs_num = 2;
5325 
5326 		port->base = (void __iomem *)devfdt_get_addr_index(
5327 			dev->parent, priv_common_regs_num + port->id);
5328 		if (IS_ERR(port->base))
5329 			return PTR_ERR(port->base);
5330 	} else {
5331 		port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
5332 					      "gop-port-id", -1);
5333 		if (port->gop_id == -1) {
5334 			dev_err(dev, "missing gop-port-id value\n");
5335 			return -EINVAL;
5336 		}
5337 
5338 		port->base = priv->iface_base + MVPP22_PORT_BASE +
5339 			port->gop_id * MVPP22_PORT_OFFSET;
5340 
5341 		/* Set phy address of the port */
5342 		if (port->phyaddr < PHY_MAX_ADDR)
5343 			mvpp22_smi_phy_addr_cfg(port);
5344 
5345 		/* GoP Init */
5346 		gop_port_init(port);
5347 	}
5348 
5349 	if (!priv->probe_done) {
5350 		/* Initialize network controller */
5351 		err = mvpp2_init(dev, priv);
5352 		if (err < 0) {
5353 			dev_err(dev, "failed to initialize controller\n");
5354 			return err;
5355 		}
5356 		priv->num_ports = 0;
5357 		priv->probe_done = 1;
5358 	}
5359 
5360 	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
5361 	if (err)
5362 		return err;
5363 
5364 	if (priv->hw_version == MVPP22) {
5365 		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
5366 							   port->phy_interface);
5367 
5368 		/* Netcomplex configurations for all ports */
5369 		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
5370 		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
5371 	}
5372 
5373 	return 0;
5374 }
5375 
5376 /*
5377  * Empty BM pool and stop its activity before the OS is started
5378  */
5379 static int mvpp2_remove(struct udevice *dev)
5380 {
5381 	struct mvpp2_port *port = dev_get_priv(dev);
5382 	struct mvpp2 *priv = port->priv;
5383 	int i;
5384 
5385 	priv->num_ports--;
5386 
5387 	if (priv->num_ports)
5388 		return 0;
5389 
5390 	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++)
5391 		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
5392 
5393 	return 0;
5394 }
5395 
5396 static const struct eth_ops mvpp2_ops = {
5397 	.start		= mvpp2_start,
5398 	.send		= mvpp2_send,
5399 	.recv		= mvpp2_recv,
5400 	.stop		= mvpp2_stop,
5401 	.write_hwaddr	= mvpp2_write_hwaddr
5402 };
5403 
5404 static struct driver mvpp2_driver = {
5405 	.name	= "mvpp2",
5406 	.id	= UCLASS_ETH,
5407 	.probe	= mvpp2_probe,
5408 	.remove = mvpp2_remove,
5409 	.ops	= &mvpp2_ops,
5410 	.priv_auto_alloc_size = sizeof(struct mvpp2_port),
5411 	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
5412 	.flags	= DM_FLAG_ACTIVE_DMA,
5413 };
5414 
5415 /*
5416  * Use a MISC device to bind the n instances (child nodes) of the
5417  * network base controller in UCLASS_ETH.
5418  */
5419 static int mvpp2_base_bind(struct udevice *parent)
5420 {
5421 	const void *blob = gd->fdt_blob;
5422 	int node = dev_of_offset(parent);
5423 	struct uclass_driver *drv;
5424 	struct udevice *dev;
5425 	struct eth_pdata *plat;
5426 	char *name;
5427 	int subnode;
5428 	u32 id;
5429 	int base_id_add;
5430 
5431 	/* Lookup eth driver */
5432 	drv = lists_uclass_lookup(UCLASS_ETH);
5433 	if (!drv) {
5434 		puts("Cannot find eth driver\n");
5435 		return -ENOENT;
5436 	}
5437 
5438 	base_id_add = base_id;
5439 
5440 	fdt_for_each_subnode(subnode, blob, node) {
5441 		/* Increment base_id for all subnodes, also the disabled ones */
5442 		base_id++;
5443 
5444 		/* Skip disabled ports */
5445 		if (!fdtdec_get_is_enabled(blob, subnode))
5446 			continue;
5447 
5448 		plat = calloc(1, sizeof(*plat));
5449 		if (!plat)
5450 			return -ENOMEM;
5451 
5452 		id = fdtdec_get_int(blob, subnode, "port-id", -1);
5453 		id += base_id_add;
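		/*
		 * Offset the DT port-id by the global base so that port
		 * names stay unique when several PPv2 controller
		 * instances are bound.
		 */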
5454 
5455 		name = calloc(1, 16);
5456 		if (!name) {
5457 			free(plat);
5458 			return -ENOMEM;
5459 		}
5460 		sprintf(name, "mvpp2-%d", id);
5461 
5462 		/* Create child device UCLASS_ETH and bind it */
5463 		if (device_bind(parent, &mvpp2_driver, name, plat, subnode,
				&dev)) {
			free(name);
			free(plat);
			return -ENODEV;
		}
5464 		dev_set_of_offset(dev, subnode);
5465 	}
5466 
5467 	return 0;
5468 }
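/*
 * The bind function above expects one subnode per port under the
 * controller node. A minimal sketch of the expected layout (the node
 * names and unit addresses are illustrative only; the properties are
 * the ones actually parsed above and in phy_info_parse()):
 *
 *	ethernet@0 {
 *		compatible = "marvell,armada-7k-pp22";
 *		port@0 {
 *			port-id = <0>;
 *			gop-port-id = <0>;
 *		};
 *	};
 */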
5469 
5470 static const struct udevice_id mvpp2_ids[] = {
5471 	{
5472 		.compatible = "marvell,armada-375-pp2",
5473 		.data = MVPP21,
5474 	},
5475 	{
5476 		.compatible = "marvell,armada-7k-pp22",
5477 		.data = MVPP22,
5478 	},
5479 	{ }
5480 };
5481 
5482 U_BOOT_DRIVER(mvpp2_base) = {
5483 	.name	= "mvpp2_base",
5484 	.id	= UCLASS_MISC,
5485 	.of_match = mvpp2_ids,
5486 	.bind	= mvpp2_base_bind,
5487 	.priv_auto_alloc_size = sizeof(struct mvpp2),
5488 };
5489