/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"

/*
 * # of exact address filters.  The first one is used for the station address,
 * the rest are available for multicast addresses.
 */
#define EXACT_ADDR_FILTERS 8

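/*
 * Derive a MAC's port index (0 or 1) from its register block offset by
 * dividing by the spacing between the XGMAC0 and XGMAC1 register blocks.
 */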
static inline int macidx(const struct cmac *mac)
{
	return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
}

static void xaui_serdes_reset(struct cmac *mac)
{
	static const unsigned int clear[] = {
		F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
		F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
	};

	int i;
	struct adapter *adap = mac->adapter;
	u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;

	t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
		     F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
		     F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
		     F_RESETPLL23 | F_RESETPLL01);
	t3_read_reg(adap, ctrl);
	udelay(15);

	for (i = 0; i < ARRAY_SIZE(clear); i++) {
		t3_set_reg_field(adap, ctrl, clear[i], 0);
		udelay(15);
	}
}

void t3b_pcs_reset(struct cmac *mac)
{
	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
			 F_PCS_RESET_, 0);
	udelay(20);
	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
			 F_PCS_RESET_);
}

int t3_mac_reset(struct cmac *mac)
{
	static const struct addr_val_pair mac_reset_avp[] = {
		{A_XGM_TX_CTRL, 0},
		{A_XGM_RX_CTRL, 0},
		{A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
		 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
		{A_XGM_RX_HASH_LOW, 0},
		{A_XGM_RX_HASH_HIGH, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_1, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_2, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_3, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_4, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_5, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_6, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_7, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_8, 0},
		{A_XGM_STAT_CTRL, F_CLRSTATS}
	};
	u32 val;
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */

	t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
	t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
			 F_RXSTRFRWRD | F_DISERRFRAMES,
			 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);

	if (uses_xaui(adap)) {
		if (adap->params.rev == 0) {
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_RXENABLE | F_TXENABLE);
			if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
					    F_CMULOCK, 1, 5, 2)) {
				CH_ERR(adap,
				       "MAC %d XAUI SERDES CMU lock failed\n",
				       macidx(mac));
				return -1;
			}
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_SERDESRESET_);
		} else
			xaui_serdes_reset(mac);
	}

	t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
			 V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
			 V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
	val = F_MAC_RESET_ | F_XGMAC_STOP_EN;

	if (is_10G(adap))
		val |= F_PCS_RESET_;
	else if (uses_xaui(adap))
		val |= F_PCS_RESET_ | F_XG2G_RESET_;
	else
		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
	if ((val & F_PCS_RESET_) && adap->params.rev) {
		msleep(1);
		t3b_pcs_reset(mac);
	}

	memset(&mac->stats, 0, sizeof(mac->stats));
	return 0;
}

static int t3b2_mac_reset(struct cmac *mac)
{
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;
	u32 val;

	if (!macidx(mac))
		t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
	else
		t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */

	msleep(10);

	/* Check for xgm Rx fifo empty */
	if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
			    0x80000000, 1, 5, 2)) {
		CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
		       macidx(mac));
		return -1;
	}

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */

	val = F_MAC_RESET_;
	if (is_10G(adap))
		val |= F_PCS_RESET_;
	else if (uses_xaui(adap))
		val |= F_PCS_RESET_ | F_XG2G_RESET_;
	else
		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);  /* flush */
	if ((val & F_PCS_RESET_) && adap->params.rev) {
		msleep(1);
		t3b_pcs_reset(mac);
	}
	t3_write_reg(adap, A_XGM_RX_CFG + oft,
		     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
		     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);

	if (!macidx(mac))
		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
	else
		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);

	return 0;
}

/*
 * Set the exact match register 'idx' to recognize the given Ethernet address.
 */
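/*
 * Each exact-match entry is a LOW/HIGH register pair spaced 8 bytes apart;
 * the address is packed little-endian, bytes 0-3 into the LOW register and
 * bytes 4-5 into the HIGH register.
 */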
static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
{
	u32 addr_lo, addr_hi;
	unsigned int oft = mac->offset + idx * 8;

	addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	addr_hi = (addr[5] << 8) | addr[4];

	t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
	t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
}

/* Set one of the station's unicast MAC addresses. */
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
{
	if (idx >= mac->nucast)
		return -EINVAL;
	set_addr_filter(mac, idx, addr);
	return 0;
}

/*
 * Specify the number of exact address filters that should be reserved for
 * unicast addresses.  Caller should reload the unicast and multicast addresses
 * after calling this.
 */
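/*
 * For example, with n == 2 filters 0 and 1 hold unicast addresses and the
 * remaining EXACT_ADDR_FILTERS - n entries stay available to
 * t3_mac_set_rx_mode() for multicast addresses.
 */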
int t3_mac_set_num_ucast(struct cmac *mac, int n)
{
	if (n > EXACT_ADDR_FILTERS)
		return -EINVAL;
	mac->nucast = n;
	return 0;
}

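/*
 * Rewriting the LOW half of an exact-match entry appears to invalidate it
 * until the HIGH half is rewritten, so reading each register back and
 * writing the same value is enough to disable and later re-arm the filters.
 */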
static void disable_exact_filters(struct cmac *mac)
{
	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;

	for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
		u32 v = t3_read_reg(mac->adapter, reg);
		t3_write_reg(mac->adapter, reg, v);
	}
	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);	/* flush */
}

static void enable_exact_filters(struct cmac *mac)
{
	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;

	for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
		u32 v = t3_read_reg(mac->adapter, reg);
		t3_write_reg(mac->adapter, reg, v);
	}
	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);	/* flush */
}

/* Calculate the RX hash filter index of an Ethernet address */
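/*
 * The 48 address bits are XOR-folded into a 6-bit value that selects one
 * bit in the 64-bit A_XGM_RX_HASH_HIGH:A_XGM_RX_HASH_LOW hash table.
 */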
static int hash_hw_addr(const u8 * addr)
{
	int hash = 0, octet, bit, i = 0, c;

	for (octet = 0; octet < 6; ++octet)
		for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
			hash ^= (c & 1) << i;
			if (++i == 6)
				i = 0;
		}
	return hash;
}

int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
{
	u32 val, hash_lo, hash_hi;
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;

	val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
	if (rm->dev->flags & IFF_PROMISC)
		val |= F_COPYALLFRAMES;
	t3_write_reg(adap, A_XGM_RX_CFG + oft, val);

	if (rm->dev->flags & IFF_ALLMULTI)
		hash_lo = hash_hi = 0xffffffff;
	else {
		u8 *addr;
		int exact_addr_idx = mac->nucast;

		hash_lo = hash_hi = 0;
		while ((addr = t3_get_next_mcaddr(rm)))
			if (exact_addr_idx < EXACT_ADDR_FILTERS)
				set_addr_filter(mac, exact_addr_idx++, addr);
			else {
				int hash = hash_hw_addr(addr);

				if (hash < 32)
					hash_lo |= (1 << hash);
				else
					hash_hi |= (1 << (hash - 32));
			}
	}

	t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
	t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
	return 0;
}

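/*
 * PAUSE high watermark: leave room for roughly three maximum-size frames
 * below the top of the RX FIFO, never drop below 38% of the FIFO, and
 * always keep at least 8KB of headroom above the watermark.
 */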
static int rx_fifo_hwm(int mtu)
{
	int hwm;

	hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
	return min(hwm, MAC_RXFIFO_SIZE - 8192);
}

int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
	int hwm, lwm, divisor;
	int ipg;
	unsigned int thres, v, reg;
	struct adapter *adap = mac->adapter;

	/*
	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
	 * packet size register includes header, but not FCS.
	 */
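	/*
	 * E.g. a standard 1500-byte MTU is programmed as 1514 (14-byte
	 * Ethernet header added below); the MAX_FRAME_SIZE - 4 bound leaves
	 * room for the 4-byte FCS that MAX_FRAME_SIZE counts but this
	 * register does not.
	 */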
	mtu += 14;
	if (mtu > MAX_FRAME_SIZE - 4)
		return -EINVAL;
	t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);

	/*
	 * Adjust the PAUSE frame watermarks.  We always set the LWM, and the
	 * HWM only if flow-control is enabled.
	 */
	hwm = max_t(unsigned int, MAC_RXFIFO_SIZE - 3 * mtu,
		    MAC_RXFIFO_SIZE * 38 / 100);
	hwm = min(hwm, MAC_RXFIFO_SIZE - 8192);
	lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);

	if (adap->params.rev >= T3_REV_B2 &&
	    (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
		disable_exact_filters(mac);
		v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
		t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
				 F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);

		reg = adap->params.rev == T3_REV_B2 ?
			A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;

		/* drain RX FIFO */
		if (t3_wait_op_done(adap, reg + mac->offset,
				    F_RXFIFO_EMPTY, 1, 20, 5)) {
			t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
			enable_exact_filters(mac);
			return -EIO;
		}
		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
				 V_RXMAXPKTSIZE(mtu));
		t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
		enable_exact_filters(mac);
	} else
		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
				 V_RXMAXPKTSIZE(mtu));

	/*
	 * Adjust the PAUSE frame watermarks.  We always set the LWM, and the
	 * HWM only if flow-control is enabled.
	 */
	hwm = rx_fifo_hwm(mtu);
	lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
	v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
	v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
	v |= V_RXFIFOPAUSELWM(lwm / 8);
	if (G_RXFIFOPAUSEHWM(v))
		v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
		    V_RXFIFOPAUSEHWM(hwm / 8);

	t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);

	/* Adjust the TX FIFO threshold based on the MTU */
	thres = (adap->params.vpd.cclk * 1000) / 15625;
	thres = (thres * mtu) / 1000;
	if (is_10G(adap))
		thres /= 10;
	thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
	thres = max(thres, 8U);	/* need at least 8 */
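	/* thres is now expressed in 8-byte units (rounded up) and floored at 8 */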
	ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
			 V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
			 V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));

	if (adap->params.rev > 0) {
		divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
		t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
			     (hwm - lwm) * 4 / divisor);
	}
	t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
		     MAC_RXFIFO_SIZE * 4 * 8 / 512);
	return 0;
}

int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
{
	u32 val;
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;

	if (duplex >= 0 && duplex != DUPLEX_FULL)
		return -EINVAL;
	if (speed >= 0) {
		if (speed == SPEED_10)
			val = V_PORTSPEED(0);
		else if (speed == SPEED_100)
			val = V_PORTSPEED(1);
		else if (speed == SPEED_1000)
			val = V_PORTSPEED(2);
		else if (speed == SPEED_10000)
			val = V_PORTSPEED(3);
		else
			return -EINVAL;

		t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
				 V_PORTSPEED(M_PORTSPEED), val);
	}

	val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
	val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
	if (fc & PAUSE_TX)
		val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(
						t3_read_reg(adap,
						A_XGM_RX_MAX_PKT_SIZE
						+ oft)) / 8);
	t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);

	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
			 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
	return 0;
}

int t3_mac_enable(struct cmac *mac, int which)
{
	int idx = macidx(mac);
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;
	struct mac_stats *s = &mac->stats;

	if (which & MAC_DIRECTION_TX) {
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
		t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401);
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);

		t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);

		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
		mac->tx_mcnt = s->tx_frames;
		mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
							A_TP_PIO_DATA)));
		mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_TX_SPI4_SOP_EOP_CNT +
						oft)));
		mac->rx_mcnt = s->rx_frames;
		mac->rx_pause = s->rx_pause;
		mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_RX_SPI4_SOP_EOP_CNT +
						oft)));
		mac->rx_ocnt = s->rx_fifo_ovfl;
		mac->txen = F_TXEN;
		mac->toggle_cnt = 0;
	}
	if (which & MAC_DIRECTION_RX)
		t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
	return 0;
}

int t3_mac_disable(struct cmac *mac, int which)
{
	struct adapter *adap = mac->adapter;

	if (which & MAC_DIRECTION_TX) {
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
		mac->txen = 0;
	}
	if (which & MAC_DIRECTION_RX) {
		int val = F_MAC_RESET_;

		t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
				 F_PCS_RESET_, 0);
		msleep(100);
		t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
		if (is_10G(adap))
			val |= F_PCS_RESET_;
		else if (uses_xaui(adap))
			val |= F_PCS_RESET_ | F_XG2G_RESET_;
		else
			val |= F_RGMII_RESET_ | F_XG2G_RESET_;
		t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
	}
	return 0;
}

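/*
 * Periodic watchdog for T3B2 MACs.  From the recovery actions below: the
 * return value is 0 when TX and RX appear to be making progress, 1 when the
 * transmitter looked stuck and TX enable was toggled, and 2 when a full MAC
 * reset was performed.
 */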
int t3b2_mac_watchdog_task(struct cmac *mac)
{
	struct adapter *adap = mac->adapter;
	struct mac_stats *s = &mac->stats;
	unsigned int tx_tcnt, tx_xcnt;
	unsigned int tx_mcnt = s->tx_frames;
	unsigned int rx_mcnt = s->rx_frames;
	unsigned int rx_xcnt;
	int status;

	status = 0;
	tx_xcnt = 1;		/* By default tx_xcnt is making progress */
	tx_tcnt = mac->tx_tcnt;	/* If tx_mcnt is progressing ignore tx_tcnt */
	rx_xcnt = 1;		/* By default rx_xcnt is making progress */
	if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
		tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_TX_SPI4_SOP_EOP_CNT +
						mac->offset)));
		if (tx_xcnt == 0) {
			t3_write_reg(adap, A_TP_PIO_ADDR,
				     A_TP_TX_DROP_CNT_CH0 + macidx(mac));
			tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
						      A_TP_PIO_DATA)));
		} else {
			goto rxcheck;
		}
	} else {
		mac->toggle_cnt = 0;
		goto rxcheck;
	}

	if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
		if (mac->toggle_cnt > 4) {
			status = 2;
			goto out;
		} else {
			status = 1;
			goto out;
		}
	} else {
		mac->toggle_cnt = 0;
		goto rxcheck;
	}

rxcheck:
	if (rx_mcnt != mac->rx_mcnt) {
		rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_RX_SPI4_SOP_EOP_CNT +
						mac->offset))) +
						(s->rx_fifo_ovfl -
						 mac->rx_ocnt);
		mac->rx_ocnt = s->rx_fifo_ovfl;
	} else
		goto out;

	if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 &&
	    mac->rx_xcnt == 0) {
		status = 2;
		goto out;
	}

out:
	mac->tx_tcnt = tx_tcnt;
	mac->tx_xcnt = tx_xcnt;
	mac->tx_mcnt = s->tx_frames;
	mac->rx_xcnt = rx_xcnt;
	mac->rx_mcnt = s->rx_frames;
	mac->rx_pause = s->rx_pause;
	if (status == 1) {
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
		t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
		t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
		mac->toggle_cnt++;
	} else if (status == 2) {
		t3b2_mac_reset(mac);
		mac->toggle_cnt = 0;
	}
	return status;
}

/*
 * This function is called periodically to accumulate the current values of the
 * RMON counters into the port statistics.  Since the packet counters are only
 * 32 bits they can overflow in ~286 secs at 10G, so the function should be
 * called more frequently than that.  The byte counters are 45 bits wide, so
 * they would overflow in ~7.8 hours.
 */
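/*
 * Roughly: 2^32 frames / ~15M minimum-size frames per second at 10 Gb/s
 * ~= 286 s, and 2^45 bytes / 1.25 GB/s ~= 28,000 s ~= 7.8 hours.
 */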
const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
{
#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
#define RMON_UPDATE(mac, name, reg) \
	(mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
	(mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
			     ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)

	u32 v, lo;

	RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
	RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
	RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
	RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
	RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
	RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
	RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
	RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
	RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);

	RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);

	v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
	if (mac->adapter->params.rev == T3_REV_B2)
		v &= 0x7fffffff;
	mac->stats.rx_too_long += v;

	RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
	RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
	RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
	RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
	RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
	RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
	RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);

	RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
	RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
	RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
	RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
	RMON_UPDATE(mac, tx_pause, TX_PAUSE);
	/* This counts error frames in general (bad FCS, underrun, etc). */
	RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);

	RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
	RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
	RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
	RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
	RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
	RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
	RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);

	/* The next stat isn't clear-on-read. */
	t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
	v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
	lo = (u32) mac->stats.rx_cong_drops;
	mac->stats.rx_cong_drops += (u64) (v - lo);

	return &mac->stats;
}