/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

/**
 *	t3_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
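
/*
 * Illustrative usage sketch, not part of the driver: poll a
 * hypothetical status register until its completion bit reads 1,
 * checking up to 100 times with 5 us between checks.  EXAMPLE_REG and
 * F_EXAMPLE_DONE are made-up names used only in this sketch.
 */
#if 0
static int example_poll(struct adapter *adap)
{
	u32 val;
	int ret = t3_wait_op_done_val(adap, EXAMPLE_REG, F_EXAMPLE_DONE,
				      1, 100, 5, &val);

	if (!ret)
		pr_debug("completed, status 0x%x\n", val);
	return ret;
}
#endif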

/**
 *	t3_write_regs - write a bunch of registers
 *	@adapter: the adapter to program
 *	@p: an array of register address/register value pairs
 *	@n: the number of address/value pairs
 *	@offset: register address offset
 *
 *	Takes an array of register address/register value pairs and writes each
 *	value to the corresponding register.  Register addresses are adjusted
 *	by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 *	t3_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
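
/*
 * Illustrative usage sketch, not part of the driver: a read-modify-write
 * that replaces only the bits covered by a mask, here setting a
 * hypothetical two-bit mode field to 1.  A_EXAMPLE_CTRL, V_MODE and
 * M_MODE are made-up names used only in this sketch.
 */
#if 0
	t3_set_reg_field(adapter, A_EXAMPLE_CTRL, V_MODE(M_MODE), V_MODE(1));
#endif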

/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@start_idx: index of first indirect register to read
 *	@nregs: how many indirect registers to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
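
/*
 * A note on the word assembly above, inferred from the code rather
 * than from hardware documentation: with width 0 a whole 64-bit word
 * comes from the DATA1/DATA0 register pair in a single backdoor
 * access, while wider configurations gather one slice per access and
 * place it using the shift[]/step[] tables.  For width 2, for example,
 * each of the four reads keeps 16 bits (after a 16-bit right shift)
 * and deposits them at bit offsets 48, 32, 16 and 0.
 */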

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	t3_mi1_read,
	t3_mi1_write
};

/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the MDIO_LOCK held.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};

/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
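
/*
 * Illustrative usage sketch, not part of the driver: clear the
 * power-down bit in the basic mode control register while leaving the
 * remaining bits untouched.
 */
#if 0
	err = t3_mdio_change_bits(phy, 0, MII_BMCR, BMCR_PDOWN, 0);
#endif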

/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;	/* ADVERTISE_CSMA, the 802.3 selector bit */
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}
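
/*
 * Illustrative usage sketch, not part of the driver: advertise
 * 100 Mb/s full duplex together with symmetric pause.
 */
#if 0
	err = t3_phy_advertise(phy, ADVERTISED_100baseT_Full |
				    ADVERTISED_Pause);
#endif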

/**
 *	t3_phy_advertise_fiber - set fiber PHY advertisement register
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a fiber PHY's advertisement register to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed
 *	@duplex: requested PHY duplex
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}
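
/*
 * Illustrative usage sketch, not part of the driver: force 100 Mb/s
 * full duplex.  Passing -1 for @speed or @duplex leaves that setting
 * unchanged.
 */
#if 0
	err = t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
#endif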

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);

	if (err)
		return err;
	return (status & 1) ? cphy_cause_link_change : 0;
}

static const struct adapter_info t3_adap_info[] = {
	{2, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},
	{},
	{1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
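
/*
 * For reference, VPD_ENTRY(sn, SERNUM_LEN) expands to the three fields
 * of one VPD-R keyword entry:
 *
 *	u8 sn_kword[2];			two-character keyword, e.g. "SN"
 *	u8 sn_len;			length byte
 *	u8 sn_data[SERNUM_LEN];		the value itself
 */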

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL   40
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}
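
/*
 * Illustrative usage sketch, not part of the driver: read the first
 * 32-bit word of the VPD area.  The address must be 4-byte aligned and
 * below EEPROMSIZE (EEPROM_STAT_ADDR is the one allowed exception).
 */
#if 0
	__le32 word;
	int err = t3_seeprom_read(adapter, VPD_BASE, &word);
#endif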

/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old EEPROMs didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
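
/*
 * Putting the constants above together: the serial flash is eight
 * 64 KB sectors (512 KB total).  Firmware lives in the last sector,
 * starting at 0x70000, and the 4-byte firmware version occupies the
 * last word of that sector at 0x7fffc.
 */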

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}

/**
 *	t3_get_tp_version - read the TP SRAM version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol SRAM version from SRAM.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 *	t3_check_tpsram_version - check the loaded TP SRAM version
 *	@adapter: the adapter
 *
 *	Reads the protocol SRAM version and checks it against the version
 *	the driver was compiled for.
 */
int t3_check_tpsram_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	CH_ERR(adapter, "found wrong TP version (%u.%u), "
	       "driver compiled for version %d.%d\n", major, minor,
	       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	return -EINVAL;
}

/**
 *	t3_check_tpsram - check if the provided protocol SRAM
 *			  is compatible with this driver
 *	@adapter: the adapter
 *	@tp_sram: the protocol SRAM image to check
 *	@size: image size
 *
 *	Checks that the protocol SRAM image is not corrupted.  Returns 0
 *	if the image checksums correctly, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR) {
		CH_WARN(adapter, "found old FW minor version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	} else {
		CH_WARN(adapter, "found newer FW version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 *	t3_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
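
/*
 * For reference, the image layout t3_load_fw expects, derived from the
 * code above:
 *
 *	[0, size - 8)		code and data, flashed in 256-byte pages
 *	[size - 8, size - 4)	4-byte FW version, written last
 *	[size - 4, size)	32-bit one's complement checksum; summing
 *				the whole image as big-endian words must
 *				yield 0xffffffff
 */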

#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;                            /* nothing changed */

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}

/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 *	t3_set_vlan_accel - control HW VLAN extraction
 *	@adapter: the adapter
 *	@ports: bitmap of adapter ports to operate on
 *	@on: enable (1) or disable (0) HW VLAN extraction
 *
 *	Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};

/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurrences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}

#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)

/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static const struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1571 
1572 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1573 	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1574 	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1575 	F_IESPI1_TX_FRAMING_ERROR)
1576 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1577 	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1578 	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1579 	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1580 
1581 /*
1582  * PM RX interrupt handler.
1583  */
1584 static void pmrx_intr_handler(struct adapter *adapter)
1585 {
1586 	static const struct intr_info pmrx_intr_info[] = {
1587 		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1588 		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1589 		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1590 		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1591 		 "PMRX ispi parity error", -1, 1},
1592 		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1593 		 "PMRX ospi parity error", -1, 1},
1594 		{0}
1595 	};
1596 
1597 	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1598 				  pmrx_intr_info, NULL))
1599 		t3_fatal_err(adapter);
1600 }
1601 
1602 /*
1603  * CPL switch interrupt handler.
1604  */
1605 static void cplsw_intr_handler(struct adapter *adapter)
1606 {
1607 	static const struct intr_info cplsw_intr_info[] = {
1608 		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1609 		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1610 		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1611 		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1612 		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1613 		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1614 		{0}
1615 	};
1616 
1617 	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1618 				  cplsw_intr_info, NULL))
1619 		t3_fatal_err(adapter);
1620 }
1621 
1622 /*
1623  * MPS interrupt handler.
1624  */
1625 static void mps_intr_handler(struct adapter *adapter)
1626 {
1627 	static const struct intr_info mps_intr_info[] = {
1628 		{0x1ff, "MPS parity error", -1, 1},
1629 		{0}
1630 	};
1631 
1632 	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1633 				  mps_intr_info, NULL))
1634 		t3_fatal_err(adapter);
1635 }
1636 
1637 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1638 
1639 /*
1640  * MC7 interrupt handler.
1641  */
1642 static void mc7_intr_handler(struct mc7 *mc7)
1643 {
1644 	struct adapter *adapter = mc7->adapter;
1645 	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1646 
1647 	if (cause & F_CE) {
1648 		mc7->stats.corr_err++;
1649 		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1650 			"data 0x%x 0x%x 0x%x\n", mc7->name,
1651 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1652 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1653 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1654 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1655 	}
1656 
1657 	if (cause & F_UE) {
1658 		mc7->stats.uncorr_err++;
1659 		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1660 			 "data 0x%x 0x%x 0x%x\n", mc7->name,
1661 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1662 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1663 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1664 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1665 	}
1666 
1667 	if (G_PE(cause)) {
1668 		mc7->stats.parity_err++;
1669 		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1670 			 mc7->name, G_PE(cause));
1671 	}
1672 
1673 	if (cause & F_AE) {
1674 		u32 addr = 0;
1675 
1676 		if (adapter->params.rev > 0)
1677 			addr = t3_read_reg(adapter,
1678 					   mc7->offset + A_MC7_ERR_ADDR);
1679 		mc7->stats.addr_err++;
1680 		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1681 			 mc7->name, addr);
1682 	}
1683 
1684 	if (cause & MC7_INTR_FATAL)
1685 		t3_fatal_err(adapter);
1686 
1687 	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1688 }
1689 
1690 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1691 			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1692 /*
1693  * XGMAC interrupt handler.
1694  */
1695 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1696 {
1697 	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1698 	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1699 
1700 	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1701 		mac->stats.tx_fifo_parity_err++;
1702 		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1703 	}
1704 	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1705 		mac->stats.rx_fifo_parity_err++;
1706 		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1707 	}
1708 	if (cause & F_TXFIFO_UNDERRUN)
1709 		mac->stats.tx_fifo_urun++;
1710 	if (cause & F_RXFIFO_OVERFLOW)
1711 		mac->stats.rx_fifo_ovfl++;
1712 	if (cause & V_SERDES_LOS(M_SERDES_LOS))
1713 		mac->stats.serdes_signal_loss++;
1714 	if (cause & F_XAUIPCSCTCERR)
1715 		mac->stats.xaui_pcs_ctc_err++;
1716 	if (cause & F_XAUIPCSALIGNCHANGE)
1717 		mac->stats.xaui_pcs_align_change++;
1718 
1719 	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1720 	if (cause & XGM_INTR_FATAL)
1721 		t3_fatal_err(adap);
1722 	return cause != 0;
1723 }
1724 
1725 /*
1726  * Interrupt handler for PHY events.
1727  */
1728 int t3_phy_intr_handler(struct adapter *adapter)
1729 {
1730 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1731 
1732 	for_each_port(adapter, i) {
1733 		struct port_info *p = adap2pinfo(adapter, i);
1734 
1735 		if (!(p->phy.caps & SUPPORTED_IRQ))
1736 			continue;
1737 
1738 		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1739 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
1740 
1741 			if (phy_cause & cphy_cause_link_change)
1742 				t3_link_changed(adapter, i);
1743 			if (phy_cause & cphy_cause_fifo_error)
1744 				p->phy.fifo_errors++;
1745 			if (phy_cause & cphy_cause_module_change)
1746 				t3_os_phymod_changed(adapter, i);
1747 		}
1748 	}
1749 
1750 	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1751 	return 0;
1752 }
1753 
1754 /*
1755  * T3 slow path (non-data) interrupt handler.
1756  */
1757 int t3_slow_intr_handler(struct adapter *adapter)
1758 {
1759 	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1760 
1761 	cause &= adapter->slow_intr_mask;
1762 	if (!cause)
1763 		return 0;
1764 	if (cause & F_PCIM0) {
1765 		if (is_pcie(adapter))
1766 			pcie_intr_handler(adapter);
1767 		else
1768 			pci_intr_handler(adapter);
1769 	}
1770 	if (cause & F_SGE3)
1771 		t3_sge_err_intr_handler(adapter);
1772 	if (cause & F_MC7_PMRX)
1773 		mc7_intr_handler(&adapter->pmrx);
1774 	if (cause & F_MC7_PMTX)
1775 		mc7_intr_handler(&adapter->pmtx);
1776 	if (cause & F_MC7_CM)
1777 		mc7_intr_handler(&adapter->cm);
1778 	if (cause & F_CIM)
1779 		cim_intr_handler(adapter);
1780 	if (cause & F_TP1)
1781 		tp_intr_handler(adapter);
1782 	if (cause & F_ULP2_RX)
1783 		ulprx_intr_handler(adapter);
1784 	if (cause & F_ULP2_TX)
1785 		ulptx_intr_handler(adapter);
1786 	if (cause & F_PM1_RX)
1787 		pmrx_intr_handler(adapter);
1788 	if (cause & F_PM1_TX)
1789 		pmtx_intr_handler(adapter);
1790 	if (cause & F_CPL_SWITCH)
1791 		cplsw_intr_handler(adapter);
1792 	if (cause & F_MPS0)
1793 		mps_intr_handler(adapter);
1794 	if (cause & F_MC5A)
1795 		t3_mc5_intr_handler(&adapter->mc5);
1796 	if (cause & F_XGMAC0_0)
1797 		mac_intr_handler(adapter, 0);
1798 	if (cause & F_XGMAC0_1)
1799 		mac_intr_handler(adapter, 1);
1800 	if (cause & F_T3DBG)
1801 		t3_os_ext_intr_handler(adapter);
1802 
1803 	/* Clear the interrupts just processed. */
1804 	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1805 	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
1806 	return 1;
1807 }
1808 
1809 static unsigned int calc_gpio_intr(struct adapter *adap)
1810 {
1811 	unsigned int i, gpi_intr = 0;
1812 
1813 	for_each_port(adap, i)
1814 		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1815 		    adapter_info(adap)->gpio_intr[i])
1816 			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1817 	return gpi_intr;
1818 }
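/*
 * Illustrative example (values hypothetical, not from the driver): on a
 * board whose adapter_info reports gpio_intr[] = { 3, 5 } for two ports
 * whose PHYs both advertise SUPPORTED_IRQ, calc_gpio_intr() returns
 * (1 << 3) | (1 << 5) = 0x28, which t3_intr_enable() below programs
 * into A_T3DBG_INT_ENABLE.
 */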
1819 
1820 /**
1821  *	t3_intr_enable - enable interrupts
1822  *	@adapter: the adapter whose interrupts should be enabled
1823  *
1824  *	Enable interrupts by setting the interrupt enable registers of the
1825  *	various HW modules and then enabling the top-level interrupt
1826  *	concentrator.
1827  */
1828 void t3_intr_enable(struct adapter *adapter)
1829 {
1830 	static const struct addr_val_pair intr_en_avp[] = {
1831 		{A_SG_INT_ENABLE, SGE_INTR_MASK},
1832 		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
1833 		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1834 		 MC7_INTR_MASK},
1835 		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1836 		 MC7_INTR_MASK},
1837 		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1838 		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1839 		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1840 		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1841 		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1842 		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
1843 	};
1844 
1845 	adapter->slow_intr_mask = PL_INTR_MASK;
1846 
1847 	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1848 	t3_write_reg(adapter, A_TP_INT_ENABLE,
1849 		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1850 
1851 	if (adapter->params.rev > 0) {
1852 		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1853 			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1854 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1855 			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1856 			     F_PBL_BOUND_ERR_CH1);
1857 	} else {
1858 		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1859 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1860 	}
1861 
1862 	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1863 
1864 	if (is_pcie(adapter))
1865 		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1866 	else
1867 		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1868 	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1869 	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
1870 }
1871 
1872 /**
1873  *	t3_intr_disable - disable a card's interrupts
1874  *	@adapter: the adapter whose interrupts should be disabled
1875  *
1876  *	Disable interrupts.  We only disable the top-level interrupt
1877  *	concentrator and the SGE data interrupts.
1878  */
1879 void t3_intr_disable(struct adapter *adapter)
1880 {
1881 	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1882 	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
1883 	adapter->slow_intr_mask = 0;
1884 }
1885 
1886 /**
1887  *	t3_intr_clear - clear all interrupts
1888  *	@adapter: the adapter whose interrupts should be cleared
1889  *
1890  *	Clears all interrupts.
1891  */
1892 void t3_intr_clear(struct adapter *adapter)
1893 {
1894 	static const unsigned int cause_reg_addr[] = {
1895 		A_SG_INT_CAUSE,
1896 		A_SG_RSPQ_FL_STATUS,
1897 		A_PCIX_INT_CAUSE,
1898 		A_MC7_INT_CAUSE,
1899 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1900 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1901 		A_CIM_HOST_INT_CAUSE,
1902 		A_TP_INT_CAUSE,
1903 		A_MC5_DB_INT_CAUSE,
1904 		A_ULPRX_INT_CAUSE,
1905 		A_ULPTX_INT_CAUSE,
1906 		A_CPL_INTR_CAUSE,
1907 		A_PM1_TX_INT_CAUSE,
1908 		A_PM1_RX_INT_CAUSE,
1909 		A_MPS_INT_CAUSE,
1910 		A_T3DBG_INT_CAUSE,
1911 	};
1912 	unsigned int i;
1913 
1914 	/* Clear PHY and MAC interrupts for each port. */
1915 	for_each_port(adapter, i)
1916 		t3_port_intr_clear(adapter, i);
1917 
1918 	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1919 		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1920 
1921 	if (is_pcie(adapter))
1922 		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
1923 	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1924 	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
1925 }
1926 
1927 /**
1928  *	t3_port_intr_enable - enable port-specific interrupts
1929  *	@adapter: associated adapter
1930  *	@idx: index of port whose interrupts should be enabled
1931  *
1932  *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
1933  *	adapter port.
1934  */
1935 void t3_port_intr_enable(struct adapter *adapter, int idx)
1936 {
1937 	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1938 
1939 	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1940 	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1941 	phy->ops->intr_enable(phy);
1942 }
1943 
1944 /**
1945  *	t3_port_intr_disable - disable port-specific interrupts
1946  *	@adapter: associated adapter
1947  *	@idx: index of port whose interrupts should be disabled
1948  *
1949  *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
1950  *	adapter port.
1951  */
1952 void t3_port_intr_disable(struct adapter *adapter, int idx)
1953 {
1954 	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1955 
1956 	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1957 	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1958 	phy->ops->intr_disable(phy);
1959 }
1960 
1961 /**
1962  *	t3_port_intr_clear - clear port-specific interrupts
1963  *	@adapter: associated adapter
1964  *	@idx: index of port whose interrupts to clear
1965  *
1966  *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
1967  *	adapter port.
1968  */
1969 void t3_port_intr_clear(struct adapter *adapter, int idx)
1970 {
1971 	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1972 
1973 	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1974 	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1975 	phy->ops->intr_clear(phy);
1976 }
1977 
1978 #define SG_CONTEXT_CMD_ATTEMPTS 100
1979 
1980 /**
1981  * 	t3_sge_write_context - write an SGE context
1982  * 	@adapter: the adapter
1983  * 	@id: the context id
1984  * 	@type: the context type
1985  *
1986  * 	Program an SGE context with the values already loaded in the
1987  * 	CONTEXT_DATA? registers.
1988  */
1989 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1990 				unsigned int type)
1991 {
1992 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1993 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1994 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1995 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1996 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1997 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1998 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1999 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2000 }
2001 
2002 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2003 			  unsigned int type)
2004 {
2005 	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2006 	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2007 	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2008 	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2009 	return t3_sge_write_context(adap, id, type);
2010 }
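/*
 * Sketch of how a caller might use the context write protocol above
 * (queue id hypothetical): t3_sge_write_context() expects CONTEXT_DATA0-3
 * to be loaded first, opens all bits via the CONTEXT_MASK registers,
 * issues opcode 1 and polls F_CONTEXT_CMD_BUSY.  Zeroing response queue
 * context 0 could look like:
 *
 *	if (clear_sge_ctxt(adap, 0, F_RESPONSEQ))
 *		CH_ERR(adap, "SGE context command timed out\n");
 */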
2011 
2012 /**
2013  *	t3_sge_init_ecntxt - initialize an SGE egress context
2014  *	@adapter: the adapter to configure
2015  *	@id: the context id
2016  *	@gts_enable: whether to enable GTS for the context
2017  *	@type: the egress context type
2018  *	@respq: associated response queue
2019  *	@base_addr: base address of queue
2020  *	@size: number of queue entries
2021  *	@token: uP token
2022  *	@gen: initial generation value for the context
2023  *	@cidx: consumer pointer
2024  *
2025  *	Initialize an SGE egress context and make it ready for use.  If the
2026  *	platform allows concurrent context operations, the caller is
2027  *	responsible for appropriate locking.
2028  */
2029 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2030 		       enum sge_context_type type, int respq, u64 base_addr,
2031 		       unsigned int size, unsigned int token, int gen,
2032 		       unsigned int cidx)
2033 {
2034 	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2035 
2036 	if (base_addr & 0xfff)	/* must be 4K aligned */
2037 		return -EINVAL;
2038 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2039 		return -EBUSY;
2040 
2041 	base_addr >>= 12;
2042 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2043 		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2044 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2045 		     V_EC_BASE_LO(base_addr & 0xffff));
2046 	base_addr >>= 16;
2047 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2048 	base_addr >>= 32;
2049 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2050 		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2051 		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2052 		     F_EC_VALID);
2053 	return t3_sge_write_context(adapter, id, F_EGRESS);
2054 }
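/*
 * Illustrative caller (values hypothetical; SGE_CNTXT_ETH assumed from
 * the sge_context_type enum): program egress context 0 as a 1024-entry
 * TX queue backed by a 4K-aligned DMA ring.  Note how the code above
 * splits base_addr across DATA1 (bits 12-27), DATA2 (bits 28-59) and
 * DATA3 (bits 60-63).
 *
 *	ret = t3_sge_init_ecntxt(adapter, 0, 1, SGE_CNTXT_ETH, 0,
 *				 ring_dma_addr, 1024, 0, 1, 0);
 */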
2055 
2056 /**
2057  *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2058  *	@adapter: the adapter to configure
2059  *	@id: the context id
2060  *	@gts_enable: whether to enable GTS for the context
2061  *	@base_addr: base address of queue
2062  *	@size: number of queue entries
2063  *	@bsize: size of each buffer for this queue
2064  *	@cong_thres: threshold to signal congestion to upstream producers
2065  *	@gen: initial generation value for the context
2066  *	@cidx: consumer pointer
2067  *
2068  *	Initialize an SGE free list context and make it ready for use.  The
2069  *	caller is responsible for ensuring only one context operation occurs
2070  *	at a time.
2071  */
2072 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2073 			int gts_enable, u64 base_addr, unsigned int size,
2074 			unsigned int bsize, unsigned int cong_thres, int gen,
2075 			unsigned int cidx)
2076 {
2077 	if (base_addr & 0xfff)	/* must be 4K aligned */
2078 		return -EINVAL;
2079 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2080 		return -EBUSY;
2081 
2082 	base_addr >>= 12;
2083 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2084 	base_addr >>= 32;
2085 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2086 		     V_FL_BASE_HI((u32) base_addr) |
2087 		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2088 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2089 		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2090 		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2091 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2092 		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2093 		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2094 	return t3_sge_write_context(adapter, id, F_FREELIST);
2095 }
2096 
2097 /**
2098  *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2099  *	@adapter: the adapter to configure
2100  *	@id: the context id
2101  *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2102  *	@base_addr: base address of queue
2103  *	@size: number of queue entries
2104  *	@fl_thres: threshold for selecting the normal or jumbo free list
2105  *	@gen: initial generation value for the context
2106  *	@cidx: consumer pointer
2107  *
2108  *	Initialize an SGE response queue context and make it ready for use.
2109  *	The caller is responsible for ensuring only one context operation
2110  *	occurs at a time.
2111  */
2112 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2113 			 int irq_vec_idx, u64 base_addr, unsigned int size,
2114 			 unsigned int fl_thres, int gen, unsigned int cidx)
2115 {
2116 	unsigned int intr = 0;
2117 
2118 	if (base_addr & 0xfff)	/* must be 4K aligned */
2119 		return -EINVAL;
2120 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2121 		return -EBUSY;
2122 
2123 	base_addr >>= 12;
2124 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2125 		     V_CQ_INDEX(cidx));
2126 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2127 	base_addr >>= 32;
2128 	if (irq_vec_idx >= 0)
2129 		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2130 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2131 		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2132 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2133 	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2134 }
2135 
2136 /**
2137  *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2138  *	@adapter: the adapter to configure
2139  *	@id: the context id
2140  *	@base_addr: base address of queue
2141  *	@size: number of queue entries
2142  *	@rspq: response queue for async notifications
2143  *	@ovfl_mode: CQ overflow mode
2144  *	@credits: completion queue credits
2145  *	@credit_thres: the credit threshold
2146  *
2147  *	Initialize an SGE completion queue context and make it ready for use.
2148  *	The caller is responsible for ensuring only one context operation
2149  *	occurs at a time.
2150  */
2151 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2152 			unsigned int size, int rspq, int ovfl_mode,
2153 			unsigned int credits, unsigned int credit_thres)
2154 {
2155 	if (base_addr & 0xfff)	/* must be 4K aligned */
2156 		return -EINVAL;
2157 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2158 		return -EBUSY;
2159 
2160 	base_addr >>= 12;
2161 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2162 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2163 	base_addr >>= 32;
2164 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2165 		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2166 		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2167 		     V_CQ_ERR(ovfl_mode));
2168 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2169 		     V_CQ_CREDIT_THRES(credit_thres));
2170 	return t3_sge_write_context(adapter, id, F_CQ);
2171 }
2172 
2173 /**
2174  *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2175  *	@adapter: the adapter
2176  *	@id: the egress context id
2177  *	@enable: enable (1) or disable (0) the context
2178  *
2179  *	Enable or disable an SGE egress context.  The caller is responsible for
2180  *	ensuring only one context operation occurs at a time.
2181  */
2182 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2183 {
2184 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2185 		return -EBUSY;
2186 
2187 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2188 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2189 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2190 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2191 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2192 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2193 		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2194 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2195 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2196 }
2197 
2198 /**
2199  *	t3_sge_disable_fl - disable an SGE free-buffer list
2200  *	@adapter: the adapter
2201  *	@id: the free list context id
2202  *
2203  *	Disable an SGE free-buffer list.  The caller is responsible for
2204  *	ensuring only one context operation occurs at a time.
2205  */
2206 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2207 {
2208 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2209 		return -EBUSY;
2210 
2211 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2212 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2213 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2214 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2215 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2216 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2217 		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2218 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2219 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2220 }
2221 
2222 /**
2223  *	t3_sge_disable_rspcntxt - disable an SGE response queue
2224  *	@adapter: the adapter
2225  *	@id: the response queue context id
2226  *
2227  *	Disable an SGE response queue.  The caller is responsible for
2228  *	ensuring only one context operation occurs at a time.
2229  */
2230 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2231 {
2232 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2233 		return -EBUSY;
2234 
2235 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2236 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2237 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2238 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2239 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2240 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2241 		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2242 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2243 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2244 }
2245 
2246 /**
2247  *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2248  *	@adapter: the adapter
2249  *	@id: the completion queue context id
2250  *
2251  *	Disable an SGE completion queue.  The caller is responsible for
2252  *	ensuring only one context operation occurs at a time.
2253  */
2254 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2255 {
2256 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2257 		return -EBUSY;
2258 
2259 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2260 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2261 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2262 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2263 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2264 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2265 		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2266 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2267 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2268 }
2269 
2270 /**
2271  *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2272  *	@adapter: the adapter
2273  *	@id: the context id
2274  *	@op: the operation to perform
 *	@credits: the credits to supply with the operation
2275  *
2276  *	Perform the selected operation on an SGE completion queue context.
2277  *	The caller is responsible for ensuring only one context operation
2278  *	occurs at a time.
2279  */
2280 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2281 		      unsigned int credits)
2282 {
2283 	u32 val;
2284 
2285 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2286 		return -EBUSY;
2287 
2288 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2289 	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2290 		     V_CONTEXT(id) | F_CQ);
2291 	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2292 				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2293 		return -EIO;
2294 
2295 	if (op >= 2 && op < 7) {
2296 		if (adapter->params.rev > 0)
2297 			return G_CQ_INDEX(val);
2298 
2299 		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2300 			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2301 		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2302 				    F_CONTEXT_CMD_BUSY, 0,
2303 				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2304 			return -EIO;
2305 		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2306 	}
2307 	return 0;
2308 }
2309 
2310 /**
2311  * 	t3_sge_read_context - read an SGE context
2312  * 	@type: the context type
2313  * 	@adapter: the adapter
2314  * 	@id: the context id
2315  * 	@data: holds the retrieved context
2316  *
2317  * 	Read an SGE context of the given type.  The caller is responsible
2318  * 	for ensuring only one context operation occurs at a time.
2319  */
2320 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2321 			       unsigned int id, u32 data[4])
2322 {
2323 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2324 		return -EBUSY;
2325 
2326 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2327 		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2328 	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2329 			    SG_CONTEXT_CMD_ATTEMPTS, 1))
2330 		return -EIO;
2331 	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2332 	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2333 	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2334 	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2335 	return 0;
2336 }
2337 
2338 /**
2339  * 	t3_sge_read_ecntxt - read an SGE egress context
2340  * 	@adapter: the adapter
2341  * 	@id: the context id
2342  * 	@data: holds the retrieved context
2343  *
2344  * 	Read an SGE egress context.  The caller is responsible for ensuring
2345  * 	only one context operation occurs at a time.
2346  */
2347 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2348 {
2349 	if (id >= 65536)
2350 		return -EINVAL;
2351 	return t3_sge_read_context(F_EGRESS, adapter, id, data);
2352 }
2353 
2354 /**
2355  * 	t3_sge_read_cq - read an SGE CQ context
2356  * 	@adapter: the adapter
2357  * 	@id: the context id
2358  * 	@data: holds the retrieved context
2359  *
2360  * 	Read an SGE CQ context.  The caller is responsible for ensuring
2361  * 	only one context operation occurs at a time.
2362  */
2363 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2364 {
2365 	if (id >= 65536)
2366 		return -EINVAL;
2367 	return t3_sge_read_context(F_CQ, adapter, id, data);
2368 }
2369 
2370 /**
2371  * 	t3_sge_read_fl - read an SGE free-list context
2372  * 	@adapter: the adapter
2373  * 	@id: the context id
2374  * 	@data: holds the retrieved context
2375  *
2376  * 	Read an SGE free-list context.  The caller is responsible for ensuring
2377  * 	only one context operation occurs at a time.
2378  */
2379 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2380 {
2381 	if (id >= SGE_QSETS * 2)
2382 		return -EINVAL;
2383 	return t3_sge_read_context(F_FREELIST, adapter, id, data);
2384 }
2385 
2386 /**
2387  * 	t3_sge_read_rspq - read an SGE response queue context
2388  * 	@adapter: the adapter
2389  * 	@id: the context id
2390  * 	@data: holds the retrieved context
2391  *
2392  * 	Read an SGE response queue context.  The caller is responsible for
2393  * 	ensuring only one context operation occurs at a time.
2394  */
2395 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2396 {
2397 	if (id >= SGE_QSETS)
2398 		return -EINVAL;
2399 	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2400 }
2401 
2402 /**
2403  *	t3_config_rss - configure Rx packet steering
2404  *	@adapter: the adapter
2405  *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2406  *	@cpus: values for the CPU lookup table (0xff terminated)
2407  *	@rspq: values for the response queue lookup table (0xffff terminated)
2408  *
2409  *	Programs the receive packet steering logic.  @cpus and @rspq provide
2410  *	the values for the CPU and response queue lookup tables.  If they
2411  *	provide fewer values than the size of the tables the supplied values
2412  *	are used repeatedly until the tables are fully populated.
2413  */
2414 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2415 		   const u8 *cpus, const u16 *rspq)
2416 {
2417 	int i, j, cpu_idx = 0, q_idx = 0;
2418 
2419 	if (cpus)
2420 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2421 			u32 val = i << 16;
2422 
2423 			for (j = 0; j < 2; ++j) {
2424 				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2425 				if (cpus[cpu_idx] == 0xff)
2426 					cpu_idx = 0;
2427 			}
2428 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2429 		}
2430 
2431 	if (rspq)
2432 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2433 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2434 				     (i << 16) | rspq[q_idx++]);
2435 			if (rspq[q_idx] == 0xffff)
2436 				q_idx = 0;
2437 		}
2438 
2439 	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2440 }
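/*
 * Illustrative caller (table contents hypothetical; the rss_config
 * flags are assumptions, not taken from this file): spread traffic over
 * two CPUs and four response queues.  The 0xff/0xffff sentinels only
 * mark where the supplied values wrap around; all RSS_TABLE_SIZE
 * entries are still written.
 *
 *	static const u8 cpus[] = { 0, 1, 0xff };
 *	static const u16 rspq[] = { 0, 1, 2, 3, 0xffff };
 *
 *	t3_config_rss(adapter, F_RQFEEDBACKENABLE | V_RRCPLCPUSIZE(6),
 *		      cpus, rspq);
 */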
2441 
2442 /**
2443  *	t3_read_rss - read the contents of the RSS tables
2444  *	@adapter: the adapter
2445  *	@lkup: holds the contents of the RSS lookup table
2446  *	@map: holds the contents of the RSS map table
2447  *
2448  *	Reads the contents of the receive packet steering tables.
2449  */
2450 int t3_read_rss(struct adapter *adapter, u8 *lkup, u16 *map)
2451 {
2452 	int i;
2453 	u32 val;
2454 
2455 	if (lkup)
2456 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2457 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2458 				     0xffff0000 | i);
2459 			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2460 			if (!(val & 0x80000000))
2461 				return -EAGAIN;
2462 			*lkup++ = val;
2463 			*lkup++ = (val >> 8);
2464 		}
2465 
2466 	if (map)
2467 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2468 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2469 				     0xffff0000 | i);
2470 			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2471 			if (!(val & 0x80000000))
2472 				return -EAGAIN;
2473 			*map++ = val;
2474 		}
2475 	return 0;
2476 }
2477 
2478 /**
2479  *	t3_tp_set_offload_mode - put TP in NIC/offload mode
2480  *	@adap: the adapter
2481  *	@enable: 1 to select offload mode, 0 for regular NIC
2482  *
2483  *	Switches TP to NIC/offload mode.
2484  */
2485 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2486 {
2487 	if (is_offload(adap) || !enable)
2488 		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2489 				 V_NICMODE(!enable));
2490 }
2491 
2492 /**
2493  *	pm_num_pages - calculate the number of pages of the payload memory
2494  *	@mem_size: the size of the payload memory
2495  *	@pg_size: the size of each payload memory page
2496  *
2497  *	Calculate the number of pages, each of the given size, that fit in a
2498  *	memory of the specified size, respecting the HW requirement that the
2499  *	number of pages must be a multiple of 24.
2500  */
2501 static inline unsigned int pm_num_pages(unsigned int mem_size,
2502 					unsigned int pg_size)
2503 {
2504 	unsigned int n = mem_size / pg_size;
2505 
2506 	return n - n % 24;
2507 }
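/*
 * Worked example (sizes hypothetical): for mem_size = 16 MB and
 * pg_size = 64 KB, n = 256 and pm_num_pages() returns
 * 256 - 256 % 24 = 240, the largest multiple of 24 that fits.
 */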
2508 
2509 #define mem_region(adap, start, size, reg) \
2510 	t3_write_reg((adap), A_ ## reg, (start)); \
2511 	start += size
2512 
2513 /**
2514  *	partition_mem - partition memory and configure TP memory settings
2515  *	@adap: the adapter
2516  *	@p: the TP parameters
2517  *
2518  *	Partitions context and payload memory and configures TP's memory
2519  *	registers.
2520  */
2521 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2522 {
2523 	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2524 	unsigned int timers = 0, timers_shift = 22;
2525 
2526 	if (adap->params.rev > 0) {
2527 		if (tids <= 16 * 1024) {
2528 			timers = 1;
2529 			timers_shift = 16;
2530 		} else if (tids <= 64 * 1024) {
2531 			timers = 2;
2532 			timers_shift = 18;
2533 		} else if (tids <= 256 * 1024) {
2534 			timers = 3;
2535 			timers_shift = 20;
2536 		}
2537 	}
2538 
2539 	t3_write_reg(adap, A_TP_PMM_SIZE,
2540 		     p->chan_rx_size | (p->chan_tx_size >> 16));
2541 
2542 	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2543 	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2544 	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2545 	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2546 			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2547 
2548 	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2549 	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2550 	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2551 
2552 	pstructs = p->rx_num_pgs + p->tx_num_pgs;
2553 	/* Add a bit of headroom and make multiple of 24 */
2554 	pstructs += 48;
2555 	pstructs -= pstructs % 24;
2556 	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2557 
2558 	m = tids * TCB_SIZE;
2559 	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2560 	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2561 	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2562 	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2563 	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2564 	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2565 	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2566 	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2567 
2568 	m = (m + 4095) & ~0xfff;
2569 	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2570 	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2571 
2572 	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2573 	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2574 	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2575 	if (tids < m)
2576 		adap->params.mc5.nservers += m - tids;
2577 }
2578 
2579 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2580 				  u32 val)
2581 {
2582 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2583 	t3_write_reg(adap, A_TP_PIO_DATA, val);
2584 }
2585 
2586 static void tp_config(struct adapter *adap, const struct tp_params *p)
2587 {
2588 	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2589 		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2590 		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2591 	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2592 		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2593 		     V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2594 	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2595 		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2596 		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2597 		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2598 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2599 			 F_IPV6ENABLE | F_NICMODE);
2600 	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2601 	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2602 	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2603 			 adap->params.rev > 0 ? F_ENABLEESND :
2604 			 F_T3A_ENABLEESND);
2605 
2606 	t3_set_reg_field(adap, A_TP_PC_CONFIG,
2607 			 F_ENABLEEPCMDAFULL,
2608 			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2609 			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2610 	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2611 			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2612 			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2613 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2614 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2615 
2616 	if (adap->params.rev > 0) {
2617 		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2618 		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2619 				 F_TXPACEAUTO);
2620 		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2621 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2622 	} else
2623 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2624 
2625 	if (adap->params.rev == T3_REV_C)
2626 		t3_set_reg_field(adap, A_TP_PC_CONFIG,
2627 				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2628 				 V_TABLELATENCYDELTA(4));
2629 
2630 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2631 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2632 	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2633 	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2634 }
2635 
2636 /* Desired TP timer resolution in usec */
2637 #define TP_TMR_RES 50
2638 
2639 /* TCP timer values in ms */
2640 #define TP_DACK_TIMER 50
2641 #define TP_RTO_MIN    250
2642 
2643 /**
2644  *	tp_set_timers - set TP timing parameters
2645  *	@adap: the adapter to set
2646  *	@core_clk: the core clock frequency in Hz
2647  *
2648  *	Set TP's timing parameters, such as the various timer resolutions and
2649  *	the TCP timer values.
2650  */
2651 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2652 {
2653 	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2654 	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
2655 	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
2656 	unsigned int tps = core_clk >> tre;
2657 
2658 	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2659 		     V_DELAYEDACKRESOLUTION(dack_re) |
2660 		     V_TIMESTAMPRESOLUTION(tstamp_re));
2661 	t3_write_reg(adap, A_TP_DACK_TIMER,
2662 		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2663 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2664 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2665 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2666 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2667 	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2668 		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2669 		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2670 		     V_KEEPALIVEMAX(9));
2671 
2672 #define SECONDS * tps
2673 
2674 	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2675 	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2676 	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2677 	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2678 	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2679 	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2680 	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2681 	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2682 	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2683 
2684 #undef SECONDS
2685 }
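/*
 * Worked example (assuming a hypothetical 200 MHz core clock):
 * core_clk / (1000000 / TP_TMR_RES) = 10000, so tre = fls(10000) - 1 = 13
 * and tps = 200000000 >> 13 = 24414 timer ticks per second; "2 SECONDS"
 * above therefore writes 48828.  Likewise dack_re = fls(40000) - 1 = 15,
 * so A_TP_DACK_TIMER gets (200000000 >> 15) / 20 = 305 ticks of ~164 us
 * each, i.e. roughly the 50 ms TP_DACK_TIMER target.
 */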
2686 
2687 /**
2688  *	t3_tp_set_coalescing_size - set receive coalescing size
2689  *	@adap: the adapter
2690  *	@size: the receive coalescing size
2691  *	@psh: whether a set PSH bit should deliver coalesced data
2692  *
2693  *	Set the receive coalescing size and PSH bit handling.
2694  */
2695 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2696 {
2697 	u32 val;
2698 
2699 	if (size > MAX_RX_COALESCING_LEN)
2700 		return -EINVAL;
2701 
2702 	val = t3_read_reg(adap, A_TP_PARA_REG3);
2703 	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2704 
2705 	if (size) {
2706 		val |= F_RXCOALESCEENABLE;
2707 		if (psh)
2708 			val |= F_RXCOALESCEPSHEN;
2709 		size = min(MAX_RX_COALESCING_LEN, size);
2710 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2711 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2712 	}
2713 	t3_write_reg(adap, A_TP_PARA_REG3, val);
2714 	return 0;
2715 }
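/*
 * Illustrative caller (hypothetical): enable coalescing up to the HW
 * limit and let a set PSH bit deliver pending data:
 *
 *	t3_tp_set_coalescing_size(adap, MAX_RX_COALESCING_LEN, 1);
 *
 * Passing size == 0 leaves F_RXCOALESCEENABLE and F_RXCOALESCEPSHEN
 * clear, i.e. disables coalescing.
 */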
2716 
2717 /**
2718  *	t3_tp_set_max_rxsize - set the max receive size
2719  *	@adap: the adapter
2720  *	@size: the max receive size
2721  *
2722  *	Set TP's max receive size.  This is the limit that applies when
2723  *	receive coalescing is disabled.
2724  */
2725 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2726 {
2727 	t3_write_reg(adap, A_TP_PARA_REG7,
2728 		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2729 }
2730 
2731 static void init_mtus(unsigned short mtus[])
2732 {
2733 	/*
2734 	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
2735 	 * it can accommodate max size TCP/IP headers when SACK and timestamps
2736 	 * are enabled and still have at least 8 bytes of payload.
2737 	 */
2738 	mtus[0] = 88;
2739 	mtus[1] = 88;
2740 	mtus[2] = 256;
2741 	mtus[3] = 512;
2742 	mtus[4] = 576;
2743 	mtus[5] = 1024;
2744 	mtus[6] = 1280;
2745 	mtus[7] = 1492;
2746 	mtus[8] = 1500;
2747 	mtus[9] = 2002;
2748 	mtus[10] = 2048;
2749 	mtus[11] = 4096;
2750 	mtus[12] = 4352;
2751 	mtus[13] = 8192;
2752 	mtus[14] = 9000;
2753 	mtus[15] = 9600;
2754 }
2755 
2756 /*
2757  * Initial congestion control parameters.
2758  */
2759 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2760 {
2761 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2762 	a[9] = 2;
2763 	a[10] = 3;
2764 	a[11] = 4;
2765 	a[12] = 5;
2766 	a[13] = 6;
2767 	a[14] = 7;
2768 	a[15] = 8;
2769 	a[16] = 9;
2770 	a[17] = 10;
2771 	a[18] = 14;
2772 	a[19] = 17;
2773 	a[20] = 21;
2774 	a[21] = 25;
2775 	a[22] = 30;
2776 	a[23] = 35;
2777 	a[24] = 45;
2778 	a[25] = 60;
2779 	a[26] = 80;
2780 	a[27] = 100;
2781 	a[28] = 200;
2782 	a[29] = 300;
2783 	a[30] = 400;
2784 	a[31] = 500;
2785 
2786 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2787 	b[9] = b[10] = 1;
2788 	b[11] = b[12] = 2;
2789 	b[13] = b[14] = b[15] = b[16] = 3;
2790 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2791 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2792 	b[28] = b[29] = 6;
2793 	b[30] = b[31] = 7;
2794 }
2795 
2796 /* The minimum additive increment value for the congestion control table */
2797 #define CC_MIN_INCR 2U
2798 
2799 /**
2800  *	t3_load_mtus - write the MTU and congestion control HW tables
2801  *	@adap: the adapter
2802  *	@mtus: the unrestricted values for the MTU table
2803  *	@alpha: the values for the congestion control alpha parameter
2804  *	@beta: the values for the congestion control beta parameter
2805  *	@mtu_cap: the maximum permitted effective MTU
2806  *
2807  *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2808  *	Update the high-speed congestion control table with the supplied alpha,
2809  * 	beta, and MTUs.
2810  */
2811 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2812 		  unsigned short alpha[NCCTRL_WIN],
2813 		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2814 {
2815 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2816 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2817 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2818 		28672, 40960, 57344, 81920, 114688, 163840, 229376
2819 	};
2820 
2821 	unsigned int i, w;
2822 
2823 	for (i = 0; i < NMTUS; ++i) {
2824 		unsigned int mtu = min(mtus[i], mtu_cap);
2825 		unsigned int log2 = fls(mtu);
2826 
2827 		if (!(mtu & ((1 << log2) >> 2)))	/* round */
2828 			log2--;
2829 		t3_write_reg(adap, A_TP_MTU_TABLE,
2830 			     (i << 24) | (log2 << 16) | mtu);
2831 
2832 		for (w = 0; w < NCCTRL_WIN; ++w) {
2833 			unsigned int inc;
2834 
2835 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2836 				  CC_MIN_INCR);
2837 
2838 			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2839 				     (w << 16) | (beta[w] << 13) | inc);
2840 		}
2841 	}
2842 }
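/*
 * Worked example for one entry (alpha value hypothetical): for
 * mtus[i] = 1500, fls(1500) = 11 but bit 9 of 1500 is clear, so log2 is
 * rounded down to 10.  For a window w with alpha[w] = 1 and
 * avg_pkts[w] = 160, the additive increment written to the table is
 * max((1500 - 40) * 1 / 160, CC_MIN_INCR) = 9.
 */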
2843 
2844 /**
2845  *	t3_read_hw_mtus - returns the values in the HW MTU table
2846  *	@adap: the adapter
2847  *	@mtus: where to store the HW MTU values
2848  *
2849  *	Reads the HW MTU table.
2850  */
2851 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2852 {
2853 	int i;
2854 
2855 	for (i = 0; i < NMTUS; ++i) {
2856 		unsigned int val;
2857 
2858 		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2859 		val = t3_read_reg(adap, A_TP_MTU_TABLE);
2860 		mtus[i] = val & 0x3fff;
2861 	}
2862 }
2863 
2864 /**
2865  *	t3_get_cong_cntl_tab - reads the congestion control table
2866  *	@adap: the adapter
2867  *	@incr: where to store the alpha values
2868  *
2869  *	Reads the additive increments programmed into the HW congestion
2870  *	control table.
2871  */
2872 void t3_get_cong_cntl_tab(struct adapter *adap,
2873 			  unsigned short incr[NMTUS][NCCTRL_WIN])
2874 {
2875 	unsigned int mtu, w;
2876 
2877 	for (mtu = 0; mtu < NMTUS; ++mtu)
2878 		for (w = 0; w < NCCTRL_WIN; ++w) {
2879 			t3_write_reg(adap, A_TP_CCTRL_TABLE,
2880 				     0xffff0000 | (mtu << 5) | w);
2881 			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2882 				       0x1fff;
2883 		}
2884 }
2885 
2886 /**
2887  *	t3_tp_get_mib_stats - read TP's MIB counters
2888  *	@adap: the adapter
2889  *	@tps: holds the returned counter values
2890  *
2891  *	Returns the values of TP's MIB counters.
2892  */
2893 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2894 {
2895 	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2896 			 sizeof(*tps) / sizeof(u32), 0);
2897 }
2898 
2899 #define ulp_region(adap, name, start, len) \
2900 	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2901 	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2902 		     (start) + (len) - 1); \
2903 	start += len
2904 
2905 #define ulptx_region(adap, name, start, len) \
2906 	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2907 	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2908 		     (start) + (len) - 1)
2909 
2910 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2911 {
2912 	unsigned int m = p->chan_rx_size;
2913 
2914 	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2915 	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2916 	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2917 	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2918 	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2919 	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2920 	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2921 	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2922 }
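/*
 * Layout note with a worked example (chan_rx_size hypothetical):
 * ulp_region() advances the running offset while ulptx_region() does
 * not, so the TX-side TPT and PBL windows deliberately alias the
 * RX-side STAG and PBL regions.  With chan_rx_size = 0x100000 the
 * regions are carved from [0x100000, 0x200000): ISCSI and TDDP take
 * 1/8 of chan_rx_size each, STAG/TPT, RQ and the shared PBL 1/4 each.
 */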
2923 
2924 /**
2925  *	t3_set_proto_sram - set the contents of the protocol SRAM
2926  *	@adap: the adapter
2927  *	@data: the protocol image
2928  *
2929  *	Write the contents of the protocol SRAM.
2930  */
2931 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2932 {
2933 	int i;
2934 	const __be32 *buf = (const __be32 *)data;
2935 
2936 	for (i = 0; i < PROTO_SRAM_LINES; i++) {
2937 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2938 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2939 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2940 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2941 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2942 
2943 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2944 		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2945 			return -EIO;
2946 	}
2947 	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2948 
2949 	return 0;
2950 }
2951 
2952 void t3_config_trace_filter(struct adapter *adapter,
2953 			    const struct trace_params *tp, int filter_index,
2954 			    int invert, int enable)
2955 {
2956 	u32 addr, key[4], mask[4];
2957 
2958 	key[0] = tp->sport | (tp->sip << 16);
2959 	key[1] = (tp->sip >> 16) | (tp->dport << 16);
2960 	key[2] = tp->dip;
2961 	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2962 
2963 	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2964 	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2965 	mask[2] = tp->dip_mask;
2966 	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2967 
2968 	if (invert)
2969 		key[3] |= (1 << 29);
2970 	if (enable)
2971 		key[3] |= (1 << 28);
2972 
2973 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2974 	tp_wr_indirect(adapter, addr++, key[0]);
2975 	tp_wr_indirect(adapter, addr++, mask[0]);
2976 	tp_wr_indirect(adapter, addr++, key[1]);
2977 	tp_wr_indirect(adapter, addr++, mask[1]);
2978 	tp_wr_indirect(adapter, addr++, key[2]);
2979 	tp_wr_indirect(adapter, addr++, mask[2]);
2980 	tp_wr_indirect(adapter, addr++, key[3]);
2981 	tp_wr_indirect(adapter, addr, mask[3]);
2982 	t3_read_reg(adapter, A_TP_PIO_DATA);
2983 }
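/*
 * Illustrative caller (field values hypothetical; mask semantics as
 * programmed into the TRC key/mask registers above): trace TCP packets
 * to destination port 80 using the TX trace filter (filter_index 0):
 *
 *	struct trace_params tp = {
 *		.dport = 80, .dport_mask = 0xffff,
 *		.proto = IPPROTO_TCP, .proto_mask = 0xff,
 *	};
 *
 *	t3_config_trace_filter(adapter, &tp, 0, 0, 1);
 */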
2984 
2985 /**
2986  *	t3_config_sched - configure a HW traffic scheduler
2987  *	@adap: the adapter
2988  *	@kbps: target rate in Kbps
2989  *	@sched: the scheduler index
2990  *
2991  *	Configure a HW scheduler for the target rate
2992  */
2993 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2994 {
2995 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2996 	unsigned int clk = adap->params.vpd.cclk * 1000;
2997 	unsigned int selected_cpt = 0, selected_bpt = 0;
2998 
2999 	if (kbps > 0) {
3000 		kbps *= 125;	/* -> bytes */
3001 		for (cpt = 1; cpt <= 255; cpt++) {
3002 			tps = clk / cpt;
3003 			bpt = (kbps + tps / 2) / tps;
3004 			if (bpt > 0 && bpt <= 255) {
3005 				v = bpt * tps;
3006 				delta = v >= kbps ? v - kbps : kbps - v;
3007 				if (delta <= mindelta) {
3008 					mindelta = delta;
3009 					selected_cpt = cpt;
3010 					selected_bpt = bpt;
3011 				}
3012 			} else if (selected_cpt)
3013 				break;
3014 		}
3015 		if (!selected_cpt)
3016 			return -EINVAL;
3017 	}
3018 	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3019 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3020 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3021 	if (sched & 1)
3022 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3023 	else
3024 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3025 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3026 	return 0;
3027 }
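/*
 * Worked example (clock and rate hypothetical): to cap scheduler 0 at
 * 10 Mbps with a 200 MHz core clock, kbps = 10000 becomes 1250000
 * bytes/s and the search above settles on cpt = 160, bpt = 1, i.e. one
 * byte every 160 clocks = exactly 1250000 bytes/s (delta = 0):
 *
 *	ret = t3_config_sched(adap, 10000, 0);
 */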
3028 
3029 static int tp_init(struct adapter *adap, const struct tp_params *p)
3030 {
3031 	int busy = 0;
3032 
3033 	tp_config(adap, p);
3034 	t3_set_vlan_accel(adap, 3, 0);
3035 
3036 	if (is_offload(adap)) {
3037 		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3038 		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3039 		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3040 				       0, 1000, 5);
3041 		if (busy)
3042 			CH_ERR(adap, "TP initialization timed out\n");
3043 	}
3044 
3045 	if (!busy)
3046 		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3047 	return busy;
3048 }
3049 
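/*
 * Set which ports are active.  @port_mask must be a subset of the ports the
 * adapter actually has; the corresponding PORTnACTIVE bits in A_MPS_CFG are
 * updated accordingly.
 */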
int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
{
	if (port_mask & ~((1 << adap->params.nports) - 1))
		return -EINVAL;
	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
			 port_mask << S_PORT0ACTIVE);
	return 0;
}

/*
 * Perform the bits of HW initialization that are dependent on the number
 * of available ports.
 */
static void init_hw_for_avail_ports(struct adapter *adap, int nports)
{
	int i;

	if (nports == 1) {
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
			     F_PORT0ACTIVE | F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
	} else {
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
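		/*
		 * Program the 16 entries of the TX modulation queue table:
		 * the entry index goes in the upper half-word, and 0x1010
		 * appears to assign the same weight to both channels.
		 */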
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}

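/*
 * Calibrate the XGMAC output driver impedance.  XAUI cards use the on-chip
 * auto-calibrator, retried up to 5 times; RGMII cards are simply programmed
 * with fixed pull-up/pull-down values.
 */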
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}

static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}

struct mc7_timing_params {
	unsigned char ActToPreDly;
	unsigned char ActToRdWrDly;
	unsigned char PreCyc;
	unsigned char RefCyc[5];
	unsigned char BkCyc;
	unsigned char WrToRdDly;
	unsigned char RdToWrDly;
};

/*
 * Write a value to a register and check that the write completed.  These
 * writes normally complete in a cycle or two, so one read should suffice.
 * The very first read exists to flush the posted write to the device.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}

static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
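	/*
	 * Mode-register values and timing parameters for the supported
	 * memory speed grades; both tables are indexed by @mem_type.
	 */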
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */
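	/*
	 * mc7_clock now holds clock * 7812.5 / 10^6, i.e. the number of
	 * memory-clock cycles in a 7.8125 us refresh interval (the standard
	 * DDR 64 ms / 8192-row refresh period).
	 */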

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
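	/*
	 * Run a BIST pass that writes pattern 0 across the whole memory so
	 * every location starts out with consistent ECC check bits.
	 */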
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}

static void config_pcie(struct adapter *adap)
{
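	/*
	 * Ack latencies and replay timer limits, indexed by
	 * [log2(link width)][max payload size code], i.e. link widths
	 * x1/x2/x4/x8 and payload sizes 128..4096 bytes.
	 */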
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}

/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only the
 * top 8 bits are available for use, the rest must be 0.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	init_hw_for_avail_ports(adapter, adapter->params.nports);
	t3_sge_init(adapter, &adapter->params.sge);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}

/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.
 */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	static unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode, pcie_cap;

	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		p->pcie_cap_addr = pcie_cap;
		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
				     &val);
		p->width = (val >> 4) & 0x3f;
		return;
	}

	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}

/**
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: the link's capabilities, a bitmap of SUPPORTED_* values
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/duplex/flow-control/autonegotiation
 *	settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = lc->speed = SPEED_INVALID;
	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising = lc->supported;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

/**
 *	mc7_calc_size - calculate MC7 memory size
 *	@cfg: the MC7 configuration
 *
 *	Calculates the size of an MC7 memory in bytes from the value of its
 *	configuration register.
 */
static unsigned int mc7_calc_size(u32 cfg)
{
	unsigned int width = G_WIDTH(cfg);
	unsigned int banks = !!(cfg & F_BKS) + 1;
	unsigned int org = !!(cfg & F_ORG) + 1;
	unsigned int density = G_DEN(cfg);
	unsigned int MBs = ((256 << density) * banks) / (org << width);
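	/*
	 * For example, density 1 with two banks, org 1, and width code 0
	 * yields ((256 << 1) * 2) / 1 = 1024 MB.
	 */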

	return MBs << 20;
}

static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
		     unsigned int base_addr, const char *name)
{
	u32 cfg;

	mc7->adapter = adapter;
	mc7->name = name;
	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
	mc7->width = G_WIDTH(cfg);
}


void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	mac->adapter = adapter;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}

void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}

/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't.
 */
int t3_reset_adapter(struct adapter *adapter)
{
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Give the device some time to reset fully.
	 * XXX the fixed 10 x 50 ms polling loop below is somewhat arbitrary.
	 */
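	/* Poll the vendor ID (0x1425 is Chelsio's) until config space responds. */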
	for (i = 0; i < 10; i++) {
		msleep(50);
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}

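/*
 * Initialize the parity of various on-chip memories by writing benign values
 * through their debug interfaces: unused SGE egress and response-queue
 * contexts are cleared and each CIM IBQ location is written once, so that
 * later reads do not raise spurious parity errors.
 */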
static int init_parity(struct adapter *adap)
{
	int i, err, addr;

	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	for (err = i = 0; !err && i < 16; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0xfff0; !err && i <= 0xffff; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0; !err && i < SGE_QSETS; i++)
		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
	if (err)
		return err;

	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
	for (i = 0; i < 4; i++)
		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
				     F_IBQDBGWR | V_IBQDBGQID(i) |
				     V_IBQDBGADDR(addr));
			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
					      F_IBQDBGBUSY, 0, 2, 1);
			if (err)
				return err;
		}
	return 0;
}

/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports;
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	adapter->params.linkpoll_period = 0;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = ai->nports;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

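		/* advance j to the next VPD entry with a non-zero port type */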
		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		if (!pti->phy_prep) {
			CH_ALERT(adapter, "Invalid port type index %d\n",
				 adapter->params.vpd.port_type[j]);
			return -EINVAL;
		}

		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr, ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr, ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		p->phy.ops->power_down(&p->phy, 1);
		if (!(p->phy.caps & SUPPORTED_IRQ))
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}

void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}

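/*
 * Re-prepare the adapter after a reset that preserved the SW state (e.g. a
 * PCI error recovery): redo the early HW and parity initialization and
 * re-prepare each port's PHY, reusing the parameters established by
 * t3_prep_adapter().
 */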
int t3_replay_prep_adapter(struct adapter *adapter)
{
	const struct adapter_info *ai = adapter->params.info;
	unsigned int i, j = -1;
	int ret;

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
		if (ret)
			return ret;
		p->phy.ops->power_down(&p->phy, 1);
	}

	return 0;
}