1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17 
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52 
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
57 #include "bnx2x.h"
58 #include "bnx2x_init.h"
59 
60 #define DRV_MODULE_VERSION	"1.45.27"
61 #define DRV_MODULE_RELDATE	"2009/01/26"
62 #define BNX2X_BC_VER		0x040200
63 
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT		(5*HZ)
66 
67 static char version[] __devinitdata =
68 	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69 	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
75 
76 static int disable_tpa;
77 static int use_inta;
78 static int poll;
79 static int debug;
80 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
81 static int use_multi;
82 
83 module_param(disable_tpa, int, 0);
84 module_param(use_inta, int, 0);
85 module_param(poll, int, 0);
86 module_param(debug, int, 0);
87 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
88 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
89 MODULE_PARM_DESC(poll, "use polling (for debug)");
90 MODULE_PARM_DESC(debug, "default debug msglevel");
91 
92 #ifdef BNX2X_MULTI
93 module_param(use_multi, int, 0);
94 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
95 #endif
96 static struct workqueue_struct *bnx2x_wq;
97 
98 enum bnx2x_board_type {
99 	BCM57710 = 0,
100 	BCM57711 = 1,
101 	BCM57711E = 2,
102 };
103 
104 /* indexed by board_type, above */
105 static struct {
106 	char *name;
107 } board_info[] __devinitdata = {
108 	{ "Broadcom NetXtreme II BCM57710 XGb" },
109 	{ "Broadcom NetXtreme II BCM57711 XGb" },
110 	{ "Broadcom NetXtreme II BCM57711E XGb" }
111 };
112 
113 
114 static const struct pci_device_id bnx2x_pci_tbl[] = {
115 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116 		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
117 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118 		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120 		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
121 	{ 0 }
122 };
123 
124 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
125 
126 /****************************************************************************
127 * General service functions
128 ****************************************************************************/
129 
130 /* used only at init
131  * locking is done by mcp
132  */
133 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
134 {
135 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136 	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138 			       PCICFG_VENDOR_ID_OFFSET);
139 }
140 
141 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
142 {
143 	u32 val;
144 
145 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146 	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148 			       PCICFG_VENDOR_ID_OFFSET);
149 
150 	return val;
151 }
152 
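/* "go" registers for the 16 DMAE command channels, indexed by channel number */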
153 static const u32 dmae_reg_go_c[] = {
154 	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
158 };
159 
160 /* copy command into DMAE command memory and set DMAE command go */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
162 			    int idx)
163 {
164 	u32 cmd_offset;
165 	int i;
166 
167 	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
170 
171 		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
173 	}
174 	REG_WR(bp, dmae_reg_go_c[idx], 1);
175 }
176 
177 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
178 		      u32 len32)
179 {
180 	struct dmae_command *dmae = &bp->init_dmae;
181 	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
182 	int cnt = 200;
183 
184 	if (!bp->dmae_ready) {
185 		u32 *data = bnx2x_sp(bp, wb_data[0]);
186 
187 		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
188 		   "  using indirect\n", dst_addr, len32);
189 		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
190 		return;
191 	}
192 
193 	mutex_lock(&bp->dmae_mutex);
194 
195 	memset(dmae, 0, sizeof(struct dmae_command));
196 
197 	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198 			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199 			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
200 #ifdef __BIG_ENDIAN
201 			DMAE_CMD_ENDIANITY_B_DW_SWAP |
202 #else
203 			DMAE_CMD_ENDIANITY_DW_SWAP |
204 #endif
205 			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206 			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
207 	dmae->src_addr_lo = U64_LO(dma_addr);
208 	dmae->src_addr_hi = U64_HI(dma_addr);
209 	dmae->dst_addr_lo = dst_addr >> 2;
210 	dmae->dst_addr_hi = 0;
211 	dmae->len = len32;
212 	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213 	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
214 	dmae->comp_val = DMAE_COMP_VAL;
215 
216 	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
217 	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
218 		    "dst_addr [%x:%08x (%08x)]\n"
219 	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
220 	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221 	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222 	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
223 	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
224 	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225 	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
226 
227 	*wb_comp = 0;
228 
229 	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
230 
231 	udelay(5);
232 
233 	while (*wb_comp != DMAE_COMP_VAL) {
234 		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
235 
236 		if (!cnt) {
237 			BNX2X_ERR("dmae timeout!\n");
238 			break;
239 		}
240 		cnt--;
241 		/* adjust delay for emulation/FPGA */
242 		if (CHIP_REV_IS_SLOW(bp))
243 			msleep(100);
244 		else
245 			udelay(5);
246 	}
247 
248 	mutex_unlock(&bp->dmae_mutex);
249 }
250 
251 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
252 {
253 	struct dmae_command *dmae = &bp->init_dmae;
254 	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
255 	int cnt = 200;
256 
257 	if (!bp->dmae_ready) {
258 		u32 *data = bnx2x_sp(bp, wb_data[0]);
259 		int i;
260 
261 		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
262 		   "  using indirect\n", src_addr, len32);
263 		for (i = 0; i < len32; i++)
264 			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
265 		return;
266 	}
267 
268 	mutex_lock(&bp->dmae_mutex);
269 
270 	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271 	memset(dmae, 0, sizeof(struct dmae_command));
272 
273 	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274 			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275 			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
276 #ifdef __BIG_ENDIAN
277 			DMAE_CMD_ENDIANITY_B_DW_SWAP |
278 #else
279 			DMAE_CMD_ENDIANITY_DW_SWAP |
280 #endif
281 			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282 			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
283 	dmae->src_addr_lo = src_addr >> 2;
284 	dmae->src_addr_hi = 0;
285 	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286 	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
287 	dmae->len = len32;
288 	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289 	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
290 	dmae->comp_val = DMAE_COMP_VAL;
291 
292 	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
293 	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
294 		    "dst_addr [%x:%08x (%08x)]\n"
295 	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
296 	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297 	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298 	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
299 
300 	*wb_comp = 0;
301 
302 	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
303 
304 	udelay(5);
305 
306 	while (*wb_comp != DMAE_COMP_VAL) {
307 
308 		if (!cnt) {
309 			BNX2X_ERR("dmae timeout!\n");
310 			break;
311 		}
312 		cnt--;
313 		/* adjust delay for emulation/FPGA */
314 		if (CHIP_REV_IS_SLOW(bp))
315 			msleep(100);
316 		else
317 			udelay(5);
318 	}
319 	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
320 	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321 	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
322 
323 	mutex_unlock(&bp->dmae_mutex);
324 }
325 
326 /* used only for slowpath so not inlined */
327 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
328 {
329 	u32 wb_write[2];
330 
331 	wb_write[0] = val_hi;
332 	wb_write[1] = val_lo;
333 	REG_WR_DMAE(bp, reg, wb_write, 2);
334 }
335 
336 #ifdef USE_WB_RD
337 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
338 {
339 	u32 wb_data[2];
340 
341 	REG_RD_DMAE(bp, reg, wb_data, 2);
342 
343 	return HILO_U64(wb_data[0], wb_data[1]);
344 }
345 #endif
346 
347 static int bnx2x_mc_assert(struct bnx2x *bp)
348 {
349 	char last_idx;
350 	int i, rc = 0;
351 	u32 row0, row1, row2, row3;
352 
353 	/* XSTORM */
354 	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355 			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
356 	if (last_idx)
357 		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
358 
359 	/* print the asserts */
360 	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
361 
362 		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363 			      XSTORM_ASSERT_LIST_OFFSET(i));
364 		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366 		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368 		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);
370 
371 		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372 			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373 				  " 0x%08x 0x%08x 0x%08x\n",
374 				  i, row3, row2, row1, row0);
375 			rc++;
376 		} else {
377 			break;
378 		}
379 	}
380 
381 	/* TSTORM */
382 	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383 			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
384 	if (last_idx)
385 		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
386 
387 	/* print the asserts */
388 	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
389 
390 		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391 			      TSTORM_ASSERT_LIST_OFFSET(i));
392 		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394 		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396 		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);
398 
399 		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400 			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401 				  " 0x%08x 0x%08x 0x%08x\n",
402 				  i, row3, row2, row1, row0);
403 			rc++;
404 		} else {
405 			break;
406 		}
407 	}
408 
409 	/* CSTORM */
410 	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411 			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
412 	if (last_idx)
413 		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
414 
415 	/* print the asserts */
416 	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
417 
418 		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419 			      CSTORM_ASSERT_LIST_OFFSET(i));
420 		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422 		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424 		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);
426 
427 		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428 			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429 				  " 0x%08x 0x%08x 0x%08x\n",
430 				  i, row3, row2, row1, row0);
431 			rc++;
432 		} else {
433 			break;
434 		}
435 	}
436 
437 	/* USTORM */
438 	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439 			   USTORM_ASSERT_LIST_INDEX_OFFSET);
440 	if (last_idx)
441 		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
442 
443 	/* print the asserts */
444 	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
445 
446 		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447 			      USTORM_ASSERT_LIST_OFFSET(i));
448 		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
450 		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
452 		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 			      USTORM_ASSERT_LIST_OFFSET(i) + 12);
454 
455 		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456 			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457 				  " 0x%08x 0x%08x 0x%08x\n",
458 				  i, row3, row2, row1, row0);
459 			rc++;
460 		} else {
461 			break;
462 		}
463 	}
464 
465 	return rc;
466 }
467 
468 static void bnx2x_fw_dump(struct bnx2x *bp)
469 {
470 	u32 mark, offset;
471 	u32 data[9];
472 	int word;
473 
474 	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
475 	mark = ((mark + 0x3) & ~0x3);
476 	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
477 
478 	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479 		for (word = 0; word < 8; word++)
480 			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
481 						  offset + 4*word));
482 		data[8] = 0x0;
483 		printk(KERN_CONT "%s", (char *)data);
484 	}
485 	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486 		for (word = 0; word < 8; word++)
487 			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
488 						  offset + 4*word));
489 		data[8] = 0x0;
490 		printk(KERN_CONT "%s", (char *)data);
491 	}
492 	printk("\n" KERN_ERR PFX "end of fw dump\n");
493 }
494 
495 static void bnx2x_panic_dump(struct bnx2x *bp)
496 {
497 	int i;
498 	u16 j, start, end;
499 
500 	bp->stats_state = STATS_STATE_DISABLED;
501 	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
502 
503 	BNX2X_ERR("begin crash dump -----------------\n");
504 
505 	for_each_queue(bp, i) {
506 		struct bnx2x_fastpath *fp = &bp->fp[i];
507 		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
508 
509 		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
510 			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
511 			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
512 			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
513 		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
514 			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
515 			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
516 			  fp->rx_bd_prod, fp->rx_bd_cons,
517 			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518 			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519 		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
520 			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
521 			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
522 			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523 			  fp->status_blk->c_status_block.status_block_index,
524 			  fp->fp_u_idx,
525 			  fp->status_blk->u_status_block.status_block_index,
526 			  hw_prods->packets_prod, hw_prods->bds_prod);
527 
528 		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530 		for (j = start; j < end; j++) {
531 			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
532 
533 			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534 				  sw_bd->skb, sw_bd->first_bd);
535 		}
536 
537 		start = TX_BD(fp->tx_bd_cons - 10);
538 		end = TX_BD(fp->tx_bd_cons + 254);
539 		for (j = start; j < end; j++) {
540 			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
541 
542 			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543 				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
544 		}
545 
546 		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547 		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548 		for (j = start; j < end; j++) {
549 			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550 			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
551 
552 			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
553 				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
554 		}
555 
556 		start = RX_SGE(fp->rx_sge_prod);
557 		end = RX_SGE(fp->last_max_sge);
558 		for (j = start; j < end; j++) {
559 			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
561 
562 			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
563 				  j, rx_sge[1], rx_sge[0], sw_page->page);
564 		}
565 
566 		start = RCQ_BD(fp->rx_comp_cons - 10);
567 		end = RCQ_BD(fp->rx_comp_cons + 503);
568 		for (j = start; j < end; j++) {
569 			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
570 
571 			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572 				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
573 		}
574 	}
575 
576 	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
577 		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
578 		  "  spq_prod_idx(%u)\n",
579 		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
580 		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
581 
582 	bnx2x_fw_dump(bp);
583 	bnx2x_mc_assert(bp);
584 	BNX2X_ERR("end crash dump -----------------\n");
585 }
586 
587 static void bnx2x_int_enable(struct bnx2x *bp)
588 {
589 	int port = BP_PORT(bp);
590 	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591 	u32 val = REG_RD(bp, addr);
592 	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
593 
594 	if (msix) {
595 		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
596 		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
597 			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
598 	} else {
599 		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
600 			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
601 			HC_CONFIG_0_REG_INT_LINE_EN_0 |
602 			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
603 
604 		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
605 		   val, port, addr, msix);
606 
607 		REG_WR(bp, addr, val);
608 
609 		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
610 	}
611 
612 	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
613 	   val, port, addr, msix);
614 
615 	REG_WR(bp, addr, val);
616 
617 	if (CHIP_IS_E1H(bp)) {
618 		/* init leading/trailing edge */
619 		if (IS_E1HMF(bp)) {
620 			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
621 			if (bp->port.pmf)
622 				/* enable nig attention */
623 				val |= 0x0100;
624 		} else
625 			val = 0xffff;
626 
627 		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
628 		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
629 	}
630 }
631 
632 static void bnx2x_int_disable(struct bnx2x *bp)
633 {
634 	int port = BP_PORT(bp);
635 	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
636 	u32 val = REG_RD(bp, addr);
637 
638 	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
639 		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
640 		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
641 		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
642 
643 	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
644 	   val, port, addr);
645 
646 	REG_WR(bp, addr, val);
647 	if (REG_RD(bp, addr) != val)
648 		BNX2X_ERR("BUG! proper val not read from IGU!\n");
649 }
650 
651 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
652 {
653 	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
654 	int i;
655 
656 	/* disable interrupt handling */
657 	atomic_inc(&bp->intr_sem);
658 	if (disable_hw)
659 		/* prevent the HW from sending interrupts */
660 		bnx2x_int_disable(bp);
661 
662 	/* make sure all ISRs are done */
663 	if (msix) {
664 		for_each_queue(bp, i)
665 			synchronize_irq(bp->msix_table[i].vector);
666 
667 		/* one more for the Slow Path IRQ */
668 		synchronize_irq(bp->msix_table[i].vector);
669 	} else
670 		synchronize_irq(bp->pdev->irq);
671 
672 	/* make sure sp_task is not running */
673 	cancel_delayed_work(&bp->sp_task);
674 	flush_workqueue(bnx2x_wq);
675 }
676 
677 /* fast path */
678 
679 /*
680  * General service functions
681  */
682 
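/* acknowledge a status block to the IGU: report the new index for the given
 * storm and set the requested interrupt mode (enable/disable)
 */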
683 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
684 				u8 storm, u16 index, u8 op, u8 update)
685 {
686 	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687 		       COMMAND_REG_INT_ACK);
688 	struct igu_ack_register igu_ack;
689 
690 	igu_ack.status_block_index = index;
691 	igu_ack.sb_id_and_flags =
692 			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
693 			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694 			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695 			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
696 
697 	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698 	   (*(u32 *)&igu_ack), hc_addr);
699 	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
700 }
701 
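/* refresh the cached fastpath status block indices; returns a bitmask of which
 * indices changed (bit 0 - CSTORM, bit 1 - USTORM)
 */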
702 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
703 {
704 	struct host_status_block *fpsb = fp->status_blk;
705 	u16 rc = 0;
706 
707 	barrier(); /* status block is written to by the chip */
708 	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709 		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
710 		rc |= 1;
711 	}
712 	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713 		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
714 		rc |= 2;
715 	}
716 	return rc;
717 }
718 
719 static u16 bnx2x_ack_int(struct bnx2x *bp)
720 {
721 	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722 		       COMMAND_REG_SIMD_MASK);
723 	u32 result = REG_RD(bp, hc_addr);
724 
725 	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
726 	   result, hc_addr);
727 
728 	return result;
729 }
730 
731 
732 /*
733  * fast path service functions
734  */
735 
736 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
737 {
738 	u16 tx_cons_sb;
739 
740 	/* Tell compiler that status block fields can change */
741 	barrier();
742 	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
743 	return (fp->tx_pkt_cons != tx_cons_sb);
744 }
745 
746 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
747 {
748 	/* Tell compiler that consumer and producer can change */
749 	barrier();
750 	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
751 
752 }
753 
754 /* free skb in the packet ring at pos idx
755  * return idx of last bd freed
756  */
757 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
758 			     u16 idx)
759 {
760 	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
761 	struct eth_tx_bd *tx_bd;
762 	struct sk_buff *skb = tx_buf->skb;
763 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
764 	int nbd;
765 
766 	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
767 	   idx, tx_buf, skb);
768 
769 	/* unmap first bd */
770 	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
771 	tx_bd = &fp->tx_desc_ring[bd_idx];
772 	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
773 			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
774 
775 	nbd = le16_to_cpu(tx_bd->nbd) - 1;
776 	new_cons = nbd + tx_buf->first_bd;
777 #ifdef BNX2X_STOP_ON_ERROR
778 	if (nbd > (MAX_SKB_FRAGS + 2)) {
779 		BNX2X_ERR("BAD nbd!\n");
780 		bnx2x_panic();
781 	}
782 #endif
783 
784 	/* Skip a parse bd and the TSO split header bd
785 	   since they have no mapping */
786 	if (nbd)
787 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
788 
789 	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
790 					   ETH_TX_BD_FLAGS_TCP_CSUM |
791 					   ETH_TX_BD_FLAGS_SW_LSO)) {
792 		if (--nbd)
793 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
794 		tx_bd = &fp->tx_desc_ring[bd_idx];
795 		/* is this a TSO split header bd? */
796 		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
797 			if (--nbd)
798 				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
799 		}
800 	}
801 
802 	/* now free frags */
803 	while (nbd > 0) {
804 
805 		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
806 		tx_bd = &fp->tx_desc_ring[bd_idx];
807 		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
808 			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
809 		if (--nbd)
810 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
811 	}
812 
813 	/* release skb */
814 	WARN_ON(!skb);
815 	dev_kfree_skb(skb);
816 	tx_buf->first_bd = 0;
817 	tx_buf->skb = NULL;
818 
819 	return new_cons;
820 }
821 
822 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
823 {
824 	s16 used;
825 	u16 prod;
826 	u16 cons;
827 
828 	barrier(); /* Tell compiler that prod and cons can change */
829 	prod = fp->tx_bd_prod;
830 	cons = fp->tx_bd_cons;
831 
832 	/* NUM_TX_RINGS = number of "next-page" entries
833 	   It will be used as a threshold */
834 	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
835 
836 #ifdef BNX2X_STOP_ON_ERROR
837 	WARN_ON(used < 0);
838 	WARN_ON(used > fp->bp->tx_ring_size);
839 	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
840 #endif
841 
842 	return (s16)(fp->bp->tx_ring_size) - used;
843 }
844 
845 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
846 {
847 	struct bnx2x *bp = fp->bp;
848 	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
849 	int done = 0;
850 
851 #ifdef BNX2X_STOP_ON_ERROR
852 	if (unlikely(bp->panic))
853 		return;
854 #endif
855 
856 	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
857 	sw_cons = fp->tx_pkt_cons;
858 
859 	while (sw_cons != hw_cons) {
860 		u16 pkt_cons;
861 
862 		pkt_cons = TX_BD(sw_cons);
863 
864 		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
865 
866 		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
867 		   hw_cons, sw_cons, pkt_cons);
868 
869 /*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
870 			rmb();
871 			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
872 		}
873 */
874 		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
875 		sw_cons++;
876 		done++;
877 
878 		if (done == work)
879 			break;
880 	}
881 
882 	fp->tx_pkt_cons = sw_cons;
883 	fp->tx_bd_cons = bd_cons;
884 
885 	/* Need to make the tx_cons update visible to start_xmit()
886 	 * before checking for netif_queue_stopped().  Without the
887 	 * memory barrier, there is a small possibility that start_xmit()
888 	 * will miss it and cause the queue to be stopped forever.
889 	 */
890 	smp_mb();
891 
892 	/* TBD need a thresh? */
893 	if (unlikely(netif_queue_stopped(bp->dev))) {
894 
895 		netif_tx_lock(bp->dev);
896 
897 		if (netif_queue_stopped(bp->dev) &&
898 		    (bp->state == BNX2X_STATE_OPEN) &&
899 		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
900 			netif_wake_queue(bp->dev);
901 
902 		netif_tx_unlock(bp->dev);
903 	}
904 }
905 
906 
907 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
908 			   union eth_rx_cqe *rr_cqe)
909 {
910 	struct bnx2x *bp = fp->bp;
911 	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
912 	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
913 
914 	DP(BNX2X_MSG_SP,
915 	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
916 	   FP_IDX(fp), cid, command, bp->state,
917 	   rr_cqe->ramrod_cqe.ramrod_type);
918 
919 	bp->spq_left++;
920 
921 	if (FP_IDX(fp)) {
922 		switch (command | fp->state) {
923 		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
924 						BNX2X_FP_STATE_OPENING):
925 			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
926 			   cid);
927 			fp->state = BNX2X_FP_STATE_OPEN;
928 			break;
929 
930 		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
931 			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
932 			   cid);
933 			fp->state = BNX2X_FP_STATE_HALTED;
934 			break;
935 
936 		default:
937 			BNX2X_ERR("unexpected MC reply (%d)  "
938 				  "fp->state is %x\n", command, fp->state);
939 			break;
940 		}
941 		mb(); /* force bnx2x_wait_ramrod() to see the change */
942 		return;
943 	}
944 
945 	switch (command | bp->state) {
946 	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
947 		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
948 		bp->state = BNX2X_STATE_OPEN;
949 		break;
950 
951 	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
952 		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
953 		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
954 		fp->state = BNX2X_FP_STATE_HALTED;
955 		break;
956 
957 	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
958 		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
959 		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
960 		break;
961 
962 
963 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
964 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
965 		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
966 		bp->set_mac_pending = 0;
967 		break;
968 
969 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
970 		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
971 		break;
972 
973 	default:
974 		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
975 			  command, bp->state);
976 		break;
977 	}
978 	mb(); /* force bnx2x_wait_ramrod() to see the change */
979 }
980 
981 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
982 				     struct bnx2x_fastpath *fp, u16 index)
983 {
984 	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
985 	struct page *page = sw_buf->page;
986 	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
987 
988 	/* Skip "next page" elements */
989 	if (!page)
990 		return;
991 
992 	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
993 		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
994 	__free_pages(page, PAGES_PER_SGE_SHIFT);
995 
996 	sw_buf->page = NULL;
997 	sge->addr_hi = 0;
998 	sge->addr_lo = 0;
999 }
1000 
1001 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1002 					   struct bnx2x_fastpath *fp, int last)
1003 {
1004 	int i;
1005 
1006 	for (i = 0; i < last; i++)
1007 		bnx2x_free_rx_sge(bp, fp, i);
1008 }
1009 
1010 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1011 				     struct bnx2x_fastpath *fp, u16 index)
1012 {
1013 	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1014 	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1015 	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1016 	dma_addr_t mapping;
1017 
1018 	if (unlikely(page == NULL))
1019 		return -ENOMEM;
1020 
1021 	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1022 			       PCI_DMA_FROMDEVICE);
1023 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1024 		__free_pages(page, PAGES_PER_SGE_SHIFT);
1025 		return -ENOMEM;
1026 	}
1027 
1028 	sw_buf->page = page;
1029 	pci_unmap_addr_set(sw_buf, mapping, mapping);
1030 
1031 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1032 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1033 
1034 	return 0;
1035 }
1036 
1037 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1038 				     struct bnx2x_fastpath *fp, u16 index)
1039 {
1040 	struct sk_buff *skb;
1041 	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1042 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1043 	dma_addr_t mapping;
1044 
1045 	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1046 	if (unlikely(skb == NULL))
1047 		return -ENOMEM;
1048 
1049 	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1050 				 PCI_DMA_FROMDEVICE);
1051 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1052 		dev_kfree_skb(skb);
1053 		return -ENOMEM;
1054 	}
1055 
1056 	rx_buf->skb = skb;
1057 	pci_unmap_addr_set(rx_buf, mapping, mapping);
1058 
1059 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1060 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1061 
1062 	return 0;
1063 }
1064 
1065 /* note that we are not allocating a new skb,
1066  * we are just moving one from cons to prod
1067  * we are not creating a new mapping,
1068  * so there is no need to check for dma_mapping_error().
1069  */
1070 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1071 			       struct sk_buff *skb, u16 cons, u16 prod)
1072 {
1073 	struct bnx2x *bp = fp->bp;
1074 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1075 	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1076 	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1077 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1078 
1079 	pci_dma_sync_single_for_device(bp->pdev,
1080 				       pci_unmap_addr(cons_rx_buf, mapping),
1081 				       bp->rx_offset + RX_COPY_THRESH,
1082 				       PCI_DMA_FROMDEVICE);
1083 
1084 	prod_rx_buf->skb = cons_rx_buf->skb;
1085 	pci_unmap_addr_set(prod_rx_buf, mapping,
1086 			   pci_unmap_addr(cons_rx_buf, mapping));
1087 	*prod_bd = *cons_bd;
1088 }
1089 
1090 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1091 					     u16 idx)
1092 {
1093 	u16 last_max = fp->last_max_sge;
1094 
1095 	if (SUB_S16(idx, last_max) > 0)
1096 		fp->last_max_sge = idx;
1097 }
1098 
1099 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1100 {
1101 	int i, j;
1102 
1103 	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1104 		int idx = RX_SGE_CNT * i - 1;
1105 
1106 		for (j = 0; j < 2; j++) {
1107 			SGE_MASK_CLEAR_BIT(fp, idx);
1108 			idx--;
1109 		}
1110 	}
1111 }
1112 
1113 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1114 				  struct eth_fast_path_rx_cqe *fp_cqe)
1115 {
1116 	struct bnx2x *bp = fp->bp;
1117 	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1118 				     le16_to_cpu(fp_cqe->len_on_bd)) >>
1119 		      SGE_PAGE_SHIFT;
1120 	u16 last_max, last_elem, first_elem;
1121 	u16 delta = 0;
1122 	u16 i;
1123 
1124 	if (!sge_len)
1125 		return;
1126 
1127 	/* First mark all used pages */
1128 	for (i = 0; i < sge_len; i++)
1129 		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1130 
1131 	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1132 	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1133 
1134 	/* Here we assume that the last SGE index is the biggest */
1135 	prefetch((void *)(fp->sge_mask));
1136 	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1137 
1138 	last_max = RX_SGE(fp->last_max_sge);
1139 	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1140 	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1141 
1142 	/* If ring is not full */
1143 	if (last_elem + 1 != first_elem)
1144 		last_elem++;
1145 
1146 	/* Now update the prod */
1147 	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1148 		if (likely(fp->sge_mask[i]))
1149 			break;
1150 
1151 		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1152 		delta += RX_SGE_MASK_ELEM_SZ;
1153 	}
1154 
1155 	if (delta > 0) {
1156 		fp->rx_sge_prod += delta;
1157 		/* clear page-end entries */
1158 		bnx2x_clear_sge_mask_next_elems(fp);
1159 	}
1160 
1161 	DP(NETIF_MSG_RX_STATUS,
1162 	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1163 	   fp->last_max_sge, fp->rx_sge_prod);
1164 }
1165 
1166 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1167 {
1168 	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1169 	memset(fp->sge_mask, 0xff,
1170 	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1171 
1172 	/* Clear the two last indices in the page to 1:
1173 	   these are the indices that correspond to the "next" element,
1174 	   hence will never be indicated and should be removed from
1175 	   the calculations. */
1176 	bnx2x_clear_sge_mask_next_elems(fp);
1177 }
1178 
1179 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1180 			    struct sk_buff *skb, u16 cons, u16 prod)
1181 {
1182 	struct bnx2x *bp = fp->bp;
1183 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1184 	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1185 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1186 	dma_addr_t mapping;
1187 
1188 	/* move empty skb from pool to prod and map it */
1189 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1190 	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1191 				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1192 	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1193 
1194 	/* move partial skb from cons to pool (don't unmap yet) */
1195 	fp->tpa_pool[queue] = *cons_rx_buf;
1196 
1197 	/* mark bin state as start - print error if current state != stop */
1198 	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1199 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1200 
1201 	fp->tpa_state[queue] = BNX2X_TPA_START;
1202 
1203 	/* point prod_bd to new skb */
1204 	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1205 	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1206 
1207 #ifdef BNX2X_STOP_ON_ERROR
1208 	fp->tpa_queue_used |= (1 << queue);
1209 #ifdef __powerpc64__
1210 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1211 #else
1212 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1213 #endif
1214 	   fp->tpa_queue_used);
1215 #endif
1216 }
1217 
1218 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1219 			       struct sk_buff *skb,
1220 			       struct eth_fast_path_rx_cqe *fp_cqe,
1221 			       u16 cqe_idx)
1222 {
1223 	struct sw_rx_page *rx_pg, old_rx_pg;
1224 	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1225 	u32 i, frag_len, frag_size, pages;
1226 	int err;
1227 	int j;
1228 
1229 	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1230 	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1231 
1232 	/* This is needed in order to enable forwarding support */
1233 	if (frag_size)
1234 		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1235 					       max(frag_size, (u32)len_on_bd));
1236 
1237 #ifdef BNX2X_STOP_ON_ERROR
1238 	if (pages >
1239 	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1240 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1241 			  pages, cqe_idx);
1242 		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1243 			  fp_cqe->pkt_len, len_on_bd);
1244 		bnx2x_panic();
1245 		return -EINVAL;
1246 	}
1247 #endif
1248 
1249 	/* Run through the SGL and compose the fragmented skb */
1250 	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1251 		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1252 
1253 		/* FW gives the indices of the SGE as if the ring is an array
1254 		   (meaning that "next" element will consume 2 indices) */
1255 		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1256 		rx_pg = &fp->rx_page_ring[sge_idx];
1257 		old_rx_pg = *rx_pg;
1258 
1259 		/* If we fail to allocate a substitute page, we simply stop
1260 		   where we are and drop the whole packet */
1261 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1262 		if (unlikely(err)) {
1263 			bp->eth_stats.rx_skb_alloc_failed++;
1264 			return err;
1265 		}
1266 
1267 		/* Unmap the page as we are going to pass it to the stack */
1268 		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1269 			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1270 
1271 		/* Add one frag and update the appropriate fields in the skb */
1272 		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1273 
1274 		skb->data_len += frag_len;
1275 		skb->truesize += frag_len;
1276 		skb->len += frag_len;
1277 
1278 		frag_size -= frag_len;
1279 	}
1280 
1281 	return 0;
1282 }
1283 
1284 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1285 			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1286 			   u16 cqe_idx)
1287 {
1288 	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1289 	struct sk_buff *skb = rx_buf->skb;
1290 	/* alloc new skb */
1291 	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1292 
1293 	/* Unmap skb in the pool anyway, as we are going to change
1294 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
1295 	   fails. */
1296 	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1297 			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1298 
1299 	if (likely(new_skb)) {
1300 		/* fix ip xsum and give it to the stack */
1301 		/* (no need to map the new skb) */
1302 #ifdef BCM_VLAN
1303 		int is_vlan_cqe =
1304 			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1305 			 PARSING_FLAGS_VLAN);
1306 		int is_not_hwaccel_vlan_cqe =
1307 			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1308 #endif
1309 
1310 		prefetch(skb);
1311 		prefetch(((char *)(skb)) + 128);
1312 
1313 #ifdef BNX2X_STOP_ON_ERROR
1314 		if (pad + len > bp->rx_buf_size) {
1315 			BNX2X_ERR("skb_put is about to fail...  "
1316 				  "pad %d  len %d  rx_buf_size %d\n",
1317 				  pad, len, bp->rx_buf_size);
1318 			bnx2x_panic();
1319 			return;
1320 		}
1321 #endif
1322 
1323 		skb_reserve(skb, pad);
1324 		skb_put(skb, len);
1325 
1326 		skb->protocol = eth_type_trans(skb, bp->dev);
1327 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1328 
1329 		{
1330 			struct iphdr *iph;
1331 
1332 			iph = (struct iphdr *)skb->data;
1333 #ifdef BCM_VLAN
1334 			/* If there is no Rx VLAN offloading -
1335 			   take the VLAN tag into account */
1336 			if (unlikely(is_not_hwaccel_vlan_cqe))
1337 				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1338 #endif
1339 			iph->check = 0;
1340 			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1341 		}
1342 
1343 		if (!bnx2x_fill_frag_skb(bp, fp, skb,
1344 					 &cqe->fast_path_cqe, cqe_idx)) {
1345 #ifdef BCM_VLAN
1346 			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1347 			    (!is_not_hwaccel_vlan_cqe))
1348 				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1349 						le16_to_cpu(cqe->fast_path_cqe.
1350 							    vlan_tag));
1351 			else
1352 #endif
1353 				netif_receive_skb(skb);
1354 		} else {
1355 			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1356 			   " - dropping packet!\n");
1357 			dev_kfree_skb(skb);
1358 		}
1359 
1360 
1361 		/* put new skb in bin */
1362 		fp->tpa_pool[queue].skb = new_skb;
1363 
1364 	} else {
1365 		/* else drop the packet and keep the buffer in the bin */
1366 		DP(NETIF_MSG_RX_STATUS,
1367 		   "Failed to allocate new skb - dropping packet!\n");
1368 		bp->eth_stats.rx_skb_alloc_failed++;
1369 	}
1370 
1371 	fp->tpa_state[queue] = BNX2X_TPA_STOP;
1372 }
1373 
1374 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1375 					struct bnx2x_fastpath *fp,
1376 					u16 bd_prod, u16 rx_comp_prod,
1377 					u16 rx_sge_prod)
1378 {
1379 	struct tstorm_eth_rx_producers rx_prods = {0};
1380 	int i;
1381 
1382 	/* Update producers */
1383 	rx_prods.bd_prod = bd_prod;
1384 	rx_prods.cqe_prod = rx_comp_prod;
1385 	rx_prods.sge_prod = rx_sge_prod;
1386 
1387 	/*
1388 	 * Make sure that the BD and SGE data is updated before updating the
1389 	 * producers since FW might read the BD/SGE right after the producer
1390 	 * is updated.
1391 	 * This is only applicable for weak-ordered memory model archs such
1392 	 * as IA-64. The following barrier is also mandatory since the FW
1393 	 * assumes BDs must have buffers.
1394 	 */
1395 	wmb();
1396 
1397 	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1398 		REG_WR(bp, BAR_TSTRORM_INTMEM +
1399 		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1400 		       ((u32 *)&rx_prods)[i]);
1401 
1402 	mmiowb(); /* keep prod updates ordered */
1403 
1404 	DP(NETIF_MSG_RX_STATUS,
1405 	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
1406 	   bd_prod, rx_comp_prod, rx_sge_prod);
1407 }
1408 
1409 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1410 {
1411 	struct bnx2x *bp = fp->bp;
1412 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1413 	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1414 	int rx_pkt = 0;
1415 
1416 #ifdef BNX2X_STOP_ON_ERROR
1417 	if (unlikely(bp->panic))
1418 		return 0;
1419 #endif
1420 
1421 	/* CQ "next element" is of the size of the regular element,
1422 	   that's why it's ok here */
1423 	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1424 	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1425 		hw_comp_cons++;
1426 
1427 	bd_cons = fp->rx_bd_cons;
1428 	bd_prod = fp->rx_bd_prod;
1429 	bd_prod_fw = bd_prod;
1430 	sw_comp_cons = fp->rx_comp_cons;
1431 	sw_comp_prod = fp->rx_comp_prod;
1432 
1433 	/* Memory barrier necessary as speculative reads of the rx
1434 	 * buffer can be ahead of the index in the status block
1435 	 */
1436 	rmb();
1437 
1438 	DP(NETIF_MSG_RX_STATUS,
1439 	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1440 	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1441 
1442 	while (sw_comp_cons != hw_comp_cons) {
1443 		struct sw_rx_bd *rx_buf = NULL;
1444 		struct sk_buff *skb;
1445 		union eth_rx_cqe *cqe;
1446 		u8 cqe_fp_flags;
1447 		u16 len, pad;
1448 
1449 		comp_ring_cons = RCQ_BD(sw_comp_cons);
1450 		bd_prod = RX_BD(bd_prod);
1451 		bd_cons = RX_BD(bd_cons);
1452 
1453 		cqe = &fp->rx_comp_ring[comp_ring_cons];
1454 		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1455 
1456 		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1457 		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1458 		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1459 		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1460 		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1461 		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1462 
1463 		/* is this a slowpath msg? */
1464 		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1465 			bnx2x_sp_event(fp, cqe);
1466 			goto next_cqe;
1467 
1468 		/* this is an rx packet */
1469 		} else {
1470 			rx_buf = &fp->rx_buf_ring[bd_cons];
1471 			skb = rx_buf->skb;
1472 			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1473 			pad = cqe->fast_path_cqe.placement_offset;
1474 
1475 			/* If CQE is marked both TPA_START and TPA_END
1476 			   it is a non-TPA CQE */
1477 			if ((!fp->disable_tpa) &&
1478 			    (TPA_TYPE(cqe_fp_flags) !=
1479 					(TPA_TYPE_START | TPA_TYPE_END))) {
1480 				u16 queue = cqe->fast_path_cqe.queue_index;
1481 
1482 				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1483 					DP(NETIF_MSG_RX_STATUS,
1484 					   "calling tpa_start on queue %d\n",
1485 					   queue);
1486 
1487 					bnx2x_tpa_start(fp, queue, skb,
1488 							bd_cons, bd_prod);
1489 					goto next_rx;
1490 				}
1491 
1492 				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1493 					DP(NETIF_MSG_RX_STATUS,
1494 					   "calling tpa_stop on queue %d\n",
1495 					   queue);
1496 
1497 					if (!BNX2X_RX_SUM_FIX(cqe))
1498 						BNX2X_ERR("STOP on non-TCP "
1499 							  "data\n");
1500 
1501 					/* This is a size of the linear data
1502 					   on this skb */
1503 					len = le16_to_cpu(cqe->fast_path_cqe.
1504 								len_on_bd);
1505 					bnx2x_tpa_stop(bp, fp, queue, pad,
1506 						    len, cqe, comp_ring_cons);
1507 #ifdef BNX2X_STOP_ON_ERROR
1508 					if (bp->panic)
1509 						return -EINVAL;
1510 #endif
1511 
1512 					bnx2x_update_sge_prod(fp,
1513 							&cqe->fast_path_cqe);
1514 					goto next_cqe;
1515 				}
1516 			}
1517 
1518 			pci_dma_sync_single_for_device(bp->pdev,
1519 					pci_unmap_addr(rx_buf, mapping),
1520 						       pad + RX_COPY_THRESH,
1521 						       PCI_DMA_FROMDEVICE);
1522 			prefetch(skb);
1523 			prefetch(((char *)(skb)) + 128);
1524 
1525 			/* is this an error packet? */
1526 			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1527 				DP(NETIF_MSG_RX_ERR,
1528 				   "ERROR  flags %x  rx packet %u\n",
1529 				   cqe_fp_flags, sw_comp_cons);
1530 				bp->eth_stats.rx_err_discard_pkt++;
1531 				goto reuse_rx;
1532 			}
1533 
1534 			/* Since we don't have a jumbo ring
1535 			 * copy small packets if mtu > 1500
1536 			 */
1537 			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1538 			    (len <= RX_COPY_THRESH)) {
1539 				struct sk_buff *new_skb;
1540 
1541 				new_skb = netdev_alloc_skb(bp->dev,
1542 							   len + pad);
1543 				if (new_skb == NULL) {
1544 					DP(NETIF_MSG_RX_ERR,
1545 					   "ERROR  packet dropped "
1546 					   "because of alloc failure\n");
1547 					bp->eth_stats.rx_skb_alloc_failed++;
1548 					goto reuse_rx;
1549 				}
1550 
1551 				/* aligned copy */
1552 				skb_copy_from_linear_data_offset(skb, pad,
1553 						    new_skb->data + pad, len);
1554 				skb_reserve(new_skb, pad);
1555 				skb_put(new_skb, len);
1556 
1557 				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1558 
1559 				skb = new_skb;
1560 
1561 			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1562 				pci_unmap_single(bp->pdev,
1563 					pci_unmap_addr(rx_buf, mapping),
1564 						 bp->rx_buf_size,
1565 						 PCI_DMA_FROMDEVICE);
1566 				skb_reserve(skb, pad);
1567 				skb_put(skb, len);
1568 
1569 			} else {
1570 				DP(NETIF_MSG_RX_ERR,
1571 				   "ERROR  packet dropped because "
1572 				   "of alloc failure\n");
1573 				bp->eth_stats.rx_skb_alloc_failed++;
1574 reuse_rx:
1575 				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1576 				goto next_rx;
1577 			}
1578 
1579 			skb->protocol = eth_type_trans(skb, bp->dev);
1580 
1581 			skb->ip_summed = CHECKSUM_NONE;
1582 			if (bp->rx_csum) {
1583 				if (likely(BNX2X_RX_CSUM_OK(cqe)))
1584 					skb->ip_summed = CHECKSUM_UNNECESSARY;
1585 				else
1586 					bp->eth_stats.hw_csum_err++;
1587 			}
1588 		}
1589 
1590 #ifdef BCM_VLAN
1591 		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1592 		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1593 		     PARSING_FLAGS_VLAN))
1594 			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1595 				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1596 		else
1597 #endif
1598 			netif_receive_skb(skb);
1599 
1600 
1601 next_rx:
1602 		rx_buf->skb = NULL;
1603 
1604 		bd_cons = NEXT_RX_IDX(bd_cons);
1605 		bd_prod = NEXT_RX_IDX(bd_prod);
1606 		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1607 		rx_pkt++;
1608 next_cqe:
1609 		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1610 		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1611 
1612 		if (rx_pkt == budget)
1613 			break;
1614 	} /* while */
1615 
1616 	fp->rx_bd_cons = bd_cons;
1617 	fp->rx_bd_prod = bd_prod_fw;
1618 	fp->rx_comp_cons = sw_comp_cons;
1619 	fp->rx_comp_prod = sw_comp_prod;
1620 
1621 	/* Update producers */
1622 	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1623 			     fp->rx_sge_prod);
1624 
1625 	fp->rx_pkt += rx_pkt;
1626 	fp->rx_calls++;
1627 
1628 	return rx_pkt;
1629 }
1630 
1631 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1632 {
1633 	struct bnx2x_fastpath *fp = fp_cookie;
1634 	struct bnx2x *bp = fp->bp;
1635 	int index = FP_IDX(fp);
1636 
1637 	/* Return here if interrupt is disabled */
1638 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1639 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1640 		return IRQ_HANDLED;
1641 	}
1642 
1643 	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1644 	   index, FP_SB_ID(fp));
1645 	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1646 
1647 #ifdef BNX2X_STOP_ON_ERROR
1648 	if (unlikely(bp->panic))
1649 		return IRQ_HANDLED;
1650 #endif
1651 
1652 	prefetch(fp->rx_cons_sb);
1653 	prefetch(fp->tx_cons_sb);
1654 	prefetch(&fp->status_blk->c_status_block.status_block_index);
1655 	prefetch(&fp->status_blk->u_status_block.status_block_index);
1656 
1657 	netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1658 
1659 	return IRQ_HANDLED;
1660 }
1661 
1662 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1663 {
1664 	struct net_device *dev = dev_instance;
1665 	struct bnx2x *bp = netdev_priv(dev);
1666 	u16 status = bnx2x_ack_int(bp);
1667 	u16 mask;
1668 
1669 	/* Return here if interrupt is shared and it's not for us */
1670 	if (unlikely(status == 0)) {
1671 		DP(NETIF_MSG_INTR, "not our interrupt!\n");
1672 		return IRQ_NONE;
1673 	}
1674 	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1675 
1676 	/* Return here if interrupt is disabled */
1677 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1678 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1679 		return IRQ_HANDLED;
1680 	}
1681 
1682 #ifdef BNX2X_STOP_ON_ERROR
1683 	if (unlikely(bp->panic))
1684 		return IRQ_HANDLED;
1685 #endif
1686 
1687 	mask = 0x2 << bp->fp[0].sb_id;
1688 	if (status & mask) {
1689 		struct bnx2x_fastpath *fp = &bp->fp[0];
1690 
1691 		prefetch(fp->rx_cons_sb);
1692 		prefetch(fp->tx_cons_sb);
1693 		prefetch(&fp->status_blk->c_status_block.status_block_index);
1694 		prefetch(&fp->status_blk->u_status_block.status_block_index);
1695 
1696 		netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1697 
1698 		status &= ~mask;
1699 	}
1700 
1701 
1702 	if (unlikely(status & 0x1)) {
1703 		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1704 
1705 		status &= ~0x1;
1706 		if (!status)
1707 			return IRQ_HANDLED;
1708 	}
1709 
1710 	if (status)
1711 		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1712 		   status);
1713 
1714 	return IRQ_HANDLED;
1715 }
1716 
1717 /* end of fast path */
1718 
1719 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1720 
1721 /* Link */
1722 
1723 /*
1724  * General service functions
1725  */
1726 
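/* acquire one of the shared HW resource locks, polling for up to 5 seconds */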
1727 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1728 {
1729 	u32 lock_status;
1730 	u32 resource_bit = (1 << resource);
1731 	int func = BP_FUNC(bp);
1732 	u32 hw_lock_control_reg;
1733 	int cnt;
1734 
1735 	/* Validating that the resource is within range */
1736 	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1737 		DP(NETIF_MSG_HW,
1738 		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1739 		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
1740 		return -EINVAL;
1741 	}
1742 
1743 	if (func <= 5) {
1744 		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1745 	} else {
1746 		hw_lock_control_reg =
1747 				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1748 	}
1749 
1750 	/* Validating that the resource is not already taken */
1751 	lock_status = REG_RD(bp, hw_lock_control_reg);
1752 	if (lock_status & resource_bit) {
1753 		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1754 		   lock_status, resource_bit);
1755 		return -EEXIST;
1756 	}
1757 
1758 	/* Try for 5 seconds, polling every 5 ms */
1759 	for (cnt = 0; cnt < 1000; cnt++) {
1760 		/* Try to acquire the lock */
1761 		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1762 		lock_status = REG_RD(bp, hw_lock_control_reg);
1763 		if (lock_status & resource_bit)
1764 			return 0;
1765 
1766 		msleep(5);
1767 	}
1768 	DP(NETIF_MSG_HW, "Timeout\n");
1769 	return -EAGAIN;
1770 }
1771 
1772 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1773 {
1774 	u32 lock_status;
1775 	u32 resource_bit = (1 << resource);
1776 	int func = BP_FUNC(bp);
1777 	u32 hw_lock_control_reg;
1778 
1779 	/* Validating that the resource is within range */
1780 	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1781 		DP(NETIF_MSG_HW,
1782 		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1783 		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
1784 		return -EINVAL;
1785 	}
1786 
1787 	if (func <= 5) {
1788 		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1789 	} else {
1790 		hw_lock_control_reg =
1791 				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1792 	}
1793 
1794 	/* Validating that the resource is currently taken */
1795 	lock_status = REG_RD(bp, hw_lock_control_reg);
1796 	if (!(lock_status & resource_bit)) {
1797 		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1798 		   lock_status, resource_bit);
1799 		return -EFAULT;
1800 	}
1801 
1802 	REG_WR(bp, hw_lock_control_reg, resource_bit);
1803 	return 0;
1804 }
1805 
1806 /* HW Lock for shared dual port PHYs */
1807 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1808 {
1809 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1810 
1811 	mutex_lock(&bp->port.phy_mutex);
1812 
1813 	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1814 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1815 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1816 }
1817 
1818 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1819 {
1820 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1821 
1822 	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1823 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1824 		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1825 
1826 	mutex_unlock(&bp->port.phy_mutex);
1827 }
1828 
1829 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1830 {
1831 	/* The GPIO should be swapped if swap register is set and active */
1832 	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1833 			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1834 	int gpio_shift = gpio_num +
1835 			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1836 	u32 gpio_mask = (1 << gpio_shift);
1837 	u32 gpio_reg;
1838 
1839 	if (gpio_num > MISC_REGISTERS_GPIO_3) {
1840 		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1841 		return -EINVAL;
1842 	}
1843 
1844 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1845 	/* read GPIO and mask except the float bits */
1846 	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1847 
1848 	switch (mode) {
1849 	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1850 		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1851 		   gpio_num, gpio_shift);
1852 		/* clear FLOAT and set CLR */
1853 		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1854 		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1855 		break;
1856 
1857 	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1858 		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1859 		   gpio_num, gpio_shift);
1860 		/* clear FLOAT and set SET */
1861 		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1862 		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1863 		break;
1864 
1865 	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1866 		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1867 		   gpio_num, gpio_shift);
1868 		/* set FLOAT */
1869 		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1870 		break;
1871 
1872 	default:
1873 		break;
1874 	}
1875 
1876 	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1877 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1878 
1879 	return 0;
1880 }
1881 
1882 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1883 {
1884 	u32 spio_mask = (1 << spio_num);
1885 	u32 spio_reg;
1886 
1887 	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1888 	    (spio_num > MISC_REGISTERS_SPIO_7)) {
1889 		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1890 		return -EINVAL;
1891 	}
1892 
1893 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1894 	/* read SPIO and mask except the float bits */
1895 	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1896 
1897 	switch (mode) {
1898 	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1899 		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1900 		/* clear FLOAT and set CLR */
1901 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1902 		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1903 		break;
1904 
1905 	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1906 		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1907 		/* clear FLOAT and set SET */
1908 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1909 		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1910 		break;
1911 
1912 	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1913 		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1914 		/* set FLOAT */
1915 		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1916 		break;
1917 
1918 	default:
1919 		break;
1920 	}
1921 
1922 	REG_WR(bp, MISC_REG_SPIO, spio_reg);
1923 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1924 
1925 	return 0;
1926 }
1927 
1928 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1929 {
1930 	switch (bp->link_vars.ieee_fc &
1931 		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1932 	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1933 		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1934 					  ADVERTISED_Pause);
1935 		break;
1936 	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1937 		bp->port.advertising |= (ADVERTISED_Asym_Pause |
1938 					 ADVERTISED_Pause);
1939 		break;
1940 	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1941 		bp->port.advertising |= ADVERTISED_Asym_Pause;
1942 		break;
1943 	default:
1944 		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1945 					  ADVERTISED_Pause);
1946 		break;
1947 	}
1948 }
1949 
1950 static void bnx2x_link_report(struct bnx2x *bp)
1951 {
1952 	if (bp->link_vars.link_up) {
1953 		if (bp->state == BNX2X_STATE_OPEN)
1954 			netif_carrier_on(bp->dev);
1955 		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1956 
1957 		printk("%d Mbps ", bp->link_vars.line_speed);
1958 
1959 		if (bp->link_vars.duplex == DUPLEX_FULL)
1960 			printk("full duplex");
1961 		else
1962 			printk("half duplex");
1963 
1964 		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1965 			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1966 				printk(", receive ");
1967 				if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1968 					printk("& transmit ");
1969 			} else {
1970 				printk(", transmit ");
1971 			}
1972 			printk("flow control ON");
1973 		}
1974 		printk("\n");
1975 
1976 	} else { /* link_down */
1977 		netif_carrier_off(bp->dev);
1978 		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1979 	}
1980 }
1981 
1982 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1983 {
1984 	if (!BP_NOMCP(bp)) {
1985 		u8 rc;
1986 
1987 		/* Initialize link parameters structure variables */
1988 		/* It is recommended to turn off RX FC for jumbo frames
1989 		   for better performance */
1990 		if (IS_E1HMF(bp))
1991 			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1992 		else if (bp->dev->mtu > 5000)
1993 			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1994 		else
1995 			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1996 
1997 		bnx2x_acquire_phy_lock(bp);
1998 		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1999 		bnx2x_release_phy_lock(bp);
2000 
2001 		bnx2x_calc_fc_adv(bp);
2002 
2003 		if (bp->link_vars.link_up)
2004 			bnx2x_link_report(bp);
2005 
2006 
2007 		return rc;
2008 	}
2009 	BNX2X_ERR("Bootcode is missing - not initializing link\n");
2010 	return -EINVAL;
2011 }
2012 
2013 static void bnx2x_link_set(struct bnx2x *bp)
2014 {
2015 	if (!BP_NOMCP(bp)) {
2016 		bnx2x_acquire_phy_lock(bp);
2017 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2018 		bnx2x_release_phy_lock(bp);
2019 
2020 		bnx2x_calc_fc_adv(bp);
2021 	} else
2022 		BNX2X_ERR("Bootcode is missing - not setting link\n");
2023 }
2024 
2025 static void bnx2x__link_reset(struct bnx2x *bp)
2026 {
2027 	if (!BP_NOMCP(bp)) {
2028 		bnx2x_acquire_phy_lock(bp);
2029 		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2030 		bnx2x_release_phy_lock(bp);
2031 	} else
2032 		BNX2X_ERR("Bootcode is missing - not resetting link\n");
2033 }
2034 
2035 static u8 bnx2x_link_test(struct bnx2x *bp)
2036 {
2037 	u8 rc;
2038 
2039 	bnx2x_acquire_phy_lock(bp);
2040 	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2041 	bnx2x_release_phy_lock(bp);
2042 
2043 	return rc;
2044 }
2045 
2046 /* Calculates the sum of vn_min_rates.
2047    It's needed for further normalizing of the min_rates.
2048 
2049    Returns:
2050      sum of vn_min_rates
2051        or
2052      0 - if all the min_rates are 0.
2053      In the latter case the fairness algorithm should be deactivated.
2054      If not all min_rates are zero then those that are zeroes will
2055      be set to 1.
2056  */
2057 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2058 {
2059 	int i, port = BP_PORT(bp);
2060 	u32 wsum = 0;
2061 	int all_zero = 1;
2062 
2063 	for (i = 0; i < E1HVN_MAX; i++) {
2064 		u32 vn_cfg =
2065 			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2066 		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2067 				     FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2068 		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2069 			/* If min rate is zero - set it to 1 */
2070 			if (!vn_min_rate)
2071 				vn_min_rate = DEF_MIN_RATE;
2072 			else
2073 				all_zero = 0;
2074 
2075 			wsum += vn_min_rate;
2076 		}
2077 	}
2078 
2079 	/* ... only if all min rates are zeros - disable FAIRNESS */
2080 	if (all_zero)
2081 		return 0;
2082 
2083 	return wsum;
2084 }
2085 
2086 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2087 				   int en_fness,
2088 				   u16 port_rate,
2089 				   struct cmng_struct_per_port *m_cmng_port)
2090 {
2091 	u32 r_param = port_rate / 8;
2092 	int port = BP_PORT(bp);
2093 	int i;
2094 
2095 	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2096 
2097 	/* Enable minmax only if we are in e1hmf mode */
2098 	if (IS_E1HMF(bp)) {
2099 		u32 fair_periodic_timeout_usec;
2100 		u32 t_fair;
2101 
2102 		/* Enable rate shaping and fairness */
2103 		m_cmng_port->flags.cmng_vn_enable = 1;
2104 		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2105 		m_cmng_port->flags.rate_shaping_enable = 1;
2106 
2107 		if (!en_fness)
2108 			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2109 			   "  fairness will be disabled\n");
2110 
2111 		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2112 		m_cmng_port->rs_vars.rs_periodic_timeout =
2113 						RS_PERIODIC_TIMEOUT_USEC / 4;
2114 
2115 		/* this is the threshold below which no timer arming will occur
2116 		   1.25 coefficient is for the threshold to be a little bigger
2117 		   than the real time, to compensate for timer inaccuracy */
2118 		m_cmng_port->rs_vars.rs_threshold =
2119 				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2120 
2121 		/* resolution of fairness timer */
2122 		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2123 		/* for 10G it is 1000 usec; for 1G it is 10000 usec */
2124 		t_fair = T_FAIR_COEF / port_rate;
2125 
2126 		/* this is the threshold below which we won't arm
2127 		   the timer anymore */
2128 		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2129 
2130 		/* we multiply by 1e3/8 to get bytes/msec.
2131 		   We don't want the credit to exceed
2132 		   T_FAIR*FAIR_MEM (the algorithm resolution) */
2133 		m_cmng_port->fair_vars.upper_bound =
2134 						r_param * t_fair * FAIR_MEM;
2135 		/* since each tick is 4 usec */
2136 		m_cmng_port->fair_vars.fairness_timeout =
2137 						fair_periodic_timeout_usec / 4;
2138 
2139 	} else {
2140 		/* Disable rate shaping and fairness */
2141 		m_cmng_port->flags.cmng_vn_enable = 0;
2142 		m_cmng_port->flags.fairness_enable = 0;
2143 		m_cmng_port->flags.rate_shaping_enable = 0;
2144 
2145 		DP(NETIF_MSG_IFUP,
2146 		   "Single function mode  minmax will be disabled\n");
2147 	}
2148 
2149 	/* Store it to internal memory */
2150 	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2151 		REG_WR(bp, BAR_XSTRORM_INTMEM +
2152 		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2153 		       ((u32 *)(m_cmng_port))[i]);
2154 }
2155 
2156 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2157 				   u32 wsum, u16 port_rate,
2158 				 struct cmng_struct_per_port *m_cmng_port)
2159 {
2160 	struct rate_shaping_vars_per_vn m_rs_vn;
2161 	struct fairness_vars_per_vn m_fair_vn;
2162 	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2163 	u16 vn_min_rate, vn_max_rate;
2164 	int i;
2165 
2166 	/* If function is hidden - set min and max to zeroes */
2167 	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2168 		vn_min_rate = 0;
2169 		vn_max_rate = 0;
2170 
2171 	} else {
2172 		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2173 				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2174 		/* If FAIRNESS is enabled (not all min rates are zeroes) and
2175 		   if current min rate is zero - set it to 1.
2176 		   This is a requirement of the algorithm. */
2177 		if ((vn_min_rate == 0) && wsum)
2178 			vn_min_rate = DEF_MIN_RATE;
2179 		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2180 				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2181 	}
2182 
2183 	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2184 	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2185 
2186 	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2187 	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2188 
2189 	/* global vn counter - maximal Mbps for this vn */
2190 	m_rs_vn.vn_counter.rate = vn_max_rate;
2191 
2192 	/* quota - number of bytes transmitted in this period */
2193 	m_rs_vn.vn_counter.quota =
2194 				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2195 
2196 #ifdef BNX2X_PER_PROT_QOS
2197 	/* per protocol counter */
2198 	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2199 		/* maximal Mbps for this protocol */
2200 		m_rs_vn.protocol_counters[protocol].rate =
2201 						protocol_max_rate[protocol];
2202 		/* the quota in each timer period -
2203 		   number of bytes transmitted in this period */
2204 		m_rs_vn.protocol_counters[protocol].quota =
2205 			(u32)(rs_periodic_timeout_usec *
2206 			  ((double)m_rs_vn.
2207 				   protocol_counters[protocol].rate/8));
2208 	}
2209 #endif
2210 
2211 	if (wsum) {
2212 		/* credit for each period of the fairness algorithm:
2213 		   number of bytes in T_FAIR (the VNs share the port rate).
2214 		   wsum should not be larger than 10000, thus
2215 		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2216 		m_fair_vn.vn_credit_delta =
2217 			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2218 			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2219 		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2220 		   m_fair_vn.vn_credit_delta);
2221 	}
2222 
2223 #ifdef BNX2X_PER_PROT_QOS
2224 	do {
2225 		u32 protocolWeightSum = 0;
2226 
2227 		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2228 			protocolWeightSum +=
2229 					drvInit.protocol_min_rate[protocol];
2230 		/* per protocol counter -
2231 		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2232 		if (protocolWeightSum > 0) {
2233 			for (protocol = 0;
2234 			     protocol < NUM_OF_PROTOCOLS; protocol++)
2235 				/* credit for each period of the
2236 				   fairness algorithm - number of bytes in
2237 				   T_FAIR (the protocols share the vn rate) */
2238 				m_fair_vn.protocol_credit_delta[protocol] =
2239 					(u32)((vn_min_rate / 8) * t_fair *
2240 					protocol_min_rate / protocolWeightSum);
2241 		}
2242 	} while (0);
2243 #endif
2244 
2245 	/* Store it to internal memory */
2246 	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2247 		REG_WR(bp, BAR_XSTRORM_INTMEM +
2248 		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2249 		       ((u32 *)(&m_rs_vn))[i]);
2250 
2251 	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2252 		REG_WR(bp, BAR_XSTRORM_INTMEM +
2253 		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2254 		       ((u32 *)(&m_fair_vn))[i]);
2255 }
2256 
2257 /* This function is called upon link interrupt */
2258 static void bnx2x_link_attn(struct bnx2x *bp)
2259 {
2260 	int vn;
2261 
2262 	/* Make sure that we are synced with the current statistics */
2263 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2264 
2265 	bnx2x_link_update(&bp->link_params, &bp->link_vars);
2266 
2267 	if (bp->link_vars.link_up) {
2268 
2269 		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2270 			struct host_port_stats *pstats;
2271 
2272 			pstats = bnx2x_sp(bp, port_stats);
2273 			/* reset old bmac stats */
2274 			memset(&(pstats->mac_stx[0]), 0,
2275 			       sizeof(struct mac_stx));
2276 		}
2277 		if ((bp->state == BNX2X_STATE_OPEN) ||
2278 		    (bp->state == BNX2X_STATE_DISABLED))
2279 			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2280 	}
2281 
2282 	/* indicate link status */
2283 	bnx2x_link_report(bp);
2284 
2285 	if (IS_E1HMF(bp)) {
2286 		int func;
2287 
2288 		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2289 			if (vn == BP_E1HVN(bp))
2290 				continue;
2291 
2292 			func = ((vn << 1) | BP_PORT(bp));
2293 
2294 			/* Set the attention towards other drivers
2295 			   on the same port */
2296 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2297 			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2298 		}
2299 	}
2300 
2301 	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2302 		struct cmng_struct_per_port m_cmng_port;
2303 		u32 wsum;
2304 		int port = BP_PORT(bp);
2305 
2306 		/* Init RATE SHAPING and FAIRNESS contexts */
2307 		wsum = bnx2x_calc_vn_wsum(bp);
2308 		bnx2x_init_port_minmax(bp, (int)wsum,
2309 					bp->link_vars.line_speed,
2310 					&m_cmng_port);
2311 		if (IS_E1HMF(bp))
2312 			for (vn = VN_0; vn < E1HVN_MAX; vn++)
2313 				bnx2x_init_vn_minmax(bp, 2*vn + port,
2314 					wsum, bp->link_vars.line_speed,
2315 						     &m_cmng_port);
2316 	}
2317 }
2318 
2319 static void bnx2x__link_status_update(struct bnx2x *bp)
2320 {
2321 	if (bp->state != BNX2X_STATE_OPEN)
2322 		return;
2323 
2324 	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2325 
2326 	if (bp->link_vars.link_up)
2327 		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2328 	else
2329 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2330 
2331 	/* indicate link status */
2332 	bnx2x_link_report(bp);
2333 }
2334 
2335 static void bnx2x_pmf_update(struct bnx2x *bp)
2336 {
2337 	int port = BP_PORT(bp);
2338 	u32 val;
2339 
2340 	bp->port.pmf = 1;
2341 	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2342 
2343 	/* enable nig attention */
2344 	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2345 	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2346 	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2347 
2348 	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2349 }
2350 
2351 /* end of Link */
2352 
2353 /* slow path */
2354 
2355 /*
2356  * General service functions
2357  */
2358 
2359 /* the slow path queue is odd since completions arrive on the fastpath ring */
2360 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2361 			 u32 data_hi, u32 data_lo, int common)
2362 {
2363 	int func = BP_FUNC(bp);
2364 
2365 	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2366 	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2367 	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2368 	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2369 	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2370 
2371 #ifdef BNX2X_STOP_ON_ERROR
2372 	if (unlikely(bp->panic))
2373 		return -EIO;
2374 #endif
2375 
2376 	spin_lock_bh(&bp->spq_lock);
2377 
2378 	if (!bp->spq_left) {
2379 		BNX2X_ERR("BUG! SPQ ring full!\n");
2380 		spin_unlock_bh(&bp->spq_lock);
2381 		bnx2x_panic();
2382 		return -EBUSY;
2383 	}
2384 
2385 	/* CID needs the port number to be encoded in it */
2386 	bp->spq_prod_bd->hdr.conn_and_cmd_data =
2387 			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2388 				     HW_CID(bp, cid)));
2389 	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2390 	if (common)
2391 		bp->spq_prod_bd->hdr.type |=
2392 			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2393 
2394 	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2395 	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2396 
2397 	bp->spq_left--;
2398 
2399 	if (bp->spq_prod_bd == bp->spq_last_bd) {
2400 		bp->spq_prod_bd = bp->spq;
2401 		bp->spq_prod_idx = 0;
2402 		DP(NETIF_MSG_TIMER, "end of spq\n");
2403 
2404 	} else {
2405 		bp->spq_prod_bd++;
2406 		bp->spq_prod_idx++;
2407 	}
2408 
2409 	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2410 	       bp->spq_prod_idx);
2411 
2412 	spin_unlock_bh(&bp->spq_lock);
2413 	return 0;
2414 }
2415 
2416 /* acquire split MCP access lock register */
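/* Writes the lock bit (bit 31) and polls the register for up to ~5 seconds
 * (1000 attempts, 5 ms apart); returns -EBUSY if the bit never sticks.
 */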
2417 static int bnx2x_acquire_alr(struct bnx2x *bp)
2418 {
2419 	u32 i, j, val;
2420 	int rc = 0;
2421 
2422 	might_sleep();
2423 	i = 100;
2424 	for (j = 0; j < i*10; j++) {
2425 		val = (1UL << 31);
2426 		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2427 		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2428 		if (val & (1L << 31))
2429 			break;
2430 
2431 		msleep(5);
2432 	}
2433 	if (!(val & (1L << 31))) {
2434 		BNX2X_ERR("Cannot acquire MCP access lock register\n");
2435 		rc = -EBUSY;
2436 	}
2437 
2438 	return rc;
2439 }
2440 
2441 /* release split MCP access lock register */
2442 static void bnx2x_release_alr(struct bnx2x *bp)
2443 {
2444 	u32 val = 0;
2445 
2446 	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2447 }
2448 
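/* Snapshot the default status block indices and return a bitmask of what
 * changed: bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.
 */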
2449 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2450 {
2451 	struct host_def_status_block *def_sb = bp->def_status_blk;
2452 	u16 rc = 0;
2453 
2454 	barrier(); /* status block is written to by the chip */
2455 	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2456 		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2457 		rc |= 1;
2458 	}
2459 	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2460 		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2461 		rc |= 2;
2462 	}
2463 	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2464 		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2465 		rc |= 4;
2466 	}
2467 	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2468 		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2469 		rc |= 8;
2470 	}
2471 	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2472 		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2473 		rc |= 16;
2474 	}
2475 	return rc;
2476 }
2477 
2478 /*
2479  * slow path service functions
2480  */
2481 
2482 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2483 {
2484 	int port = BP_PORT(bp);
2485 	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2486 		       COMMAND_REG_ATTN_BITS_SET);
2487 	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2488 			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
2489 	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2490 				       NIG_REG_MASK_INTERRUPT_PORT0;
2491 	u32 aeu_mask;
2492 
2493 	if (bp->attn_state & asserted)
2494 		BNX2X_ERR("IGU ERROR\n");
2495 
2496 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2497 	aeu_mask = REG_RD(bp, aeu_addr);
2498 
2499 	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2500 	   aeu_mask, asserted);
2501 	aeu_mask &= ~(asserted & 0xff);
2502 	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2503 
2504 	REG_WR(bp, aeu_addr, aeu_mask);
2505 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2506 
2507 	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2508 	bp->attn_state |= asserted;
2509 	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2510 
2511 	if (asserted & ATTN_HARD_WIRED_MASK) {
2512 		if (asserted & ATTN_NIG_FOR_FUNC) {
2513 
2514 			bnx2x_acquire_phy_lock(bp);
2515 
2516 			/* save nig interrupt mask */
2517 			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2518 			REG_WR(bp, nig_int_mask_addr, 0);
2519 
2520 			bnx2x_link_attn(bp);
2521 
2522 			/* handle unicore attn? */
2523 		}
2524 		if (asserted & ATTN_SW_TIMER_4_FUNC)
2525 			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2526 
2527 		if (asserted & GPIO_2_FUNC)
2528 			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2529 
2530 		if (asserted & GPIO_3_FUNC)
2531 			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2532 
2533 		if (asserted & GPIO_4_FUNC)
2534 			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2535 
2536 		if (port == 0) {
2537 			if (asserted & ATTN_GENERAL_ATTN_1) {
2538 				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2539 				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2540 			}
2541 			if (asserted & ATTN_GENERAL_ATTN_2) {
2542 				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2543 				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2544 			}
2545 			if (asserted & ATTN_GENERAL_ATTN_3) {
2546 				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2547 				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2548 			}
2549 		} else {
2550 			if (asserted & ATTN_GENERAL_ATTN_4) {
2551 				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2552 				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2553 			}
2554 			if (asserted & ATTN_GENERAL_ATTN_5) {
2555 				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2556 				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2557 			}
2558 			if (asserted & ATTN_GENERAL_ATTN_6) {
2559 				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2560 				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2561 			}
2562 		}
2563 
2564 	} /* if hardwired */
2565 
2566 	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2567 	   asserted, hc_addr);
2568 	REG_WR(bp, hc_addr, asserted);
2569 
2570 	/* now set back the mask */
2571 	if (asserted & ATTN_NIG_FOR_FUNC) {
2572 		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2573 		bnx2x_release_phy_lock(bp);
2574 	}
2575 }
2576 
2577 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2578 {
2579 	int port = BP_PORT(bp);
2580 	int reg_offset;
2581 	u32 val;
2582 
2583 	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2584 			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2585 
2586 	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2587 
2588 		val = REG_RD(bp, reg_offset);
2589 		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2590 		REG_WR(bp, reg_offset, val);
2591 
2592 		BNX2X_ERR("SPIO5 hw attention\n");
2593 
2594 		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2595 		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2596 		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2597 			/* Fan failure attention */
2598 
2599 			/* The PHY reset is controlled by GPIO 1 */
2600 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2601 				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2602 			/* Low power mode is controlled by GPIO 2 */
2603 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2604 				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2605 			/* mark the failure */
2606 			bp->link_params.ext_phy_config &=
2607 					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2608 			bp->link_params.ext_phy_config |=
2609 					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2610 			SHMEM_WR(bp,
2611 				 dev_info.port_hw_config[port].
2612 							external_phy_config,
2613 				 bp->link_params.ext_phy_config);
2614 			/* log the failure */
2615 			printk(KERN_ERR PFX "Fan Failure on Network"
2616 			       " Controller %s has caused the driver to"
2617 			       " shutdown the card to prevent permanent"
2618 			       " damage.  Please contact Dell Support for"
2619 			       " assistance\n", bp->dev->name);
2620 			break;
2621 
2622 		default:
2623 			break;
2624 		}
2625 	}
2626 
2627 	if (attn & HW_INTERRUT_ASSERT_SET_0) {
2628 
2629 		val = REG_RD(bp, reg_offset);
2630 		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2631 		REG_WR(bp, reg_offset, val);
2632 
2633 		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2634 			  (attn & HW_INTERRUT_ASSERT_SET_0));
2635 		bnx2x_panic();
2636 	}
2637 }
2638 
2639 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2640 {
2641 	u32 val;
2642 
2643 	if (attn & BNX2X_DOORQ_ASSERT) {
2644 
2645 		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2646 		BNX2X_ERR("DB hw attention 0x%x\n", val);
2647 		/* DORQ discard attention */
2648 		if (val & 0x2)
2649 			BNX2X_ERR("FATAL error from DORQ\n");
2650 	}
2651 
2652 	if (attn & HW_INTERRUT_ASSERT_SET_1) {
2653 
2654 		int port = BP_PORT(bp);
2655 		int reg_offset;
2656 
2657 		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2658 				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2659 
2660 		val = REG_RD(bp, reg_offset);
2661 		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2662 		REG_WR(bp, reg_offset, val);
2663 
2664 		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2665 			  (attn & HW_INTERRUT_ASSERT_SET_1));
2666 		bnx2x_panic();
2667 	}
2668 }
2669 
2670 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2671 {
2672 	u32 val;
2673 
2674 	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2675 
2676 		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2677 		BNX2X_ERR("CFC hw attention 0x%x\n", val);
2678 		/* CFC error attention */
2679 		if (val & 0x2)
2680 			BNX2X_ERR("FATAL error from CFC\n");
2681 	}
2682 
2683 	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2684 
2685 		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2686 		BNX2X_ERR("PXP hw attention 0x%x\n", val);
2687 		/* RQ_USDMDP_FIFO_OVERFLOW */
2688 		if (val & 0x18000)
2689 			BNX2X_ERR("FATAL error from PXP\n");
2690 	}
2691 
2692 	if (attn & HW_INTERRUT_ASSERT_SET_2) {
2693 
2694 		int port = BP_PORT(bp);
2695 		int reg_offset;
2696 
2697 		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2698 				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2699 
2700 		val = REG_RD(bp, reg_offset);
2701 		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2702 		REG_WR(bp, reg_offset, val);
2703 
2704 		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2705 			  (attn & HW_INTERRUT_ASSERT_SET_2));
2706 		bnx2x_panic();
2707 	}
2708 }
2709 
2710 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2711 {
2712 	u32 val;
2713 
2714 	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2715 
2716 		if (attn & BNX2X_PMF_LINK_ASSERT) {
2717 			int func = BP_FUNC(bp);
2718 
2719 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2720 			bnx2x__link_status_update(bp);
2721 			if (SHMEM_RD(bp, func_mb[func].drv_status) &
2722 							DRV_STATUS_PMF)
2723 				bnx2x_pmf_update(bp);
2724 
2725 		} else if (attn & BNX2X_MC_ASSERT_BITS) {
2726 
2727 			BNX2X_ERR("MC assert!\n");
2728 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2729 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2730 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2731 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2732 			bnx2x_panic();
2733 
2734 		} else if (attn & BNX2X_MCP_ASSERT) {
2735 
2736 			BNX2X_ERR("MCP assert!\n");
2737 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2738 			bnx2x_fw_dump(bp);
2739 
2740 		} else
2741 			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2742 	}
2743 
2744 	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2745 		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2746 		if (attn & BNX2X_GRC_TIMEOUT) {
2747 			val = CHIP_IS_E1H(bp) ?
2748 				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2749 			BNX2X_ERR("GRC time-out 0x%08x\n", val);
2750 		}
2751 		if (attn & BNX2X_GRC_RSV) {
2752 			val = CHIP_IS_E1H(bp) ?
2753 				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2754 			BNX2X_ERR("GRC reserved 0x%08x\n", val);
2755 		}
2756 		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2757 	}
2758 }
2759 
2760 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2761 {
2762 	struct attn_route attn;
2763 	struct attn_route group_mask;
2764 	int port = BP_PORT(bp);
2765 	int index;
2766 	u32 reg_addr;
2767 	u32 val;
2768 	u32 aeu_mask;
2769 
2770 	/* need to take HW lock because MCP or other port might also
2771 	   try to handle this event */
2772 	bnx2x_acquire_alr(bp);
2773 
2774 	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2775 	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2776 	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2777 	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2778 	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2779 	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2780 
2781 	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2782 		if (deasserted & (1 << index)) {
2783 			group_mask = bp->attn_group[index];
2784 
2785 			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2786 			   index, group_mask.sig[0], group_mask.sig[1],
2787 			   group_mask.sig[2], group_mask.sig[3]);
2788 
2789 			bnx2x_attn_int_deasserted3(bp,
2790 					attn.sig[3] & group_mask.sig[3]);
2791 			bnx2x_attn_int_deasserted1(bp,
2792 					attn.sig[1] & group_mask.sig[1]);
2793 			bnx2x_attn_int_deasserted2(bp,
2794 					attn.sig[2] & group_mask.sig[2]);
2795 			bnx2x_attn_int_deasserted0(bp,
2796 					attn.sig[0] & group_mask.sig[0]);
2797 
2798 			if ((attn.sig[0] & group_mask.sig[0] &
2799 						HW_PRTY_ASSERT_SET_0) ||
2800 			    (attn.sig[1] & group_mask.sig[1] &
2801 						HW_PRTY_ASSERT_SET_1) ||
2802 			    (attn.sig[2] & group_mask.sig[2] &
2803 						HW_PRTY_ASSERT_SET_2))
2804 				BNX2X_ERR("FATAL HW block parity attention\n");
2805 		}
2806 	}
2807 
2808 	bnx2x_release_alr(bp);
2809 
2810 	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2811 
2812 	val = ~deasserted;
2813 	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2814 	   val, reg_addr);
2815 	REG_WR(bp, reg_addr, val);
2816 
2817 	if (~bp->attn_state & deasserted)
2818 		BNX2X_ERR("IGU ERROR\n");
2819 
2820 	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2821 			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
2822 
2823 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2824 	aeu_mask = REG_RD(bp, reg_addr);
2825 
2826 	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2827 	   aeu_mask, deasserted);
2828 	aeu_mask |= (deasserted & 0xff);
2829 	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2830 
2831 	REG_WR(bp, reg_addr, aeu_mask);
2832 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2833 
2834 	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2835 	bp->attn_state &= ~deasserted;
2836 	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2837 }
2838 
2839 static void bnx2x_attn_int(struct bnx2x *bp)
2840 {
2841 	/* read local copy of bits */
2842 	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2843 								attn_bits);
2844 	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2845 								attn_bits_ack);
2846 	u32 attn_state = bp->attn_state;
2847 
2848 	/* look for changed bits */
2849 	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2850 	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2851 
2852 	DP(NETIF_MSG_HW,
2853 	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2854 	   attn_bits, attn_ack, asserted, deasserted);
2855 
2856 	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2857 		BNX2X_ERR("BAD attention state\n");
2858 
2859 	/* handle bits that were raised */
2860 	if (asserted)
2861 		bnx2x_attn_int_asserted(bp, asserted);
2862 
2863 	if (deasserted)
2864 		bnx2x_attn_int_deasserted(bp, deasserted);
2865 }
2866 
2867 static void bnx2x_sp_task(struct work_struct *work)
2868 {
2869 	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2870 	u16 status;
2871 
2872 
2873 	/* Return here if interrupt is disabled */
2874 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2875 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2876 		return;
2877 	}
2878 
2879 	status = bnx2x_update_dsb_idx(bp);
2880 /*	if (status == 0)				     */
2881 /*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
2882 
2883 	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2884 
2885 	/* HW attentions */
2886 	if (status & 0x1)
2887 		bnx2x_attn_int(bp);
2888 
2889 	/* CStorm events: query_stats, port delete ramrod */
2890 	if (status & 0x2)
2891 		bp->stats_pending = 0;
2892 
2893 	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2894 		     IGU_INT_NOP, 1);
2895 	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2896 		     IGU_INT_NOP, 1);
2897 	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2898 		     IGU_INT_NOP, 1);
2899 	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2900 		     IGU_INT_NOP, 1);
2901 	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2902 		     IGU_INT_ENABLE, 1);
2903 
2904 }
2905 
2906 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2907 {
2908 	struct net_device *dev = dev_instance;
2909 	struct bnx2x *bp = netdev_priv(dev);
2910 
2911 	/* Return here if interrupt is disabled */
2912 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2913 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2914 		return IRQ_HANDLED;
2915 	}
2916 
2917 	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2918 
2919 #ifdef BNX2X_STOP_ON_ERROR
2920 	if (unlikely(bp->panic))
2921 		return IRQ_HANDLED;
2922 #endif
2923 
2924 	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2925 
2926 	return IRQ_HANDLED;
2927 }
2928 
2929 /* end of slow path */
2930 
2931 /* Statistics */
2932 
2933 /****************************************************************************
2934 * Macros
2935 ****************************************************************************/
2936 
2937 /* sum[hi:lo] += add[hi:lo] */
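/* e.g. ADD_64(cnt_hi, 0, cnt_lo, delta) bumps a 64-bit counter kept as two
 * u32 halves, carrying into cnt_hi when cnt_lo wraps around.
 */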
2938 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2939 	do { \
2940 		s_lo += a_lo; \
2941 		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2942 	} while (0)
2943 
2944 /* difference = minuend - subtrahend */
2945 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2946 	do { \
2947 		if (m_lo < s_lo) { \
2948 			/* underflow */ \
2949 			d_hi = m_hi - s_hi; \
2950 			if (d_hi > 0) { \
2951 				/* we can 'loan' 1 */ \
2952 				d_hi--; \
2953 				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2954 			} else { \
2955 				/* m_hi <= s_hi */ \
2956 				d_hi = 0; \
2957 				d_lo = 0; \
2958 			} \
2959 		} else { \
2960 			/* m_lo >= s_lo */ \
2961 			if (m_hi < s_hi) { \
2962 				d_hi = 0; \
2963 				d_lo = 0; \
2964 			} else { \
2965 				/* m_hi >= s_hi */ \
2966 				d_hi = m_hi - s_hi; \
2967 				d_lo = m_lo - s_lo; \
2968 			} \
2969 		} \
2970 	} while (0)
2971 
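/* UPDATE_STAT64 expects 'new', 'pstats' and 'diff' in scope: it diffs the
 * freshly DMAE'd MAC counter 's' against the snapshot kept in mac_stx[0],
 * stores the new snapshot and accumulates the 64-bit delta into the
 * running total in mac_stx[1] under the name 't'.
 */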
2972 #define UPDATE_STAT64(s, t) \
2973 	do { \
2974 		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2975 			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2976 		pstats->mac_stx[0].t##_hi = new->s##_hi; \
2977 		pstats->mac_stx[0].t##_lo = new->s##_lo; \
2978 		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2979 		       pstats->mac_stx[1].t##_lo, diff.lo); \
2980 	} while (0)
2981 
2982 #define UPDATE_STAT64_NIG(s, t) \
2983 	do { \
2984 		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2985 			diff.lo, new->s##_lo, old->s##_lo); \
2986 		ADD_64(estats->t##_hi, diff.hi, \
2987 		       estats->t##_lo, diff.lo); \
2988 	} while (0)
2989 
2990 /* sum[hi:lo] += add */
2991 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2992 	do { \
2993 		s_lo += a; \
2994 		s_hi += (s_lo < a) ? 1 : 0; \
2995 	} while (0)
2996 
2997 #define UPDATE_EXTEND_STAT(s) \
2998 	do { \
2999 		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3000 			      pstats->mac_stx[1].s##_lo, \
3001 			      new->s); \
3002 	} while (0)
3003 
3004 #define UPDATE_EXTEND_TSTAT(s, t) \
3005 	do { \
3006 		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3007 		old_tclient->s = le32_to_cpu(tclient->s); \
3008 		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3009 	} while (0)
3010 
3011 #define UPDATE_EXTEND_XSTAT(s, t) \
3012 	do { \
3013 		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3014 		old_xclient->s = le32_to_cpu(xclient->s); \
3015 		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3016 	} while (0)
3017 
3018 /*
3019  * General service functions
3020  */
3021 
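/* Read a {hi, lo} pair of u32s (hi first) and return it as a long: the full
 * 64-bit value on 64-bit kernels, only the low 32 bits on 32-bit kernels.
 */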
3022 static inline long bnx2x_hilo(u32 *hiref)
3023 {
3024 	u32 lo = *(hiref + 1);
3025 #if (BITS_PER_LONG == 64)
3026 	u32 hi = *hiref;
3027 
3028 	return HILO_U64(hi, lo);
3029 #else
3030 	return lo;
3031 #endif
3032 }
3033 
3034 /*
3035  * Init service functions
3036  */
3037 
3038 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3039 {
3040 	if (!bp->stats_pending) {
3041 		struct eth_query_ramrod_data ramrod_data = {0};
3042 		int rc;
3043 
3044 		ramrod_data.drv_counter = bp->stats_counter++;
3045 		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3046 		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3047 
3048 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3049 				   ((u32 *)&ramrod_data)[1],
3050 				   ((u32 *)&ramrod_data)[0], 0);
3051 		if (rc == 0) {
3052 			/* stats ramrod has its own slot on the spq */
3053 			bp->spq_left++;
3054 			bp->stats_pending = 1;
3055 		}
3056 	}
3057 }
3058 
3059 static void bnx2x_stats_init(struct bnx2x *bp)
3060 {
3061 	int port = BP_PORT(bp);
3062 
3063 	bp->executer_idx = 0;
3064 	bp->stats_counter = 0;
3065 
3066 	/* port stats */
3067 	if (!BP_NOMCP(bp))
3068 		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3069 	else
3070 		bp->port.port_stx = 0;
3071 	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3072 
3073 	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3074 	bp->port.old_nig_stats.brb_discard =
3075 			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3076 	bp->port.old_nig_stats.brb_truncate =
3077 			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3078 	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3079 		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3080 	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3081 		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3082 
3083 	/* function stats */
3084 	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3085 	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3086 	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3087 	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3088 
3089 	bp->stats_state = STATS_STATE_DISABLED;
3090 	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3091 		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3092 }
3093 
3094 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3095 {
3096 	struct dmae_command *dmae = &bp->stats_dmae;
3097 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3098 
3099 	*stats_comp = DMAE_COMP_VAL;
3100 
3101 	/* loader */
3102 	if (bp->executer_idx) {
3103 		int loader_idx = PMF_DMAE_C(bp);
3104 
3105 		memset(dmae, 0, sizeof(struct dmae_command));
3106 
3107 		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3108 				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3109 				DMAE_CMD_DST_RESET |
3110 #ifdef __BIG_ENDIAN
3111 				DMAE_CMD_ENDIANITY_B_DW_SWAP |
3112 #else
3113 				DMAE_CMD_ENDIANITY_DW_SWAP |
3114 #endif
3115 				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3116 					       DMAE_CMD_PORT_0) |
3117 				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3118 		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3119 		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3120 		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3121 				     sizeof(struct dmae_command) *
3122 				     (loader_idx + 1)) >> 2;
3123 		dmae->dst_addr_hi = 0;
3124 		dmae->len = sizeof(struct dmae_command) >> 2;
3125 		if (CHIP_IS_E1(bp))
3126 			dmae->len--;
3127 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3128 		dmae->comp_addr_hi = 0;
3129 		dmae->comp_val = 1;
3130 
3131 		*stats_comp = 0;
3132 		bnx2x_post_dmae(bp, dmae, loader_idx);
3133 
3134 	} else if (bp->func_stx) {
3135 		*stats_comp = 0;
3136 		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3137 	}
3138 }
3139 
3140 static int bnx2x_stats_comp(struct bnx2x *bp)
3141 {
3142 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3143 	int cnt = 10;
3144 
3145 	might_sleep();
3146 	while (*stats_comp != DMAE_COMP_VAL) {
3147 		if (!cnt) {
3148 			BNX2X_ERR("timeout waiting for stats finished\n");
3149 			break;
3150 		}
3151 		cnt--;
3152 		msleep(1);
3153 	}
3154 	return 1;
3155 }
3156 
3157 /*
3158  * Statistics service functions
3159  */
3160 
3161 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3162 {
3163 	struct dmae_command *dmae;
3164 	u32 opcode;
3165 	int loader_idx = PMF_DMAE_C(bp);
3166 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3167 
3168 	/* sanity */
3169 	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3170 		BNX2X_ERR("BUG!\n");
3171 		return;
3172 	}
3173 
3174 	bp->executer_idx = 0;
3175 
3176 	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3177 		  DMAE_CMD_C_ENABLE |
3178 		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3179 #ifdef __BIG_ENDIAN
3180 		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
3181 #else
3182 		  DMAE_CMD_ENDIANITY_DW_SWAP |
3183 #endif
3184 		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3185 		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3186 
3187 	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3188 	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3189 	dmae->src_addr_lo = bp->port.port_stx >> 2;
3190 	dmae->src_addr_hi = 0;
3191 	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3192 	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3193 	dmae->len = DMAE_LEN32_RD_MAX;
3194 	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3195 	dmae->comp_addr_hi = 0;
3196 	dmae->comp_val = 1;
3197 
3198 	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3199 	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3200 	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3201 	dmae->src_addr_hi = 0;
3202 	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3203 				   DMAE_LEN32_RD_MAX * 4);
3204 	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3205 				   DMAE_LEN32_RD_MAX * 4);
3206 	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3207 	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3208 	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3209 	dmae->comp_val = DMAE_COMP_VAL;
3210 
3211 	*stats_comp = 0;
3212 	bnx2x_hw_stats_post(bp);
3213 	bnx2x_stats_comp(bp);
3214 }
3215 
3216 static void bnx2x_port_stats_init(struct bnx2x *bp)
3217 {
3218 	struct dmae_command *dmae;
3219 	int port = BP_PORT(bp);
3220 	int vn = BP_E1HVN(bp);
3221 	u32 opcode;
3222 	int loader_idx = PMF_DMAE_C(bp);
3223 	u32 mac_addr;
3224 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3225 
3226 	/* sanity */
3227 	if (!bp->link_vars.link_up || !bp->port.pmf) {
3228 		BNX2X_ERR("BUG!\n");
3229 		return;
3230 	}
3231 
3232 	bp->executer_idx = 0;
3233 
3234 	/* MCP */
3235 	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3236 		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3237 		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3238 #ifdef __BIG_ENDIAN
3239 		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
3240 #else
3241 		  DMAE_CMD_ENDIANITY_DW_SWAP |
3242 #endif
3243 		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3244 		  (vn << DMAE_CMD_E1HVN_SHIFT));
3245 
3246 	if (bp->port.port_stx) {
3247 
3248 		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3249 		dmae->opcode = opcode;
3250 		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3251 		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3252 		dmae->dst_addr_lo = bp->port.port_stx >> 2;
3253 		dmae->dst_addr_hi = 0;
3254 		dmae->len = sizeof(struct host_port_stats) >> 2;
3255 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3256 		dmae->comp_addr_hi = 0;
3257 		dmae->comp_val = 1;
3258 	}
3259 
3260 	if (bp->func_stx) {
3261 
3262 		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3263 		dmae->opcode = opcode;
3264 		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3265 		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3266 		dmae->dst_addr_lo = bp->func_stx >> 2;
3267 		dmae->dst_addr_hi = 0;
3268 		dmae->len = sizeof(struct host_func_stats) >> 2;
3269 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3270 		dmae->comp_addr_hi = 0;
3271 		dmae->comp_val = 1;
3272 	}
3273 
3274 	/* MAC */
3275 	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3276 		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3277 		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3278 #ifdef __BIG_ENDIAN
3279 		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
3280 #else
3281 		  DMAE_CMD_ENDIANITY_DW_SWAP |
3282 #endif
3283 		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3284 		  (vn << DMAE_CMD_E1HVN_SHIFT));
3285 
3286 	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3287 
3288 		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3289 				   NIG_REG_INGRESS_BMAC0_MEM);
3290 
3291 		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3292 		   BIGMAC_REGISTER_TX_STAT_GTBYT */
3293 		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3294 		dmae->opcode = opcode;
3295 		dmae->src_addr_lo = (mac_addr +
3296 				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3297 		dmae->src_addr_hi = 0;
3298 		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3299 		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3300 		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3301 			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3302 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3303 		dmae->comp_addr_hi = 0;
3304 		dmae->comp_val = 1;
3305 
3306 		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
3307 		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
3308 		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3309 		dmae->opcode = opcode;
3310 		dmae->src_addr_lo = (mac_addr +
3311 				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3312 		dmae->src_addr_hi = 0;
3313 		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3314 				offsetof(struct bmac_stats, rx_stat_gr64_lo));
3315 		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3316 				offsetof(struct bmac_stats, rx_stat_gr64_lo));
3317 		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3318 			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3319 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3320 		dmae->comp_addr_hi = 0;
3321 		dmae->comp_val = 1;
3322 
3323 	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3324 
3325 		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3326 
3327 		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3328 		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3329 		dmae->opcode = opcode;
3330 		dmae->src_addr_lo = (mac_addr +
3331 				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3332 		dmae->src_addr_hi = 0;
3333 		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3334 		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3335 		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3336 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337 		dmae->comp_addr_hi = 0;
3338 		dmae->comp_val = 1;
3339 
3340 		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
3341 		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3342 		dmae->opcode = opcode;
3343 		dmae->src_addr_lo = (mac_addr +
3344 				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3345 		dmae->src_addr_hi = 0;
3346 		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3347 		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3348 		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3349 		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3350 		dmae->len = 1;
3351 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3352 		dmae->comp_addr_hi = 0;
3353 		dmae->comp_val = 1;
3354 
3355 		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3356 		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3357 		dmae->opcode = opcode;
3358 		dmae->src_addr_lo = (mac_addr +
3359 				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3360 		dmae->src_addr_hi = 0;
3361 		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3362 			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3363 		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3364 			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3365 		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3366 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3367 		dmae->comp_addr_hi = 0;
3368 		dmae->comp_val = 1;
3369 	}
3370 
3371 	/* NIG */
3372 	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3373 	dmae->opcode = opcode;
3374 	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3375 				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
3376 	dmae->src_addr_hi = 0;
3377 	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3378 	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3379 	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3380 	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3381 	dmae->comp_addr_hi = 0;
3382 	dmae->comp_val = 1;
3383 
3384 	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3385 	dmae->opcode = opcode;
3386 	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3387 				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3388 	dmae->src_addr_hi = 0;
3389 	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3390 			offsetof(struct nig_stats, egress_mac_pkt0_lo));
3391 	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3392 			offsetof(struct nig_stats, egress_mac_pkt0_lo));
3393 	dmae->len = (2*sizeof(u32)) >> 2;
3394 	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3395 	dmae->comp_addr_hi = 0;
3396 	dmae->comp_val = 1;
3397 
3398 	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3399 	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3400 			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3401 			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3402 #ifdef __BIG_ENDIAN
3403 			DMAE_CMD_ENDIANITY_B_DW_SWAP |
3404 #else
3405 			DMAE_CMD_ENDIANITY_DW_SWAP |
3406 #endif
3407 			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3408 			(vn << DMAE_CMD_E1HVN_SHIFT));
3409 	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3410 				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3411 	dmae->src_addr_hi = 0;
3412 	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3413 			offsetof(struct nig_stats, egress_mac_pkt1_lo));
3414 	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3415 			offsetof(struct nig_stats, egress_mac_pkt1_lo));
3416 	dmae->len = (2*sizeof(u32)) >> 2;
3417 	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3418 	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3419 	dmae->comp_val = DMAE_COMP_VAL;
3420 
3421 	*stats_comp = 0;
3422 }
3423 
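/* Build a single DMAE command that copies the host_func_stats block from
 * host memory to the per-function statistics area (func_stx) in device
 * memory; completion is signalled through the stats_comp word.
 */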
3424 static void bnx2x_func_stats_init(struct bnx2x *bp)
3425 {
3426 	struct dmae_command *dmae = &bp->stats_dmae;
3427 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3428 
3429 	/* sanity */
3430 	if (!bp->func_stx) {
3431 		BNX2X_ERR("BUG!\n");
3432 		return;
3433 	}
3434 
3435 	bp->executer_idx = 0;
3436 	memset(dmae, 0, sizeof(struct dmae_command));
3437 
3438 	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3439 			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3440 			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3441 #ifdef __BIG_ENDIAN
3442 			DMAE_CMD_ENDIANITY_B_DW_SWAP |
3443 #else
3444 			DMAE_CMD_ENDIANITY_DW_SWAP |
3445 #endif
3446 			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3447 			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3448 	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3449 	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3450 	dmae->dst_addr_lo = bp->func_stx >> 2;
3451 	dmae->dst_addr_hi = 0;
3452 	dmae->len = sizeof(struct host_func_stats) >> 2;
3453 	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3454 	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3455 	dmae->comp_val = DMAE_COMP_VAL;
3456 
3457 	*stats_comp = 0;
3458 }
3459 
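/* (Re)arm statistics collection: rebuild the DMAE command chain for the
 * port (PMF only) or for the function, then post the first hardware and
 * storm statistics requests.
 */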
3460 static void bnx2x_stats_start(struct bnx2x *bp)
3461 {
3462 	if (bp->port.pmf)
3463 		bnx2x_port_stats_init(bp);
3464 
3465 	else if (bp->func_stx)
3466 		bnx2x_func_stats_init(bp);
3467 
3468 	bnx2x_hw_stats_post(bp);
3469 	bnx2x_storm_stats_post(bp);
3470 }
3471 
3472 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3473 {
3474 	bnx2x_stats_comp(bp);
3475 	bnx2x_stats_pmf_update(bp);
3476 	bnx2x_stats_start(bp);
3477 }
3478 
3479 static void bnx2x_stats_restart(struct bnx2x *bp)
3480 {
3481 	bnx2x_stats_comp(bp);
3482 	bnx2x_stats_start(bp);
3483 }
3484 
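/* Accumulate the BigMAC counters fetched by DMAE into mac_stats.bmac_stats
 * into the 64-bit host port statistics.
 */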
3485 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3486 {
3487 	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3488 	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3489 	struct regpair diff;
3490 
3491 	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3492 	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3493 	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3494 	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3495 	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3496 	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3497 	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3498 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3499 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3500 	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3501 	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3502 	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3503 	UPDATE_STAT64(tx_stat_gt127,
3504 				tx_stat_etherstatspkts65octetsto127octets);
3505 	UPDATE_STAT64(tx_stat_gt255,
3506 				tx_stat_etherstatspkts128octetsto255octets);
3507 	UPDATE_STAT64(tx_stat_gt511,
3508 				tx_stat_etherstatspkts256octetsto511octets);
3509 	UPDATE_STAT64(tx_stat_gt1023,
3510 				tx_stat_etherstatspkts512octetsto1023octets);
3511 	UPDATE_STAT64(tx_stat_gt1518,
3512 				tx_stat_etherstatspkts1024octetsto1522octets);
3513 	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3514 	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3515 	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3516 	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3517 	UPDATE_STAT64(tx_stat_gterr,
3518 				tx_stat_dot3statsinternalmactransmiterrors);
3519 	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3520 }
3521 
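/* Same for the EMAC: extend the 32-bit EMAC counters fetched by DMAE into
 * the 64-bit host port statistics.
 */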
3522 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3523 {
3524 	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3525 	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3526 
3527 	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3528 	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3529 	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3530 	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3531 	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3532 	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3533 	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3534 	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3535 	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3536 	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3537 	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3538 	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3539 	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3540 	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3541 	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3542 	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3543 	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3544 	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3545 	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3546 	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3547 	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3548 	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3549 	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3550 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3551 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3552 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3553 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3554 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3555 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3556 	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3557 	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3558 }
3559 
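/* Refresh port statistics from the active MAC (BMAC or EMAC) and from the
 * NIG block, then snapshot the MAC portion into eth_stats.  Returns 0 on
 * success or -1 when no MAC is active.
 */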
3560 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3561 {
3562 	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3563 	struct nig_stats *old = &(bp->port.old_nig_stats);
3564 	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3565 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
3566 	struct regpair diff;
3567 
3568 	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3569 		bnx2x_bmac_stats_update(bp);
3570 
3571 	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3572 		bnx2x_emac_stats_update(bp);
3573 
3574 	else { /* unreached */
3575 		BNX2X_ERR("stats updated by dmae but no MAC active\n");
3576 		return -1;
3577 	}
3578 
3579 	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3580 		      new->brb_discard - old->brb_discard);
3581 	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3582 		      new->brb_truncate - old->brb_truncate);
3583 
3584 	UPDATE_STAT64_NIG(egress_mac_pkt0,
3585 					etherstatspkts1024octetsto1522octets);
3586 	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3587 
3588 	memcpy(old, new, sizeof(struct nig_stats));
3589 
3590 	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3591 	       sizeof(struct mac_stx));
3592 	estats->brb_drop_hi = pstats->brb_drop_hi;
3593 	estats->brb_drop_lo = pstats->brb_drop_lo;
3594 
3595 	pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3596 
3597 	return 0;
3598 }
3599 
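/* Merge the per-client statistics reported by the TSTORM and XSTORM
 * firmware into the function and ethernet statistics.  Returns non-zero
 * if the firmware has not yet updated its counters for this cycle.
 */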
3600 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3601 {
3602 	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3603 	int cl_id = BP_CL_ID(bp);
3604 	struct tstorm_per_port_stats *tport =
3605 				&stats->tstorm_common.port_statistics;
3606 	struct tstorm_per_client_stats *tclient =
3607 			&stats->tstorm_common.client_statistics[cl_id];
3608 	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3609 	struct xstorm_per_client_stats *xclient =
3610 			&stats->xstorm_common.client_statistics[cl_id];
3611 	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3612 	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3613 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
3614 	u32 diff;
3615 
3616 	/* are storm stats valid? */
3617 	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3618 							bp->stats_counter) {
3619 		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3620 		   "  tstorm counter (%d) != stats_counter (%d)\n",
3621 		   tclient->stats_counter, bp->stats_counter);
3622 		return -1;
3623 	}
3624 	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3625 							bp->stats_counter) {
3626 		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3627 		   "  xstorm counter (%d) != stats_counter (%d)\n",
3628 		   xclient->stats_counter, bp->stats_counter);
3629 		return -2;
3630 	}
3631 
3632 	fstats->total_bytes_received_hi =
3633 	fstats->valid_bytes_received_hi =
3634 				le32_to_cpu(tclient->total_rcv_bytes.hi);
3635 	fstats->total_bytes_received_lo =
3636 	fstats->valid_bytes_received_lo =
3637 				le32_to_cpu(tclient->total_rcv_bytes.lo);
3638 
3639 	estats->error_bytes_received_hi =
3640 				le32_to_cpu(tclient->rcv_error_bytes.hi);
3641 	estats->error_bytes_received_lo =
3642 				le32_to_cpu(tclient->rcv_error_bytes.lo);
3643 	ADD_64(estats->error_bytes_received_hi,
3644 	       estats->rx_stat_ifhcinbadoctets_hi,
3645 	       estats->error_bytes_received_lo,
3646 	       estats->rx_stat_ifhcinbadoctets_lo);
3647 
3648 	ADD_64(fstats->total_bytes_received_hi,
3649 	       estats->error_bytes_received_hi,
3650 	       fstats->total_bytes_received_lo,
3651 	       estats->error_bytes_received_lo);
3652 
3653 	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3654 	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3655 				total_multicast_packets_received);
3656 	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3657 				total_broadcast_packets_received);
3658 
3659 	fstats->total_bytes_transmitted_hi =
3660 				le32_to_cpu(xclient->total_sent_bytes.hi);
3661 	fstats->total_bytes_transmitted_lo =
3662 				le32_to_cpu(xclient->total_sent_bytes.lo);
3663 
3664 	UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3665 				total_unicast_packets_transmitted);
3666 	UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3667 				total_multicast_packets_transmitted);
3668 	UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3669 				total_broadcast_packets_transmitted);
3670 
3671 	memcpy(estats, &(fstats->total_bytes_received_hi),
3672 	       sizeof(struct host_func_stats) - 2*sizeof(u32));
3673 
3674 	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3675 	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3676 	estats->brb_truncate_discard =
3677 				le32_to_cpu(tport->brb_truncate_discard);
3678 	estats->mac_discard = le32_to_cpu(tport->mac_discard);
3679 
3680 	old_tclient->rcv_unicast_bytes.hi =
3681 				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3682 	old_tclient->rcv_unicast_bytes.lo =
3683 				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3684 	old_tclient->rcv_broadcast_bytes.hi =
3685 				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3686 	old_tclient->rcv_broadcast_bytes.lo =
3687 				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3688 	old_tclient->rcv_multicast_bytes.hi =
3689 				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3690 	old_tclient->rcv_multicast_bytes.lo =
3691 				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3692 	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3693 
3694 	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3695 	old_tclient->packets_too_big_discard =
3696 				le32_to_cpu(tclient->packets_too_big_discard);
3697 	estats->no_buff_discard =
3698 	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3699 	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3700 
3701 	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3702 	old_xclient->unicast_bytes_sent.hi =
3703 				le32_to_cpu(xclient->unicast_bytes_sent.hi);
3704 	old_xclient->unicast_bytes_sent.lo =
3705 				le32_to_cpu(xclient->unicast_bytes_sent.lo);
3706 	old_xclient->multicast_bytes_sent.hi =
3707 				le32_to_cpu(xclient->multicast_bytes_sent.hi);
3708 	old_xclient->multicast_bytes_sent.lo =
3709 				le32_to_cpu(xclient->multicast_bytes_sent.lo);
3710 	old_xclient->broadcast_bytes_sent.hi =
3711 				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3712 	old_xclient->broadcast_bytes_sent.lo =
3713 				le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3714 
3715 	fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3716 
3717 	return 0;
3718 }
3719 
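/* Translate the accumulated driver statistics into the net_device_stats
 * fields reported to the network stack.
 */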
3720 static void bnx2x_net_stats_update(struct bnx2x *bp)
3721 {
3722 	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3723 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
3724 	struct net_device_stats *nstats = &bp->dev->stats;
3725 
3726 	nstats->rx_packets =
3727 		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3728 		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3729 		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3730 
3731 	nstats->tx_packets =
3732 		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3733 		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3734 		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3735 
3736 	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3737 
3738 	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3739 
3740 	nstats->rx_dropped = old_tclient->checksum_discard +
3741 			     estats->mac_discard;
3742 	nstats->tx_dropped = 0;
3743 
3744 	nstats->multicast =
3745 		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3746 
3747 	nstats->collisions =
3748 			estats->tx_stat_dot3statssinglecollisionframes_lo +
3749 			estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3750 			estats->tx_stat_dot3statslatecollisions_lo +
3751 			estats->tx_stat_dot3statsexcessivecollisions_lo;
3752 
3753 	estats->jabber_packets_received =
3754 				old_tclient->packets_too_big_discard +
3755 				estats->rx_stat_dot3statsframestoolong_lo;
3756 
3757 	nstats->rx_length_errors =
3758 				estats->rx_stat_etherstatsundersizepkts_lo +
3759 				estats->jabber_packets_received;
3760 	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3761 	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3762 	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3763 	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3764 	nstats->rx_missed_errors = estats->xxoverflow_discard;
3765 
3766 	nstats->rx_errors = nstats->rx_length_errors +
3767 			    nstats->rx_over_errors +
3768 			    nstats->rx_crc_errors +
3769 			    nstats->rx_frame_errors +
3770 			    nstats->rx_fifo_errors +
3771 			    nstats->rx_missed_errors;
3772 
3773 	nstats->tx_aborted_errors =
3774 			estats->tx_stat_dot3statslatecollisions_lo +
3775 			estats->tx_stat_dot3statsexcessivecollisions_lo;
3776 	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3777 	nstats->tx_fifo_errors = 0;
3778 	nstats->tx_heartbeat_errors = 0;
3779 	nstats->tx_window_errors = 0;
3780 
3781 	nstats->tx_errors = nstats->tx_aborted_errors +
3782 			    nstats->tx_carrier_errors;
3783 }
3784 
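/* UPDATE event handler: once the previous DMAE run has completed, refresh
 * the hardware and storm statistics, update net_device_stats and post the
 * next requests.  Repeatedly stale storm counters trigger bnx2x_panic().
 */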
3785 static void bnx2x_stats_update(struct bnx2x *bp)
3786 {
3787 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3788 	int update = 0;
3789 
3790 	if (*stats_comp != DMAE_COMP_VAL)
3791 		return;
3792 
3793 	if (bp->port.pmf)
3794 		update = (bnx2x_hw_stats_update(bp) == 0);
3795 
3796 	update |= (bnx2x_storm_stats_update(bp) == 0);
3797 
3798 	if (update)
3799 		bnx2x_net_stats_update(bp);
3800 
3801 	else {
3802 		if (bp->stats_pending) {
3803 			bp->stats_pending++;
3804 			if (bp->stats_pending == 3) {
3805 				BNX2X_ERR("stats not updated for 3 times\n");
3806 				bnx2x_panic();
3807 				return;
3808 			}
3809 		}
3810 	}
3811 
3812 	if (bp->msglevel & NETIF_MSG_TIMER) {
3813 		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3814 		struct bnx2x_eth_stats *estats = &bp->eth_stats;
3815 		struct net_device_stats *nstats = &bp->dev->stats;
3816 		int i;
3817 
3818 		printk(KERN_DEBUG "%s:\n", bp->dev->name);
3819 		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3820 				  "  tx pkt (%lx)\n",
3821 		       bnx2x_tx_avail(bp->fp),
3822 		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3823 		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3824 				  "  rx pkt (%lx)\n",
3825 		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3826 			     bp->fp->rx_comp_cons),
3827 		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3828 		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3829 		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3830 		       estats->driver_xoff, estats->brb_drop_lo);
3831 		printk(KERN_DEBUG "tstats: checksum_discard %u  "
3832 			"packets_too_big_discard %u  no_buff_discard %u  "
3833 			"mac_discard %u  mac_filter_discard %u  "
3834 			"xxoverflow_discard %u  brb_truncate_discard %u  "
3835 			"ttl0_discard %u\n",
3836 		       old_tclient->checksum_discard,
3837 		       old_tclient->packets_too_big_discard,
3838 		       old_tclient->no_buff_discard, estats->mac_discard,
3839 		       estats->mac_filter_discard, estats->xxoverflow_discard,
3840 		       estats->brb_truncate_discard,
3841 		       old_tclient->ttl0_discard);
3842 
3843 		for_each_queue(bp, i) {
3844 			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3845 			       bnx2x_fp(bp, i, tx_pkt),
3846 			       bnx2x_fp(bp, i, rx_pkt),
3847 			       bnx2x_fp(bp, i, rx_calls));
3848 		}
3849 	}
3850 
3851 	bnx2x_hw_stats_post(bp);
3852 	bnx2x_storm_stats_post(bp);
3853 }
3854 
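/* Flush the final port and function statistics blocks to device memory
 * before statistics collection is stopped.
 */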
3855 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3856 {
3857 	struct dmae_command *dmae;
3858 	u32 opcode;
3859 	int loader_idx = PMF_DMAE_C(bp);
3860 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3861 
3862 	bp->executer_idx = 0;
3863 
3864 	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3865 		  DMAE_CMD_C_ENABLE |
3866 		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3867 #ifdef __BIG_ENDIAN
3868 		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
3869 #else
3870 		  DMAE_CMD_ENDIANITY_DW_SWAP |
3871 #endif
3872 		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3873 		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3874 
3875 	if (bp->port.port_stx) {
3876 
3877 		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3878 		if (bp->func_stx)
3879 			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3880 		else
3881 			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3882 		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3883 		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3884 		dmae->dst_addr_lo = bp->port.port_stx >> 2;
3885 		dmae->dst_addr_hi = 0;
3886 		dmae->len = sizeof(struct host_port_stats) >> 2;
3887 		if (bp->func_stx) {
3888 			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3889 			dmae->comp_addr_hi = 0;
3890 			dmae->comp_val = 1;
3891 		} else {
3892 			dmae->comp_addr_lo =
3893 				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3894 			dmae->comp_addr_hi =
3895 				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3896 			dmae->comp_val = DMAE_COMP_VAL;
3897 
3898 			*stats_comp = 0;
3899 		}
3900 	}
3901 
3902 	if (bp->func_stx) {
3903 
3904 		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3905 		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3906 		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3907 		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3908 		dmae->dst_addr_lo = bp->func_stx >> 2;
3909 		dmae->dst_addr_hi = 0;
3910 		dmae->len = sizeof(struct host_func_stats) >> 2;
3911 		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3912 		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3913 		dmae->comp_val = DMAE_COMP_VAL;
3914 
3915 		*stats_comp = 0;
3916 	}
3917 }
3918 
3919 static void bnx2x_stats_stop(struct bnx2x *bp)
3920 {
3921 	int update = 0;
3922 
3923 	bnx2x_stats_comp(bp);
3924 
3925 	if (bp->port.pmf)
3926 		update = (bnx2x_hw_stats_update(bp) == 0);
3927 
3928 	update |= (bnx2x_storm_stats_update(bp) == 0);
3929 
3930 	if (update) {
3931 		bnx2x_net_stats_update(bp);
3932 
3933 		if (bp->port.pmf)
3934 			bnx2x_port_stats_stop(bp);
3935 
3936 		bnx2x_hw_stats_post(bp);
3937 		bnx2x_stats_comp(bp);
3938 	}
3939 }
3940 
3941 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3942 {
3943 }
3944 
3945 static const struct {
3946 	void (*action)(struct bnx2x *bp);
3947 	enum bnx2x_stats_state next_state;
3948 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3949 /* state	event	*/
3950 {
3951 /* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3952 /*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3953 /*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3954 /*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3955 },
3956 {
3957 /* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3958 /*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3959 /*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3960 /*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3961 }
3962 };
3963 
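/* Drive the statistics state machine: run the action registered for the
 * current state/event pair and move to the next state.
 */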
3964 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3965 {
3966 	enum bnx2x_stats_state state = bp->stats_state;
3967 
3968 	bnx2x_stats_stm[state][event].action(bp);
3969 	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3970 
3971 	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3972 		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3973 		   state, event, bp->stats_state);
3974 }
3975 
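/* Periodic driver timer: in poll mode service the first queue directly,
 * keep the driver/MCP heartbeat pulse in sync and trigger a statistics
 * update while the device is up.
 */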
3976 static void bnx2x_timer(unsigned long data)
3977 {
3978 	struct bnx2x *bp = (struct bnx2x *) data;
3979 
3980 	if (!netif_running(bp->dev))
3981 		return;
3982 
3983 	if (atomic_read(&bp->intr_sem) != 0)
3984 		goto timer_restart;
3985 
3986 	if (poll) {
3987 		struct bnx2x_fastpath *fp = &bp->fp[0];
3988 		int rc;
3989 
3990 		bnx2x_tx_int(fp, 1000);
3991 		rc = bnx2x_rx_int(fp, 1000);
3992 	}
3993 
3994 	if (!BP_NOMCP(bp)) {
3995 		int func = BP_FUNC(bp);
3996 		u32 drv_pulse;
3997 		u32 mcp_pulse;
3998 
3999 		++bp->fw_drv_pulse_wr_seq;
4000 		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4001 		/* TBD - add SYSTEM_TIME */
4002 		drv_pulse = bp->fw_drv_pulse_wr_seq;
4003 		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4004 
4005 		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4006 			     MCP_PULSE_SEQ_MASK);
4007 		/* The delta between driver pulse and mcp response
4008 		 * should be 1 (before mcp response) or 0 (after mcp response)
4009 		 */
4010 		if ((drv_pulse != mcp_pulse) &&
4011 		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4012 			/* someone lost a heartbeat... */
4013 			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4014 				  drv_pulse, mcp_pulse);
4015 		}
4016 	}
4017 
4018 	if ((bp->state == BNX2X_STATE_OPEN) ||
4019 	    (bp->state == BNX2X_STATE_DISABLED))
4020 		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4021 
4022 timer_restart:
4023 	mod_timer(&bp->timer, jiffies + bp->current_interval);
4024 }
4025 
4026 /* end of Statistics */
4027 
4028 /* nic init */
4029 
4030 /*
4031  * nic init service functions
4032  */
4033 
4034 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4035 {
4036 	int port = BP_PORT(bp);
4037 
4038 	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4039 			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4040 			sizeof(struct ustorm_status_block)/4);
4041 	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4042 			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4043 			sizeof(struct cstorm_status_block)/4);
4044 }
4045 
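/* Point the USTORM and CSTORM at this fastpath status block and disable
 * host coalescing on all of its indices until coalescing is configured.
 */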
4046 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4047 			  dma_addr_t mapping, int sb_id)
4048 {
4049 	int port = BP_PORT(bp);
4050 	int func = BP_FUNC(bp);
4051 	int index;
4052 	u64 section;
4053 
4054 	/* USTORM */
4055 	section = ((u64)mapping) + offsetof(struct host_status_block,
4056 					    u_status_block);
4057 	sb->u_status_block.status_block_id = sb_id;
4058 
4059 	REG_WR(bp, BAR_USTRORM_INTMEM +
4060 	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4061 	REG_WR(bp, BAR_USTRORM_INTMEM +
4062 	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4063 	       U64_HI(section));
4064 	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4065 		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4066 
4067 	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4068 		REG_WR16(bp, BAR_USTRORM_INTMEM +
4069 			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4070 
4071 	/* CSTORM */
4072 	section = ((u64)mapping) + offsetof(struct host_status_block,
4073 					    c_status_block);
4074 	sb->c_status_block.status_block_id = sb_id;
4075 
4076 	REG_WR(bp, BAR_CSTRORM_INTMEM +
4077 	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4078 	REG_WR(bp, BAR_CSTRORM_INTMEM +
4079 	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4080 	       U64_HI(section));
4081 	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4082 		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4083 
4084 	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4085 		REG_WR16(bp, BAR_CSTRORM_INTMEM +
4086 			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4087 
4088 	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4089 }
4090 
4091 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4092 {
4093 	int func = BP_FUNC(bp);
4094 
4095 	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4096 			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4097 			sizeof(struct tstorm_def_status_block)/4);
4098 	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4099 			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4100 			sizeof(struct ustorm_def_status_block)/4);
4101 	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4102 			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4103 			sizeof(struct cstorm_def_status_block)/4);
4104 	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4105 			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4106 			sizeof(struct xstorm_def_status_block)/4);
4107 }
4108 
4109 static void bnx2x_init_def_sb(struct bnx2x *bp,
4110 			      struct host_def_status_block *def_sb,
4111 			      dma_addr_t mapping, int sb_id)
4112 {
4113 	int port = BP_PORT(bp);
4114 	int func = BP_FUNC(bp);
4115 	int index, val, reg_offset;
4116 	u64 section;
4117 
4118 	/* ATTN */
4119 	section = ((u64)mapping) + offsetof(struct host_def_status_block,
4120 					    atten_status_block);
4121 	def_sb->atten_status_block.status_block_id = sb_id;
4122 
4123 	bp->attn_state = 0;
4124 
4125 	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4126 			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4127 
4128 	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4129 		bp->attn_group[index].sig[0] = REG_RD(bp,
4130 						     reg_offset + 0x10*index);
4131 		bp->attn_group[index].sig[1] = REG_RD(bp,
4132 					       reg_offset + 0x4 + 0x10*index);
4133 		bp->attn_group[index].sig[2] = REG_RD(bp,
4134 					       reg_offset + 0x8 + 0x10*index);
4135 		bp->attn_group[index].sig[3] = REG_RD(bp,
4136 					       reg_offset + 0xc + 0x10*index);
4137 	}
4138 
4139 	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4140 			     HC_REG_ATTN_MSG0_ADDR_L);
4141 
4142 	REG_WR(bp, reg_offset, U64_LO(section));
4143 	REG_WR(bp, reg_offset + 4, U64_HI(section));
4144 
4145 	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4146 
4147 	val = REG_RD(bp, reg_offset);
4148 	val |= sb_id;
4149 	REG_WR(bp, reg_offset, val);
4150 
4151 	/* USTORM */
4152 	section = ((u64)mapping) + offsetof(struct host_def_status_block,
4153 					    u_def_status_block);
4154 	def_sb->u_def_status_block.status_block_id = sb_id;
4155 
4156 	REG_WR(bp, BAR_USTRORM_INTMEM +
4157 	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4158 	REG_WR(bp, BAR_USTRORM_INTMEM +
4159 	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4160 	       U64_HI(section));
4161 	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4162 		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4163 
4164 	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4165 		REG_WR16(bp, BAR_USTRORM_INTMEM +
4166 			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4167 
4168 	/* CSTORM */
4169 	section = ((u64)mapping) + offsetof(struct host_def_status_block,
4170 					    c_def_status_block);
4171 	def_sb->c_def_status_block.status_block_id = sb_id;
4172 
4173 	REG_WR(bp, BAR_CSTRORM_INTMEM +
4174 	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4175 	REG_WR(bp, BAR_CSTRORM_INTMEM +
4176 	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4177 	       U64_HI(section));
4178 	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4179 		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4180 
4181 	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4182 		REG_WR16(bp, BAR_CSTRORM_INTMEM +
4183 			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4184 
4185 	/* TSTORM */
4186 	section = ((u64)mapping) + offsetof(struct host_def_status_block,
4187 					    t_def_status_block);
4188 	def_sb->t_def_status_block.status_block_id = sb_id;
4189 
4190 	REG_WR(bp, BAR_TSTRORM_INTMEM +
4191 	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4192 	REG_WR(bp, BAR_TSTRORM_INTMEM +
4193 	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4194 	       U64_HI(section));
4195 	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4196 		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4197 
4198 	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4199 		REG_WR16(bp, BAR_TSTRORM_INTMEM +
4200 			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4201 
4202 	/* XSTORM */
4203 	section = ((u64)mapping) + offsetof(struct host_def_status_block,
4204 					    x_def_status_block);
4205 	def_sb->x_def_status_block.status_block_id = sb_id;
4206 
4207 	REG_WR(bp, BAR_XSTRORM_INTMEM +
4208 	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4209 	REG_WR(bp, BAR_XSTRORM_INTMEM +
4210 	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4211 	       U64_HI(section));
4212 	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4213 		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4214 
4215 	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4216 		REG_WR16(bp, BAR_XSTRORM_INTMEM +
4217 			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4218 
4219 	bp->stats_pending = 0;
4220 	bp->set_mac_pending = 0;
4221 
4222 	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4223 }
4224 
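/* Program the status block timeouts for the Rx and Tx completion indices;
 * a zero tick value disables host coalescing for that index.
 */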
4225 static void bnx2x_update_coalesce(struct bnx2x *bp)
4226 {
4227 	int port = BP_PORT(bp);
4228 	int i;
4229 
4230 	for_each_queue(bp, i) {
4231 		int sb_id = bp->fp[i].sb_id;
4232 
4233 		/* HC_INDEX_U_ETH_RX_CQ_CONS */
4234 		REG_WR8(bp, BAR_USTRORM_INTMEM +
4235 			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4236 						    U_SB_ETH_RX_CQ_INDEX),
4237 			bp->rx_ticks/12);
4238 		REG_WR16(bp, BAR_USTRORM_INTMEM +
4239 			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4240 						     U_SB_ETH_RX_CQ_INDEX),
4241 			 bp->rx_ticks ? 0 : 1);
4242 		REG_WR16(bp, BAR_USTRORM_INTMEM +
4243 			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4244 						     U_SB_ETH_RX_BD_INDEX),
4245 			 bp->rx_ticks ? 0 : 1);
4246 
4247 		/* HC_INDEX_C_ETH_TX_CQ_CONS */
4248 		REG_WR8(bp, BAR_CSTRORM_INTMEM +
4249 			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4250 						    C_SB_ETH_TX_CQ_INDEX),
4251 			bp->tx_ticks/12);
4252 		REG_WR16(bp, BAR_CSTRORM_INTMEM +
4253 			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4254 						     C_SB_ETH_TX_CQ_INDEX),
4255 			 bp->tx_ticks ? 0 : 1);
4256 	}
4257 }
4258 
4259 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4260 				       struct bnx2x_fastpath *fp, int last)
4261 {
4262 	int i;
4263 
4264 	for (i = 0; i < last; i++) {
4265 		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4266 		struct sk_buff *skb = rx_buf->skb;
4267 
4268 		if (skb == NULL) {
4269 			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4270 			continue;
4271 		}
4272 
4273 		if (fp->tpa_state[i] == BNX2X_TPA_START)
4274 			pci_unmap_single(bp->pdev,
4275 					 pci_unmap_addr(rx_buf, mapping),
4276 					 bp->rx_buf_size,
4277 					 PCI_DMA_FROMDEVICE);
4278 
4279 		dev_kfree_skb(skb);
4280 		rx_buf->skb = NULL;
4281 	}
4282 }
4283 
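/* Set up the Rx rings: size the Rx buffers, pre-allocate the TPA pool
 * (when TPA is enabled), chain the "next page" entries of the SGE, BD and
 * CQE rings, fill the rings and publish the initial producers.
 */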
4284 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4285 {
4286 	int func = BP_FUNC(bp);
4287 	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4288 					      ETH_MAX_AGGREGATION_QUEUES_E1H;
4289 	u16 ring_prod, cqe_ring_prod;
4290 	int i, j;
4291 
4292 	bp->rx_buf_size = bp->dev->mtu;
4293 	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4294 		BCM_RX_ETH_PAYLOAD_ALIGN;
4295 
4296 	if (bp->flags & TPA_ENABLE_FLAG) {
4297 		DP(NETIF_MSG_IFUP,
4298 		   "rx_buf_size %d  effective_mtu %d\n",
4299 		   bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4300 
4301 		for_each_queue(bp, j) {
4302 			struct bnx2x_fastpath *fp = &bp->fp[j];
4303 
4304 			for (i = 0; i < max_agg_queues; i++) {
4305 				fp->tpa_pool[i].skb =
4306 				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4307 				if (!fp->tpa_pool[i].skb) {
4308 					BNX2X_ERR("Failed to allocate TPA "
4309 						  "skb pool for queue[%d] - "
4310 						  "disabling TPA on this "
4311 						  "queue!\n", j);
4312 					bnx2x_free_tpa_pool(bp, fp, i);
4313 					fp->disable_tpa = 1;
4314 					break;
4315 				}
4316 				pci_unmap_addr_set((struct sw_rx_bd *)
4317 							&bp->fp->tpa_pool[i],
4318 						   mapping, 0);
4319 				fp->tpa_state[i] = BNX2X_TPA_STOP;
4320 			}
4321 		}
4322 	}
4323 
4324 	for_each_queue(bp, j) {
4325 		struct bnx2x_fastpath *fp = &bp->fp[j];
4326 
4327 		fp->rx_bd_cons = 0;
4328 		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4329 		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4330 
4331 		/* "next page" elements initialization */
4332 		/* SGE ring */
4333 		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4334 			struct eth_rx_sge *sge;
4335 
4336 			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4337 			sge->addr_hi =
4338 				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4339 					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4340 			sge->addr_lo =
4341 				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4342 					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4343 		}
4344 
4345 		bnx2x_init_sge_ring_bit_mask(fp);
4346 
4347 		/* RX BD ring */
4348 		for (i = 1; i <= NUM_RX_RINGS; i++) {
4349 			struct eth_rx_bd *rx_bd;
4350 
4351 			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4352 			rx_bd->addr_hi =
4353 				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4354 					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4355 			rx_bd->addr_lo =
4356 				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4357 					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4358 		}
4359 
4360 		/* CQ ring */
4361 		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4362 			struct eth_rx_cqe_next_page *nextpg;
4363 
4364 			nextpg = (struct eth_rx_cqe_next_page *)
4365 				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4366 			nextpg->addr_hi =
4367 				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4368 					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4369 			nextpg->addr_lo =
4370 				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4371 					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4372 		}
4373 
4374 		/* Allocate SGEs and initialize the ring elements */
4375 		for (i = 0, ring_prod = 0;
4376 		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4377 
4378 			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4379 				BNX2X_ERR("was only able to allocate "
4380 					  "%d rx sges\n", i);
4381 				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4382 				/* Cleanup already allocated elements */
4383 				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4384 				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4385 				fp->disable_tpa = 1;
4386 				ring_prod = 0;
4387 				break;
4388 			}
4389 			ring_prod = NEXT_SGE_IDX(ring_prod);
4390 		}
4391 		fp->rx_sge_prod = ring_prod;
4392 
4393 		/* Allocate BDs and initialize BD ring */
4394 		fp->rx_comp_cons = 0;
4395 		cqe_ring_prod = ring_prod = 0;
4396 		for (i = 0; i < bp->rx_ring_size; i++) {
4397 			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4398 				BNX2X_ERR("was only able to allocate "
4399 					  "%d rx skbs\n", i);
4400 				bp->eth_stats.rx_skb_alloc_failed++;
4401 				break;
4402 			}
4403 			ring_prod = NEXT_RX_IDX(ring_prod);
4404 			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4405 			WARN_ON(ring_prod <= i);
4406 		}
4407 
4408 		fp->rx_bd_prod = ring_prod;
4409 		/* must not have more available CQEs than BDs */
4410 		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4411 				       cqe_ring_prod);
4412 		fp->rx_pkt = fp->rx_calls = 0;
4413 
4414 		/* Warning!
4415 		 * this will generate an interrupt (to the TSTORM)
4416 		 * must only be done after chip is initialized
4417 		 */
4418 		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4419 				     fp->rx_sge_prod);
4420 		if (j != 0)
4421 			continue;
4422 
4423 		REG_WR(bp, BAR_USTRORM_INTMEM +
4424 		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4425 		       U64_LO(fp->rx_comp_mapping));
4426 		REG_WR(bp, BAR_USTRORM_INTMEM +
4427 		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4428 		       U64_HI(fp->rx_comp_mapping));
4429 	}
4430 }
4431 
4432 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4433 {
4434 	int i, j;
4435 
4436 	for_each_queue(bp, j) {
4437 		struct bnx2x_fastpath *fp = &bp->fp[j];
4438 
4439 		for (i = 1; i <= NUM_TX_RINGS; i++) {
4440 			struct eth_tx_bd *tx_bd =
4441 				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4442 
4443 			tx_bd->addr_hi =
4444 				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4445 					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4446 			tx_bd->addr_lo =
4447 				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4448 					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4449 		}
4450 
4451 		fp->tx_pkt_prod = 0;
4452 		fp->tx_pkt_cons = 0;
4453 		fp->tx_bd_prod = 0;
4454 		fp->tx_bd_cons = 0;
4455 		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4456 		fp->tx_pkt = 0;
4457 	}
4458 }
4459 
4460 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4461 {
4462 	int func = BP_FUNC(bp);
4463 
4464 	spin_lock_init(&bp->spq_lock);
4465 
4466 	bp->spq_left = MAX_SPQ_PENDING;
4467 	bp->spq_prod_idx = 0;
4468 	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4469 	bp->spq_prod_bd = bp->spq;
4470 	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4471 
4472 	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4473 	       U64_LO(bp->spq_mapping));
4474 	REG_WR(bp,
4475 	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4476 	       U64_HI(bp->spq_mapping));
4477 
4478 	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4479 	       bp->spq_prod_idx);
4480 }
4481 
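/* Fill the per-queue Ethernet context: Tx BD ring and doorbell addresses
 * for the XSTORM, Rx BD/SGE ring parameters for the USTORM and the Tx
 * completion index for the CSTORM.
 */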
4482 static void bnx2x_init_context(struct bnx2x *bp)
4483 {
4484 	int i;
4485 
4486 	for_each_queue(bp, i) {
4487 		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4488 		struct bnx2x_fastpath *fp = &bp->fp[i];
4489 		u8 sb_id = FP_SB_ID(fp);
4490 
4491 		context->xstorm_st_context.tx_bd_page_base_hi =
4492 						U64_HI(fp->tx_desc_mapping);
4493 		context->xstorm_st_context.tx_bd_page_base_lo =
4494 						U64_LO(fp->tx_desc_mapping);
4495 		context->xstorm_st_context.db_data_addr_hi =
4496 						U64_HI(fp->tx_prods_mapping);
4497 		context->xstorm_st_context.db_data_addr_lo =
4498 						U64_LO(fp->tx_prods_mapping);
4499 		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4500 				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4501 
4502 		context->ustorm_st_context.common.sb_index_numbers =
4503 						BNX2X_RX_SB_INDEX_NUM;
4504 		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4505 		context->ustorm_st_context.common.status_block_id = sb_id;
4506 		context->ustorm_st_context.common.flags =
4507 			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4508 		context->ustorm_st_context.common.mc_alignment_size =
4509 			BCM_RX_ETH_PAYLOAD_ALIGN;
4510 		context->ustorm_st_context.common.bd_buff_size =
4511 						bp->rx_buf_size;
4512 		context->ustorm_st_context.common.bd_page_base_hi =
4513 						U64_HI(fp->rx_desc_mapping);
4514 		context->ustorm_st_context.common.bd_page_base_lo =
4515 						U64_LO(fp->rx_desc_mapping);
4516 		if (!fp->disable_tpa) {
4517 			context->ustorm_st_context.common.flags |=
4518 				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4519 				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4520 			context->ustorm_st_context.common.sge_buff_size =
4521 				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4522 					 (u32)0xffff);
4523 			context->ustorm_st_context.common.sge_page_base_hi =
4524 						U64_HI(fp->rx_sge_mapping);
4525 			context->ustorm_st_context.common.sge_page_base_lo =
4526 						U64_LO(fp->rx_sge_mapping);
4527 		}
4528 
4529 		context->cstorm_st_context.sb_index_number =
4530 						C_SB_ETH_TX_CQ_INDEX;
4531 		context->cstorm_st_context.status_block_id = sb_id;
4532 
4533 		context->xstorm_ag_context.cdu_reserved =
4534 			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4535 					       CDU_REGION_NUMBER_XCM_AG,
4536 					       ETH_CONNECTION_TYPE);
4537 		context->ustorm_ag_context.cdu_usage =
4538 			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4539 					       CDU_REGION_NUMBER_UCM_AG,
4540 					       ETH_CONNECTION_TYPE);
4541 	}
4542 }
4543 
4544 static void bnx2x_init_ind_table(struct bnx2x *bp)
4545 {
4546 	int func = BP_FUNC(bp);
4547 	int i;
4548 
4549 	if (!is_multi(bp))
4550 		return;
4551 
4552 	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4553 	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4554 		REG_WR8(bp, BAR_TSTRORM_INTMEM +
4555 			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4556 			BP_CL_ID(bp) + (i % bp->num_queues));
4557 }
4558 
4559 static void bnx2x_set_client_config(struct bnx2x *bp)
4560 {
4561 	struct tstorm_eth_client_config tstorm_client = {0};
4562 	int port = BP_PORT(bp);
4563 	int i;
4564 
4565 	tstorm_client.mtu = bp->dev->mtu;
4566 	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4567 	tstorm_client.config_flags =
4568 				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4569 #ifdef BCM_VLAN
4570 	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4571 		tstorm_client.config_flags |=
4572 				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4573 		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4574 	}
4575 #endif
4576 
4577 	if (bp->flags & TPA_ENABLE_FLAG) {
4578 		tstorm_client.max_sges_for_packet =
4579 			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4580 		tstorm_client.max_sges_for_packet =
4581 			((tstorm_client.max_sges_for_packet +
4582 			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4583 			PAGES_PER_SGE_SHIFT;
4584 
4585 		tstorm_client.config_flags |=
4586 				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4587 	}
4588 
4589 	for_each_queue(bp, i) {
4590 		REG_WR(bp, BAR_TSTRORM_INTMEM +
4591 		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4592 		       ((u32 *)&tstorm_client)[0]);
4593 		REG_WR(bp, BAR_TSTRORM_INTMEM +
4594 		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4595 		       ((u32 *)&tstorm_client)[1]);
4596 	}
4597 
4598 	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4599 	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4600 }
4601 
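/* Translate the requested Rx mode into the TSTORM MAC filter configuration
 * (per-class drop-all/accept-all masks) and write it to the function's
 * filter table.
 */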
4602 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4603 {
4604 	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4605 	int mode = bp->rx_mode;
4606 	int mask = (1 << BP_L_ID(bp));
4607 	int func = BP_FUNC(bp);
4608 	int i;
4609 
4610 	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4611 
4612 	switch (mode) {
4613 	case BNX2X_RX_MODE_NONE: /* no Rx */
4614 		tstorm_mac_filter.ucast_drop_all = mask;
4615 		tstorm_mac_filter.mcast_drop_all = mask;
4616 		tstorm_mac_filter.bcast_drop_all = mask;
4617 		break;
4618 	case BNX2X_RX_MODE_NORMAL:
4619 		tstorm_mac_filter.bcast_accept_all = mask;
4620 		break;
4621 	case BNX2X_RX_MODE_ALLMULTI:
4622 		tstorm_mac_filter.mcast_accept_all = mask;
4623 		tstorm_mac_filter.bcast_accept_all = mask;
4624 		break;
4625 	case BNX2X_RX_MODE_PROMISC:
4626 		tstorm_mac_filter.ucast_accept_all = mask;
4627 		tstorm_mac_filter.mcast_accept_all = mask;
4628 		tstorm_mac_filter.bcast_accept_all = mask;
4629 		break;
4630 	default:
4631 		BNX2X_ERR("BAD rx mode (%d)\n", mode);
4632 		break;
4633 	}
4634 
4635 	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4636 		REG_WR(bp, BAR_TSTRORM_INTMEM +
4637 		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4638 		       ((u32 *)&tstorm_mac_filter)[i]);
4639 
4640 /*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4641 		   ((u32 *)&tstorm_mac_filter)[i]); */
4642 	}
4643 
4644 	if (mode != BNX2X_RX_MODE_NONE)
4645 		bnx2x_set_client_config(bp);
4646 }
4647 
4648 static void bnx2x_init_internal_common(struct bnx2x *bp)
4649 {
4650 	int i;
4651 
4652 	if (bp->flags & TPA_ENABLE_FLAG) {
4653 		struct tstorm_eth_tpa_exist tpa = {0};
4654 
4655 		tpa.tpa_exist = 1;
4656 
4657 		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4658 		       ((u32 *)&tpa)[0]);
4659 		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4660 		       ((u32 *)&tpa)[1]);
4661 	}
4662 
4663 	/* Zero this manually as its initialization is
4664 	   currently missing in the initTool */
4665 	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4666 		REG_WR(bp, BAR_USTRORM_INTMEM +
4667 		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
4668 }
4669 
4670 static void bnx2x_init_internal_port(struct bnx2x *bp)
4671 {
4672 	int port = BP_PORT(bp);
4673 
4674 	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4675 	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4676 	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4677 	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4678 }
4679 
4680 static void bnx2x_init_internal_func(struct bnx2x *bp)
4681 {
4682 	struct tstorm_eth_function_common_config tstorm_config = {0};
4683 	struct stats_indication_flags stats_flags = {0};
4684 	int port = BP_PORT(bp);
4685 	int func = BP_FUNC(bp);
4686 	int i;
4687 	u16 max_agg_size;
4688 
4689 	if (is_multi(bp)) {
4690 		tstorm_config.config_flags = MULTI_FLAGS;
4691 		tstorm_config.rss_result_mask = MULTI_MASK;
4692 	}
4693 
4694 	tstorm_config.leading_client_id = BP_L_ID(bp);
4695 
4696 	REG_WR(bp, BAR_TSTRORM_INTMEM +
4697 	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4698 	       (*(u32 *)&tstorm_config));
4699 
4700 	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4701 	bnx2x_set_storm_rx_mode(bp);
4702 
4703 	/* reset xstorm per client statistics */
4704 	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4705 		REG_WR(bp, BAR_XSTRORM_INTMEM +
4706 		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4707 		       i*4, 0);
4708 	}
4709 	/* reset tstorm per client statistics */
4710 	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4711 		REG_WR(bp, BAR_TSTRORM_INTMEM +
4712 		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4713 		       i*4, 0);
4714 	}
4715 
4716 	/* Init statistics related context */
4717 	stats_flags.collect_eth = 1;
4718 
4719 	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4720 	       ((u32 *)&stats_flags)[0]);
4721 	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4722 	       ((u32 *)&stats_flags)[1]);
4723 
4724 	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4725 	       ((u32 *)&stats_flags)[0]);
4726 	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4727 	       ((u32 *)&stats_flags)[1]);
4728 
4729 	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4730 	       ((u32 *)&stats_flags)[0]);
4731 	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4732 	       ((u32 *)&stats_flags)[1]);
4733 
4734 	REG_WR(bp, BAR_XSTRORM_INTMEM +
4735 	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4736 	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4737 	REG_WR(bp, BAR_XSTRORM_INTMEM +
4738 	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4739 	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4740 
4741 	REG_WR(bp, BAR_TSTRORM_INTMEM +
4742 	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4743 	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4744 	REG_WR(bp, BAR_TSTRORM_INTMEM +
4745 	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4746 	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4747 
4748 	if (CHIP_IS_E1H(bp)) {
4749 		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4750 			IS_E1HMF(bp));
4751 		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4752 			IS_E1HMF(bp));
4753 		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4754 			IS_E1HMF(bp));
4755 		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4756 			IS_E1HMF(bp));
4757 
4758 		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4759 			 bp->e1hov);
4760 	}
4761 
4762 	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4763 	max_agg_size =
4764 		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4765 			  SGE_PAGE_SIZE * PAGES_PER_SGE),
4766 		    (u32)0xffff);
4767 	for_each_queue(bp, i) {
4768 		struct bnx2x_fastpath *fp = &bp->fp[i];
4769 
4770 		REG_WR(bp, BAR_USTRORM_INTMEM +
4771 		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4772 		       U64_LO(fp->rx_comp_mapping));
4773 		REG_WR(bp, BAR_USTRORM_INTMEM +
4774 		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4775 		       U64_HI(fp->rx_comp_mapping));
4776 
4777 		REG_WR16(bp, BAR_USTRORM_INTMEM +
4778 			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4779 			 max_agg_size);
4780 	}
4781 }
4782 
4783 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4784 {
4785 	switch (load_code) {
4786 	case FW_MSG_CODE_DRV_LOAD_COMMON:
4787 		bnx2x_init_internal_common(bp);
4788 		/* no break */
4789 
4790 	case FW_MSG_CODE_DRV_LOAD_PORT:
4791 		bnx2x_init_internal_port(bp);
4792 		/* no break */
4793 
4794 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4795 		bnx2x_init_internal_func(bp);
4796 		break;
4797 
4798 	default:
4799 		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4800 		break;
4801 	}
4802 }
4803 
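/* Top-level NIC init: set up the fastpath and default status blocks,
 * coalescing, Rx/Tx/slowpath rings, contexts and internal memory, then
 * enable interrupts.
 */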
4804 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4805 {
4806 	int i;
4807 
4808 	for_each_queue(bp, i) {
4809 		struct bnx2x_fastpath *fp = &bp->fp[i];
4810 
4811 		fp->bp = bp;
4812 		fp->state = BNX2X_FP_STATE_CLOSED;
4813 		fp->index = i;
4814 		fp->cl_id = BP_L_ID(bp) + i;
4815 		fp->sb_id = fp->cl_id;
4816 		DP(NETIF_MSG_IFUP,
4817 		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4818 		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4819 		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4820 			      FP_SB_ID(fp));
4821 		bnx2x_update_fpsb_idx(fp);
4822 	}
4823 
4824 	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4825 			  DEF_SB_ID);
4826 	bnx2x_update_dsb_idx(bp);
4827 	bnx2x_update_coalesce(bp);
4828 	bnx2x_init_rx_rings(bp);
4829 	bnx2x_init_tx_ring(bp);
4830 	bnx2x_init_sp_ring(bp);
4831 	bnx2x_init_context(bp);
4832 	bnx2x_init_internal(bp, load_code);
4833 	bnx2x_init_ind_table(bp);
4834 	bnx2x_stats_init(bp);
4835 
4836 	/* At this point, we are ready for interrupts */
4837 	atomic_set(&bp->intr_sem, 0);
4838 
4839 	/* flush all before enabling interrupts */
4840 	mb();
4841 	mmiowb();
4842 
4843 	bnx2x_int_enable(bp);
4844 }
4845 
4846 /* end of nic init */
4847 
4848 /*
4849  * gzip service functions
4850  */
4851 
4852 static int bnx2x_gunzip_init(struct bnx2x *bp)
4853 {
4854 	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4855 					      &bp->gunzip_mapping);
4856 	if (bp->gunzip_buf  == NULL)
4857 		goto gunzip_nomem1;
4858 
4859 	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4860 	if (bp->strm  == NULL)
4861 		goto gunzip_nomem2;
4862 
4863 	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4864 				      GFP_KERNEL);
4865 	if (bp->strm->workspace == NULL)
4866 		goto gunzip_nomem3;
4867 
4868 	return 0;
4869 
4870 gunzip_nomem3:
4871 	kfree(bp->strm);
4872 	bp->strm = NULL;
4873 
4874 gunzip_nomem2:
4875 	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4876 			    bp->gunzip_mapping);
4877 	bp->gunzip_buf = NULL;
4878 
4879 gunzip_nomem1:
4880 	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4881 	       " un-compression\n", bp->dev->name);
4882 	return -ENOMEM;
4883 }
4884 
4885 static void bnx2x_gunzip_end(struct bnx2x *bp)
4886 {
4887 	kfree(bp->strm->workspace);
4888 
4889 	kfree(bp->strm);
4890 	bp->strm = NULL;
4891 
4892 	if (bp->gunzip_buf) {
4893 		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4894 				    bp->gunzip_mapping);
4895 		bp->gunzip_buf = NULL;
4896 	}
4897 }
4898 
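/* Inflate a gzip'ed firmware blob into gunzip_buf; the raw deflate stream
 * starts after the 10-byte gzip header (plus the original file name when
 * the FNAME flag is set).
 */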
4899 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4900 {
4901 	int n, rc;
4902 
4903 	/* check gzip header */
4904 	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4905 		return -EINVAL;
4906 
4907 	n = 10;
4908 
4909 #define FNAME				0x8
4910 
4911 	if (zbuf[3] & FNAME)
4912 		while ((zbuf[n++] != 0) && (n < len));
4913 
4914 	bp->strm->next_in = zbuf + n;
4915 	bp->strm->avail_in = len - n;
4916 	bp->strm->next_out = bp->gunzip_buf;
4917 	bp->strm->avail_out = FW_BUF_SIZE;
4918 
4919 	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4920 	if (rc != Z_OK)
4921 		return rc;
4922 
4923 	rc = zlib_inflate(bp->strm, Z_FINISH);
4924 	if ((rc != Z_OK) && (rc != Z_STREAM_END))
4925 		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4926 		       bp->dev->name, bp->strm->msg);
4927 
4928 	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4929 	if (bp->gunzip_outlen & 0x3)
4930 		printk(KERN_ERR PFX "%s: Firmware decompression error:"
4931 				    " gunzip_outlen (%d) not aligned\n",
4932 		       bp->dev->name, bp->gunzip_outlen);
4933 	bp->gunzip_outlen >>= 2;
4934 
4935 	zlib_inflateEnd(bp->strm);
4936 
4937 	if (rc == Z_STREAM_END)
4938 		return 0;
4939 
4940 	return rc;
4941 }
4942 
4943 /* nic load/unload */
4944 
4945 /*
4946  * General service functions
4947  */
4948 
4949 /* send a NIG loopback debug packet */
4950 static void bnx2x_lb_pckt(struct bnx2x *bp)
4951 {
4952 	u32 wb_write[3];
4953 
4954 	/* Ethernet source and destination addresses */
4955 	wb_write[0] = 0x55555555;
4956 	wb_write[1] = 0x55555555;
4957 	wb_write[2] = 0x20;		/* SOP */
4958 	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4959 
4960 	/* NON-IP protocol */
4961 	wb_write[0] = 0x09000000;
4962 	wb_write[1] = 0x55555555;
4963 	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
4964 	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4965 }
4966 
4967 /* some of the internal memories
4968  * are not directly readable from the driver;
4969  * to test them we send debug packets
4970  */
4971 static int bnx2x_int_mem_test(struct bnx2x *bp)
4972 {
4973 	int factor;
4974 	int count, i;
4975 	u32 val = 0;
4976 
4977 	if (CHIP_REV_IS_FPGA(bp))
4978 		factor = 120;
4979 	else if (CHIP_REV_IS_EMUL(bp))
4980 		factor = 200;
4981 	else
4982 		factor = 1;
4983 
4984 	DP(NETIF_MSG_HW, "start part1\n");
4985 
4986 	/* Disable inputs of parser neighbor blocks */
4987 	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4988 	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4989 	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4990 	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4991 
4992 	/*  Write 0 to parser credits for CFC search request */
4993 	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4994 
4995 	/* send Ethernet packet */
4996 	bnx2x_lb_pckt(bp);
4997 
4998 	/* TODO: do I reset the NIG statistics? */
4999 	/* Wait until NIG register shows 1 packet of size 0x10 */
5000 	count = 1000 * factor;
5001 	while (count) {
5002 
5003 		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5004 		val = *bnx2x_sp(bp, wb_data[0]);
5005 		if (val == 0x10)
5006 			break;
5007 
5008 		msleep(10);
5009 		count--;
5010 	}
5011 	if (val != 0x10) {
5012 		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5013 		return -1;
5014 	}
5015 
5016 	/* Wait until PRS register shows 1 packet */
5017 	count = 1000 * factor;
5018 	while (count) {
5019 		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5020 		if (val == 1)
5021 			break;
5022 
5023 		msleep(10);
5024 		count--;
5025 	}
5026 	if (val != 0x1) {
5027 		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5028 		return -2;
5029 	}
5030 
5031 	/* Reset and init BRB, PRS */
5032 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5033 	msleep(50);
5034 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5035 	msleep(50);
5036 	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5037 	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5038 
5039 	DP(NETIF_MSG_HW, "part2\n");
5040 
5041 	/* Disable inputs of parser neighbor blocks */
5042 	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5043 	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5044 	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5045 	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5046 
5047 	/* Write 0 to parser credits for CFC search request */
5048 	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5049 
5050 	/* send 10 Ethernet packets */
5051 	for (i = 0; i < 10; i++)
5052 		bnx2x_lb_pckt(bp);
5053 
5054 	/* Wait until NIG register shows 10 + 1
5055 	   packets of size 11*0x10 = 0xb0 */
5056 	count = 1000 * factor;
5057 	while (count) {
5058 
5059 		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5060 		val = *bnx2x_sp(bp, wb_data[0]);
5061 		if (val == 0xb0)
5062 			break;
5063 
5064 		msleep(10);
5065 		count--;
5066 	}
5067 	if (val != 0xb0) {
5068 		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5069 		return -3;
5070 	}
5071 
5072 	/* Wait until PRS register shows 2 packets */
5073 	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5074 	if (val != 2)
5075 		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5076 
5077 	/* Write 1 to parser credits for CFC search request */
5078 	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5079 
5080 	/* Wait until PRS register shows 3 packets */
5081 	msleep(10 * factor);
5082 	/* Wait until NIG register shows 1 packet of size 0x10 */
5083 	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5084 	if (val != 3)
5085 		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5086 
5087 	/* clear NIG EOP FIFO */
5088 	for (i = 0; i < 11; i++)
5089 		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5090 	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5091 	if (val != 1) {
5092 		BNX2X_ERR("clear of NIG failed\n");
5093 		return -4;
5094 	}
5095 
5096 	/* Reset and init BRB, PRS, NIG */
5097 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5098 	msleep(50);
5099 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5100 	msleep(50);
5101 	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5102 	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5103 #ifndef BCM_ISCSI
5104 	/* set NIC mode */
5105 	REG_WR(bp, PRS_REG_NIC_MODE, 1);
5106 #endif
5107 
5108 	/* Enable inputs of parser neighbor blocks */
5109 	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5110 	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5111 	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5112 	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5113 
5114 	DP(NETIF_MSG_HW, "done\n");
5115 
5116 	return 0; /* OK */
5117 }
5118 
5119 static void enable_blocks_attention(struct bnx2x *bp)
5120 {
5121 	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5122 	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5123 	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5124 	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5125 	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5126 	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5127 	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5128 	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5129 	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5130 /*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5131 /*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5132 	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5133 	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5134 	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5135 /*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5136 /*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5137 	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5138 	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5139 	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5140 	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5141 /*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5142 /*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5143 	if (CHIP_REV_IS_FPGA(bp))
5144 		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5145 	else
5146 		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5147 	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5148 	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5149 	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5150 /*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5151 /*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5152 	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5153 	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5154 /*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5155 	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
5156 }
5157 
5158 
5159 static void bnx2x_reset_common(struct bnx2x *bp)
5160 {
5161 	/* reset_common */
5162 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5163 	       0xd3ffff7f);
5164 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5165 }
5166 
5167 static int bnx2x_init_common(struct bnx2x *bp)
5168 {
5169 	u32 val, i;
5170 
5171 	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5172 
5173 	bnx2x_reset_common(bp);
5174 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5175 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5176 
5177 	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5178 	if (CHIP_IS_E1H(bp))
5179 		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5180 
5181 	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5182 	msleep(30);
5183 	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5184 
5185 	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5186 	if (CHIP_IS_E1(bp)) {
5187 		/* enable HW interrupt from PXP on USDM overflow
5188 		   bit 16 on INT_MASK_0 */
5189 		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5190 	}
5191 
5192 	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5193 	bnx2x_init_pxp(bp);
5194 
5195 #ifdef __BIG_ENDIAN
5196 	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5197 	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5198 	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5199 	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5200 	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5201 
5202 /*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5203 	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5204 	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5205 	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5206 	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5207 #endif
5208 
5209 	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5210 #ifdef BCM_ISCSI
5211 	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5212 	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5213 	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5214 #endif
5215 
5216 	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5217 		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5218 
5219 	/* let the HW do its magic ... */
5220 	msleep(100);
5221 	/* finish PXP init */
5222 	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5223 	if (val != 1) {
5224 		BNX2X_ERR("PXP2 CFG failed\n");
5225 		return -EBUSY;
5226 	}
5227 	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5228 	if (val != 1) {
5229 		BNX2X_ERR("PXP2 RD_INIT failed\n");
5230 		return -EBUSY;
5231 	}
5232 
5233 	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5234 	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5235 
5236 	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5237 
5238 	/* clean the DMAE memory */
5239 	bp->dmae_ready = 1;
5240 	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5241 
5242 	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5243 	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5244 	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5245 	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5246 
5247 	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5248 	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5249 	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5250 	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5251 
5252 	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5253 	/* soft reset pulse */
5254 	REG_WR(bp, QM_REG_SOFT_RESET, 1);
5255 	REG_WR(bp, QM_REG_SOFT_RESET, 0);
5256 
5257 #ifdef BCM_ISCSI
5258 	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5259 #endif
5260 
5261 	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5262 	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5263 	if (!CHIP_REV_IS_SLOW(bp)) {
5264 		/* enable hw interrupt from doorbell Q */
5265 		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5266 	}
5267 
5268 	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5269 	if (CHIP_REV_IS_SLOW(bp)) {
5270 		/* fix for emulation and FPGA for no pause */
5271 		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5272 		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5273 		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5274 		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5275 	}
5276 
5277 	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5278 	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5279 	/* set NIC mode */
5280 	REG_WR(bp, PRS_REG_NIC_MODE, 1);
5281 	if (CHIP_IS_E1H(bp))
5282 		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5283 
5284 	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5285 	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5286 	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5287 	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5288 
5289 	if (CHIP_IS_E1H(bp)) {
5290 		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5291 				STORM_INTMEM_SIZE_E1H/2);
5292 		bnx2x_init_fill(bp,
5293 				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5294 				0, STORM_INTMEM_SIZE_E1H/2);
5295 		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5296 				STORM_INTMEM_SIZE_E1H/2);
5297 		bnx2x_init_fill(bp,
5298 				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5299 				0, STORM_INTMEM_SIZE_E1H/2);
5300 		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5301 				STORM_INTMEM_SIZE_E1H/2);
5302 		bnx2x_init_fill(bp,
5303 				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5304 				0, STORM_INTMEM_SIZE_E1H/2);
5305 		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5306 				STORM_INTMEM_SIZE_E1H/2);
5307 		bnx2x_init_fill(bp,
5308 				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5309 				0, STORM_INTMEM_SIZE_E1H/2);
5310 	} else { /* E1 */
5311 		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5312 				STORM_INTMEM_SIZE_E1);
5313 		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5314 				STORM_INTMEM_SIZE_E1);
5315 		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5316 				STORM_INTMEM_SIZE_E1);
5317 		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5318 				STORM_INTMEM_SIZE_E1);
5319 	}
5320 
5321 	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5322 	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5323 	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5324 	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5325 
5326 	/* sync semi rtc */
5327 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5328 	       0x80000000);
5329 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5330 	       0x80000000);
5331 
5332 	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5333 	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5334 	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5335 
5336 	REG_WR(bp, SRC_REG_SOFT_RST, 1);
5337 	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5338 		REG_WR(bp, i, 0xc0cac01a);
5339 		/* TODO: replace with something meaningful */
5340 	}
5341 	if (CHIP_IS_E1H(bp))
5342 		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5343 	REG_WR(bp, SRC_REG_SOFT_RST, 0);
5344 
5345 	if (sizeof(union cdu_context) != 1024)
5346 		/* we currently assume that a context is 1024 bytes */
5347 		printk(KERN_ALERT PFX "please adjust the size of"
5348 		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5349 
5350 	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5351 	val = (4 << 24) + (0 << 12) + 1024;
5352 	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5353 	if (CHIP_IS_E1(bp)) {
5354 		/* !!! fix pxp client credit until excel update */
5355 		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5356 		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5357 	}
5358 
5359 	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5360 	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5361 
5362 	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5363 	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5364 
5365 	/* PXPCS COMMON comes here */
5366 	/* Reset PCIE errors for debug */
5367 	REG_WR(bp, 0x2814, 0xffffffff);
5368 	REG_WR(bp, 0x3820, 0xffffffff);
5369 
5370 	/* EMAC0 COMMON comes here */
5371 	/* EMAC1 COMMON comes here */
5372 	/* DBU COMMON comes here */
5373 	/* DBG COMMON comes here */
5374 
5375 	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5376 	if (CHIP_IS_E1H(bp)) {
5377 		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5378 		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5379 	}
5380 
5381 	if (CHIP_REV_IS_SLOW(bp))
5382 		msleep(200);
5383 
5384 	/* finish CFC init */
5385 	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5386 	if (val != 1) {
5387 		BNX2X_ERR("CFC LL_INIT failed\n");
5388 		return -EBUSY;
5389 	}
5390 	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5391 	if (val != 1) {
5392 		BNX2X_ERR("CFC AC_INIT failed\n");
5393 		return -EBUSY;
5394 	}
5395 	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5396 	if (val != 1) {
5397 		BNX2X_ERR("CFC CAM_INIT failed\n");
5398 		return -EBUSY;
5399 	}
5400 	REG_WR(bp, CFC_REG_DEBUG0, 0);
5401 
5402 	/* read the NIG statistic
5403 	   to see if this is our first bring-up since power-up */
5404 	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5405 	val = *bnx2x_sp(bp, wb_data[0]);
5406 
5407 	/* do internal memory self test */
5408 	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5409 		BNX2X_ERR("internal mem self test failed\n");
5410 		return -EBUSY;
5411 	}
5412 
5413 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5414 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5415 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5416 		/* Fan failure is indicated by SPIO 5 */
5417 		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5418 			       MISC_REGISTERS_SPIO_INPUT_HI_Z);
5419 
5420 		/* set to active low mode */
5421 		val = REG_RD(bp, MISC_REG_SPIO_INT);
5422 		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5423 					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5424 		REG_WR(bp, MISC_REG_SPIO_INT, val);
5425 
5426 		/* enable interrupt to signal the IGU */
5427 		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5428 		val |= (1 << MISC_REGISTERS_SPIO_5);
5429 		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5430 		break;
5431 
5432 	default:
5433 		break;
5434 	}
5435 
5436 	/* clear PXP2 attentions */
5437 	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5438 
5439 	enable_blocks_attention(bp);
5440 
5441 	if (!BP_NOMCP(bp)) {
5442 		bnx2x_acquire_phy_lock(bp);
5443 		bnx2x_common_init_phy(bp, bp->common.shmem_base);
5444 		bnx2x_release_phy_lock(bp);
5445 	} else
5446 		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5447 
5448 	return 0;
5449 }
5450 
5451 static int bnx2x_init_port(struct bnx2x *bp)
5452 {
5453 	int port = BP_PORT(bp);
5454 	u32 val;
5455 
5456 	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5457 
5458 	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5459 
5460 	/* Port PXP comes here */
5461 	/* Port PXP2 comes here */
5462 #ifdef BCM_ISCSI
5463 	/* Port0  1
5464 	 * Port1  385 */
5465 	i++;
5466 	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5467 	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5468 	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5469 	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5470 
5471 	/* Port0  2
5472 	 * Port1  386 */
5473 	i++;
5474 	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5475 	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5476 	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5477 	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5478 
5479 	/* Port0  3
5480 	 * Port1  387 */
5481 	i++;
5482 	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5483 	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5484 	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5485 	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5486 #endif
5487 	/* Port CMs come here */
5488 
5489 	/* Port QM comes here */
5490 #ifdef BCM_ISCSI
5491 	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5492 	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5493 
5494 	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5495 			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5496 #endif
5497 	/* Port DQ comes here */
5498 	/* Port BRB1 comes here */
5499 	/* Port PRS comes here */
5500 	/* Port TSDM comes here */
5501 	/* Port CSDM comes here */
5502 	/* Port USDM comes here */
5503 	/* Port XSDM comes here */
5504 	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5505 			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
5506 	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5507 			     port ? USEM_PORT1_END : USEM_PORT0_END);
5508 	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5509 			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
5510 	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5511 			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
5512 	/* Port UPB comes here */
5513 	/* Port XPB comes here */
5514 
5515 	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5516 			     port ? PBF_PORT1_END : PBF_PORT0_END);
5517 
5518 	/* configure PBF to work without PAUSE mtu 9000 */
5519 	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5520 
5521 	/* update threshold */
5522 	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5523 	/* update init credit */
5524 	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5525 
5526 	/* probe changes */
5527 	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5528 	msleep(5);
5529 	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5530 
5531 #ifdef BCM_ISCSI
5532 	/* tell the searcher where the T2 table is */
5533 	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5534 
5535 	wb_write[0] = U64_LO(bp->t2_mapping);
5536 	wb_write[1] = U64_HI(bp->t2_mapping);
5537 	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5538 	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5539 	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5540 	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5541 
5542 	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5543 	/* Port SRCH comes here */
5544 #endif
5545 	/* Port CDU comes here */
5546 	/* Port CFC comes here */
5547 
5548 	if (CHIP_IS_E1(bp)) {
5549 		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5550 		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5551 	}
5552 	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5553 			     port ? HC_PORT1_END : HC_PORT0_END);
5554 
5555 	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5556 				    MISC_AEU_PORT0_START,
5557 			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5558 	/* init aeu_mask_attn_func_0/1:
5559 	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5560 	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5561 	 *             bits 4-7 are used for "per vn group attention" */
5562 	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5563 	       (IS_E1HMF(bp) ? 0xF7 : 0x7));
5564 
5565 	/* Port PXPCS comes here */
5566 	/* Port EMAC0 comes here */
5567 	/* Port EMAC1 comes here */
5568 	/* Port DBU comes here */
5569 	/* Port DBG comes here */
5570 	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5571 			     port ? NIG_PORT1_END : NIG_PORT0_END);
5572 
5573 	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5574 
5575 	if (CHIP_IS_E1H(bp)) {
5576 		u32 wsum;
5577 		struct cmng_struct_per_port m_cmng_port;
5578 		int vn;
5579 
5580 		/* 0x2 disable e1hov, 0x1 enable */
5581 		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5582 		       (IS_E1HMF(bp) ? 0x1 : 0x2));
5583 
5584 		/* Init RATE SHAPING and FAIRNESS contexts.
5585 		   Initialize as if there is a 10G link. */
5586 		wsum = bnx2x_calc_vn_wsum(bp);
5587 		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5588 		if (IS_E1HMF(bp))
5589 			for (vn = VN_0; vn < E1HVN_MAX; vn++)
5590 				bnx2x_init_vn_minmax(bp, 2*vn + port,
5591 					wsum, 10000, &m_cmng_port);
5592 	}
5593 
5594 	/* Port MCP comes here */
5595 	/* Port DMAE comes here */
5596 
5597 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5598 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5599 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5600 		/* add SPIO 5 to group 0 */
5601 		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5602 		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5603 		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5604 		break;
5605 
5606 	default:
5607 		break;
5608 	}
5609 
5610 	bnx2x__link_reset(bp);
5611 
5612 	return 0;
5613 }
5614 
5615 #define ILT_PER_FUNC		(768/2)
5616 #define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
5617 /* the phys address is shifted right 12 bits and a 1=valid bit
5618    is added as the 53rd bit;
5619    then, since this is a wide register(TM),
5620    we split it into two 32 bit writes
5621  */
5622 #define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5623 #define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
5624 #define PXP_ONE_ILT(x)		(((x) << 10) | x)
5625 #define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
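/* Illustrative example: for addr = 0x123456000, ONCHIP_ADDR1(addr) is
 * 0x00123456 and ONCHIP_ADDR2(addr) is 0x00100000, i.e. the valid bit lands
 * at bit 52 of the combined 64-bit ILT entry.  PXP_ONE_ILT(i) packs the same
 * line i into both the "first" (low) and "last" (bits 10 and up) fields that
 * PXP_ILT_RANGE(f, l) fills separately.
 */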
5626 
5627 #define CNIC_ILT_LINES		0
5628 
5629 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5630 {
5631 	int reg;
5632 
5633 	if (CHIP_IS_E1H(bp))
5634 		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5635 	else /* E1 */
5636 		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5637 
5638 	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5639 }
5640 
5641 static int bnx2x_init_func(struct bnx2x *bp)
5642 {
5643 	int port = BP_PORT(bp);
5644 	int func = BP_FUNC(bp);
5645 	int i;
5646 
5647 	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5648 
5649 	i = FUNC_ILT_BASE(func);
5650 
5651 	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5652 	if (CHIP_IS_E1H(bp)) {
5653 		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5654 		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5655 	} else /* E1 */
5656 		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5657 		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5658 
5659 
5660 	if (CHIP_IS_E1H(bp)) {
5661 		for (i = 0; i < 9; i++)
5662 			bnx2x_init_block(bp,
5663 					 cm_start[func][i], cm_end[func][i]);
5664 
5665 		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5666 		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5667 	}
5668 
5669 	/* HC init per function */
5670 	if (CHIP_IS_E1H(bp)) {
5671 		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5672 
5673 		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5674 		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5675 	}
5676 	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5677 
5678 	if (CHIP_IS_E1H(bp))
5679 		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5680 
5681 	/* Reset PCIE errors for debug */
5682 	REG_WR(bp, 0x2114, 0xffffffff);
5683 	REG_WR(bp, 0x2120, 0xffffffff);
5684 
5685 	return 0;
5686 }
5687 
5688 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5689 {
5690 	int i, rc = 0;
5691 
5692 	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5693 	   BP_FUNC(bp), load_code);
5694 
5695 	bp->dmae_ready = 0;
5696 	mutex_init(&bp->dmae_mutex);
5697 	bnx2x_gunzip_init(bp);
5698 
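	/* the switch below intentionally falls through ("no break"): a COMMON
	 * load also performs the PORT and FUNCTION init stages, and a PORT
	 * load also performs the FUNCTION stage
	 */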
5699 	switch (load_code) {
5700 	case FW_MSG_CODE_DRV_LOAD_COMMON:
5701 		rc = bnx2x_init_common(bp);
5702 		if (rc)
5703 			goto init_hw_err;
5704 		/* no break */
5705 
5706 	case FW_MSG_CODE_DRV_LOAD_PORT:
5707 		bp->dmae_ready = 1;
5708 		rc = bnx2x_init_port(bp);
5709 		if (rc)
5710 			goto init_hw_err;
5711 		/* no break */
5712 
5713 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5714 		bp->dmae_ready = 1;
5715 		rc = bnx2x_init_func(bp);
5716 		if (rc)
5717 			goto init_hw_err;
5718 		break;
5719 
5720 	default:
5721 		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5722 		break;
5723 	}
5724 
5725 	if (!BP_NOMCP(bp)) {
5726 		int func = BP_FUNC(bp);
5727 
5728 		bp->fw_drv_pulse_wr_seq =
5729 				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5730 				 DRV_PULSE_SEQ_MASK);
5731 		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5732 		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5733 		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
5734 	} else
5735 		bp->func_stx = 0;
5736 
5737 	/* this needs to be done before gunzip end */
5738 	bnx2x_zero_def_sb(bp);
5739 	for_each_queue(bp, i)
5740 		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5741 
5742 init_hw_err:
5743 	bnx2x_gunzip_end(bp);
5744 
5745 	return rc;
5746 }
5747 
5748 /* send the MCP a request, block until there is a reply */
5749 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5750 {
5751 	int func = BP_FUNC(bp);
5752 	u32 seq = ++bp->fw_seq;
5753 	u32 rc = 0;
5754 	u32 cnt = 1;
5755 	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5756 
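	/* the low bits of the mailbox word carry a rolling sequence number;
	 * the MCP echoes it back in fw_mb_header, and the polling loop below
	 * uses it to tell a fresh reply from a stale one
	 */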
5757 	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5758 	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5759 
5760 	do {
5761 		/* let the FW do its magic ... */
5762 		msleep(delay);
5763 
5764 		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5765 
5766 		/* Give the FW up to 2 seconds (200*10ms) */
5767 	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5768 
5769 	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5770 	   cnt*delay, rc, seq);
5771 
5772 	/* is this a reply to our command? */
5773 	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5774 		rc &= FW_MSG_CODE_MASK;
5775 
5776 	} else {
5777 		/* FW BUG! */
5778 		BNX2X_ERR("FW failed to respond!\n");
5779 		bnx2x_fw_dump(bp);
5780 		rc = 0;
5781 	}
5782 
5783 	return rc;
5784 }
5785 
5786 static void bnx2x_free_mem(struct bnx2x *bp)
5787 {
5788 
5789 #define BNX2X_PCI_FREE(x, y, size) \
5790 	do { \
5791 		if (x) { \
5792 			pci_free_consistent(bp->pdev, size, x, y); \
5793 			x = NULL; \
5794 			y = 0; \
5795 		} \
5796 	} while (0)
5797 
5798 #define BNX2X_FREE(x) \
5799 	do { \
5800 		if (x) { \
5801 			vfree(x); \
5802 			x = NULL; \
5803 		} \
5804 	} while (0)
5805 
5806 	int i;
5807 
5808 	/* fastpath */
5809 	for_each_queue(bp, i) {
5810 
5811 		/* Status blocks */
5812 		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5813 			       bnx2x_fp(bp, i, status_blk_mapping),
5814 			       sizeof(struct host_status_block) +
5815 			       sizeof(struct eth_tx_db_data));
5816 
5817 		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5818 		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5819 		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5820 			       bnx2x_fp(bp, i, tx_desc_mapping),
5821 			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
5822 
5823 		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5824 		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5825 			       bnx2x_fp(bp, i, rx_desc_mapping),
5826 			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
5827 
5828 		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5829 			       bnx2x_fp(bp, i, rx_comp_mapping),
5830 			       sizeof(struct eth_fast_path_rx_cqe) *
5831 			       NUM_RCQ_BD);
5832 
5833 		/* SGE ring */
5834 		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5835 		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5836 			       bnx2x_fp(bp, i, rx_sge_mapping),
5837 			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5838 	}
5839 	/* end of fastpath */
5840 
5841 	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5842 		       sizeof(struct host_def_status_block));
5843 
5844 	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5845 		       sizeof(struct bnx2x_slowpath));
5846 
5847 #ifdef BCM_ISCSI
5848 	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5849 	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5850 	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5851 	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5852 #endif
5853 	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5854 
5855 #undef BNX2X_PCI_FREE
5856 #undef BNX2X_FREE
5857 }
5858 
5859 static int bnx2x_alloc_mem(struct bnx2x *bp)
5860 {
5861 
5862 #define BNX2X_PCI_ALLOC(x, y, size) \
5863 	do { \
5864 		x = pci_alloc_consistent(bp->pdev, size, y); \
5865 		if (x == NULL) \
5866 			goto alloc_mem_err; \
5867 		memset(x, 0, size); \
5868 	} while (0)
5869 
5870 #define BNX2X_ALLOC(x, size) \
5871 	do { \
5872 		x = vmalloc(size); \
5873 		if (x == NULL) \
5874 			goto alloc_mem_err; \
5875 		memset(x, 0, size); \
5876 	} while (0)
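/* BNX2X_PCI_ALLOC is used for DMA-coherent memory the chip accesses (status
 * blocks, descriptor and completion rings); BNX2X_ALLOC (vmalloc) is used for
 * host-only shadow arrays such as the sw_tx_bd/sw_rx_bd/sw_rx_page rings.
 */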
5877 
5878 	int i;
5879 
5880 	/* fastpath */
5881 	for_each_queue(bp, i) {
5882 		bnx2x_fp(bp, i, bp) = bp;
5883 
5884 		/* Status blocks */
5885 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5886 				&bnx2x_fp(bp, i, status_blk_mapping),
5887 				sizeof(struct host_status_block) +
5888 				sizeof(struct eth_tx_db_data));
5889 
5890 		bnx2x_fp(bp, i, hw_tx_prods) =
5891 				(void *)(bnx2x_fp(bp, i, status_blk) + 1);
5892 
5893 		bnx2x_fp(bp, i, tx_prods_mapping) =
5894 				bnx2x_fp(bp, i, status_blk_mapping) +
5895 				sizeof(struct host_status_block);
5896 
5897 		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5898 		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5899 				sizeof(struct sw_tx_bd) * NUM_TX_BD);
5900 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5901 				&bnx2x_fp(bp, i, tx_desc_mapping),
5902 				sizeof(struct eth_tx_bd) * NUM_TX_BD);
5903 
5904 		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5905 				sizeof(struct sw_rx_bd) * NUM_RX_BD);
5906 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5907 				&bnx2x_fp(bp, i, rx_desc_mapping),
5908 				sizeof(struct eth_rx_bd) * NUM_RX_BD);
5909 
5910 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5911 				&bnx2x_fp(bp, i, rx_comp_mapping),
5912 				sizeof(struct eth_fast_path_rx_cqe) *
5913 				NUM_RCQ_BD);
5914 
5915 		/* SGE ring */
5916 		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5917 				sizeof(struct sw_rx_page) * NUM_RX_SGE);
5918 		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5919 				&bnx2x_fp(bp, i, rx_sge_mapping),
5920 				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5921 	}
5922 	/* end of fastpath */
5923 
5924 	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5925 			sizeof(struct host_def_status_block));
5926 
5927 	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5928 			sizeof(struct bnx2x_slowpath));
5929 
5930 #ifdef BCM_ISCSI
5931 	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5932 
5933 	/* Initialize T1 */
5934 	for (i = 0; i < 64*1024; i += 64) {
5935 		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5936 		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5937 	}
5938 
5939 	/* allocate searcher T2 table;
5940 	   we allocate 1/4 of alloc num for T2
5941 	   (which is not entered into the ILT) */
5942 	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5943 
5944 	/* Initialize T2 */
5945 	for (i = 0; i < 16*1024; i += 64)
5946 		* (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5947 
5948 	/* now fixup the last line in the block to point to the next block */
5949 	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5950 
5951 	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5952 	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5953 
5954 	/* QM queues (128*MAX_CONN) */
5955 	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5956 #endif
5957 
5958 	/* Slow path ring */
5959 	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5960 
5961 	return 0;
5962 
5963 alloc_mem_err:
5964 	bnx2x_free_mem(bp);
5965 	return -ENOMEM;
5966 
5967 #undef BNX2X_PCI_ALLOC
5968 #undef BNX2X_ALLOC
5969 }
5970 
5971 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5972 {
5973 	int i;
5974 
5975 	for_each_queue(bp, i) {
5976 		struct bnx2x_fastpath *fp = &bp->fp[i];
5977 
5978 		u16 bd_cons = fp->tx_bd_cons;
5979 		u16 sw_prod = fp->tx_pkt_prod;
5980 		u16 sw_cons = fp->tx_pkt_cons;
5981 
5982 		while (sw_cons != sw_prod) {
5983 			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5984 			sw_cons++;
5985 		}
5986 	}
5987 }
5988 
5989 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5990 {
5991 	int i, j;
5992 
5993 	for_each_queue(bp, j) {
5994 		struct bnx2x_fastpath *fp = &bp->fp[j];
5995 
5996 		for (i = 0; i < NUM_RX_BD; i++) {
5997 			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5998 			struct sk_buff *skb = rx_buf->skb;
5999 
6000 			if (skb == NULL)
6001 				continue;
6002 
6003 			pci_unmap_single(bp->pdev,
6004 					 pci_unmap_addr(rx_buf, mapping),
6005 					 bp->rx_buf_size,
6006 					 PCI_DMA_FROMDEVICE);
6007 
6008 			rx_buf->skb = NULL;
6009 			dev_kfree_skb(skb);
6010 		}
6011 		if (!fp->disable_tpa)
6012 			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6013 					    ETH_MAX_AGGREGATION_QUEUES_E1 :
6014 					    ETH_MAX_AGGREGATION_QUEUES_E1H);
6015 	}
6016 }
6017 
6018 static void bnx2x_free_skbs(struct bnx2x *bp)
6019 {
6020 	bnx2x_free_tx_skbs(bp);
6021 	bnx2x_free_rx_skbs(bp);
6022 }
6023 
6024 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6025 {
6026 	int i, offset = 1;
6027 
6028 	free_irq(bp->msix_table[0].vector, bp->dev);
6029 	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6030 	   bp->msix_table[0].vector);
6031 
6032 	for_each_queue(bp, i) {
6033 		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6034 		   "state %x\n", i, bp->msix_table[i + offset].vector,
6035 		   bnx2x_fp(bp, i, state));
6036 
6037 		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6038 			BNX2X_ERR("IRQ of fp #%d being freed while "
6039 				  "state != closed\n", i);
6040 
6041 		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6042 	}
6043 }
6044 
6045 static void bnx2x_free_irq(struct bnx2x *bp)
6046 {
6047 	if (bp->flags & USING_MSIX_FLAG) {
6048 		bnx2x_free_msix_irqs(bp);
6049 		pci_disable_msix(bp->pdev);
6050 		bp->flags &= ~USING_MSIX_FLAG;
6051 
6052 	} else
6053 		free_irq(bp->pdev->irq, bp->dev);
6054 }
6055 
6056 static int bnx2x_enable_msix(struct bnx2x *bp)
6057 {
6058 	int i, rc, offset;
6059 
6060 	bp->msix_table[0].entry = 0;
6061 	offset = 1;
6062 	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6063 
6064 	for_each_queue(bp, i) {
6065 		int igu_vec = offset + i + BP_L_ID(bp);
6066 
6067 		bp->msix_table[i + offset].entry = igu_vec;
6068 		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6069 		   "(fastpath #%u)\n", i + offset, igu_vec, i);
6070 	}
6071 
6072 	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6073 			     bp->num_queues + offset);
6074 	if (rc) {
6075 		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6076 		return -1;
6077 	}
6078 	bp->flags |= USING_MSIX_FLAG;
6079 
6080 	return 0;
6081 }
6082 
6083 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6084 {
6085 	int i, rc, offset = 1;
6086 
6087 	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6088 			 bp->dev->name, bp->dev);
6089 	if (rc) {
6090 		BNX2X_ERR("request sp irq failed\n");
6091 		return -EBUSY;
6092 	}
6093 
6094 	for_each_queue(bp, i) {
6095 		rc = request_irq(bp->msix_table[i + offset].vector,
6096 				 bnx2x_msix_fp_int, 0,
6097 				 bp->dev->name, &bp->fp[i]);
6098 		if (rc) {
6099 			BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6100 				  i + offset, -rc);
6101 			bnx2x_free_msix_irqs(bp);
6102 			return -EBUSY;
6103 		}
6104 
6105 		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6106 	}
6107 
6108 	return 0;
6109 }
6110 
6111 static int bnx2x_req_irq(struct bnx2x *bp)
6112 {
6113 	int rc;
6114 
6115 	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6116 			 bp->dev->name, bp->dev);
6117 	if (!rc)
6118 		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6119 
6120 	return rc;
6121 }
6122 
6123 static void bnx2x_napi_enable(struct bnx2x *bp)
6124 {
6125 	int i;
6126 
6127 	for_each_queue(bp, i)
6128 		napi_enable(&bnx2x_fp(bp, i, napi));
6129 }
6130 
6131 static void bnx2x_napi_disable(struct bnx2x *bp)
6132 {
6133 	int i;
6134 
6135 	for_each_queue(bp, i)
6136 		napi_disable(&bnx2x_fp(bp, i, napi));
6137 }
6138 
6139 static void bnx2x_netif_start(struct bnx2x *bp)
6140 {
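	/* intr_sem is presumably incremented by bnx2x_int_disable_sync();
	 * NAPI and HW interrupts are only re-enabled here once that count
	 * drops back to zero
	 */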
6141 	if (atomic_dec_and_test(&bp->intr_sem)) {
6142 		if (netif_running(bp->dev)) {
6143 			if (bp->state == BNX2X_STATE_OPEN)
6144 				netif_wake_queue(bp->dev);
6145 			bnx2x_napi_enable(bp);
6146 			bnx2x_int_enable(bp);
6147 		}
6148 	}
6149 }
6150 
6151 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6152 {
6153 	bnx2x_int_disable_sync(bp, disable_hw);
6154 	bnx2x_napi_disable(bp);
6155 	if (netif_running(bp->dev)) {
6156 		netif_tx_disable(bp->dev);
6157 		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
6158 	}
6159 }
6160 
6161 /*
6162  * Init service functions
6163  */
6164 
6165 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6166 {
6167 	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6168 	int port = BP_PORT(bp);
6169 
6170 	/* CAM allocation
6171 	 * unicasts 0-31:port0 32-63:port1
6172 	 * multicast 64-127:port0 128-191:port1
6173 	 */
6174 	config->hdr.length_6b = 2;
6175 	config->hdr.offset = port ? 32 : 0;
6176 	config->hdr.client_id = BP_CL_ID(bp);
6177 	config->hdr.reserved1 = 0;
6178 
6179 	/* primary MAC */
6180 	config->config_table[0].cam_entry.msb_mac_addr =
6181 					swab16(*(u16 *)&bp->dev->dev_addr[0]);
6182 	config->config_table[0].cam_entry.middle_mac_addr =
6183 					swab16(*(u16 *)&bp->dev->dev_addr[2]);
6184 	config->config_table[0].cam_entry.lsb_mac_addr =
6185 					swab16(*(u16 *)&bp->dev->dev_addr[4]);
6186 	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6187 	if (set)
6188 		config->config_table[0].target_table_entry.flags = 0;
6189 	else
6190 		CAM_INVALIDATE(config->config_table[0]);
6191 	config->config_table[0].target_table_entry.client_id = 0;
6192 	config->config_table[0].target_table_entry.vlan_id = 0;
6193 
6194 	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6195 	   (set ? "setting" : "clearing"),
6196 	   config->config_table[0].cam_entry.msb_mac_addr,
6197 	   config->config_table[0].cam_entry.middle_mac_addr,
6198 	   config->config_table[0].cam_entry.lsb_mac_addr);
6199 
6200 	/* broadcast */
6201 	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6202 	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6203 	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6204 	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6205 	if (set)
6206 		config->config_table[1].target_table_entry.flags =
6207 				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6208 	else
6209 		CAM_INVALIDATE(config->config_table[1]);
6210 	config->config_table[1].target_table_entry.client_id = 0;
6211 	config->config_table[1].target_table_entry.vlan_id = 0;
6212 
6213 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6214 		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6215 		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6216 }
6217 
6218 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6219 {
6220 	struct mac_configuration_cmd_e1h *config =
6221 		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6222 
6223 	if (set && (bp->state != BNX2X_STATE_OPEN)) {
6224 		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6225 		return;
6226 	}
6227 
6228 	/* CAM allocation for E1H
6229 	 * unicasts: by func number
6230 	 * multicast: 20+FUNC*20, 20 each
6231 	 */
6232 	config->hdr.length_6b = 1;
6233 	config->hdr.offset = BP_FUNC(bp);
6234 	config->hdr.client_id = BP_CL_ID(bp);
6235 	config->hdr.reserved1 = 0;
6236 
6237 	/* primary MAC */
6238 	config->config_table[0].msb_mac_addr =
6239 					swab16(*(u16 *)&bp->dev->dev_addr[0]);
6240 	config->config_table[0].middle_mac_addr =
6241 					swab16(*(u16 *)&bp->dev->dev_addr[2]);
6242 	config->config_table[0].lsb_mac_addr =
6243 					swab16(*(u16 *)&bp->dev->dev_addr[4]);
6244 	config->config_table[0].client_id = BP_L_ID(bp);
6245 	config->config_table[0].vlan_id = 0;
6246 	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6247 	if (set)
6248 		config->config_table[0].flags = BP_PORT(bp);
6249 	else
6250 		config->config_table[0].flags =
6251 				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6252 
6253 	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6254 	   (set ? "setting" : "clearing"),
6255 	   config->config_table[0].msb_mac_addr,
6256 	   config->config_table[0].middle_mac_addr,
6257 	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6258 
6259 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6260 		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6261 		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6262 }
6263 
6264 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6265 			     int *state_p, int poll)
6266 {
6267 	/* can take a while if any port is running */
6268 	int cnt = 500;
6269 
6270 	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6271 	   poll ? "polling" : "waiting", state, idx);
6272 
6273 	might_sleep();
6274 	while (cnt--) {
6275 		if (poll) {
6276 			bnx2x_rx_int(bp->fp, 10);
6277 			/* if index is different from 0
6278 			 * the reply for some commands will
6279 			 * be on the non default queue
6280 			 */
6281 			if (idx)
6282 				bnx2x_rx_int(&bp->fp[idx], 10);
6283 		}
6284 
6285 		mb(); /* state is changed by bnx2x_sp_event() */
6286 		if (*state_p == state)
6287 			return 0;
6288 
6289 		msleep(1);
6290 	}
6291 
6292 	/* timeout! */
6293 	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6294 		  poll ? "polling" : "waiting", state, idx);
6295 #ifdef BNX2X_STOP_ON_ERROR
6296 	bnx2x_panic();
6297 #endif
6298 
6299 	return -EBUSY;
6300 }
6301 
6302 static int bnx2x_setup_leading(struct bnx2x *bp)
6303 {
6304 	int rc;
6305 
6306 	/* reset IGU state */
6307 	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6308 
6309 	/* SETUP ramrod */
6310 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6311 
6312 	/* Wait for completion */
6313 	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6314 
6315 	return rc;
6316 }
6317 
6318 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6319 {
6320 	/* reset IGU state */
6321 	bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6322 
6323 	/* SETUP ramrod */
6324 	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6325 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6326 
6327 	/* Wait for completion */
6328 	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6329 				 &(bp->fp[index].state), 0);
6330 }
6331 
6332 static int bnx2x_poll(struct napi_struct *napi, int budget);
6333 static void bnx2x_set_rx_mode(struct net_device *dev);
6334 
6335 /* must be called with rtnl_lock */
6336 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6337 {
6338 	u32 load_code;
6339 	int i, rc = 0;
6340 #ifdef BNX2X_STOP_ON_ERROR
6341 	if (unlikely(bp->panic))
6342 		return -EPERM;
6343 #endif
6344 
6345 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6346 
6347 	if (use_inta) {
6348 		bp->num_queues = 1;
6349 
6350 	} else {
6351 		if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6352 			/* user requested number */
6353 			bp->num_queues = use_multi;
6354 
6355 		else if (use_multi)
6356 			bp->num_queues = min_t(u32, num_online_cpus(),
6357 					       BP_MAX_QUEUES(bp));
6358 		else
6359 			bp->num_queues = 1;
6360 
6361 		DP(NETIF_MSG_IFUP,
6362 		   "set number of queues to %d\n", bp->num_queues);
6363 
6364 		/* if we can't use MSI-X we only need one fp,
6365 		 * so try to enable MSI-X with the requested number of fps
6366 		 * and fall back to MSI or legacy INTx with one fp
6367 		 */
6368 		rc = bnx2x_enable_msix(bp);
6369 		if (rc) {
6370 			/* failed to enable MSI-X */
6371 			bp->num_queues = 1;
6372 			if (use_multi)
6373 				BNX2X_ERR("Multi requested but failed"
6374 					  " to enable MSI-X\n");
6375 		}
6376 	}
6377 
6378 	if (bnx2x_alloc_mem(bp))
6379 		return -ENOMEM;
6380 
6381 	for_each_queue(bp, i)
6382 		bnx2x_fp(bp, i, disable_tpa) =
6383 					((bp->flags & TPA_ENABLE_FLAG) == 0);
6384 
6385 	for_each_queue(bp, i)
6386 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6387 			       bnx2x_poll, 128);
6388 
6389 #ifdef BNX2X_STOP_ON_ERROR
6390 	for_each_queue(bp, i) {
6391 		struct bnx2x_fastpath *fp = &bp->fp[i];
6392 
6393 		fp->poll_no_work = 0;
6394 		fp->poll_calls = 0;
6395 		fp->poll_max_calls = 0;
6396 		fp->poll_complete = 0;
6397 		fp->poll_exit = 0;
6398 	}
6399 #endif
6400 	bnx2x_napi_enable(bp);
6401 
6402 	if (bp->flags & USING_MSIX_FLAG) {
6403 		rc = bnx2x_req_msix_irqs(bp);
6404 		if (rc) {
6405 			pci_disable_msix(bp->pdev);
6406 			goto load_error1;
6407 		}
6408 		printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
6409 	} else {
6410 		bnx2x_ack_int(bp);
6411 		rc = bnx2x_req_irq(bp);
6412 		if (rc) {
6413 			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6414 			goto load_error1;
6415 		}
6416 	}
6417 
6418 	/* Send LOAD_REQUEST command to the MCP.
6419 	   It returns the type of LOAD command:
6420 	   if this is the first port to be initialized,
6421 	   common blocks should be initialized as well; otherwise not
6422 	*/
6423 	if (!BP_NOMCP(bp)) {
6424 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6425 		if (!load_code) {
6426 			BNX2X_ERR("MCP response failure, aborting\n");
6427 			rc = -EBUSY;
6428 			goto load_error2;
6429 		}
6430 		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6431 			rc = -EBUSY; /* other port in diagnostic mode */
6432 			goto load_error2;
6433 		}
6434 
6435 	} else {
6436 		int port = BP_PORT(bp);
6437 
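		/* without an MCP the driver keeps its own counters: the first
		 * load overall gets COMMON init, the first load on this port
		 * gets PORT init, and any later load only gets FUNCTION init
		 */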
6438 		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6439 		   load_count[0], load_count[1], load_count[2]);
6440 		load_count[0]++;
6441 		load_count[1 + port]++;
6442 		DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6443 		   load_count[0], load_count[1], load_count[2]);
6444 		if (load_count[0] == 1)
6445 			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6446 		else if (load_count[1 + port] == 1)
6447 			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6448 		else
6449 			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6450 	}
6451 
6452 	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6453 	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6454 		bp->port.pmf = 1;
6455 	else
6456 		bp->port.pmf = 0;
6457 	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6458 
6459 	/* Initialize HW */
6460 	rc = bnx2x_init_hw(bp, load_code);
6461 	if (rc) {
6462 		BNX2X_ERR("HW init failed, aborting\n");
6463 		goto load_error2;
6464 	}
6465 
6466 	/* Setup NIC internals and enable interrupts */
6467 	bnx2x_nic_init(bp, load_code);
6468 
6469 	/* Send LOAD_DONE command to MCP */
6470 	if (!BP_NOMCP(bp)) {
6471 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6472 		if (!load_code) {
6473 			BNX2X_ERR("MCP response failure, aborting\n");
6474 			rc = -EBUSY;
6475 			goto load_error3;
6476 		}
6477 	}
6478 
6479 	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6480 
6481 	rc = bnx2x_setup_leading(bp);
6482 	if (rc) {
6483 		BNX2X_ERR("Setup leading failed!\n");
6484 		goto load_error3;
6485 	}
6486 
6487 	if (CHIP_IS_E1H(bp))
6488 		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6489 			BNX2X_ERR("!!!  mf_cfg function disabled\n");
6490 			bp->state = BNX2X_STATE_DISABLED;
6491 		}
6492 
6493 	if (bp->state == BNX2X_STATE_OPEN)
6494 		for_each_nondefault_queue(bp, i) {
6495 			rc = bnx2x_setup_multi(bp, i);
6496 			if (rc)
6497 				goto load_error3;
6498 		}
6499 
6500 	if (CHIP_IS_E1(bp))
6501 		bnx2x_set_mac_addr_e1(bp, 1);
6502 	else
6503 		bnx2x_set_mac_addr_e1h(bp, 1);
6504 
6505 	if (bp->port.pmf)
6506 		bnx2x_initial_phy_init(bp);
6507 
6508 	/* Start fast path */
6509 	switch (load_mode) {
6510 	case LOAD_NORMAL:
6511 		/* Tx queue should only be re-enabled */
6512 		netif_wake_queue(bp->dev);
6513 		/* Initialize the receive filter. */
6514 		bnx2x_set_rx_mode(bp->dev);
6515 		break;
6516 
6517 	case LOAD_OPEN:
6518 		netif_start_queue(bp->dev);
6519 		/* Initialize the receive filter. */
6520 		bnx2x_set_rx_mode(bp->dev);
6521 		break;
6522 
6523 	case LOAD_DIAG:
6524 		/* Initialize the receive filter. */
6525 		bnx2x_set_rx_mode(bp->dev);
6526 		bp->state = BNX2X_STATE_DIAG;
6527 		break;
6528 
6529 	default:
6530 		break;
6531 	}
6532 
6533 	if (!bp->port.pmf)
6534 		bnx2x__link_status_update(bp);
6535 
6536 	/* start the timer */
6537 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6538 
6539 
6540 	return 0;
6541 
6542 load_error3:
6543 	bnx2x_int_disable_sync(bp, 1);
6544 	if (!BP_NOMCP(bp)) {
6545 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6546 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6547 	}
6548 	bp->port.pmf = 0;
6549 	/* Free SKBs, SGEs, TPA pool and driver internals */
6550 	bnx2x_free_skbs(bp);
6551 	for_each_queue(bp, i)
6552 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6553 load_error2:
6554 	/* Release IRQs */
6555 	bnx2x_free_irq(bp);
6556 load_error1:
6557 	bnx2x_napi_disable(bp);
6558 	for_each_queue(bp, i)
6559 		netif_napi_del(&bnx2x_fp(bp, i, napi));
6560 	bnx2x_free_mem(bp);
6561 
6562 	/* TBD we really need to reset the chip
6563 	   if we want to recover from this */
6564 	return rc;
6565 }
6566 
6567 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6568 {
6569 	int rc;
6570 
6571 	/* halt the connection */
6572 	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6573 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6574 
6575 	/* Wait for completion */
6576 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6577 			       &(bp->fp[index].state), 1);
6578 	if (rc) /* timeout */
6579 		return rc;
6580 
6581 	/* delete cfc entry */
6582 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6583 
6584 	/* Wait for completion */
6585 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6586 			       &(bp->fp[index].state), 1);
6587 	return rc;
6588 }
6589 
6590 static int bnx2x_stop_leading(struct bnx2x *bp)
6591 {
6592 	u16 dsb_sp_prod_idx;
6593 	/* if the other port is handling traffic,
6594 	   this can take a lot of time */
6595 	int cnt = 500;
6596 	int rc;
6597 
6598 	might_sleep();
6599 
6600 	/* Send HALT ramrod */
6601 	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6602 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6603 
6604 	/* Wait for completion */
6605 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6606 			       &(bp->fp[0].state), 1);
6607 	if (rc) /* timeout */
6608 		return rc;
6609 
6610 	dsb_sp_prod_idx = *bp->dsb_sp_prod;
6611 
6612 	/* Send PORT_DELETE ramrod */
6613 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6614 
6615 	/* Wait for completion to arrive on the default status block;
6616 	   we are going to reset the chip anyway,
6617 	   so there is not much to do if this times out
6618 	 */
6619 	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6620 		if (!cnt) {
6621 			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6622 			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6623 			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
6624 #ifdef BNX2X_STOP_ON_ERROR
6625 			bnx2x_panic();
6626 #else
6627 			rc = -EBUSY;
6628 #endif
6629 			break;
6630 		}
6631 		cnt--;
6632 		msleep(1);
6633 		rmb(); /* Refresh the dsb_sp_prod */
6634 	}
6635 	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6636 	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6637 
6638 	return rc;
6639 }
6640 
6641 static void bnx2x_reset_func(struct bnx2x *bp)
6642 {
6643 	int port = BP_PORT(bp);
6644 	int func = BP_FUNC(bp);
6645 	int base, i;
6646 
6647 	/* Configure IGU */
6648 	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6649 	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6650 
6651 	REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6652 
6653 	/* Clear ILT */
6654 	base = FUNC_ILT_BASE(func);
6655 	for (i = base; i < base + ILT_PER_FUNC; i++)
6656 		bnx2x_ilt_wr(bp, i, 0);
6657 }
6658 
6659 static void bnx2x_reset_port(struct bnx2x *bp)
6660 {
6661 	int port = BP_PORT(bp);
6662 	u32 val;
6663 
6664 	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6665 
6666 	/* Do not rcv packets to BRB */
6667 	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6668 	/* Do not direct rcv packets that are not for MCP to the BRB */
6669 	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6670 			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6671 
6672 	/* Configure AEU */
6673 	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6674 
6675 	msleep(100);
6676 	/* Check for BRB port occupancy */
6677 	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6678 	if (val)
6679 		DP(NETIF_MSG_IFDOWN,
6680 		   "BRB1 is not empty  %d blocks are occupied\n", val);
6681 
6682 	/* TODO: Close Doorbell port? */
6683 }
6684 
6685 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6686 {
6687 	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6688 	   BP_FUNC(bp), reset_code);
6689 
6690 	switch (reset_code) {
6691 	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6692 		bnx2x_reset_port(bp);
6693 		bnx2x_reset_func(bp);
6694 		bnx2x_reset_common(bp);
6695 		break;
6696 
6697 	case FW_MSG_CODE_DRV_UNLOAD_PORT:
6698 		bnx2x_reset_port(bp);
6699 		bnx2x_reset_func(bp);
6700 		break;
6701 
6702 	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6703 		bnx2x_reset_func(bp);
6704 		break;
6705 
6706 	default:
6707 		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6708 		break;
6709 	}
6710 }
6711 
6712 /* must be called with rtnl_lock */
6713 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6714 {
6715 	int port = BP_PORT(bp);
6716 	u32 reset_code = 0;
6717 	int i, cnt, rc;
6718 
6719 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6720 
6721 	bp->rx_mode = BNX2X_RX_MODE_NONE;
6722 	bnx2x_set_storm_rx_mode(bp);
6723 
6724 	bnx2x_netif_stop(bp, 1);
6725 
6726 	del_timer_sync(&bp->timer);
6727 	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6728 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6729 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6730 
6731 	/* Release IRQs */
6732 	bnx2x_free_irq(bp);
6733 
6734 	/* Wait until tx fast path tasks complete */
6735 	for_each_queue(bp, i) {
6736 		struct bnx2x_fastpath *fp = &bp->fp[i];
6737 
6738 		cnt = 1000;
6739 		smp_rmb();
6740 		while (bnx2x_has_tx_work_unload(fp)) {
6741 
6742 			bnx2x_tx_int(fp, 1000);
6743 			if (!cnt) {
6744 				BNX2X_ERR("timeout waiting for queue[%d]\n",
6745 					  i);
6746 #ifdef BNX2X_STOP_ON_ERROR
6747 				bnx2x_panic();
6748 				return -EBUSY;
6749 #else
6750 				break;
6751 #endif
6752 			}
6753 			cnt--;
6754 			msleep(1);
6755 			smp_rmb();
6756 		}
6757 	}
6758 	/* Give HW time to discard old tx messages */
6759 	msleep(1);
6760 
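	/* clear the chip's MAC filtering: E1 invalidates the CAM entries,
	   E1H clears the per-function LLH enable and the multicast hash */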
6761 	if (CHIP_IS_E1(bp)) {
6762 		struct mac_configuration_cmd *config =
6763 						bnx2x_sp(bp, mcast_config);
6764 
6765 		bnx2x_set_mac_addr_e1(bp, 0);
6766 
6767 		for (i = 0; i < config->hdr.length_6b; i++)
6768 			CAM_INVALIDATE(config->config_table[i]);
6769 
6770 		config->hdr.length_6b = i;
6771 		if (CHIP_REV_IS_SLOW(bp))
6772 			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6773 		else
6774 			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6775 		config->hdr.client_id = BP_CL_ID(bp);
6776 		config->hdr.reserved1 = 0;
6777 
6778 		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6779 			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6780 			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6781 
6782 	} else { /* E1H */
6783 		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6784 
6785 		bnx2x_set_mac_addr_e1h(bp, 0);
6786 
6787 		for (i = 0; i < MC_HASH_SIZE; i++)
6788 			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6789 	}
6790 
6791 	if (unload_mode == UNLOAD_NORMAL)
6792 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6793 
6794 	else if (bp->flags & NO_WOL_FLAG) {
6795 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6796 		if (CHIP_IS_E1H(bp))
6797 			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6798 
6799 	} else if (bp->wol) {
6800 		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6801 		u8 *mac_addr = bp->dev->dev_addr;
6802 		u32 val;
6803 		/* The mac address is written to entries 1-4 to
6804 		   preserve entry 0 which is used by the PMF */
6805 		u8 entry = (BP_E1HVN(bp) + 1)*8;
6806 
6807 		val = (mac_addr[0] << 8) | mac_addr[1];
6808 		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6809 
6810 		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6811 		      (mac_addr[4] << 8) | mac_addr[5];
6812 		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6813 
6814 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6815 
6816 	} else
6817 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6818 
6819 	/* Close multi and leading connections;
6820 	   completions for ramrods are collected in a synchronous way */
6821 	for_each_nondefault_queue(bp, i)
6822 		if (bnx2x_stop_multi(bp, i))
6823 			goto unload_error;
6824 
6825 	rc = bnx2x_stop_leading(bp);
6826 	if (rc) {
6827 		BNX2X_ERR("Stop leading failed!\n");
6828 #ifdef BNX2X_STOP_ON_ERROR
6829 		return -EBUSY;
6830 #else
6831 		goto unload_error;
6832 #endif
6833 	}
6834 
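	/* note: the success path falls through to unload_error as well; from
	   here on the unload continues unconditionally */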
6835 unload_error:
6836 	if (!BP_NOMCP(bp))
6837 		reset_code = bnx2x_fw_command(bp, reset_code);
6838 	else {
6839 		DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6840 		   load_count[0], load_count[1], load_count[2]);
6841 		load_count[0]--;
6842 		load_count[1 + port]--;
6843 		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6844 		   load_count[0], load_count[1], load_count[2]);
6845 		if (load_count[0] == 0)
6846 			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6847 		else if (load_count[1 + port] == 0)
6848 			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6849 		else
6850 			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6851 	}
6852 
6853 	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6854 	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6855 		bnx2x__link_reset(bp);
6856 
6857 	/* Reset the chip */
6858 	bnx2x_reset_chip(bp, reset_code);
6859 
6860 	/* Report UNLOAD_DONE to MCP */
6861 	if (!BP_NOMCP(bp))
6862 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6863 	bp->port.pmf = 0;
6864 
6865 	/* Free SKBs, SGEs, TPA pool and driver internals */
6866 	bnx2x_free_skbs(bp);
6867 	for_each_queue(bp, i)
6868 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6869 	for_each_queue(bp, i)
6870 		netif_napi_del(&bnx2x_fp(bp, i, napi));
6871 	bnx2x_free_mem(bp);
6872 
6873 	bp->state = BNX2X_STATE_CLOSED;
6874 
6875 	netif_carrier_off(bp->dev);
6876 
6877 	return 0;
6878 }
6879 
6880 static void bnx2x_reset_task(struct work_struct *work)
6881 {
6882 	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6883 
6884 #ifdef BNX2X_STOP_ON_ERROR
6885 	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6886 		  " so reset not done to allow debug dump,\n"
6887 	 KERN_ERR " you will need to reboot when done\n");
6888 	return;
6889 #endif
6890 
6891 	rtnl_lock();
6892 
6893 	if (!netif_running(bp->dev))
6894 		goto reset_task_exit;
6895 
6896 	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6897 	bnx2x_nic_load(bp, LOAD_NORMAL);
6898 
6899 reset_task_exit:
6900 	rtnl_unlock();
6901 }
6902 
6903 /* end of nic load/unload */
6904 
6905 /* ethtool_ops */
6906 
6907 /*
6908  * Init service functions
6909  */
6910 
6911 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6912 {
6913 	u32 val;
6914 
6915 	/* Check if there is any driver already loaded */
6916 	val = REG_RD(bp, MISC_REG_UNPREPARED);
6917 	if (val == 0x1) {
6918 		/* Check if it is the UNDI driver
6919 		 * UNDI driver initializes CID offset for normal bell to 0x7
6920 		 */
6921 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6922 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6923 		if (val == 0x7) {
6924 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6925 			/* save our func */
6926 			int func = BP_FUNC(bp);
6927 			u32 swap_en;
6928 			u32 swap_val;
6929 
6930 			/* clear the UNDI indication */
6931 			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6932 
6933 			BNX2X_DEV_INFO("UNDI is active! reset device\n");
6934 
6935 			/* try unload UNDI on port 0 */
6936 			bp->func = 0;
6937 			bp->fw_seq =
6938 			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6939 				DRV_MSG_SEQ_NUMBER_MASK);
6940 			reset_code = bnx2x_fw_command(bp, reset_code);
6941 
6942 			/* if UNDI is loaded on the other port */
6943 			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6944 
6945 				/* send "DONE" for previous unload */
6946 				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6947 
6948 				/* unload UNDI on port 1 */
6949 				bp->func = 1;
6950 				bp->fw_seq =
6951 			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6952 					DRV_MSG_SEQ_NUMBER_MASK);
6953 				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6954 
6955 				bnx2x_fw_command(bp, reset_code);
6956 			}
6957 
6958 			/* now it's safe to release the lock */
6959 			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6960 
6961 			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6962 				    HC_REG_CONFIG_0), 0x1000);
6963 
6964 			/* close input traffic and wait for it */
6965 			/* Do not rcv packets to BRB */
6966 			REG_WR(bp,
6967 			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6968 					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6969 			/* Do not direct rcv packets that are not for MCP to
6970 			 * the BRB */
6971 			REG_WR(bp,
6972 			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6973 					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6974 			/* clear AEU */
6975 			REG_WR(bp,
6976 			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6977 					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6978 			msleep(10);
6979 
6980 			/* save NIG port swap info */
6981 			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6982 			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6983 			/* reset device */
6984 			REG_WR(bp,
6985 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6986 			       0xd3ffffff);
6987 			REG_WR(bp,
6988 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6989 			       0x1403);
6990 			/* take the NIG out of reset and restore swap values */
6991 			REG_WR(bp,
6992 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6993 			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
6994 			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6995 			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6996 
6997 			/* send unload done to the MCP */
6998 			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6999 
7000 			/* restore our func and fw_seq */
7001 			bp->func = func;
7002 			bp->fw_seq =
7003 			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7004 				DRV_MSG_SEQ_NUMBER_MASK);
7005 
7006 		} else
7007 			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7008 	}
7009 }
7010 
7011 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7012 {
7013 	u32 val, val2, val3, val4, id;
7014 	u16 pmc;
7015 
7016 	/* Get the chip revision id and number. */
7017 	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7018 	val = REG_RD(bp, MISC_REG_CHIP_NUM);
7019 	id = ((val & 0xffff) << 16);
7020 	val = REG_RD(bp, MISC_REG_CHIP_REV);
7021 	id |= ((val & 0xf) << 12);
7022 	val = REG_RD(bp, MISC_REG_CHIP_METAL);
7023 	id |= ((val & 0xff) << 4);
7024 	val = REG_RD(bp, MISC_REG_BOND_ID);
7025 	id |= (val & 0xf);
7026 	bp->common.chip_id = id;
7027 	bp->link_params.chip_id = bp->common.chip_id;
7028 	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7029 
7030 	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7031 	bp->common.flash_size = (NVRAM_1MB_SIZE <<
7032 				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7033 	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7034 		       bp->common.flash_size, bp->common.flash_size);
7035 
7036 	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7037 	bp->link_params.shmem_base = bp->common.shmem_base;
7038 	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7039 
7040 	if (!bp->common.shmem_base ||
7041 	    (bp->common.shmem_base < 0xA0000) ||
7042 	    (bp->common.shmem_base >= 0xC0000)) {
7043 		BNX2X_DEV_INFO("MCP not active\n");
7044 		bp->flags |= NO_MCP_FLAG;
7045 		return;
7046 	}
7047 
7048 	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7049 	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7050 		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7051 		BNX2X_ERR("BAD MCP validity signature\n");
7052 
7053 	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7054 	bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7055 
7056 	BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
7057 		       bp->common.hw_config, bp->common.board);
7058 
7059 	bp->link_params.hw_led_mode = ((bp->common.hw_config &
7060 					SHARED_HW_CFG_LED_MODE_MASK) >>
7061 				       SHARED_HW_CFG_LED_MODE_SHIFT);
7062 
7063 	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7064 	bp->common.bc_ver = val;
7065 	BNX2X_DEV_INFO("bc_ver %X\n", val);
7066 	if (val < BNX2X_BC_VER) {
7067 		/* for now only warn;
7068 		 * later we might need to enforce this */
7069 		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7070 			  " please upgrade BC\n", BNX2X_BC_VER, val);
7071 	}
7072 
7073 	if (BP_E1HVN(bp) == 0) {
7074 		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7075 		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7076 	} else {
7077 		/* no WOL capability for E1HVN != 0 */
7078 		bp->flags |= NO_WOL_FLAG;
7079 	}
7080 	BNX2X_DEV_INFO("%sWoL capable\n",
7081 		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7082 
7083 	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7084 	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7085 	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7086 	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7087 
7088 	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7089 	       val, val2, val3, val4);
7090 }
7091 
7092 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7093 						    u32 switch_cfg)
7094 {
7095 	int port = BP_PORT(bp);
7096 	u32 ext_phy_type;
7097 
7098 	switch (switch_cfg) {
7099 	case SWITCH_CFG_1G:
7100 		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7101 
7102 		ext_phy_type =
7103 			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7104 		switch (ext_phy_type) {
7105 		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7106 			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7107 				       ext_phy_type);
7108 
7109 			bp->port.supported |= (SUPPORTED_10baseT_Half |
7110 					       SUPPORTED_10baseT_Full |
7111 					       SUPPORTED_100baseT_Half |
7112 					       SUPPORTED_100baseT_Full |
7113 					       SUPPORTED_1000baseT_Full |
7114 					       SUPPORTED_2500baseX_Full |
7115 					       SUPPORTED_TP |
7116 					       SUPPORTED_FIBRE |
7117 					       SUPPORTED_Autoneg |
7118 					       SUPPORTED_Pause |
7119 					       SUPPORTED_Asym_Pause);
7120 			break;
7121 
7122 		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7123 			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7124 				       ext_phy_type);
7125 
7126 			bp->port.supported |= (SUPPORTED_10baseT_Half |
7127 					       SUPPORTED_10baseT_Full |
7128 					       SUPPORTED_100baseT_Half |
7129 					       SUPPORTED_100baseT_Full |
7130 					       SUPPORTED_1000baseT_Full |
7131 					       SUPPORTED_TP |
7132 					       SUPPORTED_FIBRE |
7133 					       SUPPORTED_Autoneg |
7134 					       SUPPORTED_Pause |
7135 					       SUPPORTED_Asym_Pause);
7136 			break;
7137 
7138 		default:
7139 			BNX2X_ERR("NVRAM config error. "
7140 				  "BAD SerDes ext_phy_config 0x%x\n",
7141 				  bp->link_params.ext_phy_config);
7142 			return;
7143 		}
7144 
7145 		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7146 					   port*0x10);
7147 		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7148 		break;
7149 
7150 	case SWITCH_CFG_10G:
7151 		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7152 
7153 		ext_phy_type =
7154 			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7155 		switch (ext_phy_type) {
7156 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7157 			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7158 				       ext_phy_type);
7159 
7160 			bp->port.supported |= (SUPPORTED_10baseT_Half |
7161 					       SUPPORTED_10baseT_Full |
7162 					       SUPPORTED_100baseT_Half |
7163 					       SUPPORTED_100baseT_Full |
7164 					       SUPPORTED_1000baseT_Full |
7165 					       SUPPORTED_2500baseX_Full |
7166 					       SUPPORTED_10000baseT_Full |
7167 					       SUPPORTED_TP |
7168 					       SUPPORTED_FIBRE |
7169 					       SUPPORTED_Autoneg |
7170 					       SUPPORTED_Pause |
7171 					       SUPPORTED_Asym_Pause);
7172 			break;
7173 
7174 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7175 			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7176 				       ext_phy_type);
7177 
7178 			bp->port.supported |= (SUPPORTED_10000baseT_Full |
7179 					       SUPPORTED_FIBRE |
7180 					       SUPPORTED_Pause |
7181 					       SUPPORTED_Asym_Pause);
7182 			break;
7183 
7184 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7185 			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7186 				       ext_phy_type);
7187 
7188 			bp->port.supported |= (SUPPORTED_10000baseT_Full |
7189 					       SUPPORTED_1000baseT_Full |
7190 					       SUPPORTED_FIBRE |
7191 					       SUPPORTED_Pause |
7192 					       SUPPORTED_Asym_Pause);
7193 			break;
7194 
7195 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7196 			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7197 				       ext_phy_type);
7198 
7199 			bp->port.supported |= (SUPPORTED_10000baseT_Full |
7200 					       SUPPORTED_1000baseT_Full |
7201 					       SUPPORTED_FIBRE |
7202 					       SUPPORTED_Autoneg |
7203 					       SUPPORTED_Pause |
7204 					       SUPPORTED_Asym_Pause);
7205 			break;
7206 
7207 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7208 			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7209 				       ext_phy_type);
7210 
7211 			bp->port.supported |= (SUPPORTED_10000baseT_Full |
7212 					       SUPPORTED_2500baseX_Full |
7213 					       SUPPORTED_1000baseT_Full |
7214 					       SUPPORTED_FIBRE |
7215 					       SUPPORTED_Autoneg |
7216 					       SUPPORTED_Pause |
7217 					       SUPPORTED_Asym_Pause);
7218 			break;
7219 
7220 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7221 			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7222 				       ext_phy_type);
7223 
7224 			bp->port.supported |= (SUPPORTED_10000baseT_Full |
7225 					       SUPPORTED_TP |
7226 					       SUPPORTED_Autoneg |
7227 					       SUPPORTED_Pause |
7228 					       SUPPORTED_Asym_Pause);
7229 			break;
7230 
7231 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7232 			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7233 				  bp->link_params.ext_phy_config);
7234 			break;
7235 
7236 		default:
7237 			BNX2X_ERR("NVRAM config error. "
7238 				  "BAD XGXS ext_phy_config 0x%x\n",
7239 				  bp->link_params.ext_phy_config);
7240 			return;
7241 		}
7242 
7243 		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7244 					   port*0x18);
7245 		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7246 
7247 		break;
7248 
7249 	default:
7250 		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7251 			  bp->port.link_config);
7252 		return;
7253 	}
7254 	bp->link_params.phy_addr = bp->port.phy_addr;
7255 
7256 	/* mask what we support according to speed_cap_mask */
7257 	if (!(bp->link_params.speed_cap_mask &
7258 				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7259 		bp->port.supported &= ~SUPPORTED_10baseT_Half;
7260 
7261 	if (!(bp->link_params.speed_cap_mask &
7262 				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7263 		bp->port.supported &= ~SUPPORTED_10baseT_Full;
7264 
7265 	if (!(bp->link_params.speed_cap_mask &
7266 				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7267 		bp->port.supported &= ~SUPPORTED_100baseT_Half;
7268 
7269 	if (!(bp->link_params.speed_cap_mask &
7270 				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7271 		bp->port.supported &= ~SUPPORTED_100baseT_Full;
7272 
7273 	if (!(bp->link_params.speed_cap_mask &
7274 					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7275 		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7276 					SUPPORTED_1000baseT_Full);
7277 
7278 	if (!(bp->link_params.speed_cap_mask &
7279 					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7280 		bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7281 
7282 	if (!(bp->link_params.speed_cap_mask &
7283 					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7284 		bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7285 
7286 	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7287 }
7288 
7289 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7290 {
7291 	bp->link_params.req_duplex = DUPLEX_FULL;
7292 
7293 	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7294 	case PORT_FEATURE_LINK_SPEED_AUTO:
7295 		if (bp->port.supported & SUPPORTED_Autoneg) {
7296 			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7297 			bp->port.advertising = bp->port.supported;
7298 		} else {
7299 			u32 ext_phy_type =
7300 			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7301 
7302 			if ((ext_phy_type ==
7303 			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7304 			    (ext_phy_type ==
7305 			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7306 				/* force 10G, no AN */
7307 				bp->link_params.req_line_speed = SPEED_10000;
7308 				bp->port.advertising =
7309 						(ADVERTISED_10000baseT_Full |
7310 						 ADVERTISED_FIBRE);
7311 				break;
7312 			}
7313 			BNX2X_ERR("NVRAM config error. "
7314 				  "Invalid link_config 0x%x"
7315 				  "  Autoneg not supported\n",
7316 				  bp->port.link_config);
7317 			return;
7318 		}
7319 		break;
7320 
7321 	case PORT_FEATURE_LINK_SPEED_10M_FULL:
7322 		if (bp->port.supported & SUPPORTED_10baseT_Full) {
7323 			bp->link_params.req_line_speed = SPEED_10;
7324 			bp->port.advertising = (ADVERTISED_10baseT_Full |
7325 						ADVERTISED_TP);
7326 		} else {
7327 			BNX2X_ERR("NVRAM config error. "
7328 				  "Invalid link_config 0x%x"
7329 				  "  speed_cap_mask 0x%x\n",
7330 				  bp->port.link_config,
7331 				  bp->link_params.speed_cap_mask);
7332 			return;
7333 		}
7334 		break;
7335 
7336 	case PORT_FEATURE_LINK_SPEED_10M_HALF:
7337 		if (bp->port.supported & SUPPORTED_10baseT_Half) {
7338 			bp->link_params.req_line_speed = SPEED_10;
7339 			bp->link_params.req_duplex = DUPLEX_HALF;
7340 			bp->port.advertising = (ADVERTISED_10baseT_Half |
7341 						ADVERTISED_TP);
7342 		} else {
7343 			BNX2X_ERR("NVRAM config error. "
7344 				  "Invalid link_config 0x%x"
7345 				  "  speed_cap_mask 0x%x\n",
7346 				  bp->port.link_config,
7347 				  bp->link_params.speed_cap_mask);
7348 			return;
7349 		}
7350 		break;
7351 
7352 	case PORT_FEATURE_LINK_SPEED_100M_FULL:
7353 		if (bp->port.supported & SUPPORTED_100baseT_Full) {
7354 			bp->link_params.req_line_speed = SPEED_100;
7355 			bp->port.advertising = (ADVERTISED_100baseT_Full |
7356 						ADVERTISED_TP);
7357 		} else {
7358 			BNX2X_ERR("NVRAM config error. "
7359 				  "Invalid link_config 0x%x"
7360 				  "  speed_cap_mask 0x%x\n",
7361 				  bp->port.link_config,
7362 				  bp->link_params.speed_cap_mask);
7363 			return;
7364 		}
7365 		break;
7366 
7367 	case PORT_FEATURE_LINK_SPEED_100M_HALF:
7368 		if (bp->port.supported & SUPPORTED_100baseT_Half) {
7369 			bp->link_params.req_line_speed = SPEED_100;
7370 			bp->link_params.req_duplex = DUPLEX_HALF;
7371 			bp->port.advertising = (ADVERTISED_100baseT_Half |
7372 						ADVERTISED_TP);
7373 		} else {
7374 			BNX2X_ERR("NVRAM config error. "
7375 				  "Invalid link_config 0x%x"
7376 				  "  speed_cap_mask 0x%x\n",
7377 				  bp->port.link_config,
7378 				  bp->link_params.speed_cap_mask);
7379 			return;
7380 		}
7381 		break;
7382 
7383 	case PORT_FEATURE_LINK_SPEED_1G:
7384 		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7385 			bp->link_params.req_line_speed = SPEED_1000;
7386 			bp->port.advertising = (ADVERTISED_1000baseT_Full |
7387 						ADVERTISED_TP);
7388 		} else {
7389 			BNX2X_ERR("NVRAM config error. "
7390 				  "Invalid link_config 0x%x"
7391 				  "  speed_cap_mask 0x%x\n",
7392 				  bp->port.link_config,
7393 				  bp->link_params.speed_cap_mask);
7394 			return;
7395 		}
7396 		break;
7397 
7398 	case PORT_FEATURE_LINK_SPEED_2_5G:
7399 		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7400 			bp->link_params.req_line_speed = SPEED_2500;
7401 			bp->port.advertising = (ADVERTISED_2500baseX_Full |
7402 						ADVERTISED_TP);
7403 		} else {
7404 			BNX2X_ERR("NVRAM config error. "
7405 				  "Invalid link_config 0x%x"
7406 				  "  speed_cap_mask 0x%x\n",
7407 				  bp->port.link_config,
7408 				  bp->link_params.speed_cap_mask);
7409 			return;
7410 		}
7411 		break;
7412 
7413 	case PORT_FEATURE_LINK_SPEED_10G_CX4:
7414 	case PORT_FEATURE_LINK_SPEED_10G_KX4:
7415 	case PORT_FEATURE_LINK_SPEED_10G_KR:
7416 		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7417 			bp->link_params.req_line_speed = SPEED_10000;
7418 			bp->port.advertising = (ADVERTISED_10000baseT_Full |
7419 						ADVERTISED_FIBRE);
7420 		} else {
7421 			BNX2X_ERR("NVRAM config error. "
7422 				  "Invalid link_config 0x%x"
7423 				  "  speed_cap_mask 0x%x\n",
7424 				  bp->port.link_config,
7425 				  bp->link_params.speed_cap_mask);
7426 			return;
7427 		}
7428 		break;
7429 
7430 	default:
7431 		BNX2X_ERR("NVRAM config error. "
7432 			  "BAD link speed link_config 0x%x\n",
7433 			  bp->port.link_config);
7434 		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7435 		bp->port.advertising = bp->port.supported;
7436 		break;
7437 	}
7438 
7439 	bp->link_params.req_flow_ctrl = (bp->port.link_config &
7440 					 PORT_FEATURE_FLOW_CONTROL_MASK);
7441 	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7442 	    !(bp->port.supported & SUPPORTED_Autoneg))
7443 		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7444 
7445 	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7446 		       "  advertising 0x%x\n",
7447 		       bp->link_params.req_line_speed,
7448 		       bp->link_params.req_duplex,
7449 		       bp->link_params.req_flow_ctrl, bp->port.advertising);
7450 }
7451 
7452 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7453 {
7454 	int port = BP_PORT(bp);
7455 	u32 val, val2;
7456 
7457 	bp->link_params.bp = bp;
7458 	bp->link_params.port = port;
7459 
7460 	bp->link_params.serdes_config =
7461 		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7462 	bp->link_params.lane_config =
7463 		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7464 	bp->link_params.ext_phy_config =
7465 		SHMEM_RD(bp,
7466 			 dev_info.port_hw_config[port].external_phy_config);
7467 	bp->link_params.speed_cap_mask =
7468 		SHMEM_RD(bp,
7469 			 dev_info.port_hw_config[port].speed_capability_mask);
7470 
7471 	bp->port.link_config =
7472 		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7473 
7474 	BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7475 	     KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7476 		       "  link_config 0x%08x\n",
7477 		       bp->link_params.serdes_config,
7478 		       bp->link_params.lane_config,
7479 		       bp->link_params.ext_phy_config,
7480 		       bp->link_params.speed_cap_mask, bp->port.link_config);
7481 
7482 	bp->link_params.switch_cfg = (bp->port.link_config &
7483 				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
7484 	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7485 
7486 	bnx2x_link_settings_requested(bp);
7487 
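	/* assemble the station MAC address from the upper (2 bytes) and
	   lower (4 bytes) words stored in shmem */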
7488 	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7489 	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7490 	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7491 	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7492 	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7493 	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7494 	bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7495 	bp->dev->dev_addr[5] = (u8)(val & 0xff);
7496 	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7497 	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7498 }
7499 
7500 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7501 {
7502 	int func = BP_FUNC(bp);
7503 	u32 val, val2;
7504 	int rc = 0;
7505 
7506 	bnx2x_get_common_hwinfo(bp);
7507 
7508 	bp->e1hov = 0;
7509 	bp->e1hmf = 0;
7510 	if (CHIP_IS_E1H(bp)) {
7511 		bp->mf_config =
7512 			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7513 
7514 		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7515 		       FUNC_MF_CFG_E1HOV_TAG_MASK);
7516 		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7517 
7518 			bp->e1hov = val;
7519 			bp->e1hmf = 1;
7520 			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7521 				       "(0x%04x)\n",
7522 				       func, bp->e1hov, bp->e1hov);
7523 		} else {
7524 			BNX2X_DEV_INFO("Single function mode\n");
7525 			if (BP_E1HVN(bp)) {
7526 				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7527 					  "  aborting\n", func);
7528 				rc = -EPERM;
7529 			}
7530 		}
7531 	}
7532 
7533 	if (!BP_NOMCP(bp)) {
7534 		bnx2x_get_port_hwinfo(bp);
7535 
7536 		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7537 			      DRV_MSG_SEQ_NUMBER_MASK);
7538 		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7539 	}
7540 
7541 	if (IS_E1HMF(bp)) {
7542 		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7543 		val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7544 		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7545 		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7546 			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7547 			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7548 			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7549 			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7550 			bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7551 			bp->dev->dev_addr[5] = (u8)(val & 0xff);
7552 			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7553 			       ETH_ALEN);
7554 			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7555 			       ETH_ALEN);
7556 		}
7557 
7558 		return rc;
7559 	}
7560 
7561 	if (BP_NOMCP(bp)) {
7562 		/* only supposed to happen on emulation/FPGA */
7563 		BNX2X_ERR("warning random MAC workaround active\n");
7564 		random_ether_addr(bp->dev->dev_addr);
7565 		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7566 	}
7567 
7568 	return rc;
7569 }
7570 
7571 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7572 {
7573 	int func = BP_FUNC(bp);
7574 	int rc;
7575 
7576 	/* Disable interrupt handling until HW is initialized */
7577 	atomic_set(&bp->intr_sem, 1);
7578 
7579 	mutex_init(&bp->port.phy_mutex);
7580 
7581 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7582 	INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7583 
7584 	rc = bnx2x_get_hwinfo(bp);
7585 
7586 	/* need to reset chip if undi was active */
7587 	if (!BP_NOMCP(bp))
7588 		bnx2x_undi_unload(bp);
7589 
7590 	if (CHIP_REV_IS_FPGA(bp))
7591 		printk(KERN_ERR PFX "FPGA detected\n");
7592 
7593 	if (BP_NOMCP(bp) && (func == 0))
7594 		printk(KERN_ERR PFX
7595 		       "MCP disabled, must load devices in order!\n");
7596 
7597 	/* Set TPA flags */
7598 	if (disable_tpa) {
7599 		bp->flags &= ~TPA_ENABLE_FLAG;
7600 		bp->dev->features &= ~NETIF_F_LRO;
7601 	} else {
7602 		bp->flags |= TPA_ENABLE_FLAG;
7603 		bp->dev->features |= NETIF_F_LRO;
7604 	}
7605 
7606 
7607 	bp->tx_ring_size = MAX_TX_AVAIL;
7608 	bp->rx_ring_size = MAX_RX_AVAIL;
7609 
7610 	bp->rx_csum = 1;
7611 	bp->rx_offset = 0;
7612 
7613 	bp->tx_ticks = 50;
7614 	bp->rx_ticks = 25;
7615 
7616 	bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7617 	bp->current_interval = (poll ? poll : bp->timer_interval);
7618 
7619 	init_timer(&bp->timer);
7620 	bp->timer.expires = jiffies + bp->current_interval;
7621 	bp->timer.data = (unsigned long) bp;
7622 	bp->timer.function = bnx2x_timer;
7623 
7624 	return rc;
7625 }
7626 
7627 /*
7628  * ethtool service functions
7629  */
7630 
7631 /* All ethtool functions called with rtnl_lock */
7632 
7633 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7634 {
7635 	struct bnx2x *bp = netdev_priv(dev);
7636 
7637 	cmd->supported = bp->port.supported;
7638 	cmd->advertising = bp->port.advertising;
7639 
7640 	if (netif_carrier_ok(dev)) {
7641 		cmd->speed = bp->link_vars.line_speed;
7642 		cmd->duplex = bp->link_vars.duplex;
7643 	} else {
7644 		cmd->speed = bp->link_params.req_line_speed;
7645 		cmd->duplex = bp->link_params.req_duplex;
7646 	}
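	/* in multi-function mode the reported speed is capped by this
	   function's maximum bandwidth from the MF configuration */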
7647 	if (IS_E1HMF(bp)) {
7648 		u16 vn_max_rate;
7649 
7650 		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7651 				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7652 		if (vn_max_rate < cmd->speed)
7653 			cmd->speed = vn_max_rate;
7654 	}
7655 
7656 	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7657 		u32 ext_phy_type =
7658 			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7659 
7660 		switch (ext_phy_type) {
7661 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7662 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7663 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7664 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7665 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7666 			cmd->port = PORT_FIBRE;
7667 			break;
7668 
7669 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7670 			cmd->port = PORT_TP;
7671 			break;
7672 
7673 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7674 			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7675 				  bp->link_params.ext_phy_config);
7676 			break;
7677 
7678 		default:
7679 			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7680 			   bp->link_params.ext_phy_config);
7681 			break;
7682 		}
7683 	} else
7684 		cmd->port = PORT_TP;
7685 
7686 	cmd->phy_address = bp->port.phy_addr;
7687 	cmd->transceiver = XCVR_INTERNAL;
7688 
7689 	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7690 		cmd->autoneg = AUTONEG_ENABLE;
7691 	else
7692 		cmd->autoneg = AUTONEG_DISABLE;
7693 
7694 	cmd->maxtxpkt = 0;
7695 	cmd->maxrxpkt = 0;
7696 
7697 	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7698 	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7699 	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7700 	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7701 	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7702 	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7703 	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7704 
7705 	return 0;
7706 }
7707 
7708 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7709 {
7710 	struct bnx2x *bp = netdev_priv(dev);
7711 	u32 advertising;
7712 
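	/* in multi-function (E1HMF) mode the per-port link settings cannot
	   be changed from here; the request is silently ignored */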
7713 	if (IS_E1HMF(bp))
7714 		return 0;
7715 
7716 	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7717 	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7718 	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7719 	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7720 	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7721 	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7722 	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7723 
7724 	if (cmd->autoneg == AUTONEG_ENABLE) {
7725 		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7726 			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7727 			return -EINVAL;
7728 		}
7729 
7730 		/* advertise the requested speed and duplex if supported */
7731 		cmd->advertising &= bp->port.supported;
7732 
7733 		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7734 		bp->link_params.req_duplex = DUPLEX_FULL;
7735 		bp->port.advertising |= (ADVERTISED_Autoneg |
7736 					 cmd->advertising);
7737 
7738 	} else { /* forced speed */
7739 		/* advertise the requested speed and duplex if supported */
7740 		switch (cmd->speed) {
7741 		case SPEED_10:
7742 			if (cmd->duplex == DUPLEX_FULL) {
7743 				if (!(bp->port.supported &
7744 				      SUPPORTED_10baseT_Full)) {
7745 					DP(NETIF_MSG_LINK,
7746 					   "10M full not supported\n");
7747 					return -EINVAL;
7748 				}
7749 
7750 				advertising = (ADVERTISED_10baseT_Full |
7751 					       ADVERTISED_TP);
7752 			} else {
7753 				if (!(bp->port.supported &
7754 				      SUPPORTED_10baseT_Half)) {
7755 					DP(NETIF_MSG_LINK,
7756 					   "10M half not supported\n");
7757 					return -EINVAL;
7758 				}
7759 
7760 				advertising = (ADVERTISED_10baseT_Half |
7761 					       ADVERTISED_TP);
7762 			}
7763 			break;
7764 
7765 		case SPEED_100:
7766 			if (cmd->duplex == DUPLEX_FULL) {
7767 				if (!(bp->port.supported &
7768 						SUPPORTED_100baseT_Full)) {
7769 					DP(NETIF_MSG_LINK,
7770 					   "100M full not supported\n");
7771 					return -EINVAL;
7772 				}
7773 
7774 				advertising = (ADVERTISED_100baseT_Full |
7775 					       ADVERTISED_TP);
7776 			} else {
7777 				if (!(bp->port.supported &
7778 						SUPPORTED_100baseT_Half)) {
7779 					DP(NETIF_MSG_LINK,
7780 					   "100M half not supported\n");
7781 					return -EINVAL;
7782 				}
7783 
7784 				advertising = (ADVERTISED_100baseT_Half |
7785 					       ADVERTISED_TP);
7786 			}
7787 			break;
7788 
7789 		case SPEED_1000:
7790 			if (cmd->duplex != DUPLEX_FULL) {
7791 				DP(NETIF_MSG_LINK, "1G half not supported\n");
7792 				return -EINVAL;
7793 			}
7794 
7795 			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7796 				DP(NETIF_MSG_LINK, "1G full not supported\n");
7797 				return -EINVAL;
7798 			}
7799 
7800 			advertising = (ADVERTISED_1000baseT_Full |
7801 				       ADVERTISED_TP);
7802 			break;
7803 
7804 		case SPEED_2500:
7805 			if (cmd->duplex != DUPLEX_FULL) {
7806 				DP(NETIF_MSG_LINK,
7807 				   "2.5G half not supported\n");
7808 				return -EINVAL;
7809 			}
7810 
7811 			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7812 				DP(NETIF_MSG_LINK,
7813 				   "2.5G full not supported\n");
7814 				return -EINVAL;
7815 			}
7816 
7817 			advertising = (ADVERTISED_2500baseX_Full |
7818 				       ADVERTISED_TP);
7819 			break;
7820 
7821 		case SPEED_10000:
7822 			if (cmd->duplex != DUPLEX_FULL) {
7823 				DP(NETIF_MSG_LINK, "10G half not supported\n");
7824 				return -EINVAL;
7825 			}
7826 
7827 			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7828 				DP(NETIF_MSG_LINK, "10G full not supported\n");
7829 				return -EINVAL;
7830 			}
7831 
7832 			advertising = (ADVERTISED_10000baseT_Full |
7833 				       ADVERTISED_FIBRE);
7834 			break;
7835 
7836 		default:
7837 			DP(NETIF_MSG_LINK, "Unsupported speed\n");
7838 			return -EINVAL;
7839 		}
7840 
7841 		bp->link_params.req_line_speed = cmd->speed;
7842 		bp->link_params.req_duplex = cmd->duplex;
7843 		bp->port.advertising = advertising;
7844 	}
7845 
7846 	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7847 	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7848 	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
7849 	   bp->port.advertising);
7850 
7851 	if (netif_running(dev)) {
7852 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7853 		bnx2x_link_set(bp);
7854 	}
7855 
7856 	return 0;
7857 }
7858 
7859 #define PHY_FW_VER_LEN			10
7860 
7861 static void bnx2x_get_drvinfo(struct net_device *dev,
7862 			      struct ethtool_drvinfo *info)
7863 {
7864 	struct bnx2x *bp = netdev_priv(dev);
7865 	u8 phy_fw_ver[PHY_FW_VER_LEN];
7866 
7867 	strcpy(info->driver, DRV_MODULE_NAME);
7868 	strcpy(info->version, DRV_MODULE_VERSION);
7869 
7870 	phy_fw_ver[0] = '\0';
7871 	if (bp->port.pmf) {
7872 		bnx2x_acquire_phy_lock(bp);
7873 		bnx2x_get_ext_phy_fw_version(&bp->link_params,
7874 					     (bp->state != BNX2X_STATE_CLOSED),
7875 					     phy_fw_ver, PHY_FW_VER_LEN);
7876 		bnx2x_release_phy_lock(bp);
7877 	}
7878 
7879 	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7880 		 (bp->common.bc_ver & 0xff0000) >> 16,
7881 		 (bp->common.bc_ver & 0xff00) >> 8,
7882 		 (bp->common.bc_ver & 0xff),
7883 		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7884 	strcpy(info->bus_info, pci_name(bp->pdev));
7885 	info->n_stats = BNX2X_NUM_STATS;
7886 	info->testinfo_len = BNX2X_NUM_TESTS;
7887 	info->eedump_len = bp->common.flash_size;
7888 	info->regdump_len = 0;
7889 }
7890 
7891 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7892 {
7893 	struct bnx2x *bp = netdev_priv(dev);
7894 
7895 	if (bp->flags & NO_WOL_FLAG) {
7896 		wol->supported = 0;
7897 		wol->wolopts = 0;
7898 	} else {
7899 		wol->supported = WAKE_MAGIC;
7900 		if (bp->wol)
7901 			wol->wolopts = WAKE_MAGIC;
7902 		else
7903 			wol->wolopts = 0;
7904 	}
7905 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7906 }
7907 
7908 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7909 {
7910 	struct bnx2x *bp = netdev_priv(dev);
7911 
7912 	if (wol->wolopts & ~WAKE_MAGIC)
7913 		return -EINVAL;
7914 
7915 	if (wol->wolopts & WAKE_MAGIC) {
7916 		if (bp->flags & NO_WOL_FLAG)
7917 			return -EINVAL;
7918 
7919 		bp->wol = 1;
7920 	} else
7921 		bp->wol = 0;
7922 
7923 	return 0;
7924 }
7925 
7926 static u32 bnx2x_get_msglevel(struct net_device *dev)
7927 {
7928 	struct bnx2x *bp = netdev_priv(dev);
7929 
7930 	return bp->msglevel;
7931 }
7932 
7933 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7934 {
7935 	struct bnx2x *bp = netdev_priv(dev);
7936 
7937 	if (capable(CAP_NET_ADMIN))
7938 		bp->msglevel = level;
7939 }
7940 
7941 static int bnx2x_nway_reset(struct net_device *dev)
7942 {
7943 	struct bnx2x *bp = netdev_priv(dev);
7944 
7945 	if (!bp->port.pmf)
7946 		return 0;
7947 
7948 	if (netif_running(dev)) {
7949 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7950 		bnx2x_link_set(bp);
7951 	}
7952 
7953 	return 0;
7954 }
7955 
7956 static int bnx2x_get_eeprom_len(struct net_device *dev)
7957 {
7958 	struct bnx2x *bp = netdev_priv(dev);
7959 
7960 	return bp->common.flash_size;
7961 }
7962 
7963 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7964 {
7965 	int port = BP_PORT(bp);
7966 	int count, i;
7967 	u32 val = 0;
7968 
7969 	/* adjust timeout for emulation/FPGA */
7970 	count = NVRAM_TIMEOUT_COUNT;
7971 	if (CHIP_REV_IS_SLOW(bp))
7972 		count *= 100;
7973 
7974 	/* request access to nvram interface */
7975 	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7976 	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7977 
7978 	for (i = 0; i < count*10; i++) {
7979 		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7980 		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7981 			break;
7982 
7983 		udelay(5);
7984 	}
7985 
7986 	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7987 		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7988 		return -EBUSY;
7989 	}
7990 
7991 	return 0;
7992 }
7993 
7994 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7995 {
7996 	int port = BP_PORT(bp);
7997 	int count, i;
7998 	u32 val = 0;
7999 
8000 	/* adjust timeout for emulation/FPGA */
8001 	count = NVRAM_TIMEOUT_COUNT;
8002 	if (CHIP_REV_IS_SLOW(bp))
8003 		count *= 100;
8004 
8005 	/* relinquish nvram interface */
8006 	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8007 	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8008 
8009 	for (i = 0; i < count*10; i++) {
8010 		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8011 		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8012 			break;
8013 
8014 		udelay(5);
8015 	}
8016 
8017 	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8018 		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8019 		return -EBUSY;
8020 	}
8021 
8022 	return 0;
8023 }
8024 
8025 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8026 {
8027 	u32 val;
8028 
8029 	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8030 
8031 	/* enable both bits, even on read */
8032 	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8033 	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
8034 		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
8035 }
8036 
8037 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8038 {
8039 	u32 val;
8040 
8041 	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8042 
8043 	/* disable both bits, even after read */
8044 	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8045 	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8046 			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8047 }
8048 
8049 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8050 				  u32 cmd_flags)
8051 {
8052 	int count, i, rc;
8053 	u32 val;
8054 
8055 	/* build the command word */
8056 	cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8057 
8058 	/* need to clear DONE bit separately */
8059 	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8060 
8061 	/* address of the NVRAM to read from */
8062 	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8063 	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8064 
8065 	/* issue a read command */
8066 	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8067 
8068 	/* adjust timeout for emulation/FPGA */
8069 	count = NVRAM_TIMEOUT_COUNT;
8070 	if (CHIP_REV_IS_SLOW(bp))
8071 		count *= 100;
8072 
8073 	/* wait for completion */
8074 	*ret_val = 0;
8075 	rc = -EBUSY;
8076 	for (i = 0; i < count; i++) {
8077 		udelay(5);
8078 		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8079 
8080 		if (val & MCPR_NVM_COMMAND_DONE) {
8081 			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8082 			/* we read nvram data in cpu order
8083 			 * but ethtool sees it as an array of bytes
8084 			 * converting to big-endian will do the work */
8085 			val = cpu_to_be32(val);
8086 			*ret_val = val;
8087 			rc = 0;
8088 			break;
8089 		}
8090 	}
8091 
8092 	return rc;
8093 }
8094 
8095 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8096 			    int buf_size)
8097 {
8098 	int rc;
8099 	u32 cmd_flags;
8100 	u32 val;
8101 
8102 	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8103 		DP(BNX2X_MSG_NVM,
8104 		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8105 		   offset, buf_size);
8106 		return -EINVAL;
8107 	}
8108 
8109 	if (offset + buf_size > bp->common.flash_size) {
8110 		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8111 				  " buf_size (0x%x) > flash_size (0x%x)\n",
8112 		   offset, buf_size, bp->common.flash_size);
8113 		return -EINVAL;
8114 	}
8115 
8116 	/* request access to nvram interface */
8117 	rc = bnx2x_acquire_nvram_lock(bp);
8118 	if (rc)
8119 		return rc;
8120 
8121 	/* enable access to nvram interface */
8122 	bnx2x_enable_nvram_access(bp);
8123 
8124 	/* read the first word(s) */
8125 	cmd_flags = MCPR_NVM_COMMAND_FIRST;
8126 	while ((buf_size > sizeof(u32)) && (rc == 0)) {
8127 		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8128 		memcpy(ret_buf, &val, 4);
8129 
8130 		/* advance to the next dword */
8131 		offset += sizeof(u32);
8132 		ret_buf += sizeof(u32);
8133 		buf_size -= sizeof(u32);
8134 		cmd_flags = 0;
8135 	}
8136 
8137 	if (rc == 0) {
8138 		cmd_flags |= MCPR_NVM_COMMAND_LAST;
8139 		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8140 		memcpy(ret_buf, &val, 4);
8141 	}
8142 
8143 	/* disable access to nvram interface */
8144 	bnx2x_disable_nvram_access(bp);
8145 	bnx2x_release_nvram_lock(bp);
8146 
8147 	return rc;
8148 }
8149 
8150 static int bnx2x_get_eeprom(struct net_device *dev,
8151 			    struct ethtool_eeprom *eeprom, u8 *eebuf)
8152 {
8153 	struct bnx2x *bp = netdev_priv(dev);
8154 	int rc;
8155 
8156 	if (!netif_running(dev))
8157 		return -EAGAIN;
8158 
8159 	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8160 	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8161 	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8162 	   eeprom->len, eeprom->len);
8163 
8164 	/* parameters already validated in ethtool_get_eeprom */
8165 
8166 	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8167 
8168 	return rc;
8169 }
8170 
8171 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8172 				   u32 cmd_flags)
8173 {
8174 	int count, i, rc;
8175 
8176 	/* build the command word */
8177 	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8178 
8179 	/* need to clear DONE bit separately */
8180 	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8181 
8182 	/* write the data */
8183 	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8184 
8185 	/* address of the NVRAM to write to */
8186 	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8187 	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8188 
8189 	/* issue the write command */
8190 	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8191 
8192 	/* adjust timeout for emulation/FPGA */
8193 	count = NVRAM_TIMEOUT_COUNT;
8194 	if (CHIP_REV_IS_SLOW(bp))
8195 		count *= 100;
8196 
8197 	/* wait for completion */
8198 	rc = -EBUSY;
8199 	for (i = 0; i < count; i++) {
8200 		udelay(5);
8201 		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8202 		if (val & MCPR_NVM_COMMAND_DONE) {
8203 			rc = 0;
8204 			break;
8205 		}
8206 	}
8207 
8208 	return rc;
8209 }
8210 
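/* bit offset of a byte within its naturally aligned dword */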
8211 #define BYTE_OFFSET(offset)		(8 * (offset & 0x03))
8212 
8213 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8214 			      int buf_size)
8215 {
8216 	int rc;
8217 	u32 cmd_flags;
8218 	u32 align_offset;
8219 	u32 val;
8220 
8221 	if (offset + buf_size > bp->common.flash_size) {
8222 		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8223 				  " buf_size (0x%x) > flash_size (0x%x)\n",
8224 		   offset, buf_size, bp->common.flash_size);
8225 		return -EINVAL;
8226 	}
8227 
8228 	/* request access to nvram interface */
8229 	rc = bnx2x_acquire_nvram_lock(bp);
8230 	if (rc)
8231 		return rc;
8232 
8233 	/* enable access to nvram interface */
8234 	bnx2x_enable_nvram_access(bp);
8235 
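	/* a single byte write is done as a read-modify-write of the aligned
	   dword that contains it */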
8236 	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8237 	align_offset = (offset & ~0x03);
8238 	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8239 
8240 	if (rc == 0) {
8241 		val &= ~(0xff << BYTE_OFFSET(offset));
8242 		val |= (*data_buf << BYTE_OFFSET(offset));
8243 
8244 		/* nvram data is returned as an array of bytes
8245 		 * convert it back to cpu order */
8246 		val = be32_to_cpu(val);
8247 
8248 		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8249 					     cmd_flags);
8250 	}
8251 
8252 	/* disable access to nvram interface */
8253 	bnx2x_disable_nvram_access(bp);
8254 	bnx2x_release_nvram_lock(bp);
8255 
8256 	return rc;
8257 }
8258 
8259 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8260 			     int buf_size)
8261 {
8262 	int rc;
8263 	u32 cmd_flags;
8264 	u32 val;
8265 	u32 written_so_far;
8266 
8267 	if (buf_size == 1)	/* ethtool */
8268 		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8269 
8270 	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8271 		DP(BNX2X_MSG_NVM,
8272 		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8273 		   offset, buf_size);
8274 		return -EINVAL;
8275 	}
8276 
8277 	if (offset + buf_size > bp->common.flash_size) {
8278 		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8279 				  " buf_size (0x%x) > flash_size (0x%x)\n",
8280 		   offset, buf_size, bp->common.flash_size);
8281 		return -EINVAL;
8282 	}
8283 
8284 	/* request access to nvram interface */
8285 	rc = bnx2x_acquire_nvram_lock(bp);
8286 	if (rc)
8287 		return rc;
8288 
8289 	/* enable access to nvram interface */
8290 	bnx2x_enable_nvram_access(bp);
8291 
8292 	written_so_far = 0;
8293 	cmd_flags = MCPR_NVM_COMMAND_FIRST;
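	/* the LAST flag is set on the final dword of the buffer and at each
	   NVRAM page boundary; FIRST is set again when a new page starts */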
8294 	while ((written_so_far < buf_size) && (rc == 0)) {
8295 		if (written_so_far == (buf_size - sizeof(u32)))
8296 			cmd_flags |= MCPR_NVM_COMMAND_LAST;
8297 		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8298 			cmd_flags |= MCPR_NVM_COMMAND_LAST;
8299 		else if ((offset % NVRAM_PAGE_SIZE) == 0)
8300 			cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8301 
8302 		memcpy(&val, data_buf, 4);
8303 
8304 		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8305 
8306 		/* advance to the next dword */
8307 		offset += sizeof(u32);
8308 		data_buf += sizeof(u32);
8309 		written_so_far += sizeof(u32);
8310 		cmd_flags = 0;
8311 	}
8312 
8313 	/* disable access to nvram interface */
8314 	bnx2x_disable_nvram_access(bp);
8315 	bnx2x_release_nvram_lock(bp);
8316 
8317 	return rc;
8318 }
8319 
8320 static int bnx2x_set_eeprom(struct net_device *dev,
8321 			    struct ethtool_eeprom *eeprom, u8 *eebuf)
8322 {
8323 	struct bnx2x *bp = netdev_priv(dev);
8324 	int rc;
8325 
8326 	if (!netif_running(dev))
8327 		return -EAGAIN;
8328 
8329 	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8330 	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8331 	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8332 	   eeprom->len, eeprom->len);
8333 
8334 	/* parameters already validated in ethtool_set_eeprom */
8335 
8336 	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8337 	if (eeprom->magic == 0x00504859)
8338 		if (bp->port.pmf) {
8339 
8340 			bnx2x_acquire_phy_lock(bp);
8341 			rc = bnx2x_flash_download(bp, BP_PORT(bp),
8342 					     bp->link_params.ext_phy_config,
8343 					     (bp->state != BNX2X_STATE_CLOSED),
8344 					     eebuf, eeprom->len);
8345 			if ((bp->state == BNX2X_STATE_OPEN) ||
8346 			    (bp->state == BNX2X_STATE_DISABLED)) {
8347 				rc |= bnx2x_link_reset(&bp->link_params,
8348 						       &bp->link_vars);
8349 				rc |= bnx2x_phy_init(&bp->link_params,
8350 						     &bp->link_vars);
8351 			}
8352 			bnx2x_release_phy_lock(bp);
8353 
8354 		} else /* Only the PMF can access the PHY */
8355 			return -EINVAL;
8356 	else
8357 		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8358 
8359 	return rc;
8360 }
8361 
8362 static int bnx2x_get_coalesce(struct net_device *dev,
8363 			      struct ethtool_coalesce *coal)
8364 {
8365 	struct bnx2x *bp = netdev_priv(dev);
8366 
8367 	memset(coal, 0, sizeof(struct ethtool_coalesce));
8368 
8369 	coal->rx_coalesce_usecs = bp->rx_ticks;
8370 	coal->tx_coalesce_usecs = bp->tx_ticks;
8371 
8372 	return 0;
8373 }
8374 
8375 static int bnx2x_set_coalesce(struct net_device *dev,
8376 			      struct ethtool_coalesce *coal)
8377 {
8378 	struct bnx2x *bp = netdev_priv(dev);
8379 
8380 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8381 	if (bp->rx_ticks > 3000)
8382 		bp->rx_ticks = 3000;
8383 
8384 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8385 	if (bp->tx_ticks > 0x3000)
8386 		bp->tx_ticks = 0x3000;
8387 
8388 	if (netif_running(dev))
8389 		bnx2x_update_coalesce(bp);
8390 
8391 	return 0;
8392 }
8393 
8394 static void bnx2x_get_ringparam(struct net_device *dev,
8395 				struct ethtool_ringparam *ering)
8396 {
8397 	struct bnx2x *bp = netdev_priv(dev);
8398 
8399 	ering->rx_max_pending = MAX_RX_AVAIL;
8400 	ering->rx_mini_max_pending = 0;
8401 	ering->rx_jumbo_max_pending = 0;
8402 
8403 	ering->rx_pending = bp->rx_ring_size;
8404 	ering->rx_mini_pending = 0;
8405 	ering->rx_jumbo_pending = 0;
8406 
8407 	ering->tx_max_pending = MAX_TX_AVAIL;
8408 	ering->tx_pending = bp->tx_ring_size;
8409 }
8410 
8411 static int bnx2x_set_ringparam(struct net_device *dev,
8412 			       struct ethtool_ringparam *ering)
8413 {
8414 	struct bnx2x *bp = netdev_priv(dev);
8415 	int rc = 0;
8416 
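	/* reject ring sizes beyond the limits; the tx ring must exceed
	   MAX_SKB_FRAGS + 4 entries (room for a maximally fragmented skb) */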
8417 	if ((ering->rx_pending > MAX_RX_AVAIL) ||
8418 	    (ering->tx_pending > MAX_TX_AVAIL) ||
8419 	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8420 		return -EINVAL;
8421 
8422 	bp->rx_ring_size = ering->rx_pending;
8423 	bp->tx_ring_size = ering->tx_pending;
8424 
8425 	if (netif_running(dev)) {
8426 		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8427 		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8428 	}
8429 
8430 	return rc;
8431 }
8432 
8433 static void bnx2x_get_pauseparam(struct net_device *dev,
8434 				 struct ethtool_pauseparam *epause)
8435 {
8436 	struct bnx2x *bp = netdev_priv(dev);
8437 
8438 	epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8439 			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8440 
8441 	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8442 			    BNX2X_FLOW_CTRL_RX);
8443 	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8444 			    BNX2X_FLOW_CTRL_TX);
8445 
8446 	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8447 	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8448 	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8449 }
8450 
8451 static int bnx2x_set_pauseparam(struct net_device *dev,
8452 				struct ethtool_pauseparam *epause)
8453 {
8454 	struct bnx2x *bp = netdev_priv(dev);
8455 
8456 	if (IS_E1HMF(bp))
8457 		return 0;
8458 
8459 	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8460 	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8461 	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8462 
8463 	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8464 
8465 	if (epause->rx_pause)
8466 		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8467 
8468 	if (epause->tx_pause)
8469 		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8470 
8471 	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8472 		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8473 
8474 	if (epause->autoneg) {
8475 		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8476 			DP(NETIF_MSG_LINK, "autoneg not supported\n");
8477 			return -EINVAL;
8478 		}
8479 
8480 		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8481 			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8482 	}
8483 
8484 	DP(NETIF_MSG_LINK,
8485 	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8486 
8487 	if (netif_running(dev)) {
8488 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8489 		bnx2x_link_set(bp);
8490 	}
8491 
8492 	return 0;
8493 }
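/* Usage sketch (assuming the standard ethtool CLI):
 *
 *   ethtool -A eth0 autoneg on rx on tx on
 *
 * rx/tx map to BNX2X_FLOW_CTRL_RX/TX above; the autoneg request is only
 * honoured when the port supports autonegotiation and the line speed is
 * set to autoneg, in which case req_flow_ctrl reverts to
 * BNX2X_FLOW_CTRL_AUTO before the link is (re)configured.
 */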
8494 
8495 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8496 {
8497 	struct bnx2x *bp = netdev_priv(dev);
8498 	int changed = 0;
8499 	int rc = 0;
8500 
8501 	/* TPA requires Rx CSUM offloading */
8502 	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8503 		if (!(dev->features & NETIF_F_LRO)) {
8504 			dev->features |= NETIF_F_LRO;
8505 			bp->flags |= TPA_ENABLE_FLAG;
8506 			changed = 1;
8507 		}
8508 
8509 	} else if (dev->features & NETIF_F_LRO) {
8510 		dev->features &= ~NETIF_F_LRO;
8511 		bp->flags &= ~TPA_ENABLE_FLAG;
8512 		changed = 1;
8513 	}
8514 
8515 	if (changed && netif_running(dev)) {
8516 		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8517 		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8518 	}
8519 
8520 	return rc;
8521 }
8522 
8523 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8524 {
8525 	struct bnx2x *bp = netdev_priv(dev);
8526 
8527 	return bp->rx_csum;
8528 }
8529 
8530 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8531 {
8532 	struct bnx2x *bp = netdev_priv(dev);
8533 	int rc = 0;
8534 
8535 	bp->rx_csum = data;
8536 
8537 	/* Disable TPA when Rx CSUM is disabled; otherwise all
8538 	   TPA'ed packets would be discarded due to a wrong TCP CSUM */
8539 	if (!data) {
8540 		u32 flags = ethtool_op_get_flags(dev);
8541 
8542 		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8543 	}
8544 
8545 	return rc;
8546 }
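/* Note on the TPA/Rx-CSUM coupling above: turning Rx checksum offload off
 * also forces LRO (TPA) off by re-calling bnx2x_set_flags() without
 * ETH_FLAG_LRO, since TPA-aggregated packets rely on hardware checksum
 * validation.  From user space (assuming the legacy ethtool offload
 * interface) "ethtool -K eth0 rx off" therefore implicitly drops LRO too.
 */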
8547 
8548 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8549 {
8550 	if (data) {
8551 		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8552 		dev->features |= NETIF_F_TSO6;
8553 	} else {
8554 		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8555 		dev->features &= ~NETIF_F_TSO6;
8556 	}
8557 
8558 	return 0;
8559 }
8560 
8561 static const struct {
8562 	char string[ETH_GSTRING_LEN];
8563 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8564 	{ "register_test (offline)" },
8565 	{ "memory_test (offline)" },
8566 	{ "loopback_test (offline)" },
8567 	{ "nvram_test (online)" },
8568 	{ "interrupt_test (online)" },
8569 	{ "link_test (online)" },
8570 	{ "idle check (online)" },
8571 	{ "MC errors (online)" }
8572 };
8573 
8574 static int bnx2x_self_test_count(struct net_device *dev)
8575 {
8576 	return BNX2X_NUM_TESTS;
8577 }
8578 
8579 static int bnx2x_test_registers(struct bnx2x *bp)
8580 {
8581 	int idx, i, rc = -ENODEV;
8582 	u32 wr_val = 0;
8583 	int port = BP_PORT(bp);
8584 	static const struct {
8585 		u32  offset0;
8586 		u32  offset1;
8587 		u32  mask;
8588 	} reg_tbl[] = {
8589 /* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8590 		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8591 		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
8592 		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8593 		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8594 		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8595 		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8596 		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8597 		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8598 		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8599 /* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8600 		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
8601 		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8602 		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8603 		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8604 		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8605 		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8606 		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8607 		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8608 		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8609 /* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8610 		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8611 		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8612 		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8613 		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8614 		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8615 		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8616 		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8617 		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8618 		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8619 /* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8620 		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8621 		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8622 		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8623 		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8624 		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8625 		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8626 		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8627 
8628 		{ 0xffffffff, 0, 0x00000000 }
8629 	};
8630 
8631 	if (!netif_running(bp->dev))
8632 		return rc;
8633 
8634 	/* Repeat the test twice:
8635 	   First by writing 0x00000000, second by writing 0xffffffff */
8636 	for (idx = 0; idx < 2; idx++) {
8637 
8638 		switch (idx) {
8639 		case 0:
8640 			wr_val = 0;
8641 			break;
8642 		case 1:
8643 			wr_val = 0xffffffff;
8644 			break;
8645 		}
8646 
8647 		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8648 			u32 offset, mask, save_val, val;
8649 
8650 			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8651 			mask = reg_tbl[i].mask;
8652 
8653 			save_val = REG_RD(bp, offset);
8654 
8655 			REG_WR(bp, offset, wr_val);
8656 			val = REG_RD(bp, offset);
8657 
8658 			/* Restore the original register's value */
8659 			REG_WR(bp, offset, save_val);
8660 
8661 			/* verify that the value is as expected */
8662 			if ((val & mask) != (wr_val & mask))
8663 				goto test_reg_exit;
8664 		}
8665 	}
8666 
8667 	rc = 0;
8668 
8669 test_reg_exit:
8670 	return rc;
8671 }
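/* Worked example of the table walk above (derived directly from the code):
 * each entry is tested at offset0 + port * offset1, so on port 1 the entry
 * { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff } exercises the register
 * 4 bytes past the port-0 copy.  The test writes 0x00000000 and then
 * 0xffffffff, reads the value back, restores the original, and fails if
 * any writable bit (per the mask) did not stick.
 */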
8672 
8673 static int bnx2x_test_memory(struct bnx2x *bp)
8674 {
8675 	int i, j, rc = -ENODEV;
8676 	u32 val;
8677 	static const struct {
8678 		u32 offset;
8679 		int size;
8680 	} mem_tbl[] = {
8681 		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8682 		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8683 		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8684 		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8685 		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8686 		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8687 		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8688 
8689 		{ 0xffffffff, 0 }
8690 	};
8691 	static const struct {
8692 		char *name;
8693 		u32 offset;
8694 		u32 e1_mask;
8695 		u32 e1h_mask;
8696 	} prty_tbl[] = {
8697 		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8698 		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8699 		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8700 		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8701 		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8702 		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8703 
8704 		{ NULL, 0xffffffff, 0, 0 }
8705 	};
8706 
8707 	if (!netif_running(bp->dev))
8708 		return rc;
8709 
8710 	/* Go through all the memories */
8711 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8712 		for (j = 0; j < mem_tbl[i].size; j++)
8713 			REG_RD(bp, mem_tbl[i].offset + j*4);
8714 
8715 	/* Check the parity status */
8716 	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8717 		val = REG_RD(bp, prty_tbl[i].offset);
8718 		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8719 		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8720 			DP(NETIF_MSG_HW,
8721 			   "%s is 0x%x\n", prty_tbl[i].name, val);
8722 			goto test_mem_exit;
8723 		}
8724 	}
8725 
8726 	rc = 0;
8727 
8728 test_mem_exit:
8729 	return rc;
8730 }
8731 
8732 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8733 {
8734 	int cnt = 1000;
8735 
8736 	if (link_up)
8737 		while (bnx2x_link_test(bp) && cnt--)
8738 			msleep(10);
8739 }
8740 
8741 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8742 {
8743 	unsigned int pkt_size, num_pkts, i;
8744 	struct sk_buff *skb;
8745 	unsigned char *packet;
8746 	struct bnx2x_fastpath *fp = &bp->fp[0];
8747 	u16 tx_start_idx, tx_idx;
8748 	u16 rx_start_idx, rx_idx;
8749 	u16 pkt_prod;
8750 	struct sw_tx_bd *tx_buf;
8751 	struct eth_tx_bd *tx_bd;
8752 	dma_addr_t mapping;
8753 	union eth_rx_cqe *cqe;
8754 	u8 cqe_fp_flags;
8755 	struct sw_rx_bd *rx_buf;
8756 	u16 len;
8757 	int rc = -ENODEV;
8758 
8759 	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8760 		bp->link_params.loopback_mode = LOOPBACK_BMAC;
8761 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8762 
8763 	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8764 		u16 cnt = 1000;
8765 		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8766 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8767 		/* wait until link state is restored */
8768 		if (link_up)
8769 			while (cnt-- && bnx2x_test_link(&bp->link_params,
8770 							&bp->link_vars))
8771 				msleep(10);
8772 	} else
8773 		return -EINVAL;
8774 
8775 	pkt_size = 1514;
8776 	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8777 	if (!skb) {
8778 		rc = -ENOMEM;
8779 		goto test_loopback_exit;
8780 	}
8781 	packet = skb_put(skb, pkt_size);
8782 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8783 	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8784 	for (i = ETH_HLEN; i < pkt_size; i++)
8785 		packet[i] = (unsigned char) (i & 0xff);
8786 
8787 	num_pkts = 0;
8788 	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8789 	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8790 
8791 	pkt_prod = fp->tx_pkt_prod++;
8792 	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8793 	tx_buf->first_bd = fp->tx_bd_prod;
8794 	tx_buf->skb = skb;
8795 
8796 	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8797 	mapping = pci_map_single(bp->pdev, skb->data,
8798 				 skb_headlen(skb), PCI_DMA_TODEVICE);
8799 	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8800 	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8801 	tx_bd->nbd = cpu_to_le16(1);
8802 	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8803 	tx_bd->vlan = cpu_to_le16(pkt_prod);
8804 	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8805 				       ETH_TX_BD_FLAGS_END_BD);
8806 	tx_bd->general_data = ((UNICAST_ADDRESS <<
8807 				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8808 
8809 	wmb();
8810 
8811 	fp->hw_tx_prods->bds_prod =
8812 		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8813 	mb(); /* FW restriction: must not reorder writing nbd and packets */
8814 	fp->hw_tx_prods->packets_prod =
8815 		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8816 	DOORBELL(bp, FP_IDX(fp), 0);
8817 
8818 	mmiowb();
8819 
8820 	num_pkts++;
8821 	fp->tx_bd_prod++;
8822 	bp->dev->trans_start = jiffies;
8823 
8824 	udelay(100);
8825 
8826 	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8827 	if (tx_idx != tx_start_idx + num_pkts)
8828 		goto test_loopback_exit;
8829 
8830 	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8831 	if (rx_idx != rx_start_idx + num_pkts)
8832 		goto test_loopback_exit;
8833 
8834 	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8835 	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8836 	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8837 		goto test_loopback_rx_exit;
8838 
8839 	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8840 	if (len != pkt_size)
8841 		goto test_loopback_rx_exit;
8842 
8843 	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8844 	skb = rx_buf->skb;
8845 	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8846 	for (i = ETH_HLEN; i < pkt_size; i++)
8847 		if (*(skb->data + i) != (unsigned char) (i & 0xff))
8848 			goto test_loopback_rx_exit;
8849 
8850 	rc = 0;
8851 
8852 test_loopback_rx_exit:
8853 
8854 	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8855 	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8856 	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8857 	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8858 
8859 	/* Update producers */
8860 	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8861 			     fp->rx_sge_prod);
8862 
8863 test_loopback_exit:
8864 	bp->link_params.loopback_mode = LOOPBACK_NONE;
8865 
8866 	return rc;
8867 }
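/* What the loopback helper above actually sends (a summary of the code, not
 * new behaviour): a single 1514-byte frame addressed to the port's own MAC,
 * with the rest of the Ethernet header zeroed and a payload of (i & 0xff)
 * at byte i.  The test passes only if exactly one packet is consumed on
 * both the Tx and Rx completion rings, the CQE carries no error flags, the
 * reported length matches, and the payload pattern is intact.
 */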
8868 
8869 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8870 {
8871 	int rc = 0;
8872 
8873 	if (!netif_running(bp->dev))
8874 		return BNX2X_LOOPBACK_FAILED;
8875 
8876 	bnx2x_netif_stop(bp, 1);
8877 	bnx2x_acquire_phy_lock(bp);
8878 
8879 	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8880 		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8881 		rc |= BNX2X_MAC_LOOPBACK_FAILED;
8882 	}
8883 
8884 	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8885 		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8886 		rc |= BNX2X_PHY_LOOPBACK_FAILED;
8887 	}
8888 
8889 	bnx2x_release_phy_lock(bp);
8890 	bnx2x_netif_start(bp);
8891 
8892 	return rc;
8893 }
8894 
8895 #define CRC32_RESIDUAL			0xdebb20e3
8896 
8897 static int bnx2x_test_nvram(struct bnx2x *bp)
8898 {
8899 	static const struct {
8900 		int offset;
8901 		int size;
8902 	} nvram_tbl[] = {
8903 		{     0,  0x14 }, /* bootstrap */
8904 		{  0x14,  0xec }, /* dir */
8905 		{ 0x100, 0x350 }, /* manuf_info */
8906 		{ 0x450,  0xf0 }, /* feature_info */
8907 		{ 0x640,  0x64 }, /* upgrade_key_info */
8908 		{ 0x6a4,  0x64 },
8909 		{ 0x708,  0x70 }, /* manuf_key_info */
8910 		{ 0x778,  0x70 },
8911 		{     0,     0 }
8912 	};
8913 	u32 buf[0x350 / 4];
8914 	u8 *data = (u8 *)buf;
8915 	int i, rc;
8916 	u32 magic, csum;
8917 
8918 	rc = bnx2x_nvram_read(bp, 0, data, 4);
8919 	if (rc) {
8920 		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8921 		goto test_nvram_exit;
8922 	}
8923 
8924 	magic = be32_to_cpu(buf[0]);
8925 	if (magic != 0x669955aa) {
8926 		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8927 		rc = -ENODEV;
8928 		goto test_nvram_exit;
8929 	}
8930 
8931 	for (i = 0; nvram_tbl[i].size; i++) {
8932 
8933 		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8934 				      nvram_tbl[i].size);
8935 		if (rc) {
8936 			DP(NETIF_MSG_PROBE,
8937 			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8938 			goto test_nvram_exit;
8939 		}
8940 
8941 		csum = ether_crc_le(nvram_tbl[i].size, data);
8942 		if (csum != CRC32_RESIDUAL) {
8943 			DP(NETIF_MSG_PROBE,
8944 			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8945 			rc = -ENODEV;
8946 			goto test_nvram_exit;
8947 		}
8948 	}
8949 
8950 test_nvram_exit:
8951 	return rc;
8952 }
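/* The CRC check above relies on the usual CRC-32 residue property: running
 * ether_crc_le() over a region that already contains its own trailing CRC
 * yields the fixed constant CRC32_RESIDUAL (0xdebb20e3) when the region is
 * intact, so a simple equality test is enough and no per-region expected
 * value has to be stored.
 */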
8953 
8954 static int bnx2x_test_intr(struct bnx2x *bp)
8955 {
8956 	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8957 	int i, rc;
8958 
8959 	if (!netif_running(bp->dev))
8960 		return -ENODEV;
8961 
8962 	config->hdr.length_6b = 0;
8963 	if (CHIP_IS_E1(bp))
8964 		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8965 	else
8966 		config->hdr.offset = BP_FUNC(bp);
8967 	config->hdr.client_id = BP_CL_ID(bp);
8968 	config->hdr.reserved1 = 0;
8969 
8970 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8971 			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8972 			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8973 	if (rc == 0) {
8974 		bp->set_mac_pending++;
8975 		for (i = 0; i < 10; i++) {
8976 			if (!bp->set_mac_pending)
8977 				break;
8978 			msleep_interruptible(10);
8979 		}
8980 		if (i == 10)
8981 			rc = -ENODEV;
8982 	}
8983 
8984 	return rc;
8985 }
8986 
8987 static void bnx2x_self_test(struct net_device *dev,
8988 			    struct ethtool_test *etest, u64 *buf)
8989 {
8990 	struct bnx2x *bp = netdev_priv(dev);
8991 
8992 	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8993 
8994 	if (!netif_running(dev))
8995 		return;
8996 
8997 	/* offline tests are not supported in MF mode */
8998 	if (IS_E1HMF(bp))
8999 		etest->flags &= ~ETH_TEST_FL_OFFLINE;
9000 
9001 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
9002 		u8 link_up;
9003 
9004 		link_up = bp->link_vars.link_up;
9005 		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9006 		bnx2x_nic_load(bp, LOAD_DIAG);
9007 		/* wait until link state is restored */
9008 		bnx2x_wait_for_link(bp, link_up);
9009 
9010 		if (bnx2x_test_registers(bp) != 0) {
9011 			buf[0] = 1;
9012 			etest->flags |= ETH_TEST_FL_FAILED;
9013 		}
9014 		if (bnx2x_test_memory(bp) != 0) {
9015 			buf[1] = 1;
9016 			etest->flags |= ETH_TEST_FL_FAILED;
9017 		}
9018 		buf[2] = bnx2x_test_loopback(bp, link_up);
9019 		if (buf[2] != 0)
9020 			etest->flags |= ETH_TEST_FL_FAILED;
9021 
9022 		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9023 		bnx2x_nic_load(bp, LOAD_NORMAL);
9024 		/* wait until link state is restored */
9025 		bnx2x_wait_for_link(bp, link_up);
9026 	}
9027 	if (bnx2x_test_nvram(bp) != 0) {
9028 		buf[3] = 1;
9029 		etest->flags |= ETH_TEST_FL_FAILED;
9030 	}
9031 	if (bnx2x_test_intr(bp) != 0) {
9032 		buf[4] = 1;
9033 		etest->flags |= ETH_TEST_FL_FAILED;
9034 	}
9035 	if (bp->port.pmf)
9036 		if (bnx2x_link_test(bp) != 0) {
9037 			buf[5] = 1;
9038 			etest->flags |= ETH_TEST_FL_FAILED;
9039 		}
9040 	buf[7] = bnx2x_mc_assert(bp);
9041 	if (buf[7] != 0)
9042 		etest->flags |= ETH_TEST_FL_FAILED;
9043 
9044 #ifdef BNX2X_EXTRA_DEBUG
9045 	bnx2x_panic_dump(bp);
9046 #endif
9047 }
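/* Result-slot mapping for the self test above (it mirrors
 * bnx2x_tests_str_arr): buf[0] registers, buf[1] memory, buf[2] loopback,
 * buf[3] nvram, buf[4] interrupt, buf[5] link (PMF only), buf[7] MC
 * asserts.  buf[6] ("idle check") is not filled in by this routine.
 */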
9048 
9049 static const struct {
9050 	long offset;
9051 	int size;
9052 	u32 flags;
9053 #define STATS_FLAGS_PORT		1
9054 #define STATS_FLAGS_FUNC		2
9055 	u8 string[ETH_GSTRING_LEN];
9056 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9057 /* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
9058 				8, STATS_FLAGS_FUNC, "rx_bytes" },
9059 	{ STATS_OFFSET32(error_bytes_received_hi),
9060 				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9061 	{ STATS_OFFSET32(total_bytes_transmitted_hi),
9062 				8, STATS_FLAGS_FUNC, "tx_bytes" },
9063 	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9064 				8, STATS_FLAGS_PORT, "tx_error_bytes" },
9065 	{ STATS_OFFSET32(total_unicast_packets_received_hi),
9066 				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9067 	{ STATS_OFFSET32(total_multicast_packets_received_hi),
9068 				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9069 	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
9070 				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9071 	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9072 				8, STATS_FLAGS_FUNC, "tx_packets" },
9073 	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9074 				8, STATS_FLAGS_PORT, "tx_mac_errors" },
9075 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9076 				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9077 	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9078 				8, STATS_FLAGS_PORT, "rx_crc_errors" },
9079 	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9080 				8, STATS_FLAGS_PORT, "rx_align_errors" },
9081 	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9082 				8, STATS_FLAGS_PORT, "tx_single_collisions" },
9083 	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9084 				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9085 	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9086 				8, STATS_FLAGS_PORT, "tx_deferred" },
9087 	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9088 				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9089 	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9090 				8, STATS_FLAGS_PORT, "tx_late_collisions" },
9091 	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9092 				8, STATS_FLAGS_PORT, "tx_total_collisions" },
9093 	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9094 				8, STATS_FLAGS_PORT, "rx_fragments" },
9095 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9096 				8, STATS_FLAGS_PORT, "rx_jabbers" },
9097 	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9098 				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9099 	{ STATS_OFFSET32(jabber_packets_received),
9100 				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9101 	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9102 				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9103 	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9104 			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9105 	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9106 			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9107 	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9108 			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9109 	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9110 			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9111 	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9112 			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9113 	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9114 			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9115 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9116 				8, STATS_FLAGS_PORT, "rx_xon_frames" },
9117 	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9118 				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9119 	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
9120 				8, STATS_FLAGS_PORT, "tx_xon_frames" },
9121 	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
9122 				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9123 	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9124 				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9125 	{ STATS_OFFSET32(mac_filter_discard),
9126 				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9127 	{ STATS_OFFSET32(no_buff_discard),
9128 				4, STATS_FLAGS_FUNC, "rx_discards" },
9129 	{ STATS_OFFSET32(xxoverflow_discard),
9130 				4, STATS_FLAGS_PORT, "rx_fw_discards" },
9131 	{ STATS_OFFSET32(brb_drop_hi),
9132 				8, STATS_FLAGS_PORT, "brb_discard" },
9133 	{ STATS_OFFSET32(brb_truncate_hi),
9134 				8, STATS_FLAGS_PORT, "brb_truncate" },
9135 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9136 				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9137 	{ STATS_OFFSET32(rx_skb_alloc_failed),
9138 				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9139 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9140 				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9141 };
9142 
9143 #define IS_NOT_E1HMF_STAT(bp, i) \
9144 		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9145 
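/* In E1H multi-function mode the per-port MAC statistics are shared between
 * functions and therefore hidden; this macro is used consistently by
 * get_strings(), get_stats_count() and get_ethtool_stats() below so the
 * string list and the value list always stay in step.
 */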
9146 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9147 {
9148 	struct bnx2x *bp = netdev_priv(dev);
9149 	int i, j;
9150 
9151 	switch (stringset) {
9152 	case ETH_SS_STATS:
9153 		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9154 			if (IS_NOT_E1HMF_STAT(bp, i))
9155 				continue;
9156 			strcpy(buf + j*ETH_GSTRING_LEN,
9157 			       bnx2x_stats_arr[i].string);
9158 			j++;
9159 		}
9160 		break;
9161 
9162 	case ETH_SS_TEST:
9163 		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9164 		break;
9165 	}
9166 }
9167 
9168 static int bnx2x_get_stats_count(struct net_device *dev)
9169 {
9170 	struct bnx2x *bp = netdev_priv(dev);
9171 	int i, num_stats = 0;
9172 
9173 	for (i = 0; i < BNX2X_NUM_STATS; i++) {
9174 		if (IS_NOT_E1HMF_STAT(bp, i))
9175 			continue;
9176 		num_stats++;
9177 	}
9178 	return num_stats;
9179 }
9180 
9181 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9182 				    struct ethtool_stats *stats, u64 *buf)
9183 {
9184 	struct bnx2x *bp = netdev_priv(dev);
9185 	u32 *hw_stats = (u32 *)&bp->eth_stats;
9186 	int i, j;
9187 
9188 	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9189 		if (IS_NOT_E1HMF_STAT(bp, i))
9190 			continue;
9191 
9192 		if (bnx2x_stats_arr[i].size == 0) {
9193 			/* skip this counter */
9194 			buf[j] = 0;
9195 			j++;
9196 			continue;
9197 		}
9198 		if (bnx2x_stats_arr[i].size == 4) {
9199 			/* 4-byte counter */
9200 			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9201 			j++;
9202 			continue;
9203 		}
9204 		/* 8-byte counter */
9205 		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9206 				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9207 		j++;
9208 	}
9209 }
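/* 8-byte counters are stored as two consecutive 32-bit words in
 * bp->eth_stats (the "_hi" word first, then the low word), which is why
 * the 8-byte case above combines offset and offset + 1 with HILO_U64.
 */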
9210 
9211 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9212 {
9213 	struct bnx2x *bp = netdev_priv(dev);
9214 	int port = BP_PORT(bp);
9215 	int i;
9216 
9217 	if (!netif_running(dev))
9218 		return 0;
9219 
9220 	if (!bp->port.pmf)
9221 		return 0;
9222 
9223 	if (data == 0)
9224 		data = 2;
9225 
9226 	for (i = 0; i < (data * 2); i++) {
9227 		if ((i % 2) == 0)
9228 			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9229 				      bp->link_params.hw_led_mode,
9230 				      bp->link_params.chip_id);
9231 		else
9232 			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9233 				      bp->link_params.hw_led_mode,
9234 				      bp->link_params.chip_id);
9235 
9236 		msleep_interruptible(500);
9237 		if (signal_pending(current))
9238 			break;
9239 	}
9240 
9241 	if (bp->link_vars.link_up)
9242 		bnx2x_set_led(bp, port, LED_MODE_OPER,
9243 			      bp->link_vars.line_speed,
9244 			      bp->link_params.hw_led_mode,
9245 			      bp->link_params.chip_id);
9246 
9247 	return 0;
9248 }
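/* Usage sketch (assuming the standard ethtool CLI): "ethtool -p eth0 5"
 * blinks the port LED for 5 seconds (500 ms on / 500 ms off as coded
 * above); with no argument the loop defaults to 2 seconds, and the LED is
 * restored to its normal operational mode afterwards if the link is up.
 */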
9249 
9250 static struct ethtool_ops bnx2x_ethtool_ops = {
9251 	.get_settings		= bnx2x_get_settings,
9252 	.set_settings		= bnx2x_set_settings,
9253 	.get_drvinfo		= bnx2x_get_drvinfo,
9254 	.get_wol		= bnx2x_get_wol,
9255 	.set_wol		= bnx2x_set_wol,
9256 	.get_msglevel		= bnx2x_get_msglevel,
9257 	.set_msglevel		= bnx2x_set_msglevel,
9258 	.nway_reset		= bnx2x_nway_reset,
9259 	.get_link		= ethtool_op_get_link,
9260 	.get_eeprom_len		= bnx2x_get_eeprom_len,
9261 	.get_eeprom		= bnx2x_get_eeprom,
9262 	.set_eeprom		= bnx2x_set_eeprom,
9263 	.get_coalesce		= bnx2x_get_coalesce,
9264 	.set_coalesce		= bnx2x_set_coalesce,
9265 	.get_ringparam		= bnx2x_get_ringparam,
9266 	.set_ringparam		= bnx2x_set_ringparam,
9267 	.get_pauseparam		= bnx2x_get_pauseparam,
9268 	.set_pauseparam		= bnx2x_set_pauseparam,
9269 	.get_rx_csum		= bnx2x_get_rx_csum,
9270 	.set_rx_csum		= bnx2x_set_rx_csum,
9271 	.get_tx_csum		= ethtool_op_get_tx_csum,
9272 	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
9273 	.set_flags		= bnx2x_set_flags,
9274 	.get_flags		= ethtool_op_get_flags,
9275 	.get_sg			= ethtool_op_get_sg,
9276 	.set_sg			= ethtool_op_set_sg,
9277 	.get_tso		= ethtool_op_get_tso,
9278 	.set_tso		= bnx2x_set_tso,
9279 	.self_test_count	= bnx2x_self_test_count,
9280 	.self_test		= bnx2x_self_test,
9281 	.get_strings		= bnx2x_get_strings,
9282 	.phys_id		= bnx2x_phys_id,
9283 	.get_stats_count	= bnx2x_get_stats_count,
9284 	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
9285 };
9286 
9287 /* end of ethtool_ops */
9288 
9289 /****************************************************************************
9290 * General service functions
9291 ****************************************************************************/
9292 
9293 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9294 {
9295 	u16 pmcsr;
9296 
9297 	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9298 
9299 	switch (state) {
9300 	case PCI_D0:
9301 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9302 				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9303 				       PCI_PM_CTRL_PME_STATUS));
9304 
9305 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9306 			/* delay required during transition out of D3hot */
9307 			msleep(20);
9308 		break;
9309 
9310 	case PCI_D3hot:
9311 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9312 		pmcsr |= 3;
9313 
9314 		if (bp->wol)
9315 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9316 
9317 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9318 				      pmcsr);
9319 
9320 		/* No more memory access after this point until
9321 		 * device is brought back to D0.
9322 		 */
9323 		break;
9324 
9325 	default:
9326 		return -EINVAL;
9327 	}
9328 	return 0;
9329 }
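/* Background for the PMCSR manipulation above (standard PCI PM register
 * layout): bits 1:0 of PM_CTRL select the device power state, so writing
 * 3 requests D3hot and clearing the field requests D0.  PME_ENABLE is set
 * on the way down only when Wake-on-LAN is armed, and a short delay
 * (20 ms here) is left for the transition out of D3hot.
 */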
9330 
9331 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9332 {
9333 	u16 rx_cons_sb;
9334 
9335 	/* Tell compiler that status block fields can change */
9336 	barrier();
9337 	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9338 	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9339 		rx_cons_sb++;
9340 	return (fp->rx_comp_cons != rx_cons_sb);
9341 }
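/* The off-by-one above is deliberate: the last entry of each RCQ page is a
 * "next page" pointer rather than a real completion, so when the status
 * block consumer lands on that slot it is bumped past it before being
 * compared with the driver's rx_comp_cons (presumably mirroring the
 * NEXT_RCQ_IDX() handling used elsewhere in the driver).
 */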
9342 
9343 /*
9344  * net_device service functions
9345  */
9346 
9347 static int bnx2x_poll(struct napi_struct *napi, int budget)
9348 {
9349 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9350 						 napi);
9351 	struct bnx2x *bp = fp->bp;
9352 	int work_done = 0;
9353 
9354 #ifdef BNX2X_STOP_ON_ERROR
9355 	if (unlikely(bp->panic))
9356 		goto poll_panic;
9357 #endif
9358 
9359 	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9360 	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9361 	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9362 
9363 	bnx2x_update_fpsb_idx(fp);
9364 
9365 	if (bnx2x_has_tx_work(fp))
9366 		bnx2x_tx_int(fp, budget);
9367 
9368 	if (bnx2x_has_rx_work(fp))
9369 		work_done = bnx2x_rx_int(fp, budget);
9370 	rmb(); /* BNX2X_HAS_WORK() reads the status block */
9371 
9372 	/* must not complete if we consumed full budget */
9373 	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9374 
9375 #ifdef BNX2X_STOP_ON_ERROR
9376 poll_panic:
9377 #endif
9378 		netif_rx_complete(napi);
9379 
9380 		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9381 			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9382 		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9383 			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9384 	}
9385 	return work_done;
9386 }
9387 
9388 
9389 /* we split the first BD into headers and data BDs
9390  * to ease the pain of our fellow microcode engineers
9391  * we use one mapping for both BDs
9392  * So far this has only been observed to happen
9393  * in Other Operating Systems(TM)
9394  */
9395 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9396 				   struct bnx2x_fastpath *fp,
9397 				   struct eth_tx_bd **tx_bd, u16 hlen,
9398 				   u16 bd_prod, int nbd)
9399 {
9400 	struct eth_tx_bd *h_tx_bd = *tx_bd;
9401 	struct eth_tx_bd *d_tx_bd;
9402 	dma_addr_t mapping;
9403 	int old_len = le16_to_cpu(h_tx_bd->nbytes);
9404 
9405 	/* first fix first BD */
9406 	h_tx_bd->nbd = cpu_to_le16(nbd);
9407 	h_tx_bd->nbytes = cpu_to_le16(hlen);
9408 
9409 	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
9410 	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9411 	   h_tx_bd->addr_lo, h_tx_bd->nbd);
9412 
9413 	/* now get a new data BD
9414 	 * (after the pbd) and fill it */
9415 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9416 	d_tx_bd = &fp->tx_desc_ring[bd_prod];
9417 
9418 	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9419 			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9420 
9421 	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9422 	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9423 	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9424 	d_tx_bd->vlan = 0;
9425 	/* this marks the BD as one that has no individual mapping;
9426 	 * the FW ignores this flag in a BD not marked start
9427 	 */
9428 	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9429 	DP(NETIF_MSG_TX_QUEUED,
9430 	   "TSO split data size is %d (%x:%x)\n",
9431 	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9432 
9433 	/* update tx_bd for marking the last BD flag */
9434 	*tx_bd = d_tx_bd;
9435 
9436 	return bd_prod;
9437 }
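/* Numeric example of the split above (values only illustrative): for a TSO
 * skb whose linear part is 1000 bytes with 114 bytes of headers
 * (hlen = 114), the first BD is shrunk to 114 bytes and a second data BD of
 * 1000 - 114 = 886 bytes is carved out of the very same DMA mapping, simply
 * offset by hlen.  Only the first BD owns the mapping, which is why the
 * data BD is flagged ETH_TX_BD_FLAGS_SW_LSO.
 */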
9438 
9439 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9440 {
9441 	if (fix > 0)
9442 		csum = (u16) ~csum_fold(csum_sub(csum,
9443 				csum_partial(t_header - fix, fix, 0)));
9444 
9445 	else if (fix < 0)
9446 		csum = (u16) ~csum_fold(csum_add(csum,
9447 				csum_partial(t_header, -fix, 0)));
9448 
9449 	return swab16(csum);
9450 }
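/* What the fixup above computes (a sketch of the arithmetic, not new
 * logic): the stack's partial checksum may start "fix" bytes before or
 * after the transport header the hardware expects.  For fix > 0 the sum of
 * those extra leading bytes is subtracted out; for fix < 0 the missing
 * bytes are added back in.  The result is folded and byte-swapped because
 * the parsing BD expects the pseudo checksum in swapped form.
 */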
9451 
9452 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9453 {
9454 	u32 rc;
9455 
9456 	if (skb->ip_summed != CHECKSUM_PARTIAL)
9457 		rc = XMIT_PLAIN;
9458 
9459 	else {
9460 		if (skb->protocol == ntohs(ETH_P_IPV6)) {
9461 			rc = XMIT_CSUM_V6;
9462 			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9463 				rc |= XMIT_CSUM_TCP;
9464 
9465 		} else {
9466 			rc = XMIT_CSUM_V4;
9467 			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9468 				rc |= XMIT_CSUM_TCP;
9469 		}
9470 	}
9471 
9472 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9473 		rc |= XMIT_GSO_V4;
9474 
9475 	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9476 		rc |= XMIT_GSO_V6;
9477 
9478 	return rc;
9479 }
9480 
9481 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9482 /* check if packet requires linearization (packet is too fragmented) */
9483 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9484 			     u32 xmit_type)
9485 {
9486 	int to_copy = 0;
9487 	int hlen = 0;
9488 	int first_bd_sz = 0;
9489 
9490 	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9491 	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9492 
9493 		if (xmit_type & XMIT_GSO) {
9494 			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9495 			/* Check if LSO packet needs to be copied:
9496 			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9497 			int wnd_size = MAX_FETCH_BD - 3;
9498 			/* Number of windows to check */
9499 			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9500 			int wnd_idx = 0;
9501 			int frag_idx = 0;
9502 			u32 wnd_sum = 0;
9503 
9504 			/* Headers length */
9505 			hlen = (int)(skb_transport_header(skb) - skb->data) +
9506 				tcp_hdrlen(skb);
9507 
9508 			/* Amount of data (w/o headers) on linear part of SKB*/
9509 			first_bd_sz = skb_headlen(skb) - hlen;
9510 
9511 			wnd_sum  = first_bd_sz;
9512 
9513 			/* Calculate the first sum - it's special */
9514 			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9515 				wnd_sum +=
9516 					skb_shinfo(skb)->frags[frag_idx].size;
9517 
9518 			/* If there was data on linear skb data - check it */
9519 			if (first_bd_sz > 0) {
9520 				if (unlikely(wnd_sum < lso_mss)) {
9521 					to_copy = 1;
9522 					goto exit_lbl;
9523 				}
9524 
9525 				wnd_sum -= first_bd_sz;
9526 			}
9527 
9528 			/* Others are easier: run through the frag list and
9529 			   check all windows */
9530 			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9531 				wnd_sum +=
9532 			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9533 
9534 				if (unlikely(wnd_sum < lso_mss)) {
9535 					to_copy = 1;
9536 					break;
9537 				}
9538 				wnd_sum -=
9539 					skb_shinfo(skb)->frags[wnd_idx].size;
9540 			}
9541 
9542 		} else {
9543 			/* in the non-LSO case, a packet that is too
9544 			   fragmented should always be linearized */
9545 			to_copy = 1;
9546 		}
9547 	}
9548 
9549 exit_lbl:
9550 	if (unlikely(to_copy))
9551 		DP(NETIF_MSG_TX_QUEUED,
9552 		   "Linearization IS REQUIRED for %s packet. "
9553 		   "num_frags %d  hlen %d  first_bd_sz %d\n",
9554 		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9555 		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9556 
9557 	return to_copy;
9558 }
9559 #endif
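/* The check above implements a sliding-window rule imposed by the FW BD
 * fetcher: for LSO, every window of wnd_size = MAX_FETCH_BD - 3
 * consecutive data BDs must carry at least lso_mss bytes of payload, and a
 * non-LSO packet with that many fragments is always copied.  If any window
 * falls short, the skb is linearized in bnx2x_start_xmit() below instead
 * of being sent fragmented.
 */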
9560 
9561 /* called with netif_tx_lock
9562  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9563  * netif_wake_queue()
9564  */
9565 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9566 {
9567 	struct bnx2x *bp = netdev_priv(dev);
9568 	struct bnx2x_fastpath *fp;
9569 	struct sw_tx_bd *tx_buf;
9570 	struct eth_tx_bd *tx_bd;
9571 	struct eth_tx_parse_bd *pbd = NULL;
9572 	u16 pkt_prod, bd_prod;
9573 	int nbd, fp_index;
9574 	dma_addr_t mapping;
9575 	u32 xmit_type = bnx2x_xmit_type(bp, skb);
9576 	int vlan_off = (bp->e1hov ? 4 : 0);
9577 	int i;
9578 	u8 hlen = 0;
9579 
9580 #ifdef BNX2X_STOP_ON_ERROR
9581 	if (unlikely(bp->panic))
9582 		return NETDEV_TX_BUSY;
9583 #endif
9584 
9585 	fp_index = (smp_processor_id() % bp->num_queues);
9586 	fp = &bp->fp[fp_index];
9587 
9588 	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9589 		bp->eth_stats.driver_xoff++;
9590 		netif_stop_queue(dev);
9591 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9592 		return NETDEV_TX_BUSY;
9593 	}
9594 
9595 	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9596 	   "  gso type %x  xmit_type %x\n",
9597 	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9598 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9599 
9600 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9601 	/* First, check if we need to linearize the skb
9602 	   (due to FW restrictions) */
9603 	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9604 		/* Statistics of linearization */
9605 		bp->lin_cnt++;
9606 		if (skb_linearize(skb) != 0) {
9607 			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9608 			   "silently dropping this SKB\n");
9609 			dev_kfree_skb_any(skb);
9610 			return NETDEV_TX_OK;
9611 		}
9612 	}
9613 #endif
9614 
9615 	/*
9616 	Please read carefully. First we use one BD which we mark as start,
9617 	then for TSO or xsum we have a parsing info BD,
9618 	and only then we have the rest of the TSO BDs.
9619 	(don't forget to mark the last one as last,
9620 	and to unmap only AFTER you write to the BD ...)
9621 	And above all, all pbd sizes are in words - NOT DWORDS!
9622 	*/
9623 
9624 	pkt_prod = fp->tx_pkt_prod++;
9625 	bd_prod = TX_BD(fp->tx_bd_prod);
9626 
9627 	/* get a tx_buf and first BD */
9628 	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9629 	tx_bd = &fp->tx_desc_ring[bd_prod];
9630 
9631 	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9632 	tx_bd->general_data = (UNICAST_ADDRESS <<
9633 			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9634 	/* header nbd */
9635 	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9636 
9637 	/* remember the first BD of the packet */
9638 	tx_buf->first_bd = fp->tx_bd_prod;
9639 	tx_buf->skb = skb;
9640 
9641 	DP(NETIF_MSG_TX_QUEUED,
9642 	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9643 	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9644 
9645 #ifdef BCM_VLAN
9646 	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9647 	    (bp->flags & HW_VLAN_TX_FLAG)) {
9648 		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9649 		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9650 		vlan_off += 4;
9651 	} else
9652 #endif
9653 		tx_bd->vlan = cpu_to_le16(pkt_prod);
9654 
9655 	if (xmit_type) {
9656 		/* turn on parsing and get a BD */
9657 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9658 		pbd = (void *)&fp->tx_desc_ring[bd_prod];
9659 
9660 		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9661 	}
9662 
9663 	if (xmit_type & XMIT_CSUM) {
9664 		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9665 
9666 		/* for now NS flag is not used in Linux */
9667 		pbd->global_data = (hlen |
9668 				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9669 				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9670 
9671 		pbd->ip_hlen = (skb_transport_header(skb) -
9672 				skb_network_header(skb)) / 2;
9673 
9674 		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9675 
9676 		pbd->total_hlen = cpu_to_le16(hlen);
9677 		hlen = hlen*2 - vlan_off;
9678 
9679 		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9680 
9681 		if (xmit_type & XMIT_CSUM_V4)
9682 			tx_bd->bd_flags.as_bitfield |=
9683 						ETH_TX_BD_FLAGS_IP_CSUM;
9684 		else
9685 			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9686 
9687 		if (xmit_type & XMIT_CSUM_TCP) {
9688 			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9689 
9690 		} else {
9691 			s8 fix = SKB_CS_OFF(skb); /* signed! */
9692 
9693 			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9694 			pbd->cs_offset = fix / 2;
9695 
9696 			DP(NETIF_MSG_TX_QUEUED,
9697 			   "hlen %d  offset %d  fix %d  csum before fix %x\n",
9698 			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9699 			   SKB_CS(skb));
9700 
9701 			/* HW bug: fixup the CSUM */
9702 			pbd->tcp_pseudo_csum =
9703 				bnx2x_csum_fix(skb_transport_header(skb),
9704 					       SKB_CS(skb), fix);
9705 
9706 			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9707 			   pbd->tcp_pseudo_csum);
9708 		}
9709 	}
9710 
9711 	mapping = pci_map_single(bp->pdev, skb->data,
9712 				 skb_headlen(skb), PCI_DMA_TODEVICE);
9713 
9714 	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9715 	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9716 	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9717 	tx_bd->nbd = cpu_to_le16(nbd);
9718 	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9719 
9720 	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9721 	   "  nbytes %d  flags %x  vlan %x\n",
9722 	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9723 	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9724 	   le16_to_cpu(tx_bd->vlan));
9725 
9726 	if (xmit_type & XMIT_GSO) {
9727 
9728 		DP(NETIF_MSG_TX_QUEUED,
9729 		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9730 		   skb->len, hlen, skb_headlen(skb),
9731 		   skb_shinfo(skb)->gso_size);
9732 
9733 		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9734 
9735 		if (unlikely(skb_headlen(skb) > hlen))
9736 			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9737 						 bd_prod, ++nbd);
9738 
9739 		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9740 		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9741 		pbd->tcp_flags = pbd_tcp_flags(skb);
9742 
9743 		if (xmit_type & XMIT_GSO_V4) {
9744 			pbd->ip_id = swab16(ip_hdr(skb)->id);
9745 			pbd->tcp_pseudo_csum =
9746 				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9747 							  ip_hdr(skb)->daddr,
9748 							  0, IPPROTO_TCP, 0));
9749 
9750 		} else
9751 			pbd->tcp_pseudo_csum =
9752 				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9753 							&ipv6_hdr(skb)->daddr,
9754 							0, IPPROTO_TCP, 0));
9755 
9756 		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9757 	}
9758 
9759 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9760 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9761 
9762 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9763 		tx_bd = &fp->tx_desc_ring[bd_prod];
9764 
9765 		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9766 				       frag->size, PCI_DMA_TODEVICE);
9767 
9768 		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9769 		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9770 		tx_bd->nbytes = cpu_to_le16(frag->size);
9771 		tx_bd->vlan = cpu_to_le16(pkt_prod);
9772 		tx_bd->bd_flags.as_bitfield = 0;
9773 
9774 		DP(NETIF_MSG_TX_QUEUED,
9775 		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9776 		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9777 		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9778 	}
9779 
9780 	/* now at last mark the BD as the last BD */
9781 	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9782 
9783 	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9784 	   tx_bd, tx_bd->bd_flags.as_bitfield);
9785 
9786 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9787 
9788 	/* now send a tx doorbell, counting the next BD
9789 	 * if the packet contains or ends with it
9790 	 */
9791 	if (TX_BD_POFF(bd_prod) < nbd)
9792 		nbd++;
9793 
9794 	if (pbd)
9795 		DP(NETIF_MSG_TX_QUEUED,
9796 		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9797 		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9798 		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9799 		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9800 		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9801 
9802 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9803 
9804 	/*
9805 	 * Make sure that the BD data is updated before updating the producer
9806 	 * since FW might read the BD right after the producer is updated.
9807 	 * This is only applicable for weak-ordered memory model archs such
9808 	 * as IA-64. The following barrier is also mandatory since the FW
9809 	 * assumes packets always have BDs.
9810 	 */
9811 	wmb();
9812 
9813 	fp->hw_tx_prods->bds_prod =
9814 		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9815 	mb(); /* FW restriction: must not reorder writing nbd and packets */
9816 	fp->hw_tx_prods->packets_prod =
9817 		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9818 	DOORBELL(bp, FP_IDX(fp), 0);
9819 
9820 	mmiowb();
9821 
9822 	fp->tx_bd_prod += nbd;
9823 	dev->trans_start = jiffies;
9824 
9825 	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9826 		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9827 		   if we put Tx into XOFF state. */
9828 		smp_mb();
9829 		netif_stop_queue(dev);
9830 		bp->eth_stats.driver_xoff++;
9831 		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9832 			netif_wake_queue(dev);
9833 	}
9834 	fp->tx_pkt++;
9835 
9836 	return NETDEV_TX_OK;
9837 }
9838 
9839 /* called with rtnl_lock */
9840 static int bnx2x_open(struct net_device *dev)
9841 {
9842 	struct bnx2x *bp = netdev_priv(dev);
9843 
9844 	netif_carrier_off(dev);
9845 
9846 	bnx2x_set_power_state(bp, PCI_D0);
9847 
9848 	return bnx2x_nic_load(bp, LOAD_OPEN);
9849 }
9850 
9851 /* called with rtnl_lock */
9852 static int bnx2x_close(struct net_device *dev)
9853 {
9854 	struct bnx2x *bp = netdev_priv(dev);
9855 
9856 	/* Unload the driver, release IRQs */
9857 	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9858 	if (atomic_read(&bp->pdev->enable_cnt) == 1)
9859 		if (!CHIP_REV_IS_SLOW(bp))
9860 			bnx2x_set_power_state(bp, PCI_D3hot);
9861 
9862 	return 0;
9863 }
9864 
9865 /* called with netif_tx_lock from set_multicast */
9866 static void bnx2x_set_rx_mode(struct net_device *dev)
9867 {
9868 	struct bnx2x *bp = netdev_priv(dev);
9869 	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9870 	int port = BP_PORT(bp);
9871 
9872 	if (bp->state != BNX2X_STATE_OPEN) {
9873 		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9874 		return;
9875 	}
9876 
9877 	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9878 
9879 	if (dev->flags & IFF_PROMISC)
9880 		rx_mode = BNX2X_RX_MODE_PROMISC;
9881 
9882 	else if ((dev->flags & IFF_ALLMULTI) ||
9883 		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9884 		rx_mode = BNX2X_RX_MODE_ALLMULTI;
9885 
9886 	else { /* some multicasts */
9887 		if (CHIP_IS_E1(bp)) {
9888 			int i, old, offset;
9889 			struct dev_mc_list *mclist;
9890 			struct mac_configuration_cmd *config =
9891 						bnx2x_sp(bp, mcast_config);
9892 
9893 			for (i = 0, mclist = dev->mc_list;
9894 			     mclist && (i < dev->mc_count);
9895 			     i++, mclist = mclist->next) {
9896 
9897 				config->config_table[i].
9898 					cam_entry.msb_mac_addr =
9899 					swab16(*(u16 *)&mclist->dmi_addr[0]);
9900 				config->config_table[i].
9901 					cam_entry.middle_mac_addr =
9902 					swab16(*(u16 *)&mclist->dmi_addr[2]);
9903 				config->config_table[i].
9904 					cam_entry.lsb_mac_addr =
9905 					swab16(*(u16 *)&mclist->dmi_addr[4]);
9906 				config->config_table[i].cam_entry.flags =
9907 							cpu_to_le16(port);
9908 				config->config_table[i].
9909 					target_table_entry.flags = 0;
9910 				config->config_table[i].
9911 					target_table_entry.client_id = 0;
9912 				config->config_table[i].
9913 					target_table_entry.vlan_id = 0;
9914 
9915 				DP(NETIF_MSG_IFUP,
9916 				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9917 				   config->config_table[i].
9918 						cam_entry.msb_mac_addr,
9919 				   config->config_table[i].
9920 						cam_entry.middle_mac_addr,
9921 				   config->config_table[i].
9922 						cam_entry.lsb_mac_addr);
9923 			}
9924 			old = config->hdr.length_6b;
9925 			if (old > i) {
9926 				for (; i < old; i++) {
9927 					if (CAM_IS_INVALID(config->
9928 							   config_table[i])) {
9929 						/* already invalidated */
9930 						break;
9931 					}
9932 					/* invalidate */
9933 					CAM_INVALIDATE(config->
9934 						       config_table[i]);
9935 				}
9936 			}
9937 
9938 			if (CHIP_REV_IS_SLOW(bp))
9939 				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9940 			else
9941 				offset = BNX2X_MAX_MULTICAST*(1 + port);
9942 
9943 			config->hdr.length_6b = i;
9944 			config->hdr.offset = offset;
9945 			config->hdr.client_id = BP_CL_ID(bp);
9946 			config->hdr.reserved1 = 0;
9947 
9948 			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9949 				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9950 				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9951 				      0);
9952 		} else { /* E1H */
9953 			/* Accept one or more multicasts */
9954 			struct dev_mc_list *mclist;
9955 			u32 mc_filter[MC_HASH_SIZE];
9956 			u32 crc, bit, regidx;
9957 			int i;
9958 
9959 			memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9960 
9961 			for (i = 0, mclist = dev->mc_list;
9962 			     mclist && (i < dev->mc_count);
9963 			     i++, mclist = mclist->next) {
9964 
9965 				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9966 				   mclist->dmi_addr);
9967 
9968 				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9969 				bit = (crc >> 24) & 0xff;
9970 				regidx = bit >> 5;
9971 				bit &= 0x1f;
9972 				mc_filter[regidx] |= (1 << bit);
9973 			}
9974 
9975 			for (i = 0; i < MC_HASH_SIZE; i++)
9976 				REG_WR(bp, MC_HASH_OFFSET(bp, i),
9977 				       mc_filter[i]);
9978 		}
9979 	}
9980 
9981 	bp->rx_mode = rx_mode;
9982 	bnx2x_set_storm_rx_mode(bp);
9983 }
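/* Two multicast schemes are used above, depending on the chip: E1 programs
 * explicit CAM entries through a RAMROD_CMD_ID_ETH_SET_MAC ramrod, while
 * E1H uses a 256-bit hash filter spread over MC_HASH_SIZE 32-bit
 * registers.  Worked example of the hash (straight from the code): with
 * crc = crc32c_le(0, addr, ETH_ALEN), bit = (crc >> 24) & 0xff selects one
 * of 256 filter bits, regidx = bit >> 5 picks the register and bit & 0x1f
 * the bit within it.
 */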
9984 
9985 /* called with rtnl_lock */
9986 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9987 {
9988 	struct sockaddr *addr = p;
9989 	struct bnx2x *bp = netdev_priv(dev);
9990 
9991 	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9992 		return -EINVAL;
9993 
9994 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9995 	if (netif_running(dev)) {
9996 		if (CHIP_IS_E1(bp))
9997 			bnx2x_set_mac_addr_e1(bp, 1);
9998 		else
9999 			bnx2x_set_mac_addr_e1h(bp, 1);
10000 	}
10001 
10002 	return 0;
10003 }
10004 
10005 /* called with rtnl_lock */
10006 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10007 {
10008 	struct mii_ioctl_data *data = if_mii(ifr);
10009 	struct bnx2x *bp = netdev_priv(dev);
10010 	int port = BP_PORT(bp);
10011 	int err;
10012 
10013 	switch (cmd) {
10014 	case SIOCGMIIPHY:
10015 		data->phy_id = bp->port.phy_addr;
10016 
10017 		/* fallthrough */
10018 
10019 	case SIOCGMIIREG: {
10020 		u16 mii_regval;
10021 
10022 		if (!netif_running(dev))
10023 			return -EAGAIN;
10024 
10025 		mutex_lock(&bp->port.phy_mutex);
10026 		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10027 				      DEFAULT_PHY_DEV_ADDR,
10028 				      (data->reg_num & 0x1f), &mii_regval);
10029 		data->val_out = mii_regval;
10030 		mutex_unlock(&bp->port.phy_mutex);
10031 		return err;
10032 	}
10033 
10034 	case SIOCSMIIREG:
10035 		if (!capable(CAP_NET_ADMIN))
10036 			return -EPERM;
10037 
10038 		if (!netif_running(dev))
10039 			return -EAGAIN;
10040 
10041 		mutex_lock(&bp->port.phy_mutex);
10042 		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10043 				       DEFAULT_PHY_DEV_ADDR,
10044 				       (data->reg_num & 0x1f), data->val_in);
10045 		mutex_unlock(&bp->port.phy_mutex);
10046 		return err;
10047 
10048 	default:
10049 		/* do nothing */
10050 		break;
10051 	}
10052 
10053 	return -EOPNOTSUPP;
10054 }
10055 
10056 /* called with rtnl_lock */
10057 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10058 {
10059 	struct bnx2x *bp = netdev_priv(dev);
10060 	int rc = 0;
10061 
10062 	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10063 	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10064 		return -EINVAL;
10065 
10066 	/* This does not race with packet allocation
10067 	 * because the actual alloc size is
10068 	 * only updated as part of load
10069 	 */
10070 	dev->mtu = new_mtu;
10071 
10072 	if (netif_running(dev)) {
10073 		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10074 		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10075 	}
10076 
10077 	return rc;
10078 }
10079 
10080 static void bnx2x_tx_timeout(struct net_device *dev)
10081 {
10082 	struct bnx2x *bp = netdev_priv(dev);
10083 
10084 #ifdef BNX2X_STOP_ON_ERROR
10085 	if (!bp->panic)
10086 		bnx2x_panic();
10087 #endif
10088 	/* This allows the netif to be shutdown gracefully before resetting */
10089 	schedule_work(&bp->reset_task);
10090 }
10091 
10092 #ifdef BCM_VLAN
10093 /* called with rtnl_lock */
10094 static void bnx2x_vlan_rx_register(struct net_device *dev,
10095 				   struct vlan_group *vlgrp)
10096 {
10097 	struct bnx2x *bp = netdev_priv(dev);
10098 
10099 	bp->vlgrp = vlgrp;
10100 
10101 	/* Set flags according to the required capabilities */
10102 	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10103 
10104 	if (dev->features & NETIF_F_HW_VLAN_TX)
10105 		bp->flags |= HW_VLAN_TX_FLAG;
10106 
10107 	if (dev->features & NETIF_F_HW_VLAN_RX)
10108 		bp->flags |= HW_VLAN_RX_FLAG;
10109 
10110 	if (netif_running(dev))
10111 		bnx2x_set_client_config(bp);
10112 }
10113 
10114 #endif
10115 
10116 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10117 static void poll_bnx2x(struct net_device *dev)
10118 {
10119 	struct bnx2x *bp = netdev_priv(dev);
10120 
10121 	disable_irq(bp->pdev->irq);
10122 	bnx2x_interrupt(bp->pdev->irq, dev);
10123 	enable_irq(bp->pdev->irq);
10124 }
10125 #endif
10126 
10127 static const struct net_device_ops bnx2x_netdev_ops = {
10128 	.ndo_open		= bnx2x_open,
10129 	.ndo_stop		= bnx2x_close,
10130 	.ndo_start_xmit		= bnx2x_start_xmit,
10131 	.ndo_set_multicast_list = bnx2x_set_rx_mode,
10132 	.ndo_set_mac_address	= bnx2x_change_mac_addr,
10133 	.ndo_validate_addr	= eth_validate_addr,
10134 	.ndo_do_ioctl		= bnx2x_ioctl,
10135 	.ndo_change_mtu		= bnx2x_change_mtu,
10136 	.ndo_tx_timeout		= bnx2x_tx_timeout,
10137 #ifdef BCM_VLAN
10138 	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
10139 #endif
10140 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10141 	.ndo_poll_controller	= poll_bnx2x,
10142 #endif
10143 };
10144 
10145 
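/* PCI-level initialization: enable the device, claim BAR0 (register
 * window) and BAR2 (doorbells), select a 64- or 32-bit DMA mask, map both
 * BARs and set up the net_device ops and offload features.  Errors unwind
 * through the err_out_* labels in reverse order.
 */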
10146 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10147 				    struct net_device *dev)
10148 {
10149 	struct bnx2x *bp;
10150 	int rc;
10151 
10152 	SET_NETDEV_DEV(dev, &pdev->dev);
10153 	bp = netdev_priv(dev);
10154 
10155 	bp->dev = dev;
10156 	bp->pdev = pdev;
10157 	bp->flags = 0;
10158 	bp->func = PCI_FUNC(pdev->devfn);
10159 
10160 	rc = pci_enable_device(pdev);
10161 	if (rc) {
10162 		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10163 		goto err_out;
10164 	}
10165 
10166 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10167 		printk(KERN_ERR PFX "Cannot find PCI device base address,"
10168 		       " aborting\n");
10169 		rc = -ENODEV;
10170 		goto err_out_disable;
10171 	}
10172 
10173 	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10174 		printk(KERN_ERR PFX "Cannot find second PCI device"
10175 		       " base address, aborting\n");
10176 		rc = -ENODEV;
10177 		goto err_out_disable;
10178 	}
10179 
10180 	if (atomic_read(&pdev->enable_cnt) == 1) {
10181 		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10182 		if (rc) {
10183 			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10184 			       " aborting\n");
10185 			goto err_out_disable;
10186 		}
10187 
10188 		pci_set_master(pdev);
10189 		pci_save_state(pdev);
10190 	}
10191 
10192 	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10193 	if (bp->pm_cap == 0) {
10194 		printk(KERN_ERR PFX "Cannot find power management"
10195 		       " capability, aborting\n");
10196 		rc = -EIO;
10197 		goto err_out_release;
10198 	}
10199 
10200 	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10201 	if (bp->pcie_cap == 0) {
10202 		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10203 		       " aborting\n");
10204 		rc = -EIO;
10205 		goto err_out_release;
10206 	}
10207 
10208 	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10209 		bp->flags |= USING_DAC_FLAG;
10210 		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10211 			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10212 			       " failed, aborting\n");
10213 			rc = -EIO;
10214 			goto err_out_release;
10215 		}
10216 
10217 	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10218 		printk(KERN_ERR PFX "System does not support DMA,"
10219 		       " aborting\n");
10220 		rc = -EIO;
10221 		goto err_out_release;
10222 	}
10223 
10224 	dev->mem_start = pci_resource_start(pdev, 0);
10225 	dev->base_addr = dev->mem_start;
10226 	dev->mem_end = pci_resource_end(pdev, 0);
10227 
10228 	dev->irq = pdev->irq;
10229 
10230 	bp->regview = pci_ioremap_bar(pdev, 0);
10231 	if (!bp->regview) {
10232 		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10233 		rc = -ENOMEM;
10234 		goto err_out_release;
10235 	}
10236 
10237 	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10238 					min_t(u64, BNX2X_DB_SIZE,
10239 					      pci_resource_len(pdev, 2)));
10240 	if (!bp->doorbells) {
10241 		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10242 		rc = -ENOMEM;
10243 		goto err_out_unmap;
10244 	}
10245 
10246 	bnx2x_set_power_state(bp, PCI_D0);
10247 
10248 	/* clean indirect addresses */
10249 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10250 			       PCICFG_VENDOR_ID_OFFSET);
10251 	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10252 	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10253 	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10254 	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10255 
10256 	dev->watchdog_timeo = TX_TIMEOUT;
10257 
10258 	dev->netdev_ops = &bnx2x_netdev_ops;
10259 	dev->ethtool_ops = &bnx2x_ethtool_ops;
10260 	dev->features |= NETIF_F_SG;
10261 	dev->features |= NETIF_F_HW_CSUM;
10262 	if (bp->flags & USING_DAC_FLAG)
10263 		dev->features |= NETIF_F_HIGHDMA;
10264 #ifdef BCM_VLAN
10265 	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10266 	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10267 #endif
10268 	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10269 	dev->features |= NETIF_F_TSO6;
10270 
10271 	return 0;
10272 
10273 err_out_unmap:
10274 	if (bp->regview) {
10275 		iounmap(bp->regview);
10276 		bp->regview = NULL;
10277 	}
10278 	if (bp->doorbells) {
10279 		iounmap(bp->doorbells);
10280 		bp->doorbells = NULL;
10281 	}
10282 
10283 err_out_release:
10284 	if (atomic_read(&pdev->enable_cnt) == 1)
10285 		pci_release_regions(pdev);
10286 
10287 err_out_disable:
10288 	pci_disable_device(pdev);
10289 	pci_set_drvdata(pdev, NULL);
10290 
10291 err_out:
10292 	return rc;
10293 }
10294 
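/* The negotiated PCIe link width and speed are read back from the Link
 * Control/Status register via the window at PCICFG_OFFSET; in this file
 * they are used for the banner printed at probe time.
 */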
10295 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10296 {
10297 	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10298 
10299 	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10300 	return val;
10301 }
10302 
10303 /* return value of 1=2.5GHz 2=5GHz */
10304 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10305 {
10306 	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10307 
10308 	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10309 	return val;
10310 }
10311 
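/* Probe routine: allocates the net_device (with struct bnx2x as its
 * private area), performs PCI setup and driver initialization, registers
 * the netdev and prints a one-line summary of the board, PCIe link and
 * MAC address.
 */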
10312 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10313 				    const struct pci_device_id *ent)
10314 {
10315 	static int version_printed;
10316 	struct net_device *dev = NULL;
10317 	struct bnx2x *bp;
10318 	int rc;
10319 
10320 	if (version_printed++ == 0)
10321 		printk(KERN_INFO "%s", version);
10322 
10323 	/* dev zeroed in alloc_etherdev */
10324 	dev = alloc_etherdev(sizeof(*bp));
10325 	if (!dev) {
10326 		printk(KERN_ERR PFX "Cannot allocate net device\n");
10327 		return -ENOMEM;
10328 	}
10329 
10330 	bp = netdev_priv(dev);
10331 	bp->msglevel = debug;
10332 
10333 	rc = bnx2x_init_dev(pdev, dev);
10334 	if (rc < 0) {
10335 		free_netdev(dev);
10336 		return rc;
10337 	}
10338 
10339 	pci_set_drvdata(pdev, dev);
10340 
10341 	rc = bnx2x_init_bp(bp);
10342 	if (rc)
10343 		goto init_one_exit;
10344 
10345 	rc = register_netdev(dev);
10346 	if (rc) {
10347 		dev_err(&pdev->dev, "Cannot register net device\n");
10348 		goto init_one_exit;
10349 	}
10350 
10351 	bp->common.name = board_info[ent->driver_data].name;
10352 	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10353 	       " IRQ %d, ", dev->name, bp->common.name,
10354 	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10355 	       bnx2x_get_pcie_width(bp),
10356 	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10357 	       dev->base_addr, bp->pdev->irq);
10358 	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10359 	return 0;
10360 
10361 init_one_exit:
10362 	if (bp->regview)
10363 		iounmap(bp->regview);
10364 
10365 	if (bp->doorbells)
10366 		iounmap(bp->doorbells);
10367 
10368 	free_netdev(dev);
10369 
10370 	if (atomic_read(&pdev->enable_cnt) == 1)
10371 		pci_release_regions(pdev);
10372 
10373 	pci_disable_device(pdev);
10374 	pci_set_drvdata(pdev, NULL);
10375 
10376 	return rc;
10377 }
10378 
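/* Remove: mirror image of bnx2x_init_one() - unregister the netdev,
 * unmap both BARs, free the net_device and release/disable the PCI
 * device.
 */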
10379 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10380 {
10381 	struct net_device *dev = pci_get_drvdata(pdev);
10382 	struct bnx2x *bp;
10383 
10384 	if (!dev) {
10385 		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10386 		return;
10387 	}
10388 	bp = netdev_priv(dev);
10389 
10390 	unregister_netdev(dev);
10391 
10392 	if (bp->regview)
10393 		iounmap(bp->regview);
10394 
10395 	if (bp->doorbells)
10396 		iounmap(bp->doorbells);
10397 
10398 	free_netdev(dev);
10399 
10400 	if (atomic_read(&pdev->enable_cnt) == 1)
10401 		pci_release_regions(pdev);
10402 
10403 	pci_disable_device(pdev);
10404 	pci_set_drvdata(pdev, NULL);
10405 }
10406 
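/* Power management: suspend saves PCI state and, if the interface is up,
 * detaches it and unloads the NIC with UNLOAD_CLOSE before dropping to
 * the sleep state chosen by the PM core.
 */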
10407 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10408 {
10409 	struct net_device *dev = pci_get_drvdata(pdev);
10410 	struct bnx2x *bp;
10411 
10412 	if (!dev) {
10413 		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10414 		return -ENODEV;
10415 	}
10416 	bp = netdev_priv(dev);
10417 
10418 	rtnl_lock();
10419 
10420 	pci_save_state(pdev);
10421 
10422 	if (!netif_running(dev)) {
10423 		rtnl_unlock();
10424 		return 0;
10425 	}
10426 
10427 	netif_device_detach(dev);
10428 
10429 	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10430 
10431 	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10432 
10433 	rtnl_unlock();
10434 
10435 	return 0;
10436 }
10437 
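/* Resume restores PCI state, brings the device back to D0 and reloads
 * the NIC with LOAD_OPEN if the interface was up.
 */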
10438 static int bnx2x_resume(struct pci_dev *pdev)
10439 {
10440 	struct net_device *dev = pci_get_drvdata(pdev);
10441 	struct bnx2x *bp;
10442 	int rc;
10443 
10444 	if (!dev) {
10445 		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10446 		return -ENODEV;
10447 	}
10448 	bp = netdev_priv(dev);
10449 
10450 	rtnl_lock();
10451 
10452 	pci_restore_state(pdev);
10453 
10454 	if (!netif_running(dev)) {
10455 		rtnl_unlock();
10456 		return 0;
10457 	}
10458 
10459 	bnx2x_set_power_state(bp, PCI_D0);
10460 	netif_device_attach(dev);
10461 
10462 	rc = bnx2x_nic_load(bp, LOAD_OPEN);
10463 
10464 	rtnl_unlock();
10465 
10466 	return rc;
10467 }
10468 
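/* Stripped-down unload used during EEH recovery: the device may no longer
 * be reachable, so only host-side resources (IRQs, SKBs, SGEs, NAPI
 * contexts and driver memory) are released, without the normal unload
 * handshake.
 */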
10469 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10470 {
10471 	int i;
10472 
10473 	bp->state = BNX2X_STATE_ERROR;
10474 
10475 	bp->rx_mode = BNX2X_RX_MODE_NONE;
10476 
10477 	bnx2x_netif_stop(bp, 0);
10478 
10479 	del_timer_sync(&bp->timer);
10480 	bp->stats_state = STATS_STATE_DISABLED;
10481 	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10482 
10483 	/* Release IRQs */
10484 	bnx2x_free_irq(bp);
10485 
10486 	if (CHIP_IS_E1(bp)) {
10487 		struct mac_configuration_cmd *config =
10488 						bnx2x_sp(bp, mcast_config);
10489 
10490 		for (i = 0; i < config->hdr.length_6b; i++)
10491 			CAM_INVALIDATE(config->config_table[i]);
10492 	}
10493 
10494 	/* Free SKBs, SGEs, TPA pool and driver internals */
10495 	bnx2x_free_skbs(bp);
10496 	for_each_queue(bp, i)
10497 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10498 	for_each_queue(bp, i)
10499 		netif_napi_del(&bnx2x_fp(bp, i, napi));
10500 	bnx2x_free_mem(bp);
10501 
10502 	bp->state = BNX2X_STATE_CLOSED;
10503 
10504 	netif_carrier_off(bp->dev);
10505 
10506 	return 0;
10507 }
10508 
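/* After a slot reset the shared-memory base and the MCP firmware sequence
 * number are re-read; if the shmem pointer looks invalid the MCP is
 * assumed dead and NO_MCP_FLAG is set.
 */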
10509 static void bnx2x_eeh_recover(struct bnx2x *bp)
10510 {
10511 	u32 val;
10512 
10513 	mutex_init(&bp->port.phy_mutex);
10514 
10515 	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10516 	bp->link_params.shmem_base = bp->common.shmem_base;
10517 	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10518 
10519 	if (!bp->common.shmem_base ||
10520 	    (bp->common.shmem_base < 0xA0000) ||
10521 	    (bp->common.shmem_base >= 0xC0000)) {
10522 		BNX2X_DEV_INFO("MCP not active\n");
10523 		bp->flags |= NO_MCP_FLAG;
10524 		return;
10525 	}
10526 
10527 	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10528 	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10529 		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10530 		BNX2X_ERR("BAD MCP validity signature\n");
10531 
10532 	if (!BP_NOMCP(bp)) {
10533 		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10534 			      & DRV_MSG_SEQ_NUMBER_MASK);
10535 		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10536 	}
10537 }
10538 
10539 /**
10540  * bnx2x_io_error_detected - called when PCI error is detected
10541  * @pdev: Pointer to PCI device
10542  * @state: The current pci connection state
10543  *
10544  * This function is called after a PCI bus error affecting
10545  * this device has been detected.
10546  */
10547 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10548 						pci_channel_state_t state)
10549 {
10550 	struct net_device *dev = pci_get_drvdata(pdev);
10551 	struct bnx2x *bp = netdev_priv(dev);
10552 
10553 	rtnl_lock();
10554 
10555 	netif_device_detach(dev);
10556 
10557 	if (netif_running(dev))
10558 		bnx2x_eeh_nic_unload(bp);
10559 
10560 	pci_disable_device(pdev);
10561 
10562 	rtnl_unlock();
10563 
10564 	/* Request a slot reset */
10565 	return PCI_ERS_RESULT_NEED_RESET;
10566 }
10567 
10568 /**
10569  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10570  * @pdev: Pointer to PCI device
10571  *
10572  * Restart the card from scratch, as if from a cold-boot.
10573  */
10574 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10575 {
10576 	struct net_device *dev = pci_get_drvdata(pdev);
10577 	struct bnx2x *bp = netdev_priv(dev);
10578 
10579 	rtnl_lock();
10580 
10581 	if (pci_enable_device(pdev)) {
10582 		dev_err(&pdev->dev,
10583 			"Cannot re-enable PCI device after reset\n");
10584 		rtnl_unlock();
10585 		return PCI_ERS_RESULT_DISCONNECT;
10586 	}
10587 
10588 	pci_set_master(pdev);
10589 	pci_restore_state(pdev);
10590 
10591 	if (netif_running(dev))
10592 		bnx2x_set_power_state(bp, PCI_D0);
10593 
10594 	rtnl_unlock();
10595 
10596 	return PCI_ERS_RESULT_RECOVERED;
10597 }
10598 
10599 /**
10600  * bnx2x_io_resume - called when traffic can start flowing again
10601  * @pdev: Pointer to PCI device
10602  *
10603  * This callback is called when the error recovery driver tells us that
10604  * it is OK to resume normal operation.
10605  */
10606 static void bnx2x_io_resume(struct pci_dev *pdev)
10607 {
10608 	struct net_device *dev = pci_get_drvdata(pdev);
10609 	struct bnx2x *bp = netdev_priv(dev);
10610 
10611 	rtnl_lock();
10612 
10613 	bnx2x_eeh_recover(bp);
10614 
10615 	if (netif_running(dev))
10616 		bnx2x_nic_load(bp, LOAD_NORMAL);
10617 
10618 	netif_device_attach(dev);
10619 
10620 	rtnl_unlock();
10621 }
10622 
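/* PCI error recovery (EEH/AER) flow: error_detected() detaches and unloads
 * the NIC and asks for a slot reset, slot_reset() re-enables the device
 * after the bus reset, and resume() re-reads shared memory and reloads
 * the NIC once traffic may flow again.
 */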
10623 static struct pci_error_handlers bnx2x_err_handler = {
10624 	.error_detected = bnx2x_io_error_detected,
10625 	.slot_reset = bnx2x_io_slot_reset,
10626 	.resume = bnx2x_io_resume,
10627 };
10628 
10629 static struct pci_driver bnx2x_pci_driver = {
10630 	.name        = DRV_MODULE_NAME,
10631 	.id_table    = bnx2x_pci_tbl,
10632 	.probe       = bnx2x_init_one,
10633 	.remove      = __devexit_p(bnx2x_remove_one),
10634 	.suspend     = bnx2x_suspend,
10635 	.resume      = bnx2x_resume,
10636 	.err_handler = &bnx2x_err_handler,
10637 };
10638 
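/* Module init: the driver's private workqueue is created before the PCI
 * driver is registered so that probe can queue deferred work right away;
 * cleanup unregisters the driver and then destroys the workqueue.  (As
 * written, the workqueue is not destroyed if pci_register_driver() fails.)
 */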
10639 static int __init bnx2x_init(void)
10640 {
10641 	bnx2x_wq = create_singlethread_workqueue("bnx2x");
10642 	if (bnx2x_wq == NULL) {
10643 		printk(KERN_ERR PFX "Cannot create workqueue\n");
10644 		return -ENOMEM;
10645 	}
10646 
10647 	return pci_register_driver(&bnx2x_pci_driver);
10648 }
10649 
10650 static void __exit bnx2x_cleanup(void)
10651 {
10652 	pci_unregister_driver(&bnx2x_pci_driver);
10653 
10654 	destroy_workqueue(bnx2x_wq);
10655 }
10656 
10657 module_init(bnx2x_init);
10658 module_exit(bnx2x_cleanup);
10659 
10660