/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2010 Exar Corp.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watchdog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all the code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters supported by the driver, with a brief
 * explanation of each variable:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 * in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *		values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the
 *     driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 * Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     2(MSI_X). Default value is '2(MSI_X)'
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated into a single large packet
 * napi: This parameter is used to enable/disable NAPI (polling Rx)
 *     Possible values '1' for enable and '0' for disable. Default is '1'
 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload
 *      (UFO). Possible values '1' for enable and '0' for disable.
 *      Default is '0'
 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
 *                 Possible values '1' for enable , '0' for disable.
 *                 Default is '2' - which means disable in promisc mode
 *                 and enable in non-promiscuous mode.
 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
 *      Possible values '1' for enable and '0' for disable. Default is '0'
 ************************************************************************/
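
/*
 * Illustrative example (hypothetical values) of passing a few of the
 * parameters above at module load time:
 *
 *	modprobe s2io tx_fifo_num=2 rx_ring_num=2 intr_type=2 napi=1
 */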

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mdio.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/tcp.h>

#include <asm/div64.h>
#include <asm/irq.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.26.28"

/* S2io Driver name & version. */
static const char s2io_driver_name[] = "Neterion";
static const char s2io_driver_version[] = DRV_VERSION;

static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};

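/*
 * As the checks below suggest, an RxD is considered up to date once the
 * adapter has cleared the OWN bit (the host owns the descriptor again)
 * and has overwritten the marker pattern in Control_2.
 */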
static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

/*
 * Cards with the following subsystem IDs have a link-state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(dev_type == XFRAME_I_DEVICE) ?					\
	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))

static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}

/* Ethtool related variables and Macros. */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};

static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};

static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};

#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)

/*
 * Configure and arm a timer. The do/while (0) wrapper keeps this
 * multi-statement macro safe to use in if/else bodies.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)

/* copy mac addr to def_mac_addr array */
static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
{
	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
}
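
/*
 * The 48-bit MAC address is packed into the low six bytes of the u64,
 * most significant octet first. For example (hypothetical address),
 * 00:80:C2:11:22:33 would arrive as 0x0080C2112233ULL, so
 * mac_addr[0] = 0x00 (bits 47:40) and mac_addr[5] = 0x33 (bits 7:0).
 */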

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define	END_SIGN	0x0
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit the max IP pkt size (64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
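
/*
 * The array parameters above take comma-separated lists on the module
 * command line, e.g. (hypothetical values):
 *
 *	modprobe s2io tx_fifo_len=4096,1024 rts_frm_len=1518,9600
 */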

/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
	.err_handler = &s2io_err_handler,
};

/* A helper macro used by both the init and free shared_mem functions. */
#define TXD_MEM_PAGE_CNT(len, per_each) ((len + per_each - 1) / per_each)
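
/*
 * TXD_MEM_PAGE_CNT is a ceiling division: the number of pages needed to
 * hold 'len' items at 'per_each' items per page. For example (hypothetical
 * numbers), a FIFO of 100 TxD lists at 32 lists per page needs
 * (100 + 32 - 1) / 32 = 4 pages.
 */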

/* netqueue manipulation helper functions */
static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
	}
	netif_tx_stop_all_queues(sp->dev);
}

static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
{
	if (!sp->config.multiq)
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_STOP;

	netif_tx_stop_all_queues(sp->dev);
}

static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
	}
	netif_tx_start_all_queues(sp->dev);
}

static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
{
	if (!sp->config.multiq)
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_START;

	netif_tx_start_all_queues(sp->dev);
}

static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
	}
	netif_tx_wake_all_queues(sp->dev);
}

static inline void s2io_wake_tx_queue(
	struct fifo_info *fifo, int cnt, u8 multiq)
{
	if (multiq) {
		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(fifo->dev)) {
			fifo->queue_state = FIFO_QUEUE_START;
			netif_wake_queue(fifo->dev);
		}
	}
}
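
/*
 * Note on the helpers above: when multiqueue support is disabled the
 * driver still tracks a software FIFO_QUEUE_START/STOP state per FIFO
 * while starting and stopping the single netdev queue, so that
 * s2io_wake_tx_queue() only wakes the queue for a FIFO that was
 * actually stopped.
 */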

/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/* Legal values are from 2 to 8192 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;

			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address (this can happen on
			 * certain platforms like PPC), reallocate. Store the
			 * virtual address of the page we don't want, so it
			 * can be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;

				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/*  Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was allocated up to the
				 * point of failure.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);

			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;

			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocate storage for the buffer addresses in 2-buffer
		 * mode, and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
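					/*
					 * Round ba_0 up to the next alignment
					 * boundary (assuming ALIGN_SIZE is of
					 * the form 2^n - 1); ba_0_org keeps
					 * the original pointer for kfree().
					 */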
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		pci_alloc_consistent(nic->pdev, size,
				     &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was allocated up to the
		 * point of failure.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		  dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}

/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function frees all the memory allocated by
 * init_shared_mem() and returns it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	swstats = &stats->sw_stat;

	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			if (!fli->list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    fli->list_virt_addr,
					    fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];

					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}

/**
 * s2io_verify_pci_mode - Read the PCI/PCI-X mode the adapter detected
 * @nic: device private variable
 * Return: the PCI mode read from the pci_mode register, or -1 if the
 * mode is unknown.
 */

static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;      /* Unknown PCI mode */
	return mode;
}

#define NEC_VENID   0x1033
#define NEC_DEVID   0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;

	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent) {
				pci_dev_put(tdev);
				return 1;
			}
		}
	}
	return 0;
}
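
/*
 * Note on s2io_on_nec_bridge(): pci_get_device() holds a reference on
 * the device it returns and drops the reference on the device passed
 * in, so breaking out of the loop early requires the explicit
 * pci_dev_put() above.
 */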

static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};

/**
 * s2io_print_pci_mode - Print the bus type and speed the adapter is on
 * @nic: device private variable
 * Return: the PCI mode, or -1 if the mode is unknown or unsupported.
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;
	const char *pcimode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		pcimode = "33MHz PCI bus";
		break;
	case PCI_MODE_PCI_66:
		pcimode = "66MHz PCI bus";
		break;
	case PCI_MODE_PCIX_M1_66:
		pcimode = "66MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M1_100:
		pcimode = "100MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M1_133:
		pcimode = "133MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M2_66:
		pcimode = "133MHz PCIX(M2) bus";
		break;
	case PCI_MODE_PCIX_M2_100:
		pcimode = "200MHz PCIX(M2) bus";
		break;
	case PCI_MODE_PCIX_M2_133:
		pcimode = "266MHz PCIX(M2) bus";
		break;
	default:
		pcimode = "unsupported bus!";
		mode = -1;
	}

	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);

	return mode;
}

/**
 *  init_tti - Initialization of the transmit traffic interrupt scheme
 *  @nic: device private variable
 *  @link: link status (UP/DOWN) used to enable/disable continuous
 *  transmit interrupts
 *  Description: The function configures the transmit traffic interrupts.
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			int count = (nic->config.bus_speed * 125) / 2;

			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
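
/*
 * Worked example for the Xframe II timer value above: with
 * config.bus_speed == 266 (a 266MHz PCI-X M2 bus), count works out to
 * 266 * 125 / 2 = 16625, the TTI timer value programmed to yield the
 * roughly 250 transmit interrupts per second mentioned in the comment.
 */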

/**
 *  init_nic - Initialization of hardware
 *  @nic: device private variable
 *  Description: The function sequentially configures every block
 *  of the H/W from its reset state.
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Ensure that it's safe to access registers by checking that the
	 * RIC_RUNNING bit is reset. The check is valid only for Xframe II.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/*  Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
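
	/*
	 * The pattern above is the keyed write sequence for mac_cfg on
	 * this hardware: the register is written as two 32-bit halves,
	 * and each half-write is preceded by writing RMAC_CFG_KEY(0x4C0D)
	 * to rmac_cfg_key, which evidently acts as a write-enable key for
	 * the protected register. The same sequence recurs below for the
	 * pad-strip and FCS-strip settings.
	 */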

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/*  Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/*  Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

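	/*
	 * Each tx_fifo_partition register describes two FIFOs in two
	 * 32-bit halves; per half, vBIT() places the FIFO length at bits
	 * 19..31 and its priority at bits 5..7 (bit 0 being the MSB in
	 * this driver's bit numbering), which is why the loop below
	 * flushes val64 to a register after every second FIFO.
	 */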
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
		TX_PA_CFG_IGNORE_SNAP_OUI |
		TX_PA_CFG_IGNORE_LLC_CTRL |
		TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);
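
	/*
	 * Ring 0 additionally absorbs the division remainder so the
	 * shares always sum to mem_size. For example, with three rings
	 * and mem_size = 64, ring 0 gets 64/3 + 64%3 = 22 shares and
	 * rings 1 and 2 get 21 each.
	 */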

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}
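
	/*
	 * In the round-robin registers each byte names the FIFO (or,
	 * below, the ring) to be serviced in one scheduler timeslot;
	 * e.g. for two FIFOs the pattern 00 01 00 01 ... alternates
	 * between FIFO 0 and FIFO 1 in successive slots.
	 */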

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS with
	 * equal priority.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);
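
	/*
	 * The 22 bytes added to the MTU above presumably cover the
	 * 14-byte Ethernet header, the 4-byte VLAN tag and the
	 * 4-byte FCS.
	 */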

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0, the user did not specify
		 * frame-length steering for this ring. If the user did
		 * provide a frame length, program the rts_frm_len
		 * register with that value; otherwise leave it as is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG,
				  "%s: rts_ds_steer failed on codepoint %d\n",
				  dev->name, i);
			return -ENODEV;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */

	/* Initialize TTI */
	if (init_tti(nic, nic->last_link_state) != SUCCESS)
		return -ENODEV;
1681 
1682 	/* RTI Initialization */
1683 	if (nic->device_type == XFRAME_II_DEVICE) {
1684 		/*
1685 		 * Programmed to generate Apprx 500 Intrs per
1686 		 * second
1687 		 */
1688 		int count = (nic->config.bus_speed * 125)/4;
1689 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1690 	} else
1691 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1692 	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1693 		RTI_DATA1_MEM_RX_URNG_B(0x10) |
1694 		RTI_DATA1_MEM_RX_URNG_C(0x30) |
1695 		RTI_DATA1_MEM_RX_TIMER_AC_EN;
1696 
1697 	writeq(val64, &bar0->rti_data1_mem);
1698 
1699 	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1700 		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1701 	if (nic->config.intr_type == MSI_X)
1702 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1703 			  RTI_DATA2_MEM_RX_UFC_D(0x40));
1704 	else
1705 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1706 			  RTI_DATA2_MEM_RX_UFC_D(0x80));
1707 	writeq(val64, &bar0->rti_data2_mem);
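	/*
	 * URNG_A/B/C define Rx link-utilization ranges and UFC_A..D appear
	 * to set the frame counts that trigger an interrupt in each range;
	 * note that MSI-X is given smaller UFC_C/UFC_D values than INTA
	 * above, i.e. it interrupts more eagerly under load.
	 */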
1708 
1709 	for (i = 0; i < config->rx_ring_num; i++) {
1710 		val64 = RTI_CMD_MEM_WE |
1711 			RTI_CMD_MEM_STROBE_NEW_CMD |
1712 			RTI_CMD_MEM_OFFSET(i);
1713 		writeq(val64, &bar0->rti_command_mem);
1714 
1715 		/*
1716 		 * Once the operation completes, the Strobe bit of the
1717 		 * command register will be reset. We poll for this
1718 		 * particular condition. We wait for a maximum of 500ms
1719 		 * for the operation to complete; if it's not complete
1720 		 * by then we return an error.
1721 		 */
1722 		time = 0;
1723 		while (true) {
1724 			val64 = readq(&bar0->rti_command_mem);
1725 			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1726 				break;
1727 
1728 			if (time > 10) {
1729 				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1730 					  dev->name);
1731 				return -ENODEV;
1732 			}
1733 			time++;
1734 			msleep(50);
1735 		}
1736 	}
1737 
1738 	/*
1739 	 * Initialize proper values as the pause threshold for all
1740 	 * eight queues on the Rx side.
1741 	 */
1742 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1743 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1744 
1745 	/* Disable RMAC PAD STRIPPING */
1746 	add = &bar0->mac_cfg;
1747 	val64 = readq(&bar0->mac_cfg);
1748 	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1749 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1750 	writel((u32) (val64), add);
1751 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1752 	writel((u32) (val64 >> 32), (add + 4));
1753 	val64 = readq(&bar0->mac_cfg);
1754 
1755 	/* Enable FCS stripping by adapter */
1756 	add = &bar0->mac_cfg;
1757 	val64 = readq(&bar0->mac_cfg);
1758 	val64 |= MAC_CFG_RMAC_STRIP_FCS;
1759 	if (nic->device_type == XFRAME_II_DEVICE)
1760 		writeq(val64, &bar0->mac_cfg);
1761 	else {
1762 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1763 		writel((u32) (val64), add);
1764 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1765 		writel((u32) (val64 >> 32), (add + 4));
1766 	}
1767 
1768 	/*
1769 	 * Set the time value to be inserted in the pause frame
1770 	 * generated by xena.
1771 	 */
1772 	val64 = readq(&bar0->rmac_pause_cfg);
1773 	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1774 	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1775 	writeq(val64, &bar0->rmac_pause_cfg);
1776 
1777 	/*
1778 	 * Set the Threshold Limit for Generating the pause frame
1779 	 * If the amount of data in any Queue exceeds ratio of
1780 	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1781 	 * pause frame is generated
1782 	 */
1783 	val64 = 0;
1784 	for (i = 0; i < 4; i++) {
1785 		val64 |= (((u64)0xFF00 |
1786 			   nic->mac_control.mc_pause_threshold_q0q3)
1787 			  << (i * 2 * 8));
1788 	}
1789 	writeq(val64, &bar0->mc_pause_thresh_q0q3);
1790 
1791 	val64 = 0;
1792 	for (i = 0; i < 4; i++) {
1793 		val64 |= (((u64)0xFF00 |
1794 			   nic->mac_control.mc_pause_threshold_q4q7)
1795 			  << (i * 2 * 8));
1796 	}
1797 	writeq(val64, &bar0->mc_pause_thresh_q4q7);
1798 
1799 	/*
1800 	 * TxDMA will stop Read request if the number of read split has
1801 	 * exceeded the limit pointed by shared_splits
1802 	 */
1803 	val64 = readq(&bar0->pic_control);
1804 	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1805 	writeq(val64, &bar0->pic_control);
1806 
1807 	if (nic->config.bus_speed == 266) {
1808 		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1809 		writeq(0x0, &bar0->read_retry_delay);
1810 		writeq(0x0, &bar0->write_retry_delay);
1811 	}
1812 
1813 	/*
1814 	 * Programming the Herc to split every write transaction
1815 	 * that does not start on an ADB to reduce disconnects.
1816 	 */
1817 	if (nic->device_type == XFRAME_II_DEVICE) {
1818 		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1819 			MISC_LINK_STABILITY_PRD(3);
1820 		writeq(val64, &bar0->misc_control);
1821 		val64 = readq(&bar0->pic_control2);
1822 		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1823 		writeq(val64, &bar0->pic_control2);
1824 	}
1825 	if (strstr(nic->product_name, "CX4")) {
1826 		val64 = TMAC_AVG_IPG(0x17);
1827 		writeq(val64, &bar0->tmac_avg_ipg);
1828 	}
1829 
1830 	return SUCCESS;
1831 }
1832 #define LINK_UP_DOWN_INTERRUPT		1
1833 #define MAC_RMAC_ERR_TIMER		2
1834 
1835 static int s2io_link_fault_indication(struct s2io_nic *nic)
1836 {
1837 	if (nic->device_type == XFRAME_II_DEVICE)
1838 		return LINK_UP_DOWN_INTERRUPT;
1839 	else
1840 		return MAC_RMAC_ERR_TIMER;
1841 }
1842 
1843 /**
1844  *  do_s2io_write_bits -  update alarm bits in alarm register
1845  *  @value: alarm bits
1846  *  @flag: ENABLE_INTRS or DISABLE_INTRS
1847  *  @addr: address value
1848  *  Description: update alarm bits in alarm register
1849  *  Return Value:
1850  *  NONE.
1851  */
1852 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1853 {
1854 	u64 temp64;
1855 
1856 	temp64 = readq(addr);
1857 
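	/* The alarm mask registers are active-low: enabling interrupts
	 * clears the requested bits (unmasking them), disabling sets them.
	 */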
1858 	if (flag == ENABLE_INTRS)
1859 		temp64 &= ~((u64)value);
1860 	else
1861 		temp64 |= ((u64)value);
1862 	writeq(temp64, addr);
1863 }
1864 
1865 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1866 {
1867 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1868 	register u64 gen_int_mask = 0;
1869 	u64 interruptible;
1870 
1871 	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1872 	if (mask & TX_DMA_INTR) {
1873 		gen_int_mask |= TXDMA_INT_M;
1874 
1875 		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1876 				   TXDMA_PCC_INT | TXDMA_TTI_INT |
1877 				   TXDMA_LSO_INT | TXDMA_TPA_INT |
1878 				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1879 
1880 		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1881 				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1882 				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1883 				   &bar0->pfc_err_mask);
1884 
1885 		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1886 				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1887 				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1888 
1889 		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1890 				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1891 				   PCC_N_SERR | PCC_6_COF_OV_ERR |
1892 				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1893 				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1894 				   PCC_TXB_ECC_SG_ERR,
1895 				   flag, &bar0->pcc_err_mask);
1896 
1897 		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1898 				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1899 
1900 		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1901 				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1902 				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1903 				   flag, &bar0->lso_err_mask);
1904 
1905 		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1906 				   flag, &bar0->tpa_err_mask);
1907 
1908 		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1909 	}
1910 
1911 	if (mask & TX_MAC_INTR) {
1912 		gen_int_mask |= TXMAC_INT_M;
1913 		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1914 				   &bar0->mac_int_mask);
1915 		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1916 				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1917 				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1918 				   flag, &bar0->mac_tmac_err_mask);
1919 	}
1920 
1921 	if (mask & TX_XGXS_INTR) {
1922 		gen_int_mask |= TXXGXS_INT_M;
1923 		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1924 				   &bar0->xgxs_int_mask);
1925 		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1926 				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1927 				   flag, &bar0->xgxs_txgxs_err_mask);
1928 	}
1929 
1930 	if (mask & RX_DMA_INTR) {
1931 		gen_int_mask |= RXDMA_INT_M;
1932 		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1933 				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1934 				   flag, &bar0->rxdma_int_mask);
1935 		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1936 				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1937 				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1938 				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1939 		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1940 				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1941 				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1942 				   &bar0->prc_pcix_err_mask);
1943 		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1944 				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1945 				   &bar0->rpa_err_mask);
1946 		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1947 				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1948 				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1949 				   RDA_FRM_ECC_SG_ERR |
1950 				   RDA_MISC_ERR|RDA_PCIX_ERR,
1951 				   flag, &bar0->rda_err_mask);
1952 		do_s2io_write_bits(RTI_SM_ERR_ALARM |
1953 				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1954 				   flag, &bar0->rti_err_mask);
1955 	}
1956 
1957 	if (mask & RX_MAC_INTR) {
1958 		gen_int_mask |= RXMAC_INT_M;
1959 		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1960 				   &bar0->mac_int_mask);
1961 		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1962 				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1963 				 RMAC_DOUBLE_ECC_ERR);
1964 		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1965 			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1966 		do_s2io_write_bits(interruptible,
1967 				   flag, &bar0->mac_rmac_err_mask);
1968 	}
1969 
1970 	if (mask & RX_XGXS_INTR) {
1971 		gen_int_mask |= RXXGXS_INT_M;
1972 		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1973 				   &bar0->xgxs_int_mask);
1974 		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1975 				   &bar0->xgxs_rxgxs_err_mask);
1976 	}
1977 
1978 	if (mask & MC_INTR) {
1979 		gen_int_mask |= MC_INT_M;
1980 		do_s2io_write_bits(MC_INT_MASK_MC_INT,
1981 				   flag, &bar0->mc_int_mask);
1982 		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1983 				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1984 				   &bar0->mc_err_mask);
1985 	}
1986 	nic->general_int_mask = gen_int_mask;
1987 
1988 	/* Remove this line when alarm interrupts are enabled */
1989 	nic->general_int_mask = 0;
1990 }
1991 
1992 /**
1993  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1994  *  @nic: device private variable
1995  *  @mask: A mask indicating which Intr block must be modified
1996  *  @flag: A flag indicating whether to enable or disable the Intrs.
1997  *  Description: This function will either disable or enable the interrupts
1998  *  depending on the flag argument. The mask argument can be used to
1999  *  enable/disable any Intr block.
2000  *  Return Value: NONE.
2001  */
2002 
2003 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2004 {
2005 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2006 	register u64 temp64 = 0, intr_mask = 0;
2007 
2008 	intr_mask = nic->general_int_mask;
2009 
2010 	/*  Top level interrupt classification */
2011 	/*  PIC Interrupts */
2012 	if (mask & TX_PIC_INTR) {
2013 		/*  Enable PIC Intrs in the general intr mask register */
2014 		intr_mask |= TXPIC_INT_M;
2015 		if (flag == ENABLE_INTRS) {
2016 			/*
2017 			 * If Hercules adapter enable GPIO otherwise
2018 			 * disable all PCIX, Flash, MDIO, IIC and GPIO
2019 			 * interrupts for now.
2020 			 * TODO
2021 			 */
2022 			if (s2io_link_fault_indication(nic) ==
2023 			    LINK_UP_DOWN_INTERRUPT) {
2024 				do_s2io_write_bits(PIC_INT_GPIO, flag,
2025 						   &bar0->pic_int_mask);
2026 				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2027 						   &bar0->gpio_int_mask);
2028 			} else
2029 				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2030 		} else if (flag == DISABLE_INTRS) {
2031 			/*
2032 			 * Disable PIC Intrs in the general
2033 			 * intr mask register
2034 			 */
2035 			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2036 		}
2037 	}
2038 
2039 	/*  Tx traffic interrupts */
2040 	if (mask & TX_TRAFFIC_INTR) {
2041 		intr_mask |= TXTRAFFIC_INT_M;
2042 		if (flag == ENABLE_INTRS) {
2043 			/*
2044 			 * Enable all the Tx side interrupts
2045 			 * writing 0 enables all 64 TX interrupt levels
2046 			 */
2047 			writeq(0x0, &bar0->tx_traffic_mask);
2048 		} else if (flag == DISABLE_INTRS) {
2049 			/*
2050 			 * Disable Tx Traffic Intrs in the general intr mask
2051 			 * register.
2052 			 */
2053 			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2054 		}
2055 	}
2056 
2057 	/*  Rx traffic interrupts */
2058 	if (mask & RX_TRAFFIC_INTR) {
2059 		intr_mask |= RXTRAFFIC_INT_M;
2060 		if (flag == ENABLE_INTRS) {
2061 			/* writing 0 enables all 8 RX interrupt levels */
2062 			writeq(0x0, &bar0->rx_traffic_mask);
2063 		} else if (flag == DISABLE_INTRS) {
2064 			/*
2065 			 * Disable Rx Traffic Intrs in the general intr mask
2066 			 * register.
2067 			 */
2068 			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2069 		}
2070 	}
2071 
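	/* general_int_mask is likewise active-low: enabling clears the
	 * selected block bits, while disabling masks all sources at once.
	 */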
2072 	temp64 = readq(&bar0->general_int_mask);
2073 	if (flag == ENABLE_INTRS)
2074 		temp64 &= ~((u64)intr_mask);
2075 	else
2076 		temp64 = DISABLE_ALL_INTRS;
2077 	writeq(temp64, &bar0->general_int_mask);
2078 
2079 	nic->general_int_mask = readq(&bar0->general_int_mask);
2080 }
2081 
2082 /**
2083  *  verify_pcc_quiescent - Checks for PCC quiescent state
2084  *  Return: 1 If PCC is quiescent
2085  *          0 If PCC is not quiescent
2086  */
2087 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2088 {
2089 	int ret = 0, herc;
2090 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2091 	u64 val64 = readq(&bar0->adapter_status);
2092 
2093 	herc = (sp->device_type == XFRAME_II_DEVICE);
2094 
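	/*
	 * Xframe II and Xframe I revisions >= 4 report PCC idle through the
	 * single ADAPTER_STATUS_RMAC_PCC_IDLE field; older Xframe I parts
	 * expose four per-PCC idle bits instead, hence the two branches.
	 */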
2095 	if (flag == false) {
2096 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2097 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2098 				ret = 1;
2099 		} else {
2100 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2101 				ret = 1;
2102 		}
2103 	} else {
2104 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2105 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2106 			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2107 				ret = 1;
2108 		} else {
2109 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2110 			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2111 				ret = 1;
2112 		}
2113 	}
2114 
2115 	return ret;
2116 }
2117 /**
2118  *  verify_xena_quiescence - Checks whether the H/W is ready
2119  *  Description: Returns whether the H/W is ready to go or not. The
2120  *  comparison differs depending on whether the adapter enable bit
2121  *  has been written, and the calling function passes an input flag
2122  *  to indicate this.
2123  *  Return: 1 If Xena is quiescent
2124  *          0 If Xena is not quiescent
2125  */
2126 
2127 static int verify_xena_quiescence(struct s2io_nic *sp)
2128 {
2129 	int  mode;
2130 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2131 	u64 val64 = readq(&bar0->adapter_status);
2132 	mode = s2io_verify_pci_mode(sp);
2133 
2134 	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2135 		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2136 		return 0;
2137 	}
2138 	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2139 		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2140 		return 0;
2141 	}
2142 	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2143 		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2144 		return 0;
2145 	}
2146 	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2147 		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2148 		return 0;
2149 	}
2150 	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2151 		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2152 		return 0;
2153 	}
2154 	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2155 		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2156 		return 0;
2157 	}
2158 	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2159 		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2160 		return 0;
2161 	}
2162 	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2163 		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2164 		return 0;
2165 	}
2166 
2167 	/*
2168 	 * In PCI 33 mode, the P_PLL is not used, and therefore
2169 	 * the P_PLL_LOCK bit in the adapter_status register will
2170 	 * not be asserted.
2171 	 */
2172 	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2173 	    sp->device_type == XFRAME_II_DEVICE &&
2174 	    mode != PCI_MODE_PCI_33) {
2175 		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2176 		return 0;
2177 	}
2178 	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2179 	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2180 		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2181 		return 0;
2182 	}
2183 	return 1;
2184 }
2185 
2186 /**
2187  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2188  * @sp: Pointer to device specific structure
2189  * Description :
2190  * New procedure to clear MAC address reading problems on Alpha platforms
2191  *
2192  */
2193 
2194 static void fix_mac_address(struct s2io_nic *sp)
2195 {
2196 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2197 	int i = 0;
2198 
2199 	while (fix_mac[i] != END_SIGN) {
2200 		writeq(fix_mac[i++], &bar0->gpio_control);
2201 		udelay(10);
2202 		(void) readq(&bar0->gpio_control);
2203 	}
2204 }
2205 
2206 /**
2207  *  start_nic - Turns the device on
2208  *  @nic : device private variable.
2209  *  Description:
2210  *  This function actually turns the device on. Before this function is
2211  *  called, all registers are configured from their reset states
2212  *  and shared memory is allocated but the NIC is still quiescent. On
2213  *  calling this function, the device interrupts are cleared and the NIC is
2214  *  literally switched on by writing into the adapter control register.
2215  *  Return Value:
2216  *  SUCCESS on success and -1 on failure.
2217  */
2218 
2219 static int start_nic(struct s2io_nic *nic)
2220 {
2221 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2222 	struct net_device *dev = nic->dev;
2223 	register u64 val64 = 0;
2224 	u16 subid, i;
2225 	struct config_param *config = &nic->config;
2226 	struct mac_info *mac_control = &nic->mac_control;
2227 
2228 	/*  PRC Initialization and configuration */
2229 	for (i = 0; i < config->rx_ring_num; i++) {
2230 		struct ring_info *ring = &mac_control->rings[i];
2231 
2232 		writeq((u64)ring->rx_blocks[0].block_dma_addr,
2233 		       &bar0->prc_rxd0_n[i]);
2234 
2235 		val64 = readq(&bar0->prc_ctrl_n[i]);
2236 		if (nic->rxd_mode == RXD_MODE_1)
2237 			val64 |= PRC_CTRL_RC_ENABLED;
2238 		else
2239 			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2240 		if (nic->device_type == XFRAME_II_DEVICE)
2241 			val64 |= PRC_CTRL_GROUP_READS;
2242 		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2243 		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2244 		writeq(val64, &bar0->prc_ctrl_n[i]);
2245 	}
2246 
2247 	if (nic->rxd_mode == RXD_MODE_3B) {
2248 		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2249 		val64 = readq(&bar0->rx_pa_cfg);
2250 		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2251 		writeq(val64, &bar0->rx_pa_cfg);
2252 	}
2253 
2254 	if (vlan_tag_strip == 0) {
2255 		val64 = readq(&bar0->rx_pa_cfg);
2256 		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2257 		writeq(val64, &bar0->rx_pa_cfg);
2258 		nic->vlan_strip_flag = 0;
2259 	}
2260 
2261 	/*
2262 	 * Enabling MC-RLDRAM. After enabling the device, we wait
2263 	 * for around 100 ms, which is approximately the time required
2264 	 * for the device to be ready for operation.
2265 	 */
2266 	val64 = readq(&bar0->mc_rldram_mrs);
2267 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2268 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2269 	val64 = readq(&bar0->mc_rldram_mrs);
2270 
2271 	msleep(100);	/* Delay by around 100 ms. */
2272 
2273 	/* Enabling ECC Protection. */
2274 	val64 = readq(&bar0->adapter_control);
2275 	val64 &= ~ADAPTER_ECC_EN;
2276 	writeq(val64, &bar0->adapter_control);
2277 
2278 	/*
2279 	 * Verify if the device is ready to be enabled, if so enable
2280 	 * it.
2281 	 */
2282 	val64 = readq(&bar0->adapter_status);
2283 	if (!verify_xena_quiescence(nic)) {
2284 		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2285 			  "Adapter status reads: 0x%llx\n",
2286 			  dev->name, (unsigned long long)val64);
2287 		return FAILURE;
2288 	}
2289 
2290 	/*
2291 	 * With some switches, link might be already up at this point.
2292 	 * Because of this weird behavior, when we enable laser,
2293 	 * we may not get link. We need to handle this. We cannot
2294 	 * figure out which switch is misbehaving. So we are forced to
2295 	 * make a global change.
2296 	 */
2297 
2298 	/* Enabling Laser. */
2299 	val64 = readq(&bar0->adapter_control);
2300 	val64 |= ADAPTER_EOI_TX_ON;
2301 	writeq(val64, &bar0->adapter_control);
2302 
2303 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2304 		/*
2305 		 * Don't see link state interrupts initially on some switches,
2306 		 * so directly scheduling the link state task here.
2307 		 */
2308 		schedule_work(&nic->set_link_task);
2309 	}
2310 	/* SXE-002: Initialize link and activity LED */
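	/*
	 * SXE-002 is presumably an internal errata/issue tag. The LED fix
	 * pokes gpio_control and a raw register at BAR0 offset 0x2700 that
	 * has no named field in struct XENA_dev_config.
	 */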
2311 	subid = nic->pdev->subsystem_device;
2312 	if (((subid & 0xFF) >= 0x07) &&
2313 	    (nic->device_type == XFRAME_I_DEVICE)) {
2314 		val64 = readq(&bar0->gpio_control);
2315 		val64 |= 0x0000800000000000ULL;
2316 		writeq(val64, &bar0->gpio_control);
2317 		val64 = 0x0411040400000000ULL;
2318 		writeq(val64, (void __iomem *)bar0 + 0x2700);
2319 	}
2320 
2321 	return SUCCESS;
2322 }
2323 /**
2324  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2325  */
2326 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2327 					struct TxD *txdlp, int get_off)
2328 {
2329 	struct s2io_nic *nic = fifo_data->nic;
2330 	struct sk_buff *skb;
2331 	struct TxD *txds;
2332 	u16 j, frg_cnt;
2333 
2334 	txds = txdlp;
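	/* When UFO is in use, the first TxD carries an in-band control
	 * buffer rather than packet data; unmap it and step past it to the
	 * descriptor that holds the skb.
	 */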
2335 	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2336 		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2337 				 sizeof(u64), PCI_DMA_TODEVICE);
2338 		txds++;
2339 	}
2340 
2341 	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2342 	if (!skb) {
2343 		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2344 		return NULL;
2345 	}
2346 	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2347 			 skb_headlen(skb), PCI_DMA_TODEVICE);
2348 	frg_cnt = skb_shinfo(skb)->nr_frags;
2349 	if (frg_cnt) {
2350 		txds++;
2351 		for (j = 0; j < frg_cnt; j++, txds++) {
2352 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2353 			if (!txds->Buffer_Pointer)
2354 				break;
2355 			pci_unmap_page(nic->pdev,
2356 				       (dma_addr_t)txds->Buffer_Pointer,
2357 				       skb_frag_size(frag), PCI_DMA_TODEVICE);
2358 		}
2359 	}
2360 	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2361 	return skb;
2362 }
2363 
2364 /**
2365  *  free_tx_buffers - Free all queued Tx buffers
2366  *  @nic : device private variable.
2367  *  Description:
2368  *  Free all queued Tx buffers.
2369  *  Return Value: void
2370  */
2371 
2372 static void free_tx_buffers(struct s2io_nic *nic)
2373 {
2374 	struct net_device *dev = nic->dev;
2375 	struct sk_buff *skb;
2376 	struct TxD *txdp;
2377 	int i, j;
2378 	int cnt = 0;
2379 	struct config_param *config = &nic->config;
2380 	struct mac_info *mac_control = &nic->mac_control;
2381 	struct stat_block *stats = mac_control->stats_info;
2382 	struct swStat *swstats = &stats->sw_stat;
2383 
2384 	for (i = 0; i < config->tx_fifo_num; i++) {
2385 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2386 		struct fifo_info *fifo = &mac_control->fifos[i];
2387 		unsigned long flags;
2388 
2389 		spin_lock_irqsave(&fifo->tx_lock, flags);
2390 		for (j = 0; j < tx_cfg->fifo_len; j++) {
2391 			txdp = fifo->list_info[j].list_virt_addr;
2392 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2393 			if (skb) {
2394 				swstats->mem_freed += skb->truesize;
2395 				dev_kfree_skb(skb);
2396 				cnt++;
2397 			}
2398 		}
2399 		DBG_PRINT(INTR_DBG,
2400 			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2401 			  dev->name, cnt, i);
2402 		fifo->tx_curr_get_info.offset = 0;
2403 		fifo->tx_curr_put_info.offset = 0;
2404 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2405 	}
2406 }
2407 
2408 /**
2409  *   stop_nic -  To stop the nic
2410  *   @nic : device private variable.
2411  *   Description:
2412  *   This function does exactly the opposite of what the start_nic()
2413  *   function does. This function is called to stop the device.
2414  *   Return Value:
2415  *   void.
2416  */
2417 
2418 static void stop_nic(struct s2io_nic *nic)
2419 {
2420 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2421 	register u64 val64 = 0;
2422 	u16 interruptible;
2423 
2424 	/*  Disable all interrupts */
2425 	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2426 	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2427 	interruptible |= TX_PIC_INTR;
2428 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2429 
2430 	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2431 	val64 = readq(&bar0->adapter_control);
2432 	val64 &= ~(ADAPTER_CNTL_EN);
2433 	writeq(val64, &bar0->adapter_control);
2434 }
2435 
2436 /**
2437  *  fill_rx_buffers - Allocates the Rx side skbs
2438  *  @ring: per ring structure
2439  *  @from_card_up: If this is true, we will map the buffer to get
2440  *     the dma address for buf0 and buf1 to give it to the card.
2441  *     Else we will sync the already mapped buffer to give it to the card.
2442  *  Description:
2443  *  The function allocates Rx side skbs and puts the physical
2444  *  address of these buffers into the RxD buffer pointers, so that the NIC
2445  *  can DMA the received frame into these locations.
2446  *  The NIC supports 3 receive modes, viz
2447  *  1. single buffer,
2448  *  2. three buffer and
2449  *  3. Five buffer modes.
2450  *  Each mode defines how many fragments the received frame will be split
2451  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2452  *  is split into 3 fragments. As of now, the single buffer and two
2453  *  buffer (RXD_MODE_3B) modes are supported.
2454  *  supported.
2455  *   Return Value:
2456  *  SUCCESS on success or an appropriate -ve value on failure.
2457  */
2458 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2459 			   int from_card_up)
2460 {
2461 	struct sk_buff *skb;
2462 	struct RxD_t *rxdp;
2463 	int off, size, block_no, block_no1;
2464 	u32 alloc_tab = 0;
2465 	u32 alloc_cnt;
2466 	u64 tmp;
2467 	struct buffAdd *ba;
2468 	struct RxD_t *first_rxdp = NULL;
2469 	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2470 	int rxd_index = 0;
2471 	struct RxD1 *rxdp1;
2472 	struct RxD3 *rxdp3;
2473 	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2474 
2475 	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2476 
2477 	block_no1 = ring->rx_curr_get_info.block_index;
2478 	while (alloc_tab < alloc_cnt) {
2479 		block_no = ring->rx_curr_put_info.block_index;
2480 
2481 		off = ring->rx_curr_put_info.offset;
2482 
2483 		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2484 
2485 		rxd_index = off + 1;
2486 		if (block_no)
2487 			rxd_index += (block_no * ring->rxd_count);
2488 
2489 		if ((block_no == block_no1) &&
2490 		    (off == ring->rx_curr_get_info.offset) &&
2491 		    (rxdp->Host_Control)) {
2492 			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2493 				  ring->dev->name);
2494 			goto end;
2495 		}
2496 		if (off && (off == ring->rxd_count)) {
2497 			ring->rx_curr_put_info.block_index++;
2498 			if (ring->rx_curr_put_info.block_index ==
2499 			    ring->block_count)
2500 				ring->rx_curr_put_info.block_index = 0;
2501 			block_no = ring->rx_curr_put_info.block_index;
2502 			off = 0;
2503 			ring->rx_curr_put_info.offset = off;
2504 			rxdp = ring->rx_blocks[block_no].block_virt_addr;
2505 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2506 				  ring->dev->name, rxdp);
2507 
2508 		}
2509 
2510 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2511 		    ((ring->rxd_mode == RXD_MODE_3B) &&
2512 		     (rxdp->Control_2 & s2BIT(0)))) {
2513 			ring->rx_curr_put_info.offset = off;
2514 			goto end;
2515 		}
2516 		/* calculate size of skb based on ring mode */
2517 		size = ring->mtu +
2518 			HEADER_ETHERNET_II_802_3_SIZE +
2519 			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2520 		if (ring->rxd_mode == RXD_MODE_1)
2521 			size += NET_IP_ALIGN;
2522 		else
2523 			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2524 
2525 		/* allocate skb */
2526 		skb = netdev_alloc_skb(nic->dev, size);
2527 		if (!skb) {
2528 			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2529 				  ring->dev->name);
2530 			if (first_rxdp) {
2531 				wmb();
2532 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2533 			}
2534 			swstats->mem_alloc_fail_cnt++;
2535 
2536 			return -ENOMEM;
2537 		}
2538 		swstats->mem_allocated += skb->truesize;
2539 
2540 		if (ring->rxd_mode == RXD_MODE_1) {
2541 			/* 1 buffer mode - normal operation mode */
2542 			rxdp1 = (struct RxD1 *)rxdp;
2543 			memset(rxdp, 0, sizeof(struct RxD1));
2544 			skb_reserve(skb, NET_IP_ALIGN);
2545 			rxdp1->Buffer0_ptr =
2546 				pci_map_single(ring->pdev, skb->data,
2547 					       size - NET_IP_ALIGN,
2548 					       PCI_DMA_FROMDEVICE);
2549 			if (pci_dma_mapping_error(nic->pdev,
2550 						  rxdp1->Buffer0_ptr))
2551 				goto pci_map_failed;
2552 
2553 			rxdp->Control_2 =
2554 				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2555 			rxdp->Host_Control = (unsigned long)skb;
2556 		} else if (ring->rxd_mode == RXD_MODE_3B) {
2557 			/*
2558 			 * 2 buffer mode -
2559 			 * 2 buffer mode provides 128
2560 			 * byte aligned receive buffers.
2561 			 */
2562 
2563 			rxdp3 = (struct RxD3 *)rxdp;
2564 			/* save buffer pointers to avoid frequent dma mapping */
2565 			Buffer0_ptr = rxdp3->Buffer0_ptr;
2566 			Buffer1_ptr = rxdp3->Buffer1_ptr;
2567 			memset(rxdp, 0, sizeof(struct RxD3));
2568 			/* restore the buffer pointers for dma sync*/
2569 			rxdp3->Buffer0_ptr = Buffer0_ptr;
2570 			rxdp3->Buffer1_ptr = Buffer1_ptr;
2571 
2572 			ba = &ring->ba[block_no][off];
2573 			skb_reserve(skb, BUF0_LEN);
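			/*
			 * Round skb->data up to the next alignment boundary:
			 * ALIGN_SIZE acts as a (2^n - 1) mask, so adding it
			 * and clearing the low bits gives the 128-byte
			 * aligned payload buffer mentioned above.
			 */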
2574 			tmp = (u64)(unsigned long)skb->data;
2575 			tmp += ALIGN_SIZE;
2576 			tmp &= ~ALIGN_SIZE;
2577 			skb->data = (void *) (unsigned long)tmp;
2578 			skb_reset_tail_pointer(skb);
2579 
2580 			if (from_card_up) {
2581 				rxdp3->Buffer0_ptr =
2582 					pci_map_single(ring->pdev, ba->ba_0,
2583 						       BUF0_LEN,
2584 						       PCI_DMA_FROMDEVICE);
2585 				if (pci_dma_mapping_error(nic->pdev,
2586 							  rxdp3->Buffer0_ptr))
2587 					goto pci_map_failed;
2588 			} else
2589 				pci_dma_sync_single_for_device(ring->pdev,
2590 							       (dma_addr_t)rxdp3->Buffer0_ptr,
2591 							       BUF0_LEN,
2592 							       PCI_DMA_FROMDEVICE);
2593 
2594 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2595 			if (ring->rxd_mode == RXD_MODE_3B) {
2596 				/* Two buffer mode */
2597 
2598 				/*
2599 				 * Buffer2 will have L3/L4 header plus
2600 				 * L4 payload
2601 				 */
2602 				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2603 								    skb->data,
2604 								    ring->mtu + 4,
2605 								    PCI_DMA_FROMDEVICE);
2606 
2607 				if (pci_dma_mapping_error(nic->pdev,
2608 							  rxdp3->Buffer2_ptr))
2609 					goto pci_map_failed;
2610 
2611 				if (from_card_up) {
2612 					rxdp3->Buffer1_ptr =
2613 						pci_map_single(ring->pdev,
2614 							       ba->ba_1,
2615 							       BUF1_LEN,
2616 							       PCI_DMA_FROMDEVICE);
2617 
2618 					if (pci_dma_mapping_error(nic->pdev,
2619 								  rxdp3->Buffer1_ptr)) {
2620 						pci_unmap_single(ring->pdev,
2621 								 (dma_addr_t)(unsigned long)
2622 								 skb->data,
2623 								 ring->mtu + 4,
2624 								 PCI_DMA_FROMDEVICE);
2625 						goto pci_map_failed;
2626 					}
2627 				}
2628 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2629 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2630 					(ring->mtu + 4);
2631 			}
2632 			rxdp->Control_2 |= s2BIT(0);
2633 			rxdp->Host_Control = (unsigned long) (skb);
2634 		}
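		/*
		 * Descriptors are handed to the NIC in batches of
		 * (1 << rxsync_frequency): within a batch ownership is set
		 * right away, while the batch's first descriptor is held
		 * back and released after the wmb() below.
		 */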
2635 		if (alloc_tab & ((1 << rxsync_frequency) - 1))
2636 			rxdp->Control_1 |= RXD_OWN_XENA;
2637 		off++;
2638 		if (off == (ring->rxd_count + 1))
2639 			off = 0;
2640 		ring->rx_curr_put_info.offset = off;
2641 
2642 		rxdp->Control_2 |= SET_RXD_MARKER;
2643 		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2644 			if (first_rxdp) {
2645 				wmb();
2646 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2647 			}
2648 			first_rxdp = rxdp;
2649 		}
2650 		ring->rx_bufs_left += 1;
2651 		alloc_tab++;
2652 	}
2653 
2654 end:
2655 	/* Transfer ownership of first descriptor to adapter just before
2656 	 * exiting. Before that, use memory barrier so that ownership
2657 	 * and other fields are seen by adapter correctly.
2658 	 */
2659 	if (first_rxdp) {
2660 		wmb();
2661 		first_rxdp->Control_1 |= RXD_OWN_XENA;
2662 	}
2663 
2664 	return SUCCESS;
2665 
2666 pci_map_failed:
2667 	swstats->pci_map_fail_cnt++;
2668 	swstats->mem_freed += skb->truesize;
2669 	dev_kfree_skb_irq(skb);
2670 	return -ENOMEM;
2671 }
2672 
2673 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2674 {
2675 	struct net_device *dev = sp->dev;
2676 	int j;
2677 	struct sk_buff *skb;
2678 	struct RxD_t *rxdp;
2679 	struct RxD1 *rxdp1;
2680 	struct RxD3 *rxdp3;
2681 	struct mac_info *mac_control = &sp->mac_control;
2682 	struct stat_block *stats = mac_control->stats_info;
2683 	struct swStat *swstats = &stats->sw_stat;
2684 
2685 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2686 		rxdp = mac_control->rings[ring_no].
2687 			rx_blocks[blk].rxds[j].virt_addr;
2688 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2689 		if (!skb)
2690 			continue;
2691 		if (sp->rxd_mode == RXD_MODE_1) {
2692 			rxdp1 = (struct RxD1 *)rxdp;
2693 			pci_unmap_single(sp->pdev,
2694 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2695 					 dev->mtu +
2696 					 HEADER_ETHERNET_II_802_3_SIZE +
2697 					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2698 					 PCI_DMA_FROMDEVICE);
2699 			memset(rxdp, 0, sizeof(struct RxD1));
2700 		} else if (sp->rxd_mode == RXD_MODE_3B) {
2701 			rxdp3 = (struct RxD3 *)rxdp;
2702 			pci_unmap_single(sp->pdev,
2703 					 (dma_addr_t)rxdp3->Buffer0_ptr,
2704 					 BUF0_LEN,
2705 					 PCI_DMA_FROMDEVICE);
2706 			pci_unmap_single(sp->pdev,
2707 					 (dma_addr_t)rxdp3->Buffer1_ptr,
2708 					 BUF1_LEN,
2709 					 PCI_DMA_FROMDEVICE);
2710 			pci_unmap_single(sp->pdev,
2711 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2712 					 dev->mtu + 4,
2713 					 PCI_DMA_FROMDEVICE);
2714 			memset(rxdp, 0, sizeof(struct RxD3));
2715 		}
2716 		swstats->mem_freed += skb->truesize;
2717 		dev_kfree_skb(skb);
2718 		mac_control->rings[ring_no].rx_bufs_left -= 1;
2719 	}
2720 }
2721 
2722 /**
2723  *  free_rx_buffers - Frees all Rx buffers
2724  *  @sp: device private variable.
2725  *  Description:
2726  *  This function will free all Rx buffers allocated by host.
2727  *  Return Value:
2728  *  NONE.
2729  */
2730 
2731 static void free_rx_buffers(struct s2io_nic *sp)
2732 {
2733 	struct net_device *dev = sp->dev;
2734 	int i, blk = 0, buf_cnt = 0;
2735 	struct config_param *config = &sp->config;
2736 	struct mac_info *mac_control = &sp->mac_control;
2737 
2738 	for (i = 0; i < config->rx_ring_num; i++) {
2739 		struct ring_info *ring = &mac_control->rings[i];
2740 
2741 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2742 			free_rxd_blk(sp, i, blk);
2743 
2744 		ring->rx_curr_put_info.block_index = 0;
2745 		ring->rx_curr_get_info.block_index = 0;
2746 		ring->rx_curr_put_info.offset = 0;
2747 		ring->rx_curr_get_info.offset = 0;
2748 		ring->rx_bufs_left = 0;
2749 		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2750 			  dev->name, buf_cnt, i);
2751 	}
2752 }
2753 
2754 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2755 {
2756 	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2757 		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2758 			  ring->dev->name);
2759 	}
2760 	return 0;
2761 }
2762 
2763 /**
2764  * s2io_poll_msix - Rx interrupt handler for NAPI support (MSI-X)
2765  * @napi : pointer to the napi structure.
2766  * @budget : The number of packets that were budgeted to be processed
2767  * during one pass through the 'poll' function.
2768  * Description:
2769  * Comes into picture only if NAPI support has been incorporated. It does
2770  * the same thing that rx_intr_handler does, but not in an interrupt
2771  * context, and it will process only a given number of packets.
2772  * Return value:
2773  * The number of packets processed during one pass of the poll.
2774  */
2775 
2776 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2777 {
2778 	struct ring_info *ring = container_of(napi, struct ring_info, napi);
2779 	struct net_device *dev = ring->dev;
2780 	int pkts_processed = 0;
2781 	u8 __iomem *addr = NULL;
2782 	u8 val8 = 0;
2783 	struct s2io_nic *nic = netdev_priv(dev);
2784 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2785 	int budget_org = budget;
2786 
2787 	if (unlikely(!is_s2io_card_up(nic)))
2788 		return 0;
2789 
2790 	pkts_processed = rx_intr_handler(ring, budget);
2791 	s2io_chk_rx_buffers(nic, ring);
2792 
2793 	if (pkts_processed < budget_org) {
2794 		napi_complete(napi);
2795 		/* Re-enable the MSI-X Rx vector */
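		/*
		 * Each ring's mask sits in its own byte of the 64-bit
		 * xmsi_mask_reg, addressed here as byte (7 - ring_no),
		 * presumably to account for the register's byte ordering;
		 * the readb flushes the posted write.
		 */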
2796 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2797 		addr += 7 - ring->ring_no;
2798 		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2799 		writeb(val8, addr);
2800 		val8 = readb(addr);
2801 	}
2802 	return pkts_processed;
2803 }
2804 
2805 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2806 {
2807 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2808 	int pkts_processed = 0;
2809 	int ring_pkts_processed, i;
2810 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2811 	int budget_org = budget;
2812 	struct config_param *config = &nic->config;
2813 	struct mac_info *mac_control = &nic->mac_control;
2814 
2815 	if (unlikely(!is_s2io_card_up(nic)))
2816 		return 0;
2817 
2818 	for (i = 0; i < config->rx_ring_num; i++) {
2819 		struct ring_info *ring = &mac_control->rings[i];
2820 		ring_pkts_processed = rx_intr_handler(ring, budget);
2821 		s2io_chk_rx_buffers(nic, ring);
2822 		pkts_processed += ring_pkts_processed;
2823 		budget -= ring_pkts_processed;
2824 		if (budget <= 0)
2825 			break;
2826 	}
2827 	if (pkts_processed < budget_org) {
2828 		napi_complete(napi);
2829 		/* Re enable the Rx interrupts for the ring */
2830 		writeq(0, &bar0->rx_traffic_mask);
2831 		readl(&bar0->rx_traffic_mask);
2832 	}
2833 	return pkts_processed;
2834 }
2835 
2836 #ifdef CONFIG_NET_POLL_CONTROLLER
2837 /**
2838  * s2io_netpoll - netpoll event handler entry point
2839  * @dev : pointer to the device structure.
2840  * Description:
2841  * 	This function will be called by upper layer to check for events on the
2842  *	This function will be called by the upper layer to check for events on
2843  * the interface in situations where interrupts are disabled. It is used
2844  * for specific in-kernel networking tasks, such as remote consoles and
2845  * kernel debugging over the network (for example, netdump in Red Hat).
2846 static void s2io_netpoll(struct net_device *dev)
2847 {
2848 	struct s2io_nic *nic = netdev_priv(dev);
2849 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2850 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2851 	int i;
2852 	struct config_param *config = &nic->config;
2853 	struct mac_info *mac_control = &nic->mac_control;
2854 
2855 	if (pci_channel_offline(nic->pdev))
2856 		return;
2857 
2858 	disable_irq(dev->irq);
2859 
2860 	writeq(val64, &bar0->rx_traffic_int);
2861 	writeq(val64, &bar0->tx_traffic_int);
2862 
2863 	/* We need to free the transmitted skbs, or else netpoll will
2864 	 * run out of skbs, and netpoll applications such as
2865 	 * netdump will eventually fail.
2866 	 */
2867 	for (i = 0; i < config->tx_fifo_num; i++)
2868 		tx_intr_handler(&mac_control->fifos[i]);
2869 
2870 	/* check for received packet and indicate up to network */
2871 	for (i = 0; i < config->rx_ring_num; i++) {
2872 		struct ring_info *ring = &mac_control->rings[i];
2873 
2874 		rx_intr_handler(ring, 0);
2875 	}
2876 
2877 	for (i = 0; i < config->rx_ring_num; i++) {
2878 		struct ring_info *ring = &mac_control->rings[i];
2879 
2880 		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2881 			DBG_PRINT(INFO_DBG,
2882 				  "%s: Out of memory in Rx Netpoll!!\n",
2883 				  dev->name);
2884 			break;
2885 		}
2886 	}
2887 	enable_irq(dev->irq);
2888 }
2889 #endif
2890 
2891 /**
2892  *  rx_intr_handler - Rx interrupt handler
2893  *  @ring_data: per ring structure.
2894  *  @budget: budget for napi processing.
2895  *  Description:
2896  *  If the interrupt is because of a received frame or if the
2897  *  receive ring contains fresh, as yet unprocessed frames, this function
2898  *  is called. It picks up the RxD at which the last Rx processing had
2899  *  stopped, sends the skb to the OSM's Rx handler and then increments
2900  *  the offset.
2901  *  Return Value:
2902  *  No. of napi packets processed.
2903  */
2904 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2905 {
2906 	int get_block, put_block;
2907 	struct rx_curr_get_info get_info, put_info;
2908 	struct RxD_t *rxdp;
2909 	struct sk_buff *skb;
2910 	int pkt_cnt = 0, napi_pkts = 0;
2911 	int i;
2912 	struct RxD1 *rxdp1;
2913 	struct RxD3 *rxdp3;
2914 
2915 	get_info = ring_data->rx_curr_get_info;
2916 	get_block = get_info.block_index;
2917 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2918 	put_block = put_info.block_index;
2919 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
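	/* Walk descriptors from the last get position until we reach one
	 * the NIC still owns, catch up with the put pointer, or exhaust
	 * the napi budget.
	 */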
2920 
2921 	while (RXD_IS_UP2DT(rxdp)) {
2922 		/*
2923 		 * If we are next to the put index, the
2924 		 * ring is full.
2925 		 */
2926 		if ((get_block == put_block) &&
2927 		    (get_info.offset + 1) == put_info.offset) {
2928 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2929 				  ring_data->dev->name);
2930 			break;
2931 		}
2932 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2933 		if (skb == NULL) {
2934 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2935 				  ring_data->dev->name);
2936 			return 0;
2937 		}
2938 		if (ring_data->rxd_mode == RXD_MODE_1) {
2939 			rxdp1 = (struct RxD1 *)rxdp;
2940 			pci_unmap_single(ring_data->pdev, (dma_addr_t)
2941 					 rxdp1->Buffer0_ptr,
2942 					 ring_data->mtu +
2943 					 HEADER_ETHERNET_II_802_3_SIZE +
2944 					 HEADER_802_2_SIZE +
2945 					 HEADER_SNAP_SIZE,
2946 					 PCI_DMA_FROMDEVICE);
2947 		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
2948 			rxdp3 = (struct RxD3 *)rxdp;
2949 			pci_dma_sync_single_for_cpu(ring_data->pdev,
2950 						    (dma_addr_t)rxdp3->Buffer0_ptr,
2951 						    BUF0_LEN,
2952 						    PCI_DMA_FROMDEVICE);
2953 			pci_unmap_single(ring_data->pdev,
2954 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2955 					 ring_data->mtu + 4,
2956 					 PCI_DMA_FROMDEVICE);
2957 		}
2958 		prefetch(skb->data);
2959 		rx_osm_handler(ring_data, rxdp);
2960 		get_info.offset++;
2961 		ring_data->rx_curr_get_info.offset = get_info.offset;
2962 		rxdp = ring_data->rx_blocks[get_block].
2963 			rxds[get_info.offset].virt_addr;
2964 		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2965 			get_info.offset = 0;
2966 			ring_data->rx_curr_get_info.offset = get_info.offset;
2967 			get_block++;
2968 			if (get_block == ring_data->block_count)
2969 				get_block = 0;
2970 			ring_data->rx_curr_get_info.block_index = get_block;
2971 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2972 		}
2973 
2974 		if (ring_data->nic->config.napi) {
2975 			budget--;
2976 			napi_pkts++;
2977 			if (!budget)
2978 				break;
2979 		}
2980 		pkt_cnt++;
2981 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2982 			break;
2983 	}
2984 	if (ring_data->lro) {
2985 		/* Clear all LRO sessions before exiting */
2986 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2987 			struct lro *lro = &ring_data->lro0_n[i];
2988 			if (lro->in_use) {
2989 				update_L3L4_header(ring_data->nic, lro);
2990 				queue_rx_frame(lro->parent, lro->vlan_tag);
2991 				clear_lro_session(lro);
2992 			}
2993 		}
2994 	}
2995 	return napi_pkts;
2996 }
2997 
2998 /**
2999  *  tx_intr_handler - Transmit interrupt handler
3000  *  @fifo_data : FIFO data structure.
3001  *  Description:
3002  *  If an interrupt was raised to indicate DMA complete of the
3003  *  Tx packet, this function is called. It identifies the last TxD
3004  *  whose buffer was freed and frees all skbs whose data have already
3005  *  been DMA'ed into the NIC's internal memory.
3006  *  Return Value:
3007  *  NONE
3008  */
3009 
3010 static void tx_intr_handler(struct fifo_info *fifo_data)
3011 {
3012 	struct s2io_nic *nic = fifo_data->nic;
3013 	struct tx_curr_get_info get_info, put_info;
3014 	struct sk_buff *skb = NULL;
3015 	struct TxD *txdlp;
3016 	int pkt_cnt = 0;
3017 	unsigned long flags = 0;
3018 	u8 err_mask;
3019 	struct stat_block *stats = nic->mac_control.stats_info;
3020 	struct swStat *swstats = &stats->sw_stat;
3021 
3022 	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3023 		return;
3024 
3025 	get_info = fifo_data->tx_curr_get_info;
3026 	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3027 	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3028 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3029 	       (get_info.offset != put_info.offset) &&
3030 	       (txdlp->Host_Control)) {
3031 		/* Check for TxD errors */
3032 		if (txdlp->Control_1 & TXD_T_CODE) {
3033 			unsigned long long err;
3034 			err = txdlp->Control_1 & TXD_T_CODE;
3035 			if (err & 0x1) {
3036 				swstats->parity_err_cnt++;
3037 			}
3038 
3039 			/* update t_code statistics */
3040 			err_mask = err >> 48;
3041 			switch (err_mask) {
3042 			case 2:
3043 				swstats->tx_buf_abort_cnt++;
3044 				break;
3045 
3046 			case 3:
3047 				swstats->tx_desc_abort_cnt++;
3048 				break;
3049 
3050 			case 7:
3051 				swstats->tx_parity_err_cnt++;
3052 				break;
3053 
3054 			case 10:
3055 				swstats->tx_link_loss_cnt++;
3056 				break;
3057 
3058 			case 15:
3059 				swstats->tx_list_proc_err_cnt++;
3060 				break;
3061 			}
3062 		}
3063 
3064 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3065 		if (skb == NULL) {
3066 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3067 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3068 				  __func__);
3069 			return;
3070 		}
3071 		pkt_cnt++;
3072 
3073 		/* Updating the statistics block */
3074 		swstats->mem_freed += skb->truesize;
3075 		dev_kfree_skb_irq(skb);
3076 
3077 		get_info.offset++;
3078 		if (get_info.offset == get_info.fifo_len + 1)
3079 			get_info.offset = 0;
3080 		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3081 		fifo_data->tx_curr_get_info.offset = get_info.offset;
3082 	}
3083 
3084 	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3085 
3086 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3087 }
3088 
3089 /**
3090  *  s2io_mdio_write - Function to write in to MDIO registers
3091  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3092  *  @addr     : address value
3093  *  @value    : data value
3094  *  @dev      : pointer to net_device structure
3095  *  Description:
3096  *  This function is used to write values to the MDIO registers.
3097  *  Return Value: NONE
3098  */
3099 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3100 			    struct net_device *dev)
3101 {
3102 	u64 val64;
3103 	struct s2io_nic *sp = netdev_priv(dev);
3104 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3105 
3106 	/* address transaction */
3107 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3108 		MDIO_MMD_DEV_ADDR(mmd_type) |
3109 		MDIO_MMS_PRT_ADDR(0x0);
3110 	writeq(val64, &bar0->mdio_control);
3111 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3112 	writeq(val64, &bar0->mdio_control);
3113 	udelay(100);
3114 
3115 	/* Data transaction */
3116 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3117 		MDIO_MMD_DEV_ADDR(mmd_type) |
3118 		MDIO_MMS_PRT_ADDR(0x0) |
3119 		MDIO_MDIO_DATA(value) |
3120 		MDIO_OP(MDIO_OP_WRITE_TRANS);
3121 	writeq(val64, &bar0->mdio_control);
3122 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3123 	writeq(val64, &bar0->mdio_control);
3124 	udelay(100);
3125 
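	/* Issue a read transaction as well, presumably to let the write
	 * settle at the PHY; the value read back is not checked here.
	 */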
3126 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3127 		MDIO_MMD_DEV_ADDR(mmd_type) |
3128 		MDIO_MMS_PRT_ADDR(0x0) |
3129 		MDIO_OP(MDIO_OP_READ_TRANS);
3130 	writeq(val64, &bar0->mdio_control);
3131 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3132 	writeq(val64, &bar0->mdio_control);
3133 	udelay(100);
3134 }
3135 
3136 /**
3137  *  s2io_mdio_read - Function to read from the MDIO registers
3138  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3139  *  @addr     : address value
3140  *  @dev      : pointer to net_device structure
3141  *  Description:
3142  *  This function is used to read values from the MDIO registers.
3143  *  Return Value: the 16-bit data field read back from mdio_control.
3144  */
3145 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3146 {
3147 	u64 val64 = 0x0;
3148 	u64 rval64 = 0x0;
3149 	struct s2io_nic *sp = netdev_priv(dev);
3150 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3151 
3152 	/* address transaction */
3153 	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3154 			 | MDIO_MMD_DEV_ADDR(mmd_type)
3155 			 | MDIO_MMS_PRT_ADDR(0x0));
3156 	writeq(val64, &bar0->mdio_control);
3157 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3158 	writeq(val64, &bar0->mdio_control);
3159 	udelay(100);
3160 
3161 	/* Data transaction */
3162 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3163 		MDIO_MMD_DEV_ADDR(mmd_type) |
3164 		MDIO_MMS_PRT_ADDR(0x0) |
3165 		MDIO_OP(MDIO_OP_READ_TRANS);
3166 	writeq(val64, &bar0->mdio_control);
3167 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3168 	writeq(val64, &bar0->mdio_control);
3169 	udelay(100);
3170 
3171 	/* Read the value from regs */
3172 	rval64 = readq(&bar0->mdio_control);
3173 	rval64 = rval64 & 0xFFFF0000;
3174 	rval64 = rval64 >> 16;
3175 	return rval64;
3176 }
3177 
3178 /**
3179  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3180  *  @counter      : counter value to be updated
3181  *  @flag         : flag to indicate the status
3182  *  @type         : counter type
3183  *  Description:
3184  *  This function checks the status of the XPAK counters.
3185  *  Return Value: NONE
3186  */
3187 
3188 static void s2io_chk_xpak_counter(u64 *counter, u64 *regs_stat, u32 index,
3189 				  u16 flag, u16 type)
3190 {
3191 	u64 mask = 0x3;
3192 	u64 val64;
3193 	int i;
3194 	for (i = 0; i < index; i++)
3195 		mask = mask << 0x2;
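	/* Each alarm keeps a 2-bit consecutive-hit counter inside
	 * *regs_stat; 'index' selects the field. Three consecutive hits
	 * print the warning below and reset the field to zero.
	 */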
3196 
3197 	if (flag > 0) {
3198 		*counter = *counter + 1;
3199 		val64 = *regs_stat & mask;
3200 		val64 = val64 >> (index * 0x2);
3201 		val64 = val64 + 1;
3202 		if (val64 == 3) {
3203 			switch (type) {
3204 			case 1:
3205 				DBG_PRINT(ERR_DBG,
3206 					  "Take Xframe NIC out of service.\n");
3207 				DBG_PRINT(ERR_DBG,
3208 "Excessive temperatures may result in premature transceiver failure.\n");
3209 				break;
3210 			case 2:
3211 				DBG_PRINT(ERR_DBG,
3212 					  "Take Xframe NIC out of service.\n");
3213 				DBG_PRINT(ERR_DBG,
3214 "Excessive bias currents may indicate imminent laser diode failure.\n");
3215 				break;
3216 			case 3:
3217 				DBG_PRINT(ERR_DBG,
3218 					  "Take Xframe NIC out of service.\n");
3219 				DBG_PRINT(ERR_DBG,
3220 "Excessive laser output power may saturate far-end receiver.\n");
3221 				break;
3222 			default:
3223 				DBG_PRINT(ERR_DBG,
3224 					  "Incorrect XPAK Alarm type\n");
3225 			}
3226 			val64 = 0x0;
3227 		}
3228 		val64 = val64 << (index * 0x2);
3229 		*regs_stat = (*regs_stat & (~mask)) | (val64);
3230 
3231 	} else {
3232 		*regs_stat = *regs_stat & (~mask);
3233 	}
3234 }
3235 
3236 /**
3237  *  s2io_updt_xpak_counter - Function to update the xpak counters
3238  *  @dev         : pointer to net_device struct
3239  *  Description:
3240  *  This function updates the status of the XPAK counters.
3241  *  Return Value: NONE
3242  */
3243 static void s2io_updt_xpak_counter(struct net_device *dev)
3244 {
3245 	u16 flag  = 0x0;
3246 	u16 type  = 0x0;
3247 	u16 val16 = 0x0;
3248 	u64 val64 = 0x0;
3249 	u64 addr  = 0x0;
3250 
3251 	struct s2io_nic *sp = netdev_priv(dev);
3252 	struct stat_block *stats = sp->mac_control.stats_info;
3253 	struct xpakStat *xstats = &stats->xpak_stat;
3254 
3255 	/* Check the communication with the MDIO slave */
3256 	addr = MDIO_CTRL1;
3257 	val64 = 0x0;
3258 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3259 	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3260 		DBG_PRINT(ERR_DBG,
3261 			  "ERR: MDIO slave access failed - Returned %llx\n",
3262 			  (unsigned long long)val64);
3263 		return;
3264 	}
3265 
3266 	/* Check for the expected value of control reg 1 */
3267 	if (val64 != MDIO_CTRL1_SPEED10G) {
3268 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3269 			  "Returned: %llx- Expected: 0x%x\n",
3270 			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3271 		return;
3272 	}
3273 
3274 	/* Loading the DOM register to MDIO register */
3275 	addr = 0xA100;
3276 	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3277 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3278 
3279 	/* Reading the Alarm flags */
3280 	addr = 0xA070;
3281 	val64 = 0x0;
3282 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3283 
3284 	flag = CHECKBIT(val64, 0x7);
3285 	type = 1;
3286 	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3287 			      &xstats->xpak_regs_stat,
3288 			      0x0, flag, type);
3289 
3290 	if (CHECKBIT(val64, 0x6))
3291 		xstats->alarm_transceiver_temp_low++;
3292 
3293 	flag = CHECKBIT(val64, 0x3);
3294 	type = 2;
3295 	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3296 			      &xstats->xpak_regs_stat,
3297 			      0x2, flag, type);
3298 
3299 	if (CHECKBIT(val64, 0x2))
3300 		xstats->alarm_laser_bias_current_low++;
3301 
3302 	flag = CHECKBIT(val64, 0x1);
3303 	type = 3;
3304 	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3305 			      &xstats->xpak_regs_stat,
3306 			      0x4, flag, type);
3307 
3308 	if (CHECKBIT(val64, 0x0))
3309 		xstats->alarm_laser_output_power_low++;
3310 
3311 	/* Reading the Warning flags */
3312 	addr = 0xA074;
3313 	val64 = 0x0;
3314 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3315 
3316 	if (CHECKBIT(val64, 0x7))
3317 		xstats->warn_transceiver_temp_high++;
3318 
3319 	if (CHECKBIT(val64, 0x6))
3320 		xstats->warn_transceiver_temp_low++;
3321 
3322 	if (CHECKBIT(val64, 0x3))
3323 		xstats->warn_laser_bias_current_high++;
3324 
3325 	if (CHECKBIT(val64, 0x2))
3326 		xstats->warn_laser_bias_current_low++;
3327 
3328 	if (CHECKBIT(val64, 0x1))
3329 		xstats->warn_laser_output_power_high++;
3330 
3331 	if (CHECKBIT(val64, 0x0))
3332 		xstats->warn_laser_output_power_low++;
3333 }
3334 
3335 /**
3336  *  wait_for_cmd_complete - waits for a command to complete.
3337  *  @addr : address of the register to poll.
3338  *  @busy_bit : busy bit to test; @bit_state selects the polarity waited for.
3339  *  Description: Function that waits for a command written to the RMAC
3340  *  ADDR DATA registers to complete and returns either success or
3341  *  error depending on whether the command completed or not.
3342  *  Return value:
3343  *   SUCCESS on success and FAILURE on failure.
3344  */
3345 
3346 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3347 				 int bit_state)
3348 {
3349 	int ret = FAILURE, cnt = 0, delay = 1;
3350 	u64 val64;
3351 
3352 	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3353 		return FAILURE;
3354 
3355 	do {
3356 		val64 = readq(addr);
3357 		if (bit_state == S2IO_BIT_RESET) {
3358 			if (!(val64 & busy_bit)) {
3359 				ret = SUCCESS;
3360 				break;
3361 			}
3362 		} else {
3363 			if (val64 & busy_bit) {
3364 				ret = SUCCESS;
3365 				break;
3366 			}
3367 		}
3368 
3369 		if (in_interrupt())
3370 			mdelay(delay);
3371 		else
3372 			msleep(delay);
3373 
3374 		if (++cnt >= 10)
3375 			delay = 50;
3376 	} while (cnt < 20);
3377 	return ret;
3378 }
3379 /*
3380  * check_pci_device_id - Checks if the device id is supported
3381  * @id : device id
3382  * Description: Function to check if the pci device id is supported by the driver.
3383  * Return value: Device type (XFRAME_I/II_DEVICE) if supported, else PCI_ANY_ID
3384  */
3385 static u16 check_pci_device_id(u16 id)
3386 {
3387 	switch (id) {
3388 	case PCI_DEVICE_ID_HERC_WIN:
3389 	case PCI_DEVICE_ID_HERC_UNI:
3390 		return XFRAME_II_DEVICE;
3391 	case PCI_DEVICE_ID_S2IO_UNI:
3392 	case PCI_DEVICE_ID_S2IO_WIN:
3393 		return XFRAME_I_DEVICE;
3394 	default:
3395 		return PCI_ANY_ID;
3396 	}
3397 }
3398 
3399 /**
3400  *  s2io_reset - Resets the card.
3401  *  @sp : private member of the device structure.
3402  *  Description: Function to reset the card. It also restores the
3403  *  previously saved PCI configuration space registers, as the card
3404  *  reset also resets the configuration space.
3405  *  Return value:
3406  *  void.
3407  */
3408 
3409 static void s2io_reset(struct s2io_nic *sp)
3410 {
3411 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3412 	u64 val64;
3413 	u16 subid, pci_cmd;
3414 	int i;
3415 	u16 val16;
3416 	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3417 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3418 	struct stat_block *stats;
3419 	struct swStat *swstats;
3420 
3421 	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3422 		  __func__, pci_name(sp->pdev));
3423 
3424 	/* Back up the PCI-X CMD reg; we don't want to lose the MMRBC, OST settings */
3425 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3426 
3427 	val64 = SW_RESET_ALL;
3428 	writeq(val64, &bar0->sw_reset);
3429 	if (strstr(sp->product_name, "CX4"))
3430 		msleep(750);
3431 	msleep(250);
3432 	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3433 
3434 		/* Restore the PCI state saved during initialization. */
3435 		pci_restore_state(sp->pdev);
3436 		pci_save_state(sp->pdev);
3437 		pci_read_config_word(sp->pdev, 0x2, &val16);
3438 		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3439 			break;
3440 		msleep(200);
3441 	}
3442 
3443 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3444 		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3445 
3446 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3447 
3448 	s2io_init_pci(sp);
3449 
3450 	/* Set swapper to enable I/O register access */
3451 	s2io_set_swapper(sp);
3452 
3453 	/* restore mac_addr entries */
3454 	do_s2io_restore_unicast_mc(sp);
3455 
3456 	/* Restore the MSIX table entries from local variables */
3457 	restore_xmsi_data(sp);
3458 
3459 	/* Clear certain PCI/PCI-X fields after reset */
3460 	if (sp->device_type == XFRAME_II_DEVICE) {
3461 		/* Clear "detected parity error" bit */
3462 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3463 
3464 		/* Clearing PCIX Ecc status register */
3465 		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3466 
3467 		/* Clearing PCI_STATUS error reflected here */
3468 		writeq(s2BIT(62), &bar0->txpic_int_reg);
3469 	}
3470 
3471 	/* Reset device statistics maintained by OS */
3472 	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3473 
3474 	stats = sp->mac_control.stats_info;
3475 	swstats = &stats->sw_stat;
3476 
3477 	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3478 	up_cnt = swstats->link_up_cnt;
3479 	down_cnt = swstats->link_down_cnt;
3480 	up_time = swstats->link_up_time;
3481 	down_time = swstats->link_down_time;
3482 	reset_cnt = swstats->soft_reset_cnt;
3483 	mem_alloc_cnt = swstats->mem_allocated;
3484 	mem_free_cnt = swstats->mem_freed;
3485 	watchdog_cnt = swstats->watchdog_timer_cnt;
3486 
3487 	memset(stats, 0, sizeof(struct stat_block));
3488 
3489 	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3490 	swstats->link_up_cnt = up_cnt;
3491 	swstats->link_down_cnt = down_cnt;
3492 	swstats->link_up_time = up_time;
3493 	swstats->link_down_time = down_time;
3494 	swstats->soft_reset_cnt = reset_cnt;
3495 	swstats->mem_allocated = mem_alloc_cnt;
3496 	swstats->mem_freed = mem_free_cnt;
3497 	swstats->watchdog_timer_cnt = watchdog_cnt;
3498 
3499 	/* SXE-002: Configure link and activity LED to turn it off */
3500 	subid = sp->pdev->subsystem_device;
3501 	if (((subid & 0xFF) >= 0x07) &&
3502 	    (sp->device_type == XFRAME_I_DEVICE)) {
3503 		val64 = readq(&bar0->gpio_control);
3504 		val64 |= 0x0000800000000000ULL;
3505 		writeq(val64, &bar0->gpio_control);
3506 		val64 = 0x0411040400000000ULL;
3507 		writeq(val64, (void __iomem *)bar0 + 0x2700);
3508 	}
3509 
3510 	/*
3511 	 * Clear spurious ECC interrupts that would have occurred on
3512 	 * XFRAME II cards after reset.
3513 	 */
3514 	if (sp->device_type == XFRAME_II_DEVICE) {
3515 		val64 = readq(&bar0->pcc_err_reg);
3516 		writeq(val64, &bar0->pcc_err_reg);
3517 	}
3518 
3519 	sp->device_enabled_once = false;
3520 }
3521 
3522 /**
3523  *  s2io_set_swapper - to set the swapper control on the card
3524  *  @sp : private member of the device structure,
3525  *  pointer to the s2io_nic structure.
3526  *  Description: Function to set the swapper control on the card
3527  *  correctly depending on the 'endianness' of the system.
3528  *  Return value:
3529  *  SUCCESS on success and FAILURE on failure.
3530  */
3531 
3532 static int s2io_set_swapper(struct s2io_nic *sp)
3533 {
3534 	struct net_device *dev = sp->dev;
3535 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3536 	u64 val64, valt, valr;
3537 
3538 	/*
3539 	 * Set proper endian settings and verify the same by reading
3540 	 * the PIF Feed-back register.
3541 	 */
3542 
3543 	val64 = readq(&bar0->pif_rd_swapper_fb);
3544 	if (val64 != 0x0123456789ABCDEFULL) {
3545 		int i = 0;
3546 		static const u64 value[] = {
3547 			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3548 			0x8100008181000081ULL,	/* FE=1, SE=0 */
3549 			0x4200004242000042ULL,	/* FE=0, SE=1 */
3550 			0			/* FE=0, SE=0 */
3551 		};
3552 
3553 		while (i < 4) {
3554 			writeq(value[i], &bar0->swapper_ctrl);
3555 			val64 = readq(&bar0->pif_rd_swapper_fb);
3556 			if (val64 == 0x0123456789ABCDEFULL)
3557 				break;
3558 			i++;
3559 		}
3560 		if (i == 4) {
3561 			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3562 				  "feedback read %llx\n",
3563 				  dev->name, (unsigned long long)val64);
3564 			return FAILURE;
3565 		}
3566 		valr = value[i];
3567 	} else {
3568 		valr = readq(&bar0->swapper_ctrl);
3569 	}
3570 
3571 	valt = 0x0123456789ABCDEFULL;
3572 	writeq(valt, &bar0->xmsi_address);
3573 	val64 = readq(&bar0->xmsi_address);
3574 
3575 	if (val64 != valt) {
3576 		int i = 0;
3577 		static const u64 value[] = {
3578 			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3579 			0x0081810000818100ULL,	/* FE=1, SE=0 */
3580 			0x0042420000424200ULL,	/* FE=0, SE=1 */
3581 			0			/* FE=0, SE=0 */
3582 		};
3583 
3584 		while (i < 4) {
3585 			writeq((value[i] | valr), &bar0->swapper_ctrl);
3586 			writeq(valt, &bar0->xmsi_address);
3587 			val64 = readq(&bar0->xmsi_address);
3588 			if (val64 == valt)
3589 				break;
3590 			i++;
3591 		}
3592 		if (i == 4) {
3593 			unsigned long long x = val64;
3594 			DBG_PRINT(ERR_DBG,
3595 				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3596 			return FAILURE;
3597 		}
3598 	}
3599 	val64 = readq(&bar0->swapper_ctrl);
3600 	val64 &= 0xFFFF000000000000ULL;
3601 
3602 #ifdef __BIG_ENDIAN
3603 	/*
3604 	 * The device is set to a big endian format by default, so a
3605 	 * big endian driver need not set anything.
3606 	 */
3607 	val64 |= (SWAPPER_CTRL_TXP_FE |
3608 		  SWAPPER_CTRL_TXP_SE |
3609 		  SWAPPER_CTRL_TXD_R_FE |
3610 		  SWAPPER_CTRL_TXD_W_FE |
3611 		  SWAPPER_CTRL_TXF_R_FE |
3612 		  SWAPPER_CTRL_RXD_R_FE |
3613 		  SWAPPER_CTRL_RXD_W_FE |
3614 		  SWAPPER_CTRL_RXF_W_FE |
3615 		  SWAPPER_CTRL_XMSI_FE |
3616 		  SWAPPER_CTRL_STATS_FE |
3617 		  SWAPPER_CTRL_STATS_SE);
3618 	if (sp->config.intr_type == INTA)
3619 		val64 |= SWAPPER_CTRL_XMSI_SE;
3620 	writeq(val64, &bar0->swapper_ctrl);
3621 #else
3622 	/*
3623 	 * Initially we enable all bits to make it accessible by the
3624 	 * driver, then we selectively enable only those bits that
3625 	 * we want to set.
3626 	 */
3627 	val64 |= (SWAPPER_CTRL_TXP_FE |
3628 		  SWAPPER_CTRL_TXP_SE |
3629 		  SWAPPER_CTRL_TXD_R_FE |
3630 		  SWAPPER_CTRL_TXD_R_SE |
3631 		  SWAPPER_CTRL_TXD_W_FE |
3632 		  SWAPPER_CTRL_TXD_W_SE |
3633 		  SWAPPER_CTRL_TXF_R_FE |
3634 		  SWAPPER_CTRL_RXD_R_FE |
3635 		  SWAPPER_CTRL_RXD_R_SE |
3636 		  SWAPPER_CTRL_RXD_W_FE |
3637 		  SWAPPER_CTRL_RXD_W_SE |
3638 		  SWAPPER_CTRL_RXF_W_FE |
3639 		  SWAPPER_CTRL_XMSI_FE |
3640 		  SWAPPER_CTRL_STATS_FE |
3641 		  SWAPPER_CTRL_STATS_SE);
3642 	if (sp->config.intr_type == INTA)
3643 		val64 |= SWAPPER_CTRL_XMSI_SE;
3644 	writeq(val64, &bar0->swapper_ctrl);
3645 #endif
3646 	val64 = readq(&bar0->swapper_ctrl);
3647 
3648 	/*
3649 	 * Verifying if endian settings are accurate by reading a
3650 	 * feedback register.
3651 	 */
3652 	val64 = readq(&bar0->pif_rd_swapper_fb);
3653 	if (val64 != 0x0123456789ABCDEFULL) {
3654 		/* Endian settings are incorrect, calls for another look. */
3655 		DBG_PRINT(ERR_DBG,
3656 			  "%s: Endian settings are wrong, feedback read %llx\n",
3657 			  dev->name, (unsigned long long)val64);
3658 		return FAILURE;
3659 	}
3660 
3661 	return SUCCESS;
3662 }
3663 
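/*
 * wait_for_msix_trans - polls the xmsi_access register until the strobe
 * bit (bit 15) clears, i.e. until the MSI-X table access issued by the
 * caller has completed, giving up after roughly 5 ms.
 * Returns 0 on completion and 1 on timeout.
 */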
3664 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3665 {
3666 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3667 	u64 val64;
3668 	int ret = 0, cnt = 0;
3669 
3670 	do {
3671 		val64 = readq(&bar0->xmsi_access);
3672 		if (!(val64 & s2BIT(15)))
3673 			break;
3674 		mdelay(1);
3675 		cnt++;
3676 	} while (cnt < 5);
3677 	if (cnt == 5) {
3678 		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3679 		ret = 1;
3680 	}
3681 
3682 	return ret;
3683 }
3684 
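/*
 * restore_xmsi_data - writes the MSI-X address/data pairs saved in
 * nic->msix_info back into the adapter's MSI-X table (Xframe II only).
 * The index mapping mirrors store_xmsi_data: entry 0 is the alarm
 * vector and the ring vectors are spaced 8 table entries apart.
 */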
3685 static void restore_xmsi_data(struct s2io_nic *nic)
3686 {
3687 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3688 	u64 val64;
3689 	int i, msix_index;
3690 
3691 	if (nic->device_type == XFRAME_I_DEVICE)
3692 		return;
3693 
3694 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3695 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3696 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3697 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3698 		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3699 		writeq(val64, &bar0->xmsi_access);
3700 		if (wait_for_msix_trans(nic, msix_index)) {
3701 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3702 				  __func__, msix_index);
3703 			continue;
3704 		}
3705 	}
3706 }
3707 
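/*
 * store_xmsi_data - reads the MSI-X address/data pairs programmed into
 * the adapter's MSI-X table and saves them in nic->msix_info so they
 * can be restored after a card reset (Xframe II only).
 */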
3708 static void store_xmsi_data(struct s2io_nic *nic)
3709 {
3710 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3711 	u64 val64, addr, data;
3712 	int i, msix_index;
3713 
3714 	if (nic->device_type == XFRAME_I_DEVICE)
3715 		return;
3716 
3717 	/* Store and display */
3718 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3719 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3720 		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3721 		writeq(val64, &bar0->xmsi_access);
3722 		if (wait_for_msix_trans(nic, msix_index)) {
3723 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3724 				  __func__, msix_index);
3725 			continue;
3726 		}
3727 		addr = readq(&bar0->xmsi_address);
3728 		data = readq(&bar0->xmsi_data);
3729 		if (addr && data) {
3730 			nic->msix_info[i].addr = addr;
3731 			nic->msix_info[i].data = data;
3732 		}
3733 	}
3734 }
3735 
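/*
 * s2io_enable_msi_x - allocates the msix_entry and s2io_msix_entry
 * arrays, maps each Rx ring to an MSI-X vector via the rx_mat register
 * and enables MSI-X on the device. Entry 0 carries Tx and alarm
 * events; the remaining vectors serve one Rx ring each.
 * Returns 0 on success, -ENOMEM on failure.
 */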
3736 static int s2io_enable_msi_x(struct s2io_nic *nic)
3737 {
3738 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3739 	u64 rx_mat;
3740 	u16 msi_control; /* Temp variable */
3741 	int ret, i, j, msix_indx = 1;
3742 	int size;
3743 	struct stat_block *stats = nic->mac_control.stats_info;
3744 	struct swStat *swstats = &stats->sw_stat;
3745 
3746 	size = nic->num_entries * sizeof(struct msix_entry);
3747 	nic->entries = kzalloc(size, GFP_KERNEL);
3748 	if (!nic->entries) {
3749 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3750 			  __func__);
3751 		swstats->mem_alloc_fail_cnt++;
3752 		return -ENOMEM;
3753 	}
3754 	swstats->mem_allocated += size;
3755 
3756 	size = nic->num_entries * sizeof(struct s2io_msix_entry);
3757 	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3758 	if (!nic->s2io_entries) {
3759 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3760 			  __func__);
3761 		swstats->mem_alloc_fail_cnt++;
3762 		kfree(nic->entries);
3763 		swstats->mem_freed
3764 			+= (nic->num_entries * sizeof(struct msix_entry));
3765 		return -ENOMEM;
3766 	}
3767 	swstats->mem_allocated += size;
3768 
3769 	nic->entries[0].entry = 0;
3770 	nic->s2io_entries[0].entry = 0;
3771 	nic->s2io_entries[0].in_use = MSIX_FLG;
3772 	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3773 	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3774 
3775 	for (i = 1; i < nic->num_entries; i++) {
3776 		nic->entries[i].entry = ((i - 1) * 8) + 1;
3777 		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3778 		nic->s2io_entries[i].arg = NULL;
3779 		nic->s2io_entries[i].in_use = 0;
3780 	}
3781 
3782 	rx_mat = readq(&bar0->rx_mat);
3783 	for (j = 0; j < nic->config.rx_ring_num; j++) {
3784 		rx_mat |= RX_MAT_SET(j, msix_indx);
3785 		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3786 		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3787 		nic->s2io_entries[j+1].in_use = MSIX_FLG;
3788 		msix_indx += 8;
3789 	}
3790 	writeq(rx_mat, &bar0->rx_mat);
3791 	readq(&bar0->rx_mat);
3792 
3793 	ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3794 	/* Fail init on error, or if we get fewer vectors than the minimum required */
3795 	if (ret) {
3796 		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3797 		kfree(nic->entries);
3798 		swstats->mem_freed += nic->num_entries *
3799 			sizeof(struct msix_entry);
3800 		kfree(nic->s2io_entries);
3801 		swstats->mem_freed += nic->num_entries *
3802 			sizeof(struct s2io_msix_entry);
3803 		nic->entries = NULL;
3804 		nic->s2io_entries = NULL;
3805 		return -ENOMEM;
3806 	}
3807 
3808 	/*
3809 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3810 	 * in the herc NIC. (Temp change, needs to be removed later)
3811 	 */
3812 	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3813 	msi_control |= 0x1; /* Enable MSI */
3814 	pci_write_config_word(nic->pdev, 0x42, msi_control);
3815 
3816 	return 0;
3817 }
3818 
3819 /* Handle software interrupt used during MSI(X) test */
3820 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3821 {
3822 	struct s2io_nic *sp = dev_id;
3823 
3824 	sp->msi_detected = 1;
3825 	wake_up(&sp->msi_wait);
3826 
3827 	return IRQ_HANDLED;
3828 }
3829 
3830 /* Test the interrupt path by forcing a software IRQ */
3831 static int s2io_test_msi(struct s2io_nic *sp)
3832 {
3833 	struct pci_dev *pdev = sp->pdev;
3834 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3835 	int err;
3836 	u64 val64, saved64;
3837 
3838 	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3839 			  sp->name, sp);
3840 	if (err) {
3841 		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3842 			  sp->dev->name, pci_name(pdev), pdev->irq);
3843 		return err;
3844 	}
3845 
3846 	init_waitqueue_head(&sp->msi_wait);
3847 	sp->msi_detected = 0;
3848 
3849 	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3850 	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3851 	val64 |= SCHED_INT_CTRL_TIMER_EN;
3852 	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3853 	writeq(val64, &bar0->scheduled_int_ctrl);
3854 
3855 	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3856 
3857 	if (!sp->msi_detected) {
3858 		/* MSI(X) test failed, go back to INTx mode */
3859 		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3860 			  "using MSI(X) during test\n",
3861 			  sp->dev->name, pci_name(pdev));
3862 
3863 		err = -EOPNOTSUPP;
3864 	}
3865 
3866 	free_irq(sp->entries[1].vector, sp);
3867 
3868 	writeq(saved64, &bar0->scheduled_int_ctrl);
3869 
3870 	return err;
3871 }
3872 
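/*
 * remove_msix_isr - frees every successfully registered MSI-X vector,
 * releases the entry arrays, clears the MSI enable bit that
 * s2io_enable_msi_x set as a herc workaround, and disables MSI-X.
 */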
3873 static void remove_msix_isr(struct s2io_nic *sp)
3874 {
3875 	int i;
3876 	u16 msi_control;
3877 
3878 	for (i = 0; i < sp->num_entries; i++) {
3879 		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3880 			int vector = sp->entries[i].vector;
3881 			void *arg = sp->s2io_entries[i].arg;
3882 			free_irq(vector, arg);
3883 		}
3884 	}
3885 
3886 	kfree(sp->entries);
3887 	kfree(sp->s2io_entries);
3888 	sp->entries = NULL;
3889 	sp->s2io_entries = NULL;
3890 
3891 	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3892 	msi_control &= 0xFFFE; /* Disable MSI */
3893 	pci_write_config_word(sp->pdev, 0x42, msi_control);
3894 
3895 	pci_disable_msix(sp->pdev);
3896 }
3897 
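/* remove_inta_isr - releases the legacy INTA interrupt line. */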
3898 static void remove_inta_isr(struct s2io_nic *sp)
3899 {
3900 	struct net_device *dev = sp->dev;
3901 
3902 	free_irq(sp->pdev->irq, dev);
3903 }
3904 
3905 /* ********************************************************* *
3906  * Functions defined below concern the OS part of the driver *
3907  * ********************************************************* */
3908 
3909 /**
3910  *  s2io_open - open entry point of the driver
3911  *  @dev : pointer to the device structure.
3912  *  Description:
3913  *  This function is the open entry point of the driver. It mainly calls a
3914  *  function to allocate Rx buffers and inserts them into the buffer
3915  *  descriptors and then enables the Rx part of the NIC.
3916  *  Return value:
3917  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3918  *   file on failure.
3919  */
3920 
3921 static int s2io_open(struct net_device *dev)
3922 {
3923 	struct s2io_nic *sp = netdev_priv(dev);
3924 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3925 	int err = 0;
3926 
3927 	/*
3928 	 * Make sure the link is off by default every time
3929 	 * the NIC is initialized
3930 	 */
3931 	netif_carrier_off(dev);
3932 	sp->last_link_state = 0;
3933 
3934 	/* Initialize H/W and enable interrupts */
3935 	err = s2io_card_up(sp);
3936 	if (err) {
3937 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3938 			  dev->name);
3939 		goto hw_init_failed;
3940 	}
3941 
3942 	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3943 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3944 		s2io_card_down(sp);
3945 		err = -ENODEV;
3946 		goto hw_init_failed;
3947 	}
3948 	s2io_start_all_tx_queue(sp);
3949 	return 0;
3950 
3951 hw_init_failed:
3952 	if (sp->config.intr_type == MSI_X) {
3953 		if (sp->entries) {
3954 			kfree(sp->entries);
3955 			swstats->mem_freed += sp->num_entries *
3956 				sizeof(struct msix_entry);
3957 		}
3958 		if (sp->s2io_entries) {
3959 			kfree(sp->s2io_entries);
3960 			swstats->mem_freed += sp->num_entries *
3961 				sizeof(struct s2io_msix_entry);
3962 		}
3963 	}
3964 	return err;
3965 }
3966 
3967 /**
3968  *  s2io_close - close entry point of the driver
3969  *  @dev : device pointer.
3970  *  Description:
3971  *  This is the stop entry point of the driver. It needs to undo exactly
3972  *  whatever was done by the open entry point, thus it's usually referred to
3973  *  as the close function. Among other things this function mainly stops the
3974  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3975  *  Return value:
3976  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3977  *  file on failure.
3978  */
3979 
3980 static int s2io_close(struct net_device *dev)
3981 {
3982 	struct s2io_nic *sp = netdev_priv(dev);
3983 	struct config_param *config = &sp->config;
3984 	u64 tmp64;
3985 	int offset;
3986 
3987 	/* Return if the device is already closed. This can happen
3988 	 * when s2io_card_up failed in change_mtu.
3989 	 */
3990 	if (!is_s2io_card_up(sp))
3991 		return 0;
3992 
3993 	s2io_stop_all_tx_queue(sp);
3994 	/* delete all populated mac entries */
3995 	for (offset = 1; offset < config->max_mc_addr; offset++) {
3996 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3997 		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3998 			do_s2io_delete_unicast_mc(sp, tmp64);
3999 	}
4000 
4001 	s2io_card_down(sp);
4002 
4003 	return 0;
4004 }
4005 
4006 /**
4007  *  s2io_xmit - Tx entry point of the driver
4008  *  @skb : the socket buffer containing the Tx data.
4009  *  @dev : device pointer.
4010  *  Description:
4011  *  This function is the Tx entry point of the driver. The S2IO NIC supports
4012  *  certain protocol assist features on the Tx side, namely CSO, S/G and LSO.
4013  *  NOTE: when the device cannot queue the packet, the trans_start variable
4014  *  is not updated.
4015  *  Return value:
4016  *  NETDEV_TX_OK on success; NETDEV_TX_BUSY/NETDEV_TX_LOCKED otherwise.
4017  */
4018 
4019 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4020 {
4021 	struct s2io_nic *sp = netdev_priv(dev);
4022 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4023 	register u64 val64;
4024 	struct TxD *txdp;
4025 	struct TxFIFO_element __iomem *tx_fifo;
4026 	unsigned long flags = 0;
4027 	u16 vlan_tag = 0;
4028 	struct fifo_info *fifo = NULL;
4029 	int do_spin_lock = 1;
4030 	int offload_type;
4031 	int enable_per_list_interrupt = 0;
4032 	struct config_param *config = &sp->config;
4033 	struct mac_info *mac_control = &sp->mac_control;
4034 	struct stat_block *stats = mac_control->stats_info;
4035 	struct swStat *swstats = &stats->sw_stat;
4036 
4037 	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4038 
4039 	if (unlikely(skb->len <= 0)) {
4040 		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4041 		dev_kfree_skb_any(skb);
4042 		return NETDEV_TX_OK;
4043 	}
4044 
4045 	if (!is_s2io_card_up(sp)) {
4046 		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4047 			  dev->name);
4048 		dev_kfree_skb(skb);
4049 		return NETDEV_TX_OK;
4050 	}
4051 
4052 	queue = 0;
4053 	if (vlan_tx_tag_present(skb))
4054 		vlan_tag = vlan_tx_tag_get(skb);
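	/*
	 * Default steering hashes the TCP/UDP source + destination ports
	 * through the fifo_selector mask to pick a Tx FIFO; UDP traffic is
	 * offset into its own FIFO group and takes the lock with a trylock.
	 */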
4055 	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4056 		if (skb->protocol == htons(ETH_P_IP)) {
4057 			struct iphdr *ip;
4058 			struct tcphdr *th;
4059 			ip = ip_hdr(skb);
4060 
4061 			if (!ip_is_fragment(ip)) {
4062 				th = (struct tcphdr *)(((unsigned char *)ip) +
4063 						       ip->ihl*4);
4064 
4065 				if (ip->protocol == IPPROTO_TCP) {
4066 					queue_len = sp->total_tcp_fifos;
4067 					queue = (ntohs(th->source) +
4068 						 ntohs(th->dest)) &
4069 						sp->fifo_selector[queue_len - 1];
4070 					if (queue >= queue_len)
4071 						queue = queue_len - 1;
4072 				} else if (ip->protocol == IPPROTO_UDP) {
4073 					queue_len = sp->total_udp_fifos;
4074 					queue = (ntohs(th->source) +
4075 						 ntohs(th->dest)) &
4076 						sp->fifo_selector[queue_len - 1];
4077 					if (queue >= queue_len)
4078 						queue = queue_len - 1;
4079 					queue += sp->udp_fifo_idx;
4080 					if (skb->len > 1024)
4081 						enable_per_list_interrupt = 1;
4082 					do_spin_lock = 0;
4083 				}
4084 			}
4085 		}
4086 	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4087 		/* get fifo number based on skb->priority value */
4088 		queue = config->fifo_mapping
4089 			[skb->priority & (MAX_TX_FIFOS - 1)];
4090 	fifo = &mac_control->fifos[queue];
4091 
4092 	if (do_spin_lock)
4093 		spin_lock_irqsave(&fifo->tx_lock, flags);
4094 	else {
4095 		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4096 			return NETDEV_TX_LOCKED;
4097 	}
4098 
4099 	if (sp->config.multiq) {
4100 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4101 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4102 			return NETDEV_TX_BUSY;
4103 		}
4104 	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4105 		if (netif_queue_stopped(dev)) {
4106 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4107 			return NETDEV_TX_BUSY;
4108 		}
4109 	}
4110 
4111 	put_off = (u16)fifo->tx_curr_put_info.offset;
4112 	get_off = (u16)fifo->tx_curr_get_info.offset;
4113 	txdp = fifo->list_info[put_off].list_virt_addr;
4114 
4115 	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4116 	/* Avoid "put" pointer going beyond "get" pointer */
4117 	if (txdp->Host_Control ||
4118 	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4119 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4120 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4121 		dev_kfree_skb(skb);
4122 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4123 		return NETDEV_TX_OK;
4124 	}
4125 
4126 	offload_type = s2io_offload_type(skb);
4127 	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4128 		txdp->Control_1 |= TXD_TCP_LSO_EN;
4129 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4130 	}
4131 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4132 		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4133 				    TXD_TX_CKO_TCP_EN |
4134 				    TXD_TX_CKO_UDP_EN);
4135 	}
4136 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4137 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4138 	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4139 	if (enable_per_list_interrupt)
4140 		if (put_off & (queue_len >> 5))
4141 			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4142 	if (vlan_tag) {
4143 		txdp->Control_2 |= TXD_VLAN_ENABLE;
4144 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4145 	}
4146 
4147 	frg_len = skb_headlen(skb);
4148 	if (offload_type == SKB_GSO_UDP) {
4149 		int ufo_size;
4150 
4151 		ufo_size = s2io_udp_mss(skb);
4152 		ufo_size &= ~7;
4153 		txdp->Control_1 |= TXD_UFO_EN;
4154 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4155 		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4156 #ifdef __BIG_ENDIAN
4157 		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
4158 		fifo->ufo_in_band_v[put_off] =
4159 			(__force u64)skb_shinfo(skb)->ip6_frag_id;
4160 #else
4161 		fifo->ufo_in_band_v[put_off] =
4162 			(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4163 #endif
4164 		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4165 		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4166 						      fifo->ufo_in_band_v,
4167 						      sizeof(u64),
4168 						      PCI_DMA_TODEVICE);
4169 		if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4170 			goto pci_map_failed;
4171 		txdp++;
4172 	}
4173 
4174 	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4175 					      frg_len, PCI_DMA_TODEVICE);
4176 	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4177 		goto pci_map_failed;
4178 
4179 	txdp->Host_Control = (unsigned long)skb;
4180 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4181 	if (offload_type == SKB_GSO_UDP)
4182 		txdp->Control_1 |= TXD_UFO_EN;
4183 
4184 	frg_cnt = skb_shinfo(skb)->nr_frags;
4185 	/* For fragmented SKB. */
4186 	for (i = 0; i < frg_cnt; i++) {
4187 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4188 		/* A '0' length fragment will be ignored */
4189 		if (!skb_frag_size(frag))
4190 			continue;
4191 		txdp++;
4192 		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4193 							     frag, 0,
4194 							     skb_frag_size(frag),
4195 							     DMA_TO_DEVICE);
4196 		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4197 		if (offload_type == SKB_GSO_UDP)
4198 			txdp->Control_1 |= TXD_UFO_EN;
4199 	}
4200 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4201 
4202 	if (offload_type == SKB_GSO_UDP)
4203 		frg_cnt++; /* as Txd0 was used for inband header */
4204 
4205 	tx_fifo = mac_control->tx_FIFO_start[queue];
4206 	val64 = fifo->list_info[put_off].list_phy_addr;
4207 	writeq(val64, &tx_fifo->TxDL_Pointer);
4208 
4209 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4210 		 TX_FIFO_LAST_LIST);
4211 	if (offload_type)
4212 		val64 |= TX_FIFO_SPECIAL_FUNC;
4213 
4214 	writeq(val64, &tx_fifo->List_Control);
4215 
4216 	mmiowb();
4217 
4218 	put_off++;
4219 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4220 		put_off = 0;
4221 	fifo->tx_curr_put_info.offset = put_off;
4222 
4223 	/* Avoid "put" pointer going beyond "get" pointer */
4224 	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4225 		swstats->fifo_full_cnt++;
4226 		DBG_PRINT(TX_DBG,
4227 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4228 			  put_off, get_off);
4229 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4230 	}
4231 	swstats->mem_allocated += skb->truesize;
4232 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4233 
4234 	if (sp->config.intr_type == MSI_X)
4235 		tx_intr_handler(fifo);
4236 
4237 	return NETDEV_TX_OK;
4238 
4239 pci_map_failed:
4240 	swstats->pci_map_fail_cnt++;
4241 	s2io_stop_tx_queue(sp, fifo->fifo_no);
4242 	swstats->mem_freed += skb->truesize;
4243 	dev_kfree_skb(skb);
4244 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4245 	return NETDEV_TX_OK;
4246 }
4247 
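/*
 * s2io_alarm_handle - timer callback that runs the device error/alarm
 * handler and re-arms the alarm timer to fire again in half a second.
 */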
4248 static void
4249 s2io_alarm_handle(unsigned long data)
4250 {
4251 	struct s2io_nic *sp = (struct s2io_nic *)data;
4252 	struct net_device *dev = sp->dev;
4253 
4254 	s2io_handle_errors(dev);
4255 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4256 }
4257 
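/*
 * s2io_msix_ring_handle - per-ring MSI-X Rx interrupt handler. In NAPI
 * mode it masks the ring's vector in xmsi_mask_reg and schedules the
 * ring's NAPI context; otherwise it processes the ring and replenishes
 * the Rx buffers inline.
 */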
4258 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4259 {
4260 	struct ring_info *ring = (struct ring_info *)dev_id;
4261 	struct s2io_nic *sp = ring->nic;
4262 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4263 
4264 	if (unlikely(!is_s2io_card_up(sp)))
4265 		return IRQ_HANDLED;
4266 
4267 	if (sp->config.napi) {
4268 		u8 __iomem *addr = NULL;
4269 		u8 val8 = 0;
4270 
4271 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4272 		addr += (7 - ring->ring_no);
4273 		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4274 		writeb(val8, addr);
4275 		val8 = readb(addr);
4276 		napi_schedule(&ring->napi);
4277 	} else {
4278 		rx_intr_handler(ring, 0);
4279 		s2io_chk_rx_buffers(sp, ring);
4280 	}
4281 
4282 	return IRQ_HANDLED;
4283 }
4284 
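/*
 * s2io_msix_fifo_handle - MSI-X vector shared by the Tx FIFOs and the
 * TXPIC alarms. Masks the general interrupt, services every Tx FIFO
 * and any TXPIC event, then restores the interrupt mask.
 */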
4285 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4286 {
4287 	int i;
4288 	struct fifo_info *fifos = (struct fifo_info *)dev_id;
4289 	struct s2io_nic *sp = fifos->nic;
4290 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4291 	struct config_param *config  = &sp->config;
4292 	u64 reason;
4293 
4294 	if (unlikely(!is_s2io_card_up(sp)))
4295 		return IRQ_NONE;
4296 
4297 	reason = readq(&bar0->general_int_status);
4298 	if (unlikely(reason == S2IO_MINUS_ONE))
4299 		/* Nothing much can be done. Get out */
4300 		return IRQ_HANDLED;
4301 
4302 	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4303 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4304 
4305 		if (reason & GEN_INTR_TXPIC)
4306 			s2io_txpic_intr_handle(sp);
4307 
4308 		if (reason & GEN_INTR_TXTRAFFIC)
4309 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4310 
4311 		for (i = 0; i < config->tx_fifo_num; i++)
4312 			tx_intr_handler(&fifos[i]);
4313 
4314 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4315 		readl(&bar0->general_int_status);
4316 		return IRQ_HANDLED;
4317 	}
4318 	/* The interrupt was not raised by us */
4319 	return IRQ_NONE;
4320 }
4321 
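/*
 * s2io_txpic_intr_handle - handles GPIO link-state interrupts: resolves
 * the ambiguous case where both link-up and link-down are flagged, and
 * otherwise updates the carrier state, the adapter enable bit and the
 * LED, swapping which of the up/down interrupts stays unmasked.
 */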
4322 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4323 {
4324 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4325 	u64 val64;
4326 
4327 	val64 = readq(&bar0->pic_int_status);
4328 	if (val64 & PIC_INT_GPIO) {
4329 		val64 = readq(&bar0->gpio_int_reg);
4330 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4331 		    (val64 & GPIO_INT_REG_LINK_UP)) {
4332 			/*
4333 			 * This is an unstable state, so clear both up/down
4334 			 * interrupts and let the adapter re-evaluate the link state.
4335 			 */
4336 			val64 |= GPIO_INT_REG_LINK_DOWN;
4337 			val64 |= GPIO_INT_REG_LINK_UP;
4338 			writeq(val64, &bar0->gpio_int_reg);
4339 			val64 = readq(&bar0->gpio_int_mask);
4340 			val64 &= ~(GPIO_INT_MASK_LINK_UP |
4341 				   GPIO_INT_MASK_LINK_DOWN);
4342 			writeq(val64, &bar0->gpio_int_mask);
4343 		} else if (val64 & GPIO_INT_REG_LINK_UP) {
4344 			val64 = readq(&bar0->adapter_status);
4345 			/* Enable Adapter */
4346 			val64 = readq(&bar0->adapter_control);
4347 			val64 |= ADAPTER_CNTL_EN;
4348 			writeq(val64, &bar0->adapter_control);
4349 			val64 |= ADAPTER_LED_ON;
4350 			writeq(val64, &bar0->adapter_control);
4351 			if (!sp->device_enabled_once)
4352 				sp->device_enabled_once = 1;
4353 
4354 			s2io_link(sp, LINK_UP);
4355 			/*
4356 			 * unmask link down interrupt and mask link-up
4357 			 * intr
4358 			 */
4359 			val64 = readq(&bar0->gpio_int_mask);
4360 			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4361 			val64 |= GPIO_INT_MASK_LINK_UP;
4362 			writeq(val64, &bar0->gpio_int_mask);
4363 
4364 		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4365 			val64 = readq(&bar0->adapter_status);
4366 			s2io_link(sp, LINK_DOWN);
4367 			/* Link is down, so unmask the link-up interrupt */
4368 			val64 = readq(&bar0->gpio_int_mask);
4369 			val64 &= ~GPIO_INT_MASK_LINK_UP;
4370 			val64 |= GPIO_INT_MASK_LINK_DOWN;
4371 			writeq(val64, &bar0->gpio_int_mask);
4372 
4373 			/* turn off LED */
4374 			val64 = readq(&bar0->adapter_control);
4375 			val64 = val64 & (~ADAPTER_LED_ON);
4376 			writeq(val64, &bar0->adapter_control);
4377 		}
4378 	}
4379 	val64 = readq(&bar0->gpio_int_mask);
4380 }
4381 
4382 /**
4383  *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4384  *  @value: alarm bits
4385  *  @addr: address value
4386  *  @cnt: counter variable
4387  *  Description: Check for alarm and increment the counter
4388  *  Return Value:
4389  *  1 - if alarm bit set
4390  *  0 - if alarm bit is not set
4391  */
4392 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4393 				 unsigned long long *cnt)
4394 {
4395 	u64 val64;
4396 	val64 = readq(addr);
4397 	if (val64 & value) {
4398 		writeq(val64, addr);
4399 		(*cnt)++;
4400 		return 1;
4401 	}
4402 	return 0;
4403 
4404 }
4405 
4406 /**
4407  *  s2io_handle_errors - Xframe error indication handler
4408  *  @nic: device private variable
4409  *  Description: Handle alarms such as loss of link, single or
4410  *  double ECC errors, critical and serious errors.
4411  *  Return Value:
4412  *  NONE
4413  */
4414 static void s2io_handle_errors(void *dev_id)
4415 {
4416 	struct net_device *dev = (struct net_device *)dev_id;
4417 	struct s2io_nic *sp = netdev_priv(dev);
4418 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4419 	u64 temp64 = 0, val64 = 0;
4420 	int i = 0;
4421 
4422 	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4423 	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4424 
4425 	if (!is_s2io_card_up(sp))
4426 		return;
4427 
4428 	if (pci_channel_offline(sp->pdev))
4429 		return;
4430 
4431 	memset(&sw_stat->ring_full_cnt, 0,
4432 	       sizeof(sw_stat->ring_full_cnt));
4433 
4434 	/* Handling the XPAK counters update */
4435 	if (stats->xpak_timer_count < 72000) {
4436 		/* wait for 72000 alarm ticks (one every HZ/2, roughly 10 hours) */
4437 		stats->xpak_timer_count++;
4438 	} else {
4439 		s2io_updt_xpak_counter(dev);
4440 		/* reset the count to zero */
4441 		stats->xpak_timer_count = 0;
4442 	}
4443 
4444 	/* Handling link status change error Intr */
4445 	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4446 		val64 = readq(&bar0->mac_rmac_err_reg);
4447 		writeq(val64, &bar0->mac_rmac_err_reg);
4448 		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4449 			schedule_work(&sp->set_link_task);
4450 	}
4451 
4452 	/* In case of a serious error, the device will be Reset. */
4453 	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4454 				  &sw_stat->serious_err_cnt))
4455 		goto reset;
4456 
4457 	/* Check for data parity error */
4458 	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4459 				  &sw_stat->parity_err_cnt))
4460 		goto reset;
4461 
4462 	/* Check for ring full counter */
4463 	if (sp->device_type == XFRAME_II_DEVICE) {
4464 		val64 = readq(&bar0->ring_bump_counter1);
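		/* Each 64-bit register packs four 16-bit per-ring bump counts */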
4465 		for (i = 0; i < 4; i++) {
4466 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4467 			temp64 >>= 64 - ((i+1)*16);
4468 			sw_stat->ring_full_cnt[i] += temp64;
4469 		}
4470 
4471 		val64 = readq(&bar0->ring_bump_counter2);
4472 		for (i = 0; i < 4; i++) {
4473 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4474 			temp64 >>= 64 - ((i+1)*16);
4475 			sw_stat->ring_full_cnt[i+4] += temp64;
4476 		}
4477 	}
4478 
4479 	val64 = readq(&bar0->txdma_int_status);
4480 	/*check for pfc_err*/
4481 	if (val64 & TXDMA_PFC_INT) {
4482 		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4483 					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4484 					  PFC_PCIX_ERR,
4485 					  &bar0->pfc_err_reg,
4486 					  &sw_stat->pfc_err_cnt))
4487 			goto reset;
4488 		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4489 				      &bar0->pfc_err_reg,
4490 				      &sw_stat->pfc_err_cnt);
4491 	}
4492 
4493 	/*check for tda_err*/
4494 	if (val64 & TXDMA_TDA_INT) {
4495 		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4496 					  TDA_SM0_ERR_ALARM |
4497 					  TDA_SM1_ERR_ALARM,
4498 					  &bar0->tda_err_reg,
4499 					  &sw_stat->tda_err_cnt))
4500 			goto reset;
4501 		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4502 				      &bar0->tda_err_reg,
4503 				      &sw_stat->tda_err_cnt);
4504 	}
4505 	/*check for pcc_err*/
4506 	if (val64 & TXDMA_PCC_INT) {
4507 		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4508 					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4509 					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4510 					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4511 					  PCC_TXB_ECC_DB_ERR,
4512 					  &bar0->pcc_err_reg,
4513 					  &sw_stat->pcc_err_cnt))
4514 			goto reset;
4515 		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4516 				      &bar0->pcc_err_reg,
4517 				      &sw_stat->pcc_err_cnt);
4518 	}
4519 
4520 	/*check for tti_err*/
4521 	if (val64 & TXDMA_TTI_INT) {
4522 		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4523 					  &bar0->tti_err_reg,
4524 					  &sw_stat->tti_err_cnt))
4525 			goto reset;
4526 		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4527 				      &bar0->tti_err_reg,
4528 				      &sw_stat->tti_err_cnt);
4529 	}
4530 
4531 	/*check for lso_err*/
4532 	if (val64 & TXDMA_LSO_INT) {
4533 		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4534 					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4535 					  &bar0->lso_err_reg,
4536 					  &sw_stat->lso_err_cnt))
4537 			goto reset;
4538 		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4539 				      &bar0->lso_err_reg,
4540 				      &sw_stat->lso_err_cnt);
4541 	}
4542 
4543 	/*check for tpa_err*/
4544 	if (val64 & TXDMA_TPA_INT) {
4545 		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4546 					  &bar0->tpa_err_reg,
4547 					  &sw_stat->tpa_err_cnt))
4548 			goto reset;
4549 		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4550 				      &bar0->tpa_err_reg,
4551 				      &sw_stat->tpa_err_cnt);
4552 	}
4553 
4554 	/*check for sm_err*/
4555 	if (val64 & TXDMA_SM_INT) {
4556 		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4557 					  &bar0->sm_err_reg,
4558 					  &sw_stat->sm_err_cnt))
4559 			goto reset;
4560 	}
4561 
4562 	val64 = readq(&bar0->mac_int_status);
4563 	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4564 		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4565 					  &bar0->mac_tmac_err_reg,
4566 					  &sw_stat->mac_tmac_err_cnt))
4567 			goto reset;
4568 		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4569 				      TMAC_DESC_ECC_SG_ERR |
4570 				      TMAC_DESC_ECC_DB_ERR,
4571 				      &bar0->mac_tmac_err_reg,
4572 				      &sw_stat->mac_tmac_err_cnt);
4573 	}
4574 
4575 	val64 = readq(&bar0->xgxs_int_status);
4576 	if (val64 & XGXS_INT_STATUS_TXGXS) {
4577 		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4578 					  &bar0->xgxs_txgxs_err_reg,
4579 					  &sw_stat->xgxs_txgxs_err_cnt))
4580 			goto reset;
4581 		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4582 				      &bar0->xgxs_txgxs_err_reg,
4583 				      &sw_stat->xgxs_txgxs_err_cnt);
4584 	}
4585 
4586 	val64 = readq(&bar0->rxdma_int_status);
4587 	if (val64 & RXDMA_INT_RC_INT_M) {
4588 		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4589 					  RC_FTC_ECC_DB_ERR |
4590 					  RC_PRCn_SM_ERR_ALARM |
4591 					  RC_FTC_SM_ERR_ALARM,
4592 					  &bar0->rc_err_reg,
4593 					  &sw_stat->rc_err_cnt))
4594 			goto reset;
4595 		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4596 				      RC_FTC_ECC_SG_ERR |
4597 				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4598 				      &sw_stat->rc_err_cnt);
4599 		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4600 					  PRC_PCI_AB_WR_Rn |
4601 					  PRC_PCI_AB_F_WR_Rn,
4602 					  &bar0->prc_pcix_err_reg,
4603 					  &sw_stat->prc_pcix_err_cnt))
4604 			goto reset;
4605 		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4606 				      PRC_PCI_DP_WR_Rn |
4607 				      PRC_PCI_DP_F_WR_Rn,
4608 				      &bar0->prc_pcix_err_reg,
4609 				      &sw_stat->prc_pcix_err_cnt);
4610 	}
4611 
4612 	if (val64 & RXDMA_INT_RPA_INT_M) {
4613 		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4614 					  &bar0->rpa_err_reg,
4615 					  &sw_stat->rpa_err_cnt))
4616 			goto reset;
4617 		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4618 				      &bar0->rpa_err_reg,
4619 				      &sw_stat->rpa_err_cnt);
4620 	}
4621 
4622 	if (val64 & RXDMA_INT_RDA_INT_M) {
4623 		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4624 					  RDA_FRM_ECC_DB_N_AERR |
4625 					  RDA_SM1_ERR_ALARM |
4626 					  RDA_SM0_ERR_ALARM |
4627 					  RDA_RXD_ECC_DB_SERR,
4628 					  &bar0->rda_err_reg,
4629 					  &sw_stat->rda_err_cnt))
4630 			goto reset;
4631 		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4632 				      RDA_FRM_ECC_SG_ERR |
4633 				      RDA_MISC_ERR |
4634 				      RDA_PCIX_ERR,
4635 				      &bar0->rda_err_reg,
4636 				      &sw_stat->rda_err_cnt);
4637 	}
4638 
4639 	if (val64 & RXDMA_INT_RTI_INT_M) {
4640 		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4641 					  &bar0->rti_err_reg,
4642 					  &sw_stat->rti_err_cnt))
4643 			goto reset;
4644 		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4645 				      &bar0->rti_err_reg,
4646 				      &sw_stat->rti_err_cnt);
4647 	}
4648 
4649 	val64 = readq(&bar0->mac_int_status);
4650 	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4651 		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4652 					  &bar0->mac_rmac_err_reg,
4653 					  &sw_stat->mac_rmac_err_cnt))
4654 			goto reset;
4655 		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4656 				      RMAC_SINGLE_ECC_ERR |
4657 				      RMAC_DOUBLE_ECC_ERR,
4658 				      &bar0->mac_rmac_err_reg,
4659 				      &sw_stat->mac_rmac_err_cnt);
4660 	}
4661 
4662 	val64 = readq(&bar0->xgxs_int_status);
4663 	if (val64 & XGXS_INT_STATUS_RXGXS) {
4664 		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4665 					  &bar0->xgxs_rxgxs_err_reg,
4666 					  &sw_stat->xgxs_rxgxs_err_cnt))
4667 			goto reset;
4668 	}
4669 
4670 	val64 = readq(&bar0->mc_int_status);
4671 	if (val64 & MC_INT_STATUS_MC_INT) {
4672 		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4673 					  &bar0->mc_err_reg,
4674 					  &sw_stat->mc_err_cnt))
4675 			goto reset;
4676 
4677 		/* Handling Ecc errors */
4678 		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4679 			writeq(val64, &bar0->mc_err_reg);
4680 			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4681 				sw_stat->double_ecc_errs++;
4682 				if (sp->device_type != XFRAME_II_DEVICE) {
4683 					/*
4684 					 * Reset XframeI only if critical error
4685 					 */
4686 					if (val64 &
4687 					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4688 					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4689 						goto reset;
4690 				}
4691 			} else
4692 				sw_stat->single_ecc_errs++;
4693 		}
4694 	}
4695 	return;
4696 
4697 reset:
4698 	s2io_stop_all_tx_queue(sp);
4699 	schedule_work(&sp->rst_timer_task);
4700 	sw_stat->soft_reset_cnt++;
4701 }
4702 
4703 /**
4704  *  s2io_isr - ISR handler of the device.
4705  *  @irq: the irq of the device.
4706  *  @dev_id: a void pointer to the dev structure of the NIC.
4707  *  Description:  This function is the ISR handler of the device. It
4708  *  identifies the reason for the interrupt and calls the relevant
4709  *  service routines. As a contingency measure, this ISR allocates the
4710  *  recv buffers, if their numbers are below the panic value which is
4711  *  presently set to 25% of the original number of rcv buffers allocated.
4712  *  Return value:
4713  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4714  *   IRQ_NONE: will be returned if interrupt is not from our device
4715  */
4716 static irqreturn_t s2io_isr(int irq, void *dev_id)
4717 {
4718 	struct net_device *dev = (struct net_device *)dev_id;
4719 	struct s2io_nic *sp = netdev_priv(dev);
4720 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4721 	int i;
4722 	u64 reason = 0;
4723 	struct mac_info *mac_control;
4724 	struct config_param *config;
4725 
4726 	/* Pretend we handled any IRQs from a disconnected card */
4727 	if (pci_channel_offline(sp->pdev))
4728 		return IRQ_NONE;
4729 
4730 	if (!is_s2io_card_up(sp))
4731 		return IRQ_NONE;
4732 
4733 	config = &sp->config;
4734 	mac_control = &sp->mac_control;
4735 
4736 	/*
4737 	 * Identify the cause for interrupt and call the appropriate
4738 	 * interrupt handler. Causes for the interrupt could be:
4739 	 * 1. Rx of packet.
4740 	 * 2. Tx complete.
4741 	 * 3. Link down.
4742 	 */
4743 	reason = readq(&bar0->general_int_status);
4744 
4745 	if (unlikely(reason == S2IO_MINUS_ONE))
4746 		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4747 
4748 	if (reason &
4749 	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4750 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4751 
4752 		if (config->napi) {
4753 			if (reason & GEN_INTR_RXTRAFFIC) {
4754 				napi_schedule(&sp->napi);
4755 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4756 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4757 				readl(&bar0->rx_traffic_int);
4758 			}
4759 		} else {
4760 			/*
4761 			 * rx_traffic_int reg is an R1 register, writing all 1's
4762 			 * will ensure that the actual interrupt causing bit
4763 			 * gets cleared and hence a read can be avoided.
4764 			 */
4765 			if (reason & GEN_INTR_RXTRAFFIC)
4766 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4767 
4768 			for (i = 0; i < config->rx_ring_num; i++) {
4769 				struct ring_info *ring = &mac_control->rings[i];
4770 
4771 				rx_intr_handler(ring, 0);
4772 			}
4773 		}
4774 
4775 		/*
4776 		 * tx_traffic_int reg is an R1 register, writing all 1's
4777 		 * will ensure that the actual interrupt causing bit gets
4778 		 * cleared and hence a read can be avoided.
4779 		 */
4780 		if (reason & GEN_INTR_TXTRAFFIC)
4781 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4782 
4783 		for (i = 0; i < config->tx_fifo_num; i++)
4784 			tx_intr_handler(&mac_control->fifos[i]);
4785 
4786 		if (reason & GEN_INTR_TXPIC)
4787 			s2io_txpic_intr_handle(sp);
4788 
4789 		/*
4790 		 * Reallocate the buffers from the interrupt handler itself.
4791 		 */
4792 		if (!config->napi) {
4793 			for (i = 0; i < config->rx_ring_num; i++) {
4794 				struct ring_info *ring = &mac_control->rings[i];
4795 
4796 				s2io_chk_rx_buffers(sp, ring);
4797 			}
4798 		}
4799 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4800 		readl(&bar0->general_int_status);
4801 
4802 		return IRQ_HANDLED;
4803 
4804 	} else if (!reason) {
4805 		/* The interrupt was not raised by us */
4806 		return IRQ_NONE;
4807 	}
4808 
4809 	return IRQ_HANDLED;
4810 }
4811 
4812 /**
4813  * s2io_updt_stats - requests a one-shot update of the adapter statistics block and briefly polls for its completion.
4814  */
4815 static void s2io_updt_stats(struct s2io_nic *sp)
4816 {
4817 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4818 	u64 val64;
4819 	int cnt = 0;
4820 
4821 	if (is_s2io_card_up(sp)) {
4822 		/* Apprx 30us on a 133 MHz bus */
4823 		val64 = SET_UPDT_CLICKS(10) |
4824 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4825 		writeq(val64, &bar0->stat_cfg);
4826 		do {
4827 			udelay(100);
4828 			val64 = readq(&bar0->stat_cfg);
4829 			if (!(val64 & s2BIT(0)))
4830 				break;
4831 			cnt++;
4832 			if (cnt == 5)
4833 				break; /* Updt failed */
4834 		} while (1);
4835 	}
4836 }
4837 
4838 /**
4839  *  s2io_get_stats - Updates the device statistics structure.
4840  *  @dev : pointer to the device structure.
4841  *  Description:
4842  *  This function updates the device statistics structure in the s2io_nic
4843  *  structure and returns a pointer to the same.
4844  *  Return value:
4845  *  pointer to the updated net_device_stats structure.
4846  */
4847 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4848 {
4849 	struct s2io_nic *sp = netdev_priv(dev);
4850 	struct mac_info *mac_control = &sp->mac_control;
4851 	struct stat_block *stats = mac_control->stats_info;
4852 	u64 delta;
4853 
4854 	/* Configure Stats for immediate updt */
4855 	s2io_updt_stats(sp);
4856 
4857 	/* A device reset will cause the on-adapter statistics to be zero'ed.
4858 	 * This can be done while running by changing the MTU.  To prevent the
4859 	 * system from having the stats zero'ed, the driver keeps a copy of the
4860 	 * last update to the system (which is also zero'ed on reset).  This
4861 	 * enables the driver to accurately know the delta between the last
4862 	 * update and the current update.
4863 	 */
4864 	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4865 		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4866 	sp->stats.rx_packets += delta;
4867 	dev->stats.rx_packets += delta;
4868 
4869 	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4870 		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4871 	sp->stats.tx_packets += delta;
4872 	dev->stats.tx_packets += delta;
4873 
4874 	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4875 		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4876 	sp->stats.rx_bytes += delta;
4877 	dev->stats.rx_bytes += delta;
4878 
4879 	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4880 		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4881 	sp->stats.tx_bytes += delta;
4882 	dev->stats.tx_bytes += delta;
4883 
4884 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4885 	sp->stats.rx_errors += delta;
4886 	dev->stats.rx_errors += delta;
4887 
4888 	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4889 		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4890 	sp->stats.tx_errors += delta;
4891 	dev->stats.tx_errors += delta;
4892 
4893 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4894 	sp->stats.rx_dropped += delta;
4895 	dev->stats.rx_dropped += delta;
4896 
4897 	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4898 	sp->stats.tx_dropped += delta;
4899 	dev->stats.tx_dropped += delta;
4900 
4901 	/* The adapter MAC interprets pause frames as multicast packets, but
4902 	 * does not pass them up.  This erroneously increases the multicast
4903 	 * packet count and needs to be deducted when the multicast frame count
4904 	 * is queried.
4905 	 */
4906 	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4907 		le32_to_cpu(stats->rmac_vld_mcst_frms);
4908 	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4909 	delta -= sp->stats.multicast;
4910 	sp->stats.multicast += delta;
4911 	dev->stats.multicast += delta;
4912 
4913 	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4914 		le32_to_cpu(stats->rmac_usized_frms)) +
4915 		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4916 	sp->stats.rx_length_errors += delta;
4917 	dev->stats.rx_length_errors += delta;
4918 
4919 	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4920 	sp->stats.rx_crc_errors += delta;
4921 	dev->stats.rx_crc_errors += delta;
4922 
4923 	return &dev->stats;
4924 }
4925 
4926 /**
4927  *  s2io_set_multicast - entry point for multicast address enable/disable.
4928  *  @dev : pointer to the device structure
4929  *  Description:
4930  *  This function is a driver entry point which gets called by the kernel
4931  *  whenever multicast addresses must be enabled/disabled. This also gets
4932  *  called to set/reset promiscuous mode. Depending on the device flags, we
4933  *  determine whether multicast addresses must be enabled or promiscuous
4934  *  mode is to be disabled, etc.
4935  *  Return value:
4936  *  void.
4937  */
4938 
4939 static void s2io_set_multicast(struct net_device *dev)
4940 {
4941 	int i, j, prev_cnt;
4942 	struct netdev_hw_addr *ha;
4943 	struct s2io_nic *sp = netdev_priv(dev);
4944 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4945 	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4946 		0xfeffffffffffULL;
4947 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4948 	void __iomem *add;
4949 	struct config_param *config = &sp->config;
4950 
4951 	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4952 		/*  Enable all Multicast addresses */
4953 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4954 		       &bar0->rmac_addr_data0_mem);
4955 		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4956 		       &bar0->rmac_addr_data1_mem);
4957 		val64 = RMAC_ADDR_CMD_MEM_WE |
4958 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4959 			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4960 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4961 		/* Wait till command completes */
4962 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4963 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4964 				      S2IO_BIT_RESET);
4965 
4966 		sp->m_cast_flg = 1;
4967 		sp->all_multi_pos = config->max_mc_addr - 1;
4968 	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4969 		/*  Disable all Multicast addresses */
4970 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4971 		       &bar0->rmac_addr_data0_mem);
4972 		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4973 		       &bar0->rmac_addr_data1_mem);
4974 		val64 = RMAC_ADDR_CMD_MEM_WE |
4975 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4976 			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4977 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4978 		/* Wait till command completes */
4979 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4980 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4981 				      S2IO_BIT_RESET);
4982 
4983 		sp->m_cast_flg = 0;
4984 		sp->all_multi_pos = 0;
4985 	}
4986 
4987 	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4988 		/*  Put the NIC into promiscuous mode */
4989 		add = &bar0->mac_cfg;
4990 		val64 = readq(&bar0->mac_cfg);
4991 		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4992 
4993 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4994 		writel((u32)val64, add);
4995 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4996 		writel((u32) (val64 >> 32), (add + 4));
4997 
4998 		if (vlan_tag_strip != 1) {
4999 			val64 = readq(&bar0->rx_pa_cfg);
5000 			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5001 			writeq(val64, &bar0->rx_pa_cfg);
5002 			sp->vlan_strip_flag = 0;
5003 		}
5004 
5005 		val64 = readq(&bar0->mac_cfg);
5006 		sp->promisc_flg = 1;
5007 		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5008 			  dev->name);
5009 	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5010 		/*  Remove the NIC from promiscuous mode */
5011 		add = &bar0->mac_cfg;
5012 		val64 = readq(&bar0->mac_cfg);
5013 		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5014 
5015 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5016 		writel((u32)val64, add);
5017 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5018 		writel((u32) (val64 >> 32), (add + 4));
5019 
5020 		if (vlan_tag_strip != 0) {
5021 			val64 = readq(&bar0->rx_pa_cfg);
5022 			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5023 			writeq(val64, &bar0->rx_pa_cfg);
5024 			sp->vlan_strip_flag = 1;
5025 		}
5026 
5027 		val64 = readq(&bar0->mac_cfg);
5028 		sp->promisc_flg = 0;
5029 		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
5030 	}
5031 
5032 	/*  Update individual M_CAST address list */
5033 	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
5034 		if (netdev_mc_count(dev) >
5035 		    (config->max_mc_addr - config->max_mac_addr)) {
5036 			DBG_PRINT(ERR_DBG,
5037 				  "%s: No more Rx filters can be added - "
5038 				  "please enable ALL_MULTI instead\n",
5039 				  dev->name);
5040 			return;
5041 		}
5042 
5043 		prev_cnt = sp->mc_addr_count;
5044 		sp->mc_addr_count = netdev_mc_count(dev);
5045 
5046 		/* Clear out the previous list of Mc in the H/W. */
5047 		for (i = 0; i < prev_cnt; i++) {
5048 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5049 			       &bar0->rmac_addr_data0_mem);
5050 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5051 			       &bar0->rmac_addr_data1_mem);
5052 			val64 = RMAC_ADDR_CMD_MEM_WE |
5053 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5054 				RMAC_ADDR_CMD_MEM_OFFSET
5055 				(config->mc_start_offset + i);
5056 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5057 
5058 			/* Wait for the command to complete */
5059 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5060 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5061 						  S2IO_BIT_RESET)) {
5062 				DBG_PRINT(ERR_DBG,
5063 					  "%s: Adding Multicasts failed\n",
5064 					  dev->name);
5065 				return;
5066 			}
5067 		}
5068 
5069 		/* Create the new Rx filter list and update the same in H/W. */
5070 		i = 0;
5071 		netdev_for_each_mc_addr(ha, dev) {
5072 			mac_addr = 0;
5073 			for (j = 0; j < ETH_ALEN; j++) {
5074 				mac_addr |= ha->addr[j];
5075 				mac_addr <<= 8;
5076 			}
5077 			mac_addr >>= 8;
5078 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5079 			       &bar0->rmac_addr_data0_mem);
5080 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5081 			       &bar0->rmac_addr_data1_mem);
5082 			val64 = RMAC_ADDR_CMD_MEM_WE |
5083 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5084 				RMAC_ADDR_CMD_MEM_OFFSET
5085 				(i + config->mc_start_offset);
5086 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5087 
5088 			/* Wait for the command to complete */
5089 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5090 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5091 						  S2IO_BIT_RESET)) {
5092 				DBG_PRINT(ERR_DBG,
5093 					  "%s: Adding Multicasts failed\n",
5094 					  dev->name);
5095 				return;
5096 			}
5097 			i++;
5098 		}
5099 	}
5100 }
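
/*
 * Illustrative example: for a multicast address such as
 * 01:00:5e:00:00:01, the shift/OR loop above packs the six bytes into
 * the 48-bit value 0x01005e000001; RMAC_ADDR_DATA0_MEM_ADDR() then
 * places it in rmac_addr_data0_mem, and the strobed command commits it
 * to the CAM slot at (mc_start_offset + i).
 */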
5101 
5102 /* read unicast & multicast addresses from the CAM and store them in
5103  * def_mac_addr structure
5104  */
5105 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5106 {
5107 	int offset;
5108 	u64 mac_addr = 0x0;
5109 	struct config_param *config = &sp->config;
5110 
5111 	/* store unicast & multicast mac addresses */
5112 	for (offset = 0; offset < config->max_mc_addr; offset++) {
5113 		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5114 		/* if read fails disable the entry */
5115 		if (mac_addr == FAILURE)
5116 			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5117 		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5118 	}
5119 }
5120 
5121 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5122 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5123 {
5124 	int offset;
5125 	struct config_param *config = &sp->config;
5126 	/* restore unicast mac address */
5127 	for (offset = 0; offset < config->max_mac_addr; offset++)
5128 		do_s2io_prog_unicast(sp->dev,
5129 				     sp->def_mac_addr[offset].mac_addr);
5130 
5131 	/* restore multicast mac address */
5132 	for (offset = config->mc_start_offset;
5133 	     offset < config->max_mc_addr; offset++)
5134 		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5135 }
5136 
5137 /* add a multicast MAC address to CAM */
5138 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5139 {
5140 	int i;
5141 	u64 mac_addr = 0;
5142 	struct config_param *config = &sp->config;
5143 
5144 	for (i = 0; i < ETH_ALEN; i++) {
5145 		mac_addr <<= 8;
5146 		mac_addr |= addr[i];
5147 	}
5148 	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5149 		return SUCCESS;
5150 
5151 	/* check if the multicast mac is already present in CAM */
5152 	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5153 		u64 tmp64;
5154 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5155 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5156 			break;
5157 
5158 		if (tmp64 == mac_addr)
5159 			return SUCCESS;
5160 	}
5161 	if (i == config->max_mc_addr) {
5162 		DBG_PRINT(ERR_DBG,
5163 			  "CAM full no space left for multicast MAC\n");
5164 		return FAILURE;
5165 	}
5166 	/* Update the internal structure with this new mac address */
5167 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5168 
5169 	return do_s2io_add_mac(sp, mac_addr, i);
5170 }
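
/*
 * CAM layout assumed by the lookups above: offsets 0..max_mac_addr-1
 * hold unicast entries, offsets mc_start_offset..max_mc_addr-1 hold
 * multicast entries, and a slot that reads back as
 * S2IO_DISABLE_MAC_ENTRY (all ones) is treated as free.
 */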
5171 
5172 /* add MAC address to CAM */
5173 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5174 {
5175 	u64 val64;
5176 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5177 
5178 	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5179 	       &bar0->rmac_addr_data0_mem);
5180 
5181 	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5182 		RMAC_ADDR_CMD_MEM_OFFSET(off);
5183 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5184 
5185 	/* Wait till command completes */
5186 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5187 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5188 				  S2IO_BIT_RESET)) {
5189 		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5190 		return FAILURE;
5191 	}
5192 	return SUCCESS;
5193 }
5194 /* deletes a specified unicast/multicast mac entry from CAM */
5195 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5196 {
5197 	int offset;
5198 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5199 	struct config_param *config = &sp->config;
5200 
5201 	for (offset = 1;
5202 	     offset < config->max_mc_addr; offset++) {
5203 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5204 		if (tmp64 == addr) {
5205 			/* disable the entry by writing 0xffffffffffffULL */
5206 			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5207 				return FAILURE;
5208 			/* store the new mac list from CAM */
5209 			do_s2io_store_unicast_mc(sp);
5210 			return SUCCESS;
5211 		}
5212 	}
5213 	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5214 		  (unsigned long long)addr);
5215 	return FAILURE;
5216 }
5217 
5218 /* read mac entries from CAM */
5219 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5220 {
5221 	u64 tmp64 = 0xffffffffffff0000ULL, val64;
5222 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5223 
5224 	/* read mac addr */
5225 	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5226 		RMAC_ADDR_CMD_MEM_OFFSET(offset);
5227 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5228 
5229 	/* Wait till command completes */
5230 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5231 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5232 				  S2IO_BIT_RESET)) {
5233 		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5234 		return FAILURE;
5235 	}
5236 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
5237 
5238 	return tmp64 >> 16;
5239 }
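
/*
 * The ">> 16" above mirrors the write side: RMAC_ADDR_DATA0_MEM_ADDR()
 * left-justifies the 48-bit MAC in the 64-bit data0 register, so the
 * shift recovers the plain 48-bit address (note the 0xffffffffffff0000
 * initializer, which is the disabled-entry pattern in that format).
 */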
5240 
5241 /**
5242  * s2io_set_mac_addr - driver entry point to set the device MAC address
5243  */
5244 
5245 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5246 {
5247 	struct sockaddr *addr = p;
5248 
5249 	if (!is_valid_ether_addr(addr->sa_data))
5250 		return -EADDRNOTAVAIL;
5251 
5252 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5253 
5254 	/* store the MAC address in CAM */
5255 	return do_s2io_prog_unicast(dev, dev->dev_addr);
5256 }
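
/*
 * s2io_set_mac_addr() is the driver's ndo_set_mac_address hook, so it
 * is typically reached from user space via, for example,
 * "ip link set dev ethX address 00:04:5a:01:02:03" (ethX being a
 * placeholder interface name).
 */
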
5257 /**
5258  *  do_s2io_prog_unicast - Programs the Xframe mac address
5259  *  @dev : pointer to the device structure.
5260  *  @addr: a uchar pointer to the new mac address which is to be set.
5261  *  Description : This procedure will program the Xframe to receive
5262  *  frames with new Mac Address
5263  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5264  *  as defined in errno.h file on failure.
5265  */
5266 
5267 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5268 {
5269 	struct s2io_nic *sp = netdev_priv(dev);
5270 	register u64 mac_addr = 0, perm_addr = 0;
5271 	int i;
5272 	u64 tmp64;
5273 	struct config_param *config = &sp->config;
5274 
5275 	/*
5276 	 * Set the new MAC address as the new unicast filter and reflect this
5277 	 * change on the device address registered with the OS. It will be
5278 	 * at offset 0.
5279 	 */
5280 	for (i = 0; i < ETH_ALEN; i++) {
5281 		mac_addr <<= 8;
5282 		mac_addr |= addr[i];
5283 		perm_addr <<= 8;
5284 		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5285 	}
5286 
5287 	/* check if the dev_addr is different from the perm_addr */
5288 	if (mac_addr == perm_addr)
5289 		return SUCCESS;
5290 
5291 	/* check if the mac is already present in CAM */
5292 	for (i = 1; i < config->max_mac_addr; i++) {
5293 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5294 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5295 			break;
5296 
5297 		if (tmp64 == mac_addr) {
5298 			DBG_PRINT(INFO_DBG,
5299 				  "MAC addr:0x%llx already present in CAM\n",
5300 				  (unsigned long long)mac_addr);
5301 			return SUCCESS;
5302 		}
5303 	}
5304 	if (i == config->max_mac_addr) {
5305 		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5306 		return FAILURE;
5307 	}
5308 	/* Update the internal structure with this new mac address */
5309 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5310 
5311 	return do_s2io_add_mac(sp, mac_addr, i);
5312 }
5313 
5314 /**
5315  * s2io_ethtool_sset - Sets different link parameters.
5316  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5317  * @info: pointer to the structure with parameters given by ethtool to set
5318  * link information.
5319  * Description:
5320  * The function sets different link parameters provided by the user onto
5321  * the NIC.
5322  * Return value:
5323  * 0 on success.
5324  */
5325 
5326 static int s2io_ethtool_sset(struct net_device *dev,
5327 			     struct ethtool_cmd *info)
5328 {
5329 	struct s2io_nic *sp = netdev_priv(dev);
5330 	if ((info->autoneg == AUTONEG_ENABLE) ||
5331 	    (ethtool_cmd_speed(info) != SPEED_10000) ||
5332 	    (info->duplex != DUPLEX_FULL))
5333 		return -EINVAL;
5334 	else {
5335 		s2io_close(sp->dev);
5336 		s2io_open(sp->dev);
5337 	}
5338 
5339 	return 0;
5340 }
5341 
5342 /**
5343  * s2io_ethtool_gset - Return link specific information.
5344  * @sp : private member of the device structure, pointer to the
5345  *      s2io_nic structure.
5346  * @info : pointer to the structure with parameters given by ethtool
5347  * to return link information.
5348  * Description:
5349  * Returns link specific information like speed, duplex etc.. to ethtool.
5350  * Return value :
5351  * return 0 on success.
5352  */
5353 
5354 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5355 {
5356 	struct s2io_nic *sp = netdev_priv(dev);
5357 	info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5358 	info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
5359 	info->port = PORT_FIBRE;
5360 
5361 	/* info->transceiver */
5362 	info->transceiver = XCVR_EXTERNAL;
5363 
5364 	if (netif_carrier_ok(sp->dev)) {
5365 		ethtool_cmd_speed_set(info, SPEED_10000);
5366 		info->duplex = DUPLEX_FULL;
5367 	} else {
5368 		ethtool_cmd_speed_set(info, -1);
5369 		info->duplex = -1;
5370 	}
5371 
5372 	info->autoneg = AUTONEG_DISABLE;
5373 	return 0;
5374 }
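
/*
 * Usage note: these are the values reported by a plain "ethtool ethX".
 * The hardware is fixed at 10GbE full duplex with autonegotiation off,
 * so speed and duplex are meaningful only while the carrier is up.
 */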
5375 
5376 /**
5377  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5378  * @sp : private member of the device structure, which is a pointer to the
5379  * s2io_nic structure.
5380  * @info : pointer to the structure with parameters given by ethtool to
5381  * return driver information.
5382  * Description:
5383  * Returns driver specific information like name, version, etc. to ethtool.
5384  * Return value:
5385  *  void
5386  */
5387 
5388 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5389 				  struct ethtool_drvinfo *info)
5390 {
5391 	struct s2io_nic *sp = netdev_priv(dev);
5392 
5393 	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5394 	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5395 	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5396 	info->regdump_len = XENA_REG_SPACE;
5397 	info->eedump_len = XENA_EEPROM_SPACE;
5398 }
5399 
5400 /**
5401  *  s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
5402  *  @sp: private member of the device structure, which is a pointer to the
5403  *  s2io_nic structure.
5404  *  @regs : pointer to the structure with parameters given by ethtool for
5405  *  dumping the registers.
5406  *  @reg_space: The input argument into which all the registers are dumped.
5407  *  Description:
5408  *  Dumps the entire register space of xFrame NIC into the user given
5409  *  buffer area.
5410  * Return value:
5411  * void.
5412  */
5413 
5414 static void s2io_ethtool_gregs(struct net_device *dev,
5415 			       struct ethtool_regs *regs, void *space)
5416 {
5417 	int i;
5418 	u64 reg;
5419 	u8 *reg_space = (u8 *)space;
5420 	struct s2io_nic *sp = netdev_priv(dev);
5421 
5422 	regs->len = XENA_REG_SPACE;
5423 	regs->version = sp->pdev->subsystem_device;
5424 
5425 	for (i = 0; i < regs->len; i += 8) {
5426 		reg = readq(sp->bar0 + i);
5427 		memcpy((reg_space + i), &reg, 8);
5428 	}
5429 }
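
/*
 * Usage note: the dump can be fetched from user space with, for
 * example, "ethtool -d ethX raw on > regs.bin", yielding
 * XENA_REG_SPACE bytes read as 64-bit words from BAR0.
 */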
5430 
5431 /*
5432  *  s2io_set_led - control NIC led
5433  */
5434 static void s2io_set_led(struct s2io_nic *sp, bool on)
5435 {
5436 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5437 	u16 subid = sp->pdev->subsystem_device;
5438 	u64 val64;
5439 
5440 	if ((sp->device_type == XFRAME_II_DEVICE) ||
5441 	    ((subid & 0xFF) >= 0x07)) {
5442 		val64 = readq(&bar0->gpio_control);
5443 		if (on)
5444 			val64 |= GPIO_CTRL_GPIO_0;
5445 		else
5446 			val64 &= ~GPIO_CTRL_GPIO_0;
5447 
5448 		writeq(val64, &bar0->gpio_control);
5449 	} else {
5450 		val64 = readq(&bar0->adapter_control);
5451 		if (on)
5452 			val64 |= ADAPTER_LED_ON;
5453 		else
5454 			val64 &= ~ADAPTER_LED_ON;
5455 
5456 		writeq(val64, &bar0->adapter_control);
5457 	}
5458 
5459 }
5460 
5461 /**
5462  * s2io_ethtool_set_led - To physically identify the nic on the system.
5463  * @dev : network device
5464  * @state: led setting
5465  *
5466  * Description: Used to physically identify the NIC on the system.
5467  * The Link LED will blink for a time specified by the user for
5468  * identification.
5469  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5470  * identification is possible only if its link is up.
5471  */
5472 
5473 static int s2io_ethtool_set_led(struct net_device *dev,
5474 				enum ethtool_phys_id_state state)
5475 {
5476 	struct s2io_nic *sp = netdev_priv(dev);
5477 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5478 	u16 subid = sp->pdev->subsystem_device;
5479 
5480 	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5481 		u64 val64 = readq(&bar0->adapter_control);
5482 		if (!(val64 & ADAPTER_CNTL_EN)) {
5483 			pr_err("Adapter Link down, cannot blink LED\n");
5484 			return -EAGAIN;
5485 		}
5486 	}
5487 
5488 	switch (state) {
5489 	case ETHTOOL_ID_ACTIVE:
5490 		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5491 		return 1;	/* cycle on/off once per second */
5492 
5493 	case ETHTOOL_ID_ON:
5494 		s2io_set_led(sp, true);
5495 		break;
5496 
5497 	case ETHTOOL_ID_OFF:
5498 		s2io_set_led(sp, false);
5499 		break;
5500 
5501 	case ETHTOOL_ID_INACTIVE:
5502 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5503 			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5504 	}
5505 
5506 	return 0;
5507 }
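
/*
 * Usage note: this is the set_phys_id hook, so "ethtool -p ethX 5"
 * blinks the link LED for roughly 5 seconds; returning 1 from the
 * ETHTOOL_ID_ACTIVE case asks the core to cycle ON/OFF once a second.
 */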
5508 
5509 static void s2io_ethtool_gringparam(struct net_device *dev,
5510 				    struct ethtool_ringparam *ering)
5511 {
5512 	struct s2io_nic *sp = netdev_priv(dev);
5513 	int i, tx_desc_count = 0, rx_desc_count = 0;
5514 
5515 	if (sp->rxd_mode == RXD_MODE_1) {
5516 		ering->rx_max_pending = MAX_RX_DESC_1;
5517 		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5518 	} else {
5519 		ering->rx_max_pending = MAX_RX_DESC_2;
5520 		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5521 	}
5522 
5523 	ering->tx_max_pending = MAX_TX_DESC;
5524 
5525 	for (i = 0; i < sp->config.rx_ring_num; i++)
5526 		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5527 	ering->rx_pending = rx_desc_count;
5528 	ering->rx_jumbo_pending = rx_desc_count;
5529 
5530 	for (i = 0; i < sp->config.tx_fifo_num; i++)
5531 		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5532 	ering->tx_pending = tx_desc_count;
5533 	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5534 }
5535 
5536 /**
5537  * s2io_ethtool_getpause_data - Pause frame generation and reception.
5538  * @sp : private member of the device structure, which is a pointer to the
5539  *	s2io_nic structure.
5540  * @ep : pointer to the structure with pause parameters given by ethtool.
5541  * Description:
5542  * Returns the Pause frame generation and reception capability of the NIC.
5543  * Return value:
5544  *  void
5545  */
5546 static void s2io_ethtool_getpause_data(struct net_device *dev,
5547 				       struct ethtool_pauseparam *ep)
5548 {
5549 	u64 val64;
5550 	struct s2io_nic *sp = netdev_priv(dev);
5551 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5552 
5553 	val64 = readq(&bar0->rmac_pause_cfg);
5554 	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5555 		ep->tx_pause = true;
5556 	if (val64 & RMAC_PAUSE_RX_ENABLE)
5557 		ep->rx_pause = true;
5558 	ep->autoneg = false;
5559 }
5560 
5561 /**
5562  * s2io_ethtool_setpause_data - set/reset pause frame generation.
5563  * @sp : private member of the device structure, which is a pointer to the
5564  *      s2io_nic structure.
5565  * @ep : pointer to the structure with pause parameters given by ethtool.
5566  * Description:
5567  * It can be used to set or reset Pause frame generation or reception
5568  * support of the NIC.
5569  * Return value:
5570  * int, returns 0 on Success
5571  */
5572 
5573 static int s2io_ethtool_setpause_data(struct net_device *dev,
5574 				      struct ethtool_pauseparam *ep)
5575 {
5576 	u64 val64;
5577 	struct s2io_nic *sp = netdev_priv(dev);
5578 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5579 
5580 	val64 = readq(&bar0->rmac_pause_cfg);
5581 	if (ep->tx_pause)
5582 		val64 |= RMAC_PAUSE_GEN_ENABLE;
5583 	else
5584 		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5585 	if (ep->rx_pause)
5586 		val64 |= RMAC_PAUSE_RX_ENABLE;
5587 	else
5588 		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5589 	writeq(val64, &bar0->rmac_pause_cfg);
5590 	return 0;
5591 }
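
/*
 * Usage note: these settings correspond to "ethtool -A ethX rx on
 * tx off" and similar invocations; pause autonegotiation is not
 * supported, matching the ep->autoneg = false reported by the getter.
 */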
5592 
5593 /**
5594  * read_eeprom - reads 4 bytes of data from a user-given offset.
5595  * @sp : private member of the device structure, which is a pointer to the
5596  *      s2io_nic structure.
5597  * @off : offset from which the data is to be read
5598  * @data : an output parameter where the data read at the given
5599  *	offset is stored.
5600  * Description:
5601  * Will read 4 bytes of data from the user given offset and return the
5602  * read data.
5603  * NOTE: Only the part of the EEPROM visible through the I2C bus can
5604  *   be read.
5605  * Return value:
5606  *  -1 on failure and 0 on success.
5607  */
5608 
5609 #define S2IO_DEV_ID		5
5610 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5611 {
5612 	int ret = -1;
5613 	u32 exit_cnt = 0;
5614 	u64 val64;
5615 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5616 
5617 	if (sp->device_type == XFRAME_I_DEVICE) {
5618 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5619 			I2C_CONTROL_ADDR(off) |
5620 			I2C_CONTROL_BYTE_CNT(0x3) |
5621 			I2C_CONTROL_READ |
5622 			I2C_CONTROL_CNTL_START;
5623 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5624 
5625 		while (exit_cnt < 5) {
5626 			val64 = readq(&bar0->i2c_control);
5627 			if (I2C_CONTROL_CNTL_END(val64)) {
5628 				*data = I2C_CONTROL_GET_DATA(val64);
5629 				ret = 0;
5630 				break;
5631 			}
5632 			msleep(50);
5633 			exit_cnt++;
5634 		}
5635 	}
5636 
5637 	if (sp->device_type == XFRAME_II_DEVICE) {
5638 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5639 			SPI_CONTROL_BYTECNT(0x3) |
5640 			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5641 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5642 		val64 |= SPI_CONTROL_REQ;
5643 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5644 		while (exit_cnt < 5) {
5645 			val64 = readq(&bar0->spi_control);
5646 			if (val64 & SPI_CONTROL_NACK) {
5647 				ret = 1;
5648 				break;
5649 			} else if (val64 & SPI_CONTROL_DONE) {
5650 				*data = readq(&bar0->spi_data);
5651 				*data &= 0xffffff;
5652 				ret = 0;
5653 				break;
5654 			}
5655 			msleep(50);
5656 			exit_cnt++;
5657 		}
5658 	}
5659 	return ret;
5660 }
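
/*
 * Both the I2C (Xframe I) and SPI (Xframe II) paths above poll for
 * completion at most 5 times with 50 ms sleeps, i.e. roughly a 250 ms
 * budget before the read is abandoned with a nonzero return value.
 */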
5661 
5662 /**
5663  *  write_eeprom - actually writes the relevant part of the data value.
5664  *  @sp : private member of the device structure, which is a pointer to the
5665  *       s2io_nic structure.
5666  *  @off : offset at which the data must be written
5667  *  @data : The data that is to be written
5668  *  @cnt : Number of bytes of the data that are actually to be written into
5669  *  the Eeprom. (max of 3)
5670  * Description:
5671  *  Actually writes the relevant part of the data value into the Eeprom
5672  *  through the I2C bus.
5673  * Return value:
5674  *  0 on success, -1 on failure.
5675  */
5676 
5677 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5678 {
5679 	int exit_cnt = 0, ret = -1;
5680 	u64 val64;
5681 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5682 
5683 	if (sp->device_type == XFRAME_I_DEVICE) {
5684 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5685 			I2C_CONTROL_ADDR(off) |
5686 			I2C_CONTROL_BYTE_CNT(cnt) |
5687 			I2C_CONTROL_SET_DATA((u32)data) |
5688 			I2C_CONTROL_CNTL_START;
5689 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5690 
5691 		while (exit_cnt < 5) {
5692 			val64 = readq(&bar0->i2c_control);
5693 			if (I2C_CONTROL_CNTL_END(val64)) {
5694 				if (!(val64 & I2C_CONTROL_NACK))
5695 					ret = 0;
5696 				break;
5697 			}
5698 			msleep(50);
5699 			exit_cnt++;
5700 		}
5701 	}
5702 
5703 	if (sp->device_type == XFRAME_II_DEVICE) {
5704 		int write_cnt = (cnt == 8) ? 0 : cnt;
5705 		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5706 
5707 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5708 			SPI_CONTROL_BYTECNT(write_cnt) |
5709 			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5710 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5711 		val64 |= SPI_CONTROL_REQ;
5712 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5713 		while (exit_cnt < 5) {
5714 			val64 = readq(&bar0->spi_control);
5715 			if (val64 & SPI_CONTROL_NACK) {
5716 				ret = 1;
5717 				break;
5718 			} else if (val64 & SPI_CONTROL_DONE) {
5719 				ret = 0;
5720 				break;
5721 			}
5722 			msleep(50);
5723 			exit_cnt++;
5724 		}
5725 	}
5726 	return ret;
5727 }

5728 static void s2io_vpd_read(struct s2io_nic *nic)
5729 {
5730 	u8 *vpd_data;
5731 	u8 data;
5732 	int i = 0, cnt, len, fail = 0;
5733 	int vpd_addr = 0x80;
5734 	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5735 
5736 	if (nic->device_type == XFRAME_II_DEVICE) {
5737 		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5738 		vpd_addr = 0x80;
5739 	} else {
5740 		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5741 		vpd_addr = 0x50;
5742 	}
5743 	strcpy(nic->serial_num, "NOT AVAILABLE");
5744 
5745 	vpd_data = kmalloc(256, GFP_KERNEL);
5746 	if (!vpd_data) {
5747 		swstats->mem_alloc_fail_cnt++;
5748 		return;
5749 	}
5750 	swstats->mem_allocated += 256;
5751 
5752 	for (i = 0; i < 256; i += 4) {
5753 		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5754 		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5755 		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5756 		for (cnt = 0; cnt < 5; cnt++) {
5757 			msleep(2);
5758 			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5759 			if (data == 0x80)
5760 				break;
5761 		}
5762 		if (cnt >= 5) {
5763 			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5764 			fail = 1;
5765 			break;
5766 		}
5767 		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5768 				      (u32 *)&vpd_data[i]);
5769 	}
5770 
5771 	if (!fail) {
5772 		/* read serial number of adapter */
5773 		for (cnt = 0; cnt < 252; cnt++) {
5774 			if ((vpd_data[cnt] == 'S') &&
5775 			    (vpd_data[cnt+1] == 'N')) {
5776 				len = vpd_data[cnt+2];
5777 				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5778 					memcpy(nic->serial_num,
5779 					       &vpd_data[cnt + 3],
5780 					       len);
5781 					memset(nic->serial_num+len,
5782 					       0,
5783 					       VPD_STRING_LEN-len);
5784 					break;
5785 				}
5786 			}
5787 		}
5788 	}
5789 
5790 	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5791 		len = vpd_data[1];
5792 		memcpy(nic->product_name, &vpd_data[3], len);
5793 		nic->product_name[len] = 0;
5794 	}
5795 	kfree(vpd_data);
5796 	swstats->mem_freed += 256;
5797 }
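
/*
 * The loop above follows the usual PCI VPD capability handshake: write
 * the VPD address (flag bit clear) at vpd_addr + 2, poll the flag byte
 * at vpd_addr + 3 until the hardware sets 0x80 to signal completion,
 * then read the four data bytes at vpd_addr + 4.
 */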
5798 
5799 /**
5800  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5801  *  @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5802  *  @eeprom : pointer to the user level structure provided by ethtool,
5803  *  containing all relevant information.
5804  *  @data_buf : buffer into which the values read from the EEPROM are stored.
5805  *  Description: Reads the values stored in the Eeprom at given offset
5806  *  for a given length. Stores these values in the input argument data
5807  *  buffer 'data_buf' and returns them to the caller (ethtool).
5808  *  Return value:
5809  *  int  0 on success
5810  */
5811 
5812 static int s2io_ethtool_geeprom(struct net_device *dev,
5813 				struct ethtool_eeprom *eeprom, u8 * data_buf)
5814 {
5815 	u32 i, valid;
5816 	u64 data;
5817 	struct s2io_nic *sp = netdev_priv(dev);
5818 
5819 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5820 
5821 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5822 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5823 
5824 	for (i = 0; i < eeprom->len; i += 4) {
5825 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5826 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5827 			return -EFAULT;
5828 		}
5829 		valid = INV(data);
5830 		memcpy((data_buf + i), &valid, 4);
5831 	}
5832 	return 0;
5833 }
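
/*
 * Usage note: user space reaches this via "ethtool -e ethX", e.g.
 * "ethtool -e ethX offset 0 length 16"; the contents are fetched
 * 4 bytes at a time through read_eeprom() above.
 */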
5834 
5835 /**
5836  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5837  *  @sp : private member of the device structure, which is a pointer to the
5838  *  s2io_nic structure.
5839  *  @eeprom : pointer to the user level structure provided by ethtool,
5840  *  containing all relevant information.
5841  *  @data_buf : user defined value to be written into Eeprom.
5842  *  Description:
5843  *  Tries to write the user provided value in the Eeprom, at the offset
5844  *  given by the user.
5845  *  Return value:
5846  *  0 on success, -EFAULT on failure.
5847  */
5848 
5849 static int s2io_ethtool_seeprom(struct net_device *dev,
5850 				struct ethtool_eeprom *eeprom,
5851 				u8 *data_buf)
5852 {
5853 	int len = eeprom->len, cnt = 0;
5854 	u64 valid = 0, data;
5855 	struct s2io_nic *sp = netdev_priv(dev);
5856 
5857 	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5858 		DBG_PRINT(ERR_DBG,
5859 			  "ETHTOOL_WRITE_EEPROM Err: "
5860 			  "Magic value is wrong, it should be 0x%x but is 0x%x\n",
5861 			  (sp->pdev->vendor | (sp->pdev->device << 16)),
5862 			  eeprom->magic);
5863 		return -EFAULT;
5864 	}
5865 
5866 	while (len) {
5867 		data = (u32)data_buf[cnt] & 0x000000FF;
5868 		if (data)
5869 			valid = (u32)(data << 24);
5870 		else
5871 			valid = data;
5872 
5873 		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5874 			DBG_PRINT(ERR_DBG,
5875 				  "ETHTOOL_WRITE_EEPROM Err: "
5876 				  "Cannot write into the specified offset\n");
5877 			return -EFAULT;
5878 		}
5879 		cnt++;
5880 		len--;
5881 	}
5882 
5883 	return 0;
5884 }
5885 
5886 /**
5887  * s2io_register_test - reads and writes into all clock domains.
5888  * @sp : private member of the device structure, which is a pointer to the
5889  * s2io_nic structure.
5890  * @data : variable that returns the result of each of the tests conducted
5891  * by the driver.
5892  * Description:
5893  * Read and write into all clock domains. The NIC has 3 clock domains,
5894  * so the test verifies that registers in all three regions are accessible.
5895  * Return value:
5896  * 0 on success.
5897  */
5898 
5899 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5900 {
5901 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5902 	u64 val64 = 0, exp_val;
5903 	int fail = 0;
5904 
5905 	val64 = readq(&bar0->pif_rd_swapper_fb);
5906 	if (val64 != 0x123456789abcdefULL) {
5907 		fail = 1;
5908 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5909 	}
5910 
5911 	val64 = readq(&bar0->rmac_pause_cfg);
5912 	if (val64 != 0xc000ffff00000000ULL) {
5913 		fail = 1;
5914 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5915 	}
5916 
5917 	val64 = readq(&bar0->rx_queue_cfg);
5918 	if (sp->device_type == XFRAME_II_DEVICE)
5919 		exp_val = 0x0404040404040404ULL;
5920 	else
5921 		exp_val = 0x0808080808080808ULL;
5922 	if (val64 != exp_val) {
5923 		fail = 1;
5924 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5925 	}
5926 
5927 	val64 = readq(&bar0->xgxs_efifo_cfg);
5928 	if (val64 != 0x000000001923141EULL) {
5929 		fail = 1;
5930 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5931 	}
5932 
5933 	val64 = 0x5A5A5A5A5A5A5A5AULL;
5934 	writeq(val64, &bar0->xmsi_data);
5935 	val64 = readq(&bar0->xmsi_data);
5936 	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5937 		fail = 1;
5938 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5939 	}
5940 
5941 	val64 = 0xA5A5A5A5A5A5A5A5ULL;
5942 	writeq(val64, &bar0->xmsi_data);
5943 	val64 = readq(&bar0->xmsi_data);
5944 	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5945 		fail = 1;
5946 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5947 	}
5948 
5949 	*data = fail;
5950 	return fail;
5951 }
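
/*
 * The constants checked above are fixed signature registers (e.g.
 * pif_rd_swapper_fb must read back 0x0123456789abcdef when the byte
 * swapper is configured correctly), while the 0x5A../0xA5.. patterns
 * written to xmsi_data exercise write access; together the accesses
 * cover the three clock domains mentioned in the comment block.
 */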
5952 
5953 /**
5954  * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
5955  * @sp : private member of the device structure, which is a pointer to the
5956  * s2io_nic structure.
5957  * @data:variable that returns the result of each of the test conducted by
5958  * the driver.
5959  * Description:
5960  * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
5961  * register.
5962  * Return value:
5963  * 0 on success.
5964  */
5965 
5966 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5967 {
5968 	int fail = 0;
5969 	u64 ret_data, org_4F0, org_7F0;
5970 	u8 saved_4F0 = 0, saved_7F0 = 0;
5971 	struct net_device *dev = sp->dev;
5972 
5973 	/* Test Write Error at offset 0 */
5974 	/* Note that SPI interface allows write access to all areas
5975 	 * of EEPROM. Hence doing all negative testing only for Xframe I.
5976 	 */
5977 	if (sp->device_type == XFRAME_I_DEVICE)
5978 		if (!write_eeprom(sp, 0, 0, 3))
5979 			fail = 1;
5980 
5981 	/* Save current values at offsets 0x4F0 and 0x7F0 */
5982 	if (!read_eeprom(sp, 0x4F0, &org_4F0))
5983 		saved_4F0 = 1;
5984 	if (!read_eeprom(sp, 0x7F0, &org_7F0))
5985 		saved_7F0 = 1;
5986 
5987 	/* Test Write at offset 4f0 */
5988 	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5989 		fail = 1;
5990 	if (read_eeprom(sp, 0x4F0, &ret_data))
5991 		fail = 1;
5992 
5993 	if (ret_data != 0x012345) {
5994 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5995 			  "Data written %llx Data read %llx\n",
5996 			  dev->name, (unsigned long long)0x12345,
5997 			  (unsigned long long)ret_data);
5998 		fail = 1;
5999 	}
6000 
6001 	/* Reset the EEPROM data to 0xFFFFFF */
6002 	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6003 
6004 	/* Test Write Request Error at offset 0x7c */
6005 	if (sp->device_type == XFRAME_I_DEVICE)
6006 		if (!write_eeprom(sp, 0x07C, 0, 3))
6007 			fail = 1;
6008 
6009 	/* Test Write Request at offset 0x7f0 */
6010 	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6011 		fail = 1;
6012 	if (read_eeprom(sp, 0x7F0, &ret_data))
6013 		fail = 1;
6014 
6015 	if (ret_data != 0x012345) {
6016 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6017 			  "Data written %llx Data read %llx\n",
6018 			  dev->name, (unsigned long long)0x12345,
6019 			  (unsigned long long)ret_data);
6020 		fail = 1;
6021 	}
6022 
6023 	/* Reset the EEPROM data to 0xFFFFFF */
6024 	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6025 
6026 	if (sp->device_type == XFRAME_I_DEVICE) {
6027 		/* Test Write Error at offset 0x80 */
6028 		if (!write_eeprom(sp, 0x080, 0, 3))
6029 			fail = 1;
6030 
6031 		/* Test Write Error at offset 0xfc */
6032 		if (!write_eeprom(sp, 0x0FC, 0, 3))
6033 			fail = 1;
6034 
6035 		/* Test Write Error at offset 0x100 */
6036 		if (!write_eeprom(sp, 0x100, 0, 3))
6037 			fail = 1;
6038 
6039 		/* Test Write Error at offset 4ec */
6040 		if (!write_eeprom(sp, 0x4EC, 0, 3))
6041 			fail = 1;
6042 	}
6043 
6044 	/* Restore values at offsets 0x4F0 and 0x7F0 */
6045 	if (saved_4F0)
6046 		write_eeprom(sp, 0x4F0, org_4F0, 3);
6047 	if (saved_7F0)
6048 		write_eeprom(sp, 0x7F0, org_7F0, 3);
6049 
6050 	*data = fail;
6051 	return fail;
6052 }
6053 
6054 /**
6055  * s2io_bist_test - invokes the MemBist test of the card.
6056  * @sp : private member of the device structure, which is a pointer to the
6057  * s2io_nic structure.
6058  * @data:variable that returns the result of each of the test conducted by
6059  * the driver.
6060  * Description:
6061  * This invokes the MemBist test of the card. We give around
6062  * 2 secs for the test to complete. If it's still not complete
6063  * within this period, we consider that the test failed.
6064  * Return value:
6065  * 0 on success and -1 on failure.
6066  */
6067 
6068 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6069 {
6070 	u8 bist = 0;
6071 	int cnt = 0, ret = -1;
6072 
6073 	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6074 	bist |= PCI_BIST_START;
6075 	pci_write_config_byte(sp->pdev, PCI_BIST, bist); /* PCI_BIST is a byte register */
6076 
6077 	while (cnt < 20) {
6078 		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6079 		if (!(bist & PCI_BIST_START)) {
6080 			*data = (bist & PCI_BIST_CODE_MASK);
6081 			ret = 0;
6082 			break;
6083 		}
6084 		msleep(100);
6085 		cnt++;
6086 	}
6087 
6088 	return ret;
6089 }
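
/*
 * PCI_BIST, PCI_BIST_START and PCI_BIST_CODE_MASK are the standard
 * PCI config-space definitions; the 20 x 100 ms polling loop gives the
 * BIST about 2 seconds to finish, and a nonzero completion code left
 * in *data indicates failure per the PCI specification.
 */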
6090 
6091 /**
6092  * s2io_link_test - verifies the link state of the nic
6093  * @sp : private member of the device structure, which is a pointer to the
6094  * s2io_nic structure.
6095  * @data: variable that returns the result of each of the test conducted by
6096  * the driver.
6097  * Description:
6098  * The function verifies the link state of the NIC and updates the input
6099  * argument 'data' appropriately.
6100  * Return value:
6101  * 0 on success.
6102  */
6103 
6104 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6105 {
6106 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6107 	u64 val64;
6108 
6109 	val64 = readq(&bar0->adapter_status);
6110 	if (!(LINK_IS_UP(val64)))
6111 		*data = 1;
6112 	else
6113 		*data = 0;
6114 
6115 	return *data;
6116 }
6117 
6118 /**
6119  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6120  * @sp : private member of the device structure, which is a pointer to the
6121  * s2io_nic structure.
6122  * @data : variable that returns the result of each of the tests
6123  * conducted by the driver.
6124  * Description:
6125  *  This is one of the offline tests; it checks the read and write
6126  *  access to the RldRam chip on the NIC.
6127  * Return value:
6128  *  0 on success.
6129  */
6130 
6131 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6132 {
6133 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6134 	u64 val64;
6135 	int cnt, iteration = 0, test_fail = 0;
6136 
6137 	val64 = readq(&bar0->adapter_control);
6138 	val64 &= ~ADAPTER_ECC_EN;
6139 	writeq(val64, &bar0->adapter_control);
6140 
6141 	val64 = readq(&bar0->mc_rldram_test_ctrl);
6142 	val64 |= MC_RLDRAM_TEST_MODE;
6143 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6144 
6145 	val64 = readq(&bar0->mc_rldram_mrs);
6146 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6147 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6148 
6149 	val64 |= MC_RLDRAM_MRS_ENABLE;
6150 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6151 
6152 	while (iteration < 2) {
6153 		val64 = 0x55555555aaaa0000ULL;
6154 		if (iteration == 1)
6155 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6156 		writeq(val64, &bar0->mc_rldram_test_d0);
6157 
6158 		val64 = 0xaaaa5a5555550000ULL;
6159 		if (iteration == 1)
6160 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6161 		writeq(val64, &bar0->mc_rldram_test_d1);
6162 
6163 		val64 = 0x55aaaaaaaa5a0000ULL;
6164 		if (iteration == 1)
6165 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6166 		writeq(val64, &bar0->mc_rldram_test_d2);
6167 
6168 		val64 = (u64) (0x0000003ffffe0100ULL);
6169 		writeq(val64, &bar0->mc_rldram_test_add);
6170 
6171 		val64 = MC_RLDRAM_TEST_MODE |
6172 			MC_RLDRAM_TEST_WRITE |
6173 			MC_RLDRAM_TEST_GO;
6174 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6175 
6176 		for (cnt = 0; cnt < 5; cnt++) {
6177 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6178 			if (val64 & MC_RLDRAM_TEST_DONE)
6179 				break;
6180 			msleep(200);
6181 		}
6182 
6183 		if (cnt == 5)
6184 			break;
6185 
6186 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6187 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6188 
6189 		for (cnt = 0; cnt < 5; cnt++) {
6190 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6191 			if (val64 & MC_RLDRAM_TEST_DONE)
6192 				break;
6193 			msleep(500);
6194 		}
6195 
6196 		if (cnt == 5)
6197 			break;
6198 
6199 		val64 = readq(&bar0->mc_rldram_test_ctrl);
6200 		if (!(val64 & MC_RLDRAM_TEST_PASS))
6201 			test_fail = 1;
6202 
6203 		iteration++;
6204 	}
6205 
6206 	*data = test_fail;
6207 
6208 	/* Bring the adapter out of test mode */
6209 	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6210 
6211 	return test_fail;
6212 }
6213 
6214 /**
6215  *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
6216  *  @sp : private member of the device structure, which is a pointer to the
6217  *  s2io_nic structure.
6218  *  @ethtest : pointer to a ethtool command specific structure that will be
6219  *  returned to the user.
6220  *  @data : variable that returns the result of each of the test
6221  * conducted by the driver.
6222  * Description:
6223  *  This function conducts 6 tests (4 offline and 2 online) to determine
6224  *  the health of the card.
6225  * Return value:
6226  *  void
6227  */
6228 
6229 static void s2io_ethtool_test(struct net_device *dev,
6230 			      struct ethtool_test *ethtest,
6231 			      uint64_t *data)
6232 {
6233 	struct s2io_nic *sp = netdev_priv(dev);
6234 	int orig_state = netif_running(sp->dev);
6235 
6236 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6237 		/* Offline Tests. */
6238 		if (orig_state)
6239 			s2io_close(sp->dev);
6240 
6241 		if (s2io_register_test(sp, &data[0]))
6242 			ethtest->flags |= ETH_TEST_FL_FAILED;
6243 
6244 		s2io_reset(sp);
6245 
6246 		if (s2io_rldram_test(sp, &data[3]))
6247 			ethtest->flags |= ETH_TEST_FL_FAILED;
6248 
6249 		s2io_reset(sp);
6250 
6251 		if (s2io_eeprom_test(sp, &data[1]))
6252 			ethtest->flags |= ETH_TEST_FL_FAILED;
6253 
6254 		if (s2io_bist_test(sp, &data[4]))
6255 			ethtest->flags |= ETH_TEST_FL_FAILED;
6256 
6257 		if (orig_state)
6258 			s2io_open(sp->dev);
6259 
6260 		data[2] = 0;
6261 	} else {
6262 		/* Online Tests. */
6263 		if (!orig_state) {
6264 			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6265 				  dev->name);
6266 			data[0] = -1;
6267 			data[1] = -1;
6268 			data[2] = -1;
6269 			data[3] = -1;
6270 			data[4] = -1;
6271 		}
6272 
6273 		if (s2io_link_test(sp, &data[2]))
6274 			ethtest->flags |= ETH_TEST_FL_FAILED;
6275 
6276 		data[0] = 0;
6277 		data[1] = 0;
6278 		data[3] = 0;
6279 		data[4] = 0;
6280 	}
6281 }
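
/*
 * Usage note: this is the ethtool self_test hook, reached via
 * "ethtool -t ethX offline" or "ethtool -t ethX online"; an interface
 * that was up is closed and reopened around the offline tests.
 */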
6282 
6283 static void s2io_get_ethtool_stats(struct net_device *dev,
6284 				   struct ethtool_stats *estats,
6285 				   u64 *tmp_stats)
6286 {
6287 	int i = 0, k;
6288 	struct s2io_nic *sp = netdev_priv(dev);
6289 	struct stat_block *stats = sp->mac_control.stats_info;
6290 	struct swStat *swstats = &stats->sw_stat;
6291 	struct xpakStat *xstats = &stats->xpak_stat;
6292 
6293 	s2io_updt_stats(sp);
6294 	tmp_stats[i++] =
6295 		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
6296 		le32_to_cpu(stats->tmac_frms);
6297 	tmp_stats[i++] =
6298 		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6299 		le32_to_cpu(stats->tmac_data_octets);
6300 	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6301 	tmp_stats[i++] =
6302 		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6303 		le32_to_cpu(stats->tmac_mcst_frms);
6304 	tmp_stats[i++] =
6305 		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6306 		le32_to_cpu(stats->tmac_bcst_frms);
6307 	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6308 	tmp_stats[i++] =
6309 		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6310 		le32_to_cpu(stats->tmac_ttl_octets);
6311 	tmp_stats[i++] =
6312 		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6313 		le32_to_cpu(stats->tmac_ucst_frms);
6314 	tmp_stats[i++] =
6315 		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6316 		le32_to_cpu(stats->tmac_nucst_frms);
6317 	tmp_stats[i++] =
6318 		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6319 		le32_to_cpu(stats->tmac_any_err_frms);
6320 	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6321 	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6322 	tmp_stats[i++] =
6323 		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6324 		le32_to_cpu(stats->tmac_vld_ip);
6325 	tmp_stats[i++] =
6326 		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6327 		le32_to_cpu(stats->tmac_drop_ip);
6328 	tmp_stats[i++] =
6329 		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6330 		le32_to_cpu(stats->tmac_icmp);
6331 	tmp_stats[i++] =
6332 		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6333 		le32_to_cpu(stats->tmac_rst_tcp);
6334 	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6335 	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6336 		le32_to_cpu(stats->tmac_udp);
6337 	tmp_stats[i++] =
6338 		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6339 		le32_to_cpu(stats->rmac_vld_frms);
6340 	tmp_stats[i++] =
6341 		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6342 		le32_to_cpu(stats->rmac_data_octets);
6343 	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6344 	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6345 	tmp_stats[i++] =
6346 		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6347 		le32_to_cpu(stats->rmac_vld_mcst_frms);
6348 	tmp_stats[i++] =
6349 		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6350 		le32_to_cpu(stats->rmac_vld_bcst_frms);
6351 	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6352 	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6353 	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6354 	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6355 	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6356 	tmp_stats[i++] =
6357 		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6358 		le32_to_cpu(stats->rmac_ttl_octets);
6359 	tmp_stats[i++] =
6360 		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6361 		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
6362 	tmp_stats[i++] =
6363 		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6364 		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6365 	tmp_stats[i++] =
6366 		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6367 		le32_to_cpu(stats->rmac_discarded_frms);
6368 	tmp_stats[i++] =
6369 		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6370 		<< 32 | le32_to_cpu(stats->rmac_drop_events);
6371 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6372 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6373 	tmp_stats[i++] =
6374 		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6375 		le32_to_cpu(stats->rmac_usized_frms);
6376 	tmp_stats[i++] =
6377 		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6378 		le32_to_cpu(stats->rmac_osized_frms);
6379 	tmp_stats[i++] =
6380 		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6381 		le32_to_cpu(stats->rmac_frag_frms);
6382 	tmp_stats[i++] =
6383 		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6384 		le32_to_cpu(stats->rmac_jabber_frms);
6385 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6386 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6387 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6388 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6389 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6390 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6391 	tmp_stats[i++] =
6392 		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6393 		le32_to_cpu(stats->rmac_ip);
6394 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6395 	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6396 	tmp_stats[i++] =
6397 		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6398 		le32_to_cpu(stats->rmac_drop_ip);
6399 	tmp_stats[i++] =
6400 		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6401 		le32_to_cpu(stats->rmac_icmp);
6402 	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6403 	tmp_stats[i++] =
6404 		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6405 		le32_to_cpu(stats->rmac_udp);
6406 	tmp_stats[i++] =
6407 		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6408 		le32_to_cpu(stats->rmac_err_drp_udp);
6409 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6410 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6411 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6412 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6413 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6414 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6415 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6416 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6417 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6418 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6419 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6420 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6421 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6422 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6423 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6424 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6425 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6426 	tmp_stats[i++] =
6427 		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6428 		le32_to_cpu(stats->rmac_pause_cnt);
6429 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6430 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6431 	tmp_stats[i++] =
6432 		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6433 		le32_to_cpu(stats->rmac_accepted_ip);
6434 	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6435 	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6436 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6437 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6438 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6439 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6440 	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6441 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6442 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6443 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6444 	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6445 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6446 	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6447 	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6448 	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6449 	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6450 	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6451 	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6452 	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6453 
6454 	/* Enhanced statistics exist only for Hercules */
6455 	if (sp->device_type == XFRAME_II_DEVICE) {
6456 		tmp_stats[i++] =
6457 			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6458 		tmp_stats[i++] =
6459 			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6460 		tmp_stats[i++] =
6461 			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6462 		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6463 		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6464 		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6465 		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6466 		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6467 		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6468 		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6469 		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6470 		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6471 		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6472 		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6473 		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6474 		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6475 	}
6476 
6477 	tmp_stats[i++] = 0;
6478 	tmp_stats[i++] = swstats->single_ecc_errs;
6479 	tmp_stats[i++] = swstats->double_ecc_errs;
6480 	tmp_stats[i++] = swstats->parity_err_cnt;
6481 	tmp_stats[i++] = swstats->serious_err_cnt;
6482 	tmp_stats[i++] = swstats->soft_reset_cnt;
6483 	tmp_stats[i++] = swstats->fifo_full_cnt;
6484 	for (k = 0; k < MAX_RX_RINGS; k++)
6485 		tmp_stats[i++] = swstats->ring_full_cnt[k];
6486 	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6487 	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6488 	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6489 	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6490 	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6491 	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6492 	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6493 	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6494 	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6495 	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6496 	tmp_stats[i++] = xstats->warn_laser_output_power_high;
6497 	tmp_stats[i++] = xstats->warn_laser_output_power_low;
6498 	tmp_stats[i++] = swstats->clubbed_frms_cnt;
6499 	tmp_stats[i++] = swstats->sending_both;
6500 	tmp_stats[i++] = swstats->outof_sequence_pkts;
6501 	tmp_stats[i++] = swstats->flush_max_pkts;
6502 	if (swstats->num_aggregations) {
6503 		u64 tmp = swstats->sum_avg_pkts_aggregated;
6504 		int count = 0;
6505 		/*
6506 		 * Since 64-bit divide does not work on all platforms,
6507 		 * do repeated subtraction.
6508 		 */
6509 		while (tmp >= swstats->num_aggregations) {
6510 			tmp -= swstats->num_aggregations;
6511 			count++;
6512 		}
6513 		tmp_stats[i++] = count;
6514 	} else
6515 		tmp_stats[i++] = 0;
6516 	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6517 	tmp_stats[i++] = swstats->pci_map_fail_cnt;
6518 	tmp_stats[i++] = swstats->watchdog_timer_cnt;
6519 	tmp_stats[i++] = swstats->mem_allocated;
6520 	tmp_stats[i++] = swstats->mem_freed;
6521 	tmp_stats[i++] = swstats->link_up_cnt;
6522 	tmp_stats[i++] = swstats->link_down_cnt;
6523 	tmp_stats[i++] = swstats->link_up_time;
6524 	tmp_stats[i++] = swstats->link_down_time;
6525 
6526 	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6527 	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6528 	tmp_stats[i++] = swstats->tx_parity_err_cnt;
6529 	tmp_stats[i++] = swstats->tx_link_loss_cnt;
6530 	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6531 
6532 	tmp_stats[i++] = swstats->rx_parity_err_cnt;
6533 	tmp_stats[i++] = swstats->rx_abort_cnt;
6534 	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6535 	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6536 	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6537 	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6538 	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6539 	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6540 	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6541 	tmp_stats[i++] = swstats->tda_err_cnt;
6542 	tmp_stats[i++] = swstats->pfc_err_cnt;
6543 	tmp_stats[i++] = swstats->pcc_err_cnt;
6544 	tmp_stats[i++] = swstats->tti_err_cnt;
6545 	tmp_stats[i++] = swstats->tpa_err_cnt;
6546 	tmp_stats[i++] = swstats->sm_err_cnt;
6547 	tmp_stats[i++] = swstats->lso_err_cnt;
6548 	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6549 	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6550 	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6551 	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6552 	tmp_stats[i++] = swstats->rc_err_cnt;
6553 	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6554 	tmp_stats[i++] = swstats->rpa_err_cnt;
6555 	tmp_stats[i++] = swstats->rda_err_cnt;
6556 	tmp_stats[i++] = swstats->rti_err_cnt;
6557 	tmp_stats[i++] = swstats->mc_err_cnt;
6558 }
6559 
6560 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6561 {
6562 	return XENA_REG_SPACE;
6563 }
6564 
6565 
6566 static int s2io_get_eeprom_len(struct net_device *dev)
6567 {
6568 	return XENA_EEPROM_SPACE;
6569 }
6570 
6571 static int s2io_get_sset_count(struct net_device *dev, int sset)
6572 {
6573 	struct s2io_nic *sp = netdev_priv(dev);
6574 
6575 	switch (sset) {
6576 	case ETH_SS_TEST:
6577 		return S2IO_TEST_LEN;
6578 	case ETH_SS_STATS:
6579 		switch (sp->device_type) {
6580 		case XFRAME_I_DEVICE:
6581 			return XFRAME_I_STAT_LEN;
6582 		case XFRAME_II_DEVICE:
6583 			return XFRAME_II_STAT_LEN;
6584 		default:
6585 			return 0;
6586 		}
6587 	default:
6588 		return -EOPNOTSUPP;
6589 	}
6590 }
6591 
6592 static void s2io_ethtool_get_strings(struct net_device *dev,
6593 				     u32 stringset, u8 *data)
6594 {
6595 	int stat_size = 0;
6596 	struct s2io_nic *sp = netdev_priv(dev);
6597 
6598 	switch (stringset) {
6599 	case ETH_SS_TEST:
6600 		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6601 		break;
6602 	case ETH_SS_STATS:
6603 		stat_size = sizeof(ethtool_xena_stats_keys);
6604 		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6605 		if (sp->device_type == XFRAME_II_DEVICE) {
6606 			memcpy(data + stat_size,
6607 			       &ethtool_enhanced_stats_keys,
6608 			       sizeof(ethtool_enhanced_stats_keys));
6609 			stat_size += sizeof(ethtool_enhanced_stats_keys);
6610 		}
6611 
6612 		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6613 		       sizeof(ethtool_driver_stats_keys));
6614 	}
6615 }
6616 
6617 static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6618 {
6619 	struct s2io_nic *sp = netdev_priv(dev);
6620 	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6621 
6622 	if (changed && netif_running(dev)) {
6623 		int rc;
6624 
6625 		s2io_stop_all_tx_queue(sp);
6626 		s2io_card_down(sp);
6627 		dev->features = features;
6628 		rc = s2io_card_up(sp);
6629 		if (rc)
6630 			s2io_reset(sp);
6631 		else
6632 			s2io_start_all_tx_queue(sp);
6633 
6634 		return rc ? rc : 1;
6635 	}
6636 
6637 	return 0;
6638 }
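/*
 * Illustrative note (editor's assumption, not from the original source):
 * this handler is reached via ndo_set_features, e.g. when userspace runs
 * "ethtool -K ethX lro off". Since toggling LRO requires reprogramming
 * the adapter, the card is brought down and back up with the new feature
 * set; returning 1 signals that dev->features was already updated here.
 */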
6639 
6640 static const struct ethtool_ops netdev_ethtool_ops = {
6641 	.get_settings = s2io_ethtool_gset,
6642 	.set_settings = s2io_ethtool_sset,
6643 	.get_drvinfo = s2io_ethtool_gdrvinfo,
6644 	.get_regs_len = s2io_ethtool_get_regs_len,
6645 	.get_regs = s2io_ethtool_gregs,
6646 	.get_link = ethtool_op_get_link,
6647 	.get_eeprom_len = s2io_get_eeprom_len,
6648 	.get_eeprom = s2io_ethtool_geeprom,
6649 	.set_eeprom = s2io_ethtool_seeprom,
6650 	.get_ringparam = s2io_ethtool_gringparam,
6651 	.get_pauseparam = s2io_ethtool_getpause_data,
6652 	.set_pauseparam = s2io_ethtool_setpause_data,
6653 	.self_test = s2io_ethtool_test,
6654 	.get_strings = s2io_ethtool_get_strings,
6655 	.set_phys_id = s2io_ethtool_set_led,
6656 	.get_ethtool_stats = s2io_get_ethtool_stats,
6657 	.get_sset_count = s2io_get_sset_count,
6658 };
6659 
6660 /**
6661  *  s2io_ioctl - Entry point for the Ioctl
6662  *  @dev :  Device pointer.
6663  *  @ifr :  An IOCTL specific structure, that can contain a pointer to
6664  *  a proprietary structure used to pass information to the driver.
6665  *  @cmd :  This is used to distinguish between the different commands that
6666  *  can be passed to the IOCTL functions.
6667  *  Description:
6668  *  Currently no special functionality is supported in IOCTL, hence the
6669  *  function always returns -EOPNOTSUPP.
6670  */
6671 
6672 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6673 {
6674 	return -EOPNOTSUPP;
6675 }
6676 
6677 /**
6678  *  s2io_change_mtu - entry point to change MTU size for the device.
6679  *   @dev : device pointer.
6680  *   @new_mtu : the new MTU size for the device.
6681  *   Description: A driver entry point to change MTU size for the device.
6682  *   Before changing the MTU the device must be stopped.
6683  *  Return value:
6684  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6685  *   file on failure.
6686  */
6687 
6688 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6689 {
6690 	struct s2io_nic *sp = netdev_priv(dev);
6691 	int ret = 0;
6692 
6693 	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6694 		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
6695 		return -EPERM;
6696 	}
6697 
6698 	dev->mtu = new_mtu;
6699 	if (netif_running(dev)) {
6700 		s2io_stop_all_tx_queue(sp);
6701 		s2io_card_down(sp);
6702 		ret = s2io_card_up(sp);
6703 		if (ret) {
6704 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6705 				  __func__);
6706 			return ret;
6707 		}
6708 		s2io_wake_all_tx_queue(sp);
6709 	} else { /* Device is down */
6710 		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6711 		u64 val64 = new_mtu;
6712 
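		/*
		 * Hedged reading of the register layout: vBIT(val64, 2, 14)
		 * left-justifies the new MTU into a 14-bit field starting at
		 * bit offset 2 (counting from the MSB) of rmac_max_pyld_len,
		 * so the MAC accepts the new frame size while the device
		 * stays down.
		 */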
6713 		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6714 	}
6715 
6716 	return ret;
6717 }
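/*
 * Illustrative trigger (assumption, not part of the driver): a command
 * such as "ip link set dev ethX mtu 9000" reaches s2io_change_mtu()
 * through ndo_change_mtu; S2IO_JUMBO_SIZE bounds the largest value the
 * adapter will accept.
 */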
6718 
6719 /**
6720  * s2io_set_link - Set the Link status
6721  * @work: work_struct embedded in the device private structure
6722  * Description: Sets the link status for the adapter
6723  */
6724 
6725 static void s2io_set_link(struct work_struct *work)
6726 {
6727 	struct s2io_nic *nic = container_of(work, struct s2io_nic,
6728 					    set_link_task);
6729 	struct net_device *dev = nic->dev;
6730 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
6731 	register u64 val64;
6732 	u16 subid;
6733 
6734 	rtnl_lock();
6735 
6736 	if (!netif_running(dev))
6737 		goto out_unlock;
6738 
6739 	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6740 		/* The card is being reset, no point doing anything */
6741 		goto out_unlock;
6742 	}
6743 
6744 	subid = nic->pdev->subsystem_device;
6745 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6746 		/*
6747 		 * Allow a small delay for the NICs self initiated
6748 		 * Allow a small delay for the NIC's self-initiated
6749 		 */
6750 		msleep(100);
6751 	}
6752 
6753 	val64 = readq(&bar0->adapter_status);
6754 	if (LINK_IS_UP(val64)) {
6755 		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6756 			if (verify_xena_quiescence(nic)) {
6757 				val64 = readq(&bar0->adapter_control);
6758 				val64 |= ADAPTER_CNTL_EN;
6759 				writeq(val64, &bar0->adapter_control);
6760 				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6761 					    nic->device_type, subid)) {
6762 					val64 = readq(&bar0->gpio_control);
6763 					val64 |= GPIO_CTRL_GPIO_0;
6764 					writeq(val64, &bar0->gpio_control);
6765 					val64 = readq(&bar0->gpio_control);
6766 				} else {
6767 					val64 |= ADAPTER_LED_ON;
6768 					writeq(val64, &bar0->adapter_control);
6769 				}
6770 				nic->device_enabled_once = true;
6771 			} else {
6772 				DBG_PRINT(ERR_DBG,
6773 					  "%s: Error: device is not Quiescent\n",
6774 					  dev->name);
6775 				s2io_stop_all_tx_queue(nic);
6776 			}
6777 		}
6778 		val64 = readq(&bar0->adapter_control);
6779 		val64 |= ADAPTER_LED_ON;
6780 		writeq(val64, &bar0->adapter_control);
6781 		s2io_link(nic, LINK_UP);
6782 	} else {
6783 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6784 						      subid)) {
6785 			val64 = readq(&bar0->gpio_control);
6786 			val64 &= ~GPIO_CTRL_GPIO_0;
6787 			writeq(val64, &bar0->gpio_control);
6788 			val64 = readq(&bar0->gpio_control);
6789 		}
6790 		/* turn off LED */
6791 		val64 = readq(&bar0->adapter_control);
6792 		val64 = val64 & (~ADAPTER_LED_ON);
6793 		writeq(val64, &bar0->adapter_control);
6794 		s2io_link(nic, LINK_DOWN);
6795 	}
6796 	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6797 
6798 out_unlock:
6799 	rtnl_unlock();
6800 }
6801 
6802 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6803 				  struct buffAdd *ba,
6804 				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
6805 				  u64 *temp2, int size)
6806 {
6807 	struct net_device *dev = sp->dev;
6808 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6809 
6810 	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6811 		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6812 		/* allocate skb */
6813 		if (*skb) {
6814 			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6815 			/*
6816 			 * As Rx frames are not going to be processed,
6817 			 * reuse the same mapped address for the Rxd
6818 			 * buffer pointer
6819 			 */
6820 			rxdp1->Buffer0_ptr = *temp0;
6821 		} else {
6822 			*skb = netdev_alloc_skb(dev, size);
6823 			if (!(*skb)) {
6824 				DBG_PRINT(INFO_DBG,
6825 					  "%s: Out of memory to allocate %s\n",
6826 					  dev->name, "1 buf mode SKBs");
6827 				stats->mem_alloc_fail_cnt++;
6828 				return -ENOMEM ;
6829 			}
6830 			stats->mem_allocated += (*skb)->truesize;
6831 			/* storing the mapped addr in a temp variable
6832 			 * such that it will be used for the next rxd whose
6833 			 * Host Control is NULL
6834 			 */
6835 			rxdp1->Buffer0_ptr = *temp0 =
6836 				pci_map_single(sp->pdev, (*skb)->data,
6837 					       size - NET_IP_ALIGN,
6838 					       PCI_DMA_FROMDEVICE);
6839 			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6840 				goto memalloc_failed;
6841 			rxdp->Host_Control = (unsigned long) (*skb);
6842 		}
6843 	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6844 		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6845 		/* Two buffer Mode */
6846 		if (*skb) {
6847 			rxdp3->Buffer2_ptr = *temp2;
6848 			rxdp3->Buffer0_ptr = *temp0;
6849 			rxdp3->Buffer1_ptr = *temp1;
6850 		} else {
6851 			*skb = netdev_alloc_skb(dev, size);
6852 			if (!(*skb)) {
6853 				DBG_PRINT(INFO_DBG,
6854 					  "%s: Out of memory to allocate %s\n",
6855 					  dev->name,
6856 					  "2 buf mode SKBs");
6857 				stats->mem_alloc_fail_cnt++;
6858 				return -ENOMEM;
6859 			}
6860 			stats->mem_allocated += (*skb)->truesize;
6861 			rxdp3->Buffer2_ptr = *temp2 =
6862 				pci_map_single(sp->pdev, (*skb)->data,
6863 					       dev->mtu + 4,
6864 					       PCI_DMA_FROMDEVICE);
6865 			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6866 				goto memalloc_failed;
6867 			rxdp3->Buffer0_ptr = *temp0 =
6868 				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6869 					       PCI_DMA_FROMDEVICE);
6870 			if (pci_dma_mapping_error(sp->pdev,
6871 						  rxdp3->Buffer0_ptr)) {
6872 				pci_unmap_single(sp->pdev,
6873 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6874 						 dev->mtu + 4,
6875 						 PCI_DMA_FROMDEVICE);
6876 				goto memalloc_failed;
6877 			}
6878 			rxdp->Host_Control = (unsigned long) (*skb);
6879 
6880 			/* Buffer-1 will be a dummy buffer, not used */
6881 			rxdp3->Buffer1_ptr = *temp1 =
6882 				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6883 					       PCI_DMA_FROMDEVICE);
6884 			if (pci_dma_mapping_error(sp->pdev,
6885 						  rxdp3->Buffer1_ptr)) {
6886 				pci_unmap_single(sp->pdev,
6887 						 (dma_addr_t)rxdp3->Buffer0_ptr,
6888 						 BUF0_LEN, PCI_DMA_FROMDEVICE);
6889 				pci_unmap_single(sp->pdev,
6890 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6891 						 dev->mtu + 4,
6892 						 PCI_DMA_FROMDEVICE);
6893 				goto memalloc_failed;
6894 			}
6895 		}
6896 	}
6897 	return 0;
6898 
6899 memalloc_failed:
6900 	stats->pci_map_fail_cnt++;
6901 	stats->mem_freed += (*skb)->truesize;
6902 	dev_kfree_skb(*skb);
6903 	return -ENOMEM;
6904 }
6905 
6906 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6907 				int size)
6908 {
6909 	struct net_device *dev = sp->dev;
6910 	if (sp->rxd_mode == RXD_MODE_1) {
6911 		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6912 	} else if (sp->rxd_mode == RXD_MODE_3B) {
6913 		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6914 		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6915 		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6916 	}
6917 }
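/*
 * Worked example (assuming a 1500 byte MTU in 2-buffer mode): buffer 0
 * is sized to BUF0_LEN for the header area, buffer 1 is a 1 byte dummy,
 * and buffer 2 is sized to mtu + 4 = 1504 bytes for the payload plus
 * trailing bytes; in 1-buffer mode a single buffer of
 * (size - NET_IP_ALIGN) bytes carries the whole frame.
 */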
6918 
6919 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6920 {
6921 	int i, j, k, blk_cnt = 0, size;
6922 	struct config_param *config = &sp->config;
6923 	struct mac_info *mac_control = &sp->mac_control;
6924 	struct net_device *dev = sp->dev;
6925 	struct RxD_t *rxdp = NULL;
6926 	struct sk_buff *skb = NULL;
6927 	struct buffAdd *ba = NULL;
6928 	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6929 
6930 	/* Calculate the size based on ring mode */
6931 	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6932 		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6933 	if (sp->rxd_mode == RXD_MODE_1)
6934 		size += NET_IP_ALIGN;
6935 	else if (sp->rxd_mode == RXD_MODE_3B)
6936 		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
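	/*
	 * Illustrative arithmetic (assuming the usual 14/3/5 byte values
	 * for the Ethernet II, 802.2 and SNAP header macros): with a
	 * 1500 byte MTU, 1-buffer mode reserves
	 * 1500 + 14 + 3 + 5 + NET_IP_ALIGN bytes per replenished skb.
	 */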
6937 
6938 	for (i = 0; i < config->rx_ring_num; i++) {
6939 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6940 		struct ring_info *ring = &mac_control->rings[i];
6941 
6942 		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6943 
6944 		for (j = 0; j < blk_cnt; j++) {
6945 			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6946 				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6947 				if (sp->rxd_mode == RXD_MODE_3B)
6948 					ba = &ring->ba[j][k];
6949 				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6950 							   (u64 *)&temp0_64,
6951 							   (u64 *)&temp1_64,
6952 							   (u64 *)&temp2_64,
6953 							   size) == -ENOMEM) {
6954 					return 0;
6955 				}
6956 
6957 				set_rxd_buffer_size(sp, rxdp, size);
6958 				wmb();
6959 				/* flip the Ownership bit to Hardware */
6960 				rxdp->Control_1 |= RXD_OWN_XENA;
6961 			}
6962 		}
6963 	}
6964 	return 0;
6965 
6966 }
6967 
6968 static int s2io_add_isr(struct s2io_nic *sp)
6969 {
6970 	int ret = 0;
6971 	struct net_device *dev = sp->dev;
6972 	int err = 0;
6973 
6974 	if (sp->config.intr_type == MSI_X)
6975 		ret = s2io_enable_msi_x(sp);
6976 	if (ret) {
6977 		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6978 		sp->config.intr_type = INTA;
6979 	}
6980 
6981 	/*
6982 	 * Store the values of the MSIX table in
6983 	 * the struct s2io_nic structure
6984 	 */
6985 	store_xmsi_data(sp);
6986 
6987 	/* After proper initialization of H/W, register ISR */
6988 	if (sp->config.intr_type == MSI_X) {
6989 		int i, msix_rx_cnt = 0;
6990 
6991 		for (i = 0; i < sp->num_entries; i++) {
6992 			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6993 				if (sp->s2io_entries[i].type ==
6994 				    MSIX_RING_TYPE) {
6995 					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6996 						dev->name, i);
6997 					err = request_irq(sp->entries[i].vector,
6998 							  s2io_msix_ring_handle,
6999 							  0,
7000 							  sp->desc[i],
7001 							  sp->s2io_entries[i].arg);
7002 				} else if (sp->s2io_entries[i].type ==
7003 					   MSIX_ALARM_TYPE) {
7004 					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
7005 						dev->name, i);
7006 					err = request_irq(sp->entries[i].vector,
7007 							  s2io_msix_fifo_handle,
7008 							  0,
7009 							  sp->desc[i],
7010 							  sp->s2io_entries[i].arg);
7011 
7012 				}
7013 				/* if either data or addr is zero print it. */
7014 				if (!(sp->msix_info[i].addr &&
7015 				      sp->msix_info[i].data)) {
7016 					DBG_PRINT(ERR_DBG,
7017 						  "%s @Addr:0x%llx Data:0x%llx\n",
7018 						  sp->desc[i],
7019 						  (unsigned long long)
7020 						  sp->msix_info[i].addr,
7021 						  (unsigned long long)
7022 						  ntohl(sp->msix_info[i].data));
7023 				} else
7024 					msix_rx_cnt++;
7025 				if (err) {
7026 					remove_msix_isr(sp);
7027 
7028 					DBG_PRINT(ERR_DBG,
7029 						  "%s:MSI-X-%d registration "
7030 						  "failed\n", dev->name, i);
7031 
7032 					DBG_PRINT(ERR_DBG,
7033 						  "%s: Defaulting to INTA\n",
7034 						  dev->name);
7035 					sp->config.intr_type = INTA;
7036 					break;
7037 				}
7038 				sp->s2io_entries[i].in_use =
7039 					MSIX_REGISTERED_SUCCESS;
7040 			}
7041 		}
7042 		if (!err) {
7043 			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
7044 			DBG_PRINT(INFO_DBG,
7045 				  "MSI-X-TX entries enabled through alarm vector\n");
7046 		}
7047 	}
7048 	if (sp->config.intr_type == INTA) {
7049 		err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
7050 				  sp->name, dev);
7051 		if (err) {
7052 			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7053 				  dev->name);
7054 			return -1;
7055 		}
7056 	}
7057 	return 0;
7058 }
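/*
 * Vector layout sketch (inferred from the code above, stated as an
 * assumption): with MSI-X enabled one entry is registered per Rx ring
 * plus a single alarm entry that also covers Tx completions, which is
 * why the probe path sizes the table as rx_ring_num + 1; on any
 * registration failure the driver falls back to legacy INTA.
 */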
7059 
7060 static void s2io_rem_isr(struct s2io_nic *sp)
7061 {
7062 	if (sp->config.intr_type == MSI_X)
7063 		remove_msix_isr(sp);
7064 	else
7065 		remove_inta_isr(sp);
7066 }
7067 
7068 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7069 {
7070 	int cnt = 0;
7071 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
7072 	register u64 val64 = 0;
7073 	struct config_param *config;
7074 	config = &sp->config;
7075 
7076 	if (!is_s2io_card_up(sp))
7077 		return;
7078 
7079 	del_timer_sync(&sp->alarm_timer);
7080 	/* If s2io_set_link task is executing, wait till it completes. */
7081 	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7082 		msleep(50);
7083 	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7084 
7085 	/* Disable napi */
7086 	if (sp->config.napi) {
7087 		int off = 0;
7088 		if (config->intr_type ==  MSI_X) {
7089 			for (; off < sp->config.rx_ring_num; off++)
7090 				napi_disable(&sp->mac_control.rings[off].napi);
7091 		} else
7093 			napi_disable(&sp->napi);
7094 	}
7095 
7096 	/* disable Tx and Rx traffic on the NIC */
7097 	if (do_io)
7098 		stop_nic(sp);
7099 
7100 	s2io_rem_isr(sp);
7101 
7102 	/* stop the tx queue, indicate link down */
7103 	s2io_link(sp, LINK_DOWN);
7104 
7105 	/* Check if the device is Quiescent and then Reset the NIC */
7106 	while (do_io) {
7107 		/* As per the HW requirement we need to replenish the
7108 		 * receive buffers to avoid the ring bump. Since there is
7109 		 * no intention of processing the Rx frames at this point, we
7110 		 * just set the ownership bit of the rxds in each Rx
7111 		 * ring to HW and set the appropriate buffer size
7112 		 * based on the ring mode.
7113 		 */
7114 		rxd_owner_bit_reset(sp);
7115 
7116 		val64 = readq(&bar0->adapter_status);
7117 		if (verify_xena_quiescence(sp)) {
7118 			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7119 				break;
7120 		}
7121 
7122 		msleep(50);
7123 		cnt++;
7124 		if (cnt == 10) {
7125 			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7126 				  "adapter status reads 0x%llx\n",
7127 				  (unsigned long long)val64);
7128 			break;
7129 		}
7130 	}
7131 	if (do_io)
7132 		s2io_reset(sp);
7133 
7134 	/* Free all Tx buffers */
7135 	free_tx_buffers(sp);
7136 
7137 	/* Free all Rx buffers */
7138 	free_rx_buffers(sp);
7139 
7140 	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7141 }
7142 
7143 static void s2io_card_down(struct s2io_nic *sp)
7144 {
7145 	do_s2io_card_down(sp, 1);
7146 }
7147 
7148 static int s2io_card_up(struct s2io_nic *sp)
7149 {
7150 	int i, ret = 0;
7151 	struct config_param *config;
7152 	struct mac_info *mac_control;
7153 	struct net_device *dev = (struct net_device *)sp->dev;
7154 	u16 interruptible;
7155 
7156 	/* Initialize the H/W I/O registers */
7157 	ret = init_nic(sp);
7158 	if (ret != 0) {
7159 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7160 			  dev->name);
7161 		if (ret != -EIO)
7162 			s2io_reset(sp);
7163 		return ret;
7164 	}
7165 
7166 	/*
7167 	 * Initializing the Rx buffers. For now we are considering only 1
7168 	 * Rx ring and initializing buffers into 30 Rx blocks
7169 	 */
7170 	config = &sp->config;
7171 	mac_control = &sp->mac_control;
7172 
7173 	for (i = 0; i < config->rx_ring_num; i++) {
7174 		struct ring_info *ring = &mac_control->rings[i];
7175 
7176 		ring->mtu = dev->mtu;
7177 		ring->lro = !!(dev->features & NETIF_F_LRO);
7178 		ret = fill_rx_buffers(sp, ring, 1);
7179 		if (ret) {
7180 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7181 				  dev->name);
7182 			s2io_reset(sp);
7183 			free_rx_buffers(sp);
7184 			return -ENOMEM;
7185 		}
7186 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7187 			  ring->rx_bufs_left);
7188 	}
7189 
7190 	/* Initialise napi */
7191 	if (config->napi) {
7192 		if (config->intr_type ==  MSI_X) {
7193 			for (i = 0; i < sp->config.rx_ring_num; i++)
7194 				napi_enable(&sp->mac_control.rings[i].napi);
7195 		} else {
7196 			napi_enable(&sp->napi);
7197 		}
7198 	}
7199 
7200 	/* Maintain the state prior to the open */
7201 	if (sp->promisc_flg)
7202 		sp->promisc_flg = 0;
7203 	if (sp->m_cast_flg) {
7204 		sp->m_cast_flg = 0;
7205 		sp->all_multi_pos = 0;
7206 	}
7207 
7208 	/* Setting its receive mode */
7209 	s2io_set_multicast(dev);
7210 
7211 	if (dev->features & NETIF_F_LRO) {
7212 		/* Initialize max aggregatable pkts per session based on MTU */
7213 		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7214 		/* Check if we can use (if specified) user provided value */
7215 		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7216 			sp->lro_max_aggr_per_sess = lro_max_pkts;
7217 	}
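	/*
	 * Worked example: with a 1500 byte MTU the cap above evaluates to
	 * 65535 / 1500 = 43 packets per LRO session, optionally lowered
	 * by the lro_max_pkts module parameter.
	 */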
7218 
7219 	/* Enable Rx Traffic and interrupts on the NIC */
7220 	if (start_nic(sp)) {
7221 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7222 		s2io_reset(sp);
7223 		free_rx_buffers(sp);
7224 		return -ENODEV;
7225 	}
7226 
7227 	/* Add interrupt service routine */
7228 	if (s2io_add_isr(sp) != 0) {
7229 		if (sp->config.intr_type == MSI_X)
7230 			s2io_rem_isr(sp);
7231 		s2io_reset(sp);
7232 		free_rx_buffers(sp);
7233 		return -ENODEV;
7234 	}
7235 
7236 	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7237 
7238 	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7239 
7240 	/*  Enable select interrupts */
7241 	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7242 	if (sp->config.intr_type != INTA) {
7243 		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7244 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7245 	} else {
7246 		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7247 		interruptible |= TX_PIC_INTR;
7248 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7249 	}
7250 
7251 	return 0;
7252 }
7253 
7254 /**
7255  * s2io_restart_nic - Resets the NIC.
7256  * @work: work_struct embedded in the device private structure
7257  * Description:
7258  * This function is scheduled to be run by the s2io_tx_watchdog
7259  * function after 0.5 secs to reset the NIC. The idea is to reduce
7260  * the run time of the watch dog routine which is run holding a
7261  * spin lock.
7262  */
7263 
7264 static void s2io_restart_nic(struct work_struct *work)
7265 {
7266 	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7267 	struct net_device *dev = sp->dev;
7268 
7269 	rtnl_lock();
7270 
7271 	if (!netif_running(dev))
7272 		goto out_unlock;
7273 
7274 	s2io_card_down(sp);
7275 	if (s2io_card_up(sp)) {
7276 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7277 	}
7278 	s2io_wake_all_tx_queue(sp);
7279 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7280 out_unlock:
7281 	rtnl_unlock();
7282 }
7283 
7284 /**
7285  *  s2io_tx_watchdog - Watchdog for transmit side.
7286  *  @dev : Pointer to net device structure
7287  *  Description:
7288  *  This function is triggered if the Tx Queue is stopped
7289  *  for a pre-defined amount of time when the Interface is still up.
7290  *  If the Interface is jammed in such a situation, the hardware is
7291  *  reset (by s2io_close) and restarted again (by s2io_open) to
7292  *  overcome any problem that might have been caused in the hardware.
7293  *  Return value:
7294  *  void
7295  */
7296 
7297 static void s2io_tx_watchdog(struct net_device *dev)
7298 {
7299 	struct s2io_nic *sp = netdev_priv(dev);
7300 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7301 
7302 	if (netif_carrier_ok(dev)) {
7303 		swstats->watchdog_timer_cnt++;
7304 		schedule_work(&sp->rst_timer_task);
7305 		swstats->soft_reset_cnt++;
7306 	}
7307 }
7308 
7309 /**
7310  *   rx_osm_handler - To perform some OS related operations on SKB.
7311  *   @sp: private member of the device structure, pointer to s2io_nic structure.
7312  *   @skb : the socket buffer pointer.
7313  *   @len : length of the packet
7314  *   @cksum : FCS checksum of the frame.
7315  *   @ring_no : the ring from which this RxD was extracted.
7316  *   Description:
7317  *   This function is called by the Rx interrupt service routine to perform
7318  *   some OS related operations on the SKB before passing it to the upper
7319  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7320  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7321  *   to the upper layer. If the checksum is wrong, it increments the Rx
7322  *   packet error count, frees the SKB and returns error.
7323  *   Return value:
7324  *   SUCCESS on success and -1 on failure.
7325  */
7326 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t *rxdp)
7327 {
7328 	struct s2io_nic *sp = ring_data->nic;
7329 	struct net_device *dev = (struct net_device *)ring_data->dev;
7330 	struct sk_buff *skb = (struct sk_buff *)
7331 		((unsigned long)rxdp->Host_Control);
7332 	int ring_no = ring_data->ring_no;
7333 	u16 l3_csum, l4_csum;
7334 	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7335 	struct lro *uninitialized_var(lro);
7336 	u8 err_mask;
7337 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7338 
7339 	skb->dev = dev;
7340 
7341 	if (err) {
7342 		/* Check for parity error */
7343 		if (err & 0x1)
7344 			swstats->parity_err_cnt++;
7345 
7346 		err_mask = err >> 48;
7347 		switch (err_mask) {
7348 		case 1:
7349 			swstats->rx_parity_err_cnt++;
7350 			break;
7351 
7352 		case 2:
7353 			swstats->rx_abort_cnt++;
7354 			break;
7355 
7356 		case 3:
7357 			swstats->rx_parity_abort_cnt++;
7358 			break;
7359 
7360 		case 4:
7361 			swstats->rx_rda_fail_cnt++;
7362 			break;
7363 
7364 		case 5:
7365 			swstats->rx_unkn_prot_cnt++;
7366 			break;
7367 
7368 		case 6:
7369 			swstats->rx_fcs_err_cnt++;
7370 			break;
7371 
7372 		case 7:
7373 			swstats->rx_buf_size_err_cnt++;
7374 			break;
7375 
7376 		case 8:
7377 			swstats->rx_rxd_corrupt_cnt++;
7378 			break;
7379 
7380 		case 15:
7381 			swstats->rx_unkn_err_cnt++;
7382 			break;
7383 		}
7384 		/*
7385 		 * Drop the packet if bad transfer code. Exception being
7386 		 * 0x5, which could be due to unsupported IPv6 extension header.
7387 		 * In this case, we let stack handle the packet.
7388 		 * Note that in this case, since checksum will be incorrect,
7389 		 * stack will validate the same.
7390 		 */
7391 		if (err_mask != 0x5) {
7392 			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7393 				  dev->name, err_mask);
7394 			dev->stats.rx_crc_errors++;
7395 			swstats->mem_freed
7396 				+= skb->truesize;
7397 			dev_kfree_skb(skb);
7398 			ring_data->rx_bufs_left -= 1;
7399 			rxdp->Host_Control = 0;
7400 			return 0;
7401 		}
7402 	}
7403 
7404 	rxdp->Host_Control = 0;
7405 	if (sp->rxd_mode == RXD_MODE_1) {
7406 		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7407 
7408 		skb_put(skb, len);
7409 	} else if (sp->rxd_mode == RXD_MODE_3B) {
7410 		int get_block = ring_data->rx_curr_get_info.block_index;
7411 		int get_off = ring_data->rx_curr_get_info.offset;
7412 		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7413 		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7414 		unsigned char *buff = skb_push(skb, buf0_len);
7415 
7416 		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7417 		memcpy(buff, ba->ba_0, buf0_len);
7418 		skb_put(skb, buf2_len);
7419 	}
7420 
7421 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7422 	    ((!ring_data->lro) ||
7423 	     (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7424 	    (dev->features & NETIF_F_RXCSUM)) {
7425 		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7426 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7427 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7428 			/*
7429 			 * NIC verifies if the Checksum of the received
7430 			 * frame is Ok or not and accordingly returns
7431 			 * a flag in the RxD.
7432 			 */
7433 			skb->ip_summed = CHECKSUM_UNNECESSARY;
7434 			if (ring_data->lro) {
7435 				u32 tcp_len = 0;
7436 				u8 *tcp;
7437 				int ret = 0;
7438 
7439 				ret = s2io_club_tcp_session(ring_data,
7440 							    skb->data, &tcp,
7441 							    &tcp_len, &lro,
7442 							    rxdp, sp);
7443 				switch (ret) {
7444 				case 3: /* Begin anew */
7445 					lro->parent = skb;
7446 					goto aggregate;
7447 				case 1: /* Aggregate */
7448 					lro_append_pkt(sp, lro, skb, tcp_len);
7449 					goto aggregate;
7450 				case 4: /* Flush session */
7451 					lro_append_pkt(sp, lro, skb, tcp_len);
7452 					queue_rx_frame(lro->parent,
7453 						       lro->vlan_tag);
7454 					clear_lro_session(lro);
7455 					swstats->flush_max_pkts++;
7456 					goto aggregate;
7457 				case 2: /* Flush both */
7458 					lro->parent->data_len = lro->frags_len;
7459 					swstats->sending_both++;
7460 					queue_rx_frame(lro->parent,
7461 						       lro->vlan_tag);
7462 					clear_lro_session(lro);
7463 					goto send_up;
7464 				case 0: /* sessions exceeded */
7465 				case -1: /* non-TCP or not L2 aggregatable */
7466 				case 5: /*
7467 					 * First pkt in session not
7468 					 * L3/L4 aggregatable
7469 					 */
7470 					break;
7471 				default:
7472 					DBG_PRINT(ERR_DBG,
7473 						  "%s: Samadhana!!\n",
7474 						  __func__);
7475 					BUG();
7476 				}
7477 			}
7478 		} else {
7479 			/*
7480 			 * Packet with erroneous checksum, let the
7481 			 * upper layers deal with it.
7482 			 */
7483 			skb_checksum_none_assert(skb);
7484 		}
7485 	} else
7486 		skb_checksum_none_assert(skb);
7487 
7488 	swstats->mem_freed += skb->truesize;
7489 send_up:
7490 	skb_record_rx_queue(skb, ring_no);
7491 	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7492 aggregate:
7493 	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7494 	return SUCCESS;
7495 }
7496 
7497 /**
7498  *  s2io_link - stops/starts the Tx queue.
7499  *  @sp : private member of the device structure, which is a pointer to the
7500  *  s2io_nic structure.
7501  *  @link : indicates whether link is UP/DOWN.
7502  *  Description:
7503  *  This function stops/starts the Tx queue depending on whether the link
7504  *  status of the NIC is down or up. This is called by the Alarm
7505  *  interrupt handler whenever a link change interrupt comes up.
7506  *  Return value:
7507  *  void.
7508  */
7509 
7510 static void s2io_link(struct s2io_nic *sp, int link)
7511 {
7512 	struct net_device *dev = (struct net_device *)sp->dev;
7513 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7514 
7515 	if (link != sp->last_link_state) {
7516 		init_tti(sp, link);
7517 		if (link == LINK_DOWN) {
7518 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7519 			s2io_stop_all_tx_queue(sp);
7520 			netif_carrier_off(dev);
7521 			if (swstats->link_up_cnt)
7522 				swstats->link_up_time =
7523 					jiffies - sp->start_time;
7524 			swstats->link_down_cnt++;
7525 		} else {
7526 			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7527 			if (swstats->link_down_cnt)
7528 				swstats->link_down_time =
7529 					jiffies - sp->start_time;
7530 			swstats->link_up_cnt++;
7531 			netif_carrier_on(dev);
7532 			s2io_wake_all_tx_queue(sp);
7533 		}
7534 	}
7535 	sp->last_link_state = link;
7536 	sp->start_time = jiffies;
7537 }
7538 
7539 /**
7540  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7541  *  @sp : private member of the device structure, which is a pointer to the
7542  *  s2io_nic structure.
7543  *  Description:
7544  *  This function initializes a few of the PCI and PCI-X configuration registers
7545  *  with recommended values.
7546  *  Return value:
7547  *  void
7548  */
7549 
7550 static void s2io_init_pci(struct s2io_nic *sp)
7551 {
7552 	u16 pci_cmd = 0, pcix_cmd = 0;
7553 
7554 	/* Enable Data Parity Error Recovery in PCI-X command register. */
7555 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7556 			     &(pcix_cmd));
7557 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7558 			      (pcix_cmd | 1));
7559 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7560 			     &(pcix_cmd));
7561 
7562 	/* Set the PErr Response bit in PCI command register. */
7563 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7564 	pci_write_config_word(sp->pdev, PCI_COMMAND,
7565 			      (pci_cmd | PCI_COMMAND_PARITY));
7566 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7567 }
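/*
 * Register-level note (hedged): bit 0 of the PCI-X command register is
 * understood to be the Data Parity Error Recovery Enable bit, which the
 * "| 1" above sets; the read-back after each write is a customary way
 * to confirm the update took effect.
 */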
7568 
7569 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7570 			    u8 *dev_multiq)
7571 {
7572 	int i;
7573 
7574 	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7575 		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7576 			  "(%d) not supported\n", tx_fifo_num);
7577 
7578 		if (tx_fifo_num < 1)
7579 			tx_fifo_num = 1;
7580 		else
7581 			tx_fifo_num = MAX_TX_FIFOS;
7582 
7583 		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7584 	}
7585 
7586 	if (multiq)
7587 		*dev_multiq = multiq;
7588 
7589 	if (tx_steering_type && (1 == tx_fifo_num)) {
7590 		if (tx_steering_type != TX_DEFAULT_STEERING)
7591 			DBG_PRINT(ERR_DBG,
7592 				  "Tx steering is not supported with "
7593 				  "one fifo. Disabling Tx steering.\n");
7594 		tx_steering_type = NO_STEERING;
7595 	}
7596 
7597 	if ((tx_steering_type < NO_STEERING) ||
7598 	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7599 		DBG_PRINT(ERR_DBG,
7600 			  "Requested transmit steering not supported\n");
7601 		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7602 		tx_steering_type = NO_STEERING;
7603 	}
7604 
7605 	if (rx_ring_num > MAX_RX_RINGS) {
7606 		DBG_PRINT(ERR_DBG,
7607 			  "Requested number of rx rings not supported\n");
7608 		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7609 			  MAX_RX_RINGS);
7610 		rx_ring_num = MAX_RX_RINGS;
7611 	}
7612 
7613 	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7614 		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7615 			  "Defaulting to INTA\n");
7616 		*dev_intr_type = INTA;
7617 	}
7618 
7619 	if ((*dev_intr_type == MSI_X) &&
7620 	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7621 	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7622 		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7623 			  "Defaulting to INTA\n");
7624 		*dev_intr_type = INTA;
7625 	}
7626 
7627 	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7628 		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7629 		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7630 		rx_ring_mode = 1;
7631 	}
7632 
7633 	for (i = 0; i < MAX_RX_RINGS; i++)
7634 		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7635 			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7636 				  "supported\nDefaulting to %d\n",
7637 				  MAX_RX_BLOCKS_PER_RING);
7638 			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7639 		}
7640 
7641 	return SUCCESS;
7642 }
7643 
7644 /**
7645  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7646  * or Traffic class respectively.
7647  * @nic: device private variable
7648  * Description: The function configures the receive steering to
7649  * desired receive ring.
7650  * Return Value:  SUCCESS on success and
7651  * '-1' on failure (endian settings incorrect).
7652  */
7653 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7654 {
7655 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7656 	register u64 val64 = 0;
7657 
7658 	if (ds_codepoint > 63)
7659 		return FAILURE;
7660 
7661 	val64 = RTS_DS_MEM_DATA(ring);
7662 	writeq(val64, &bar0->rts_ds_mem_data);
7663 
7664 	val64 = RTS_DS_MEM_CTRL_WE |
7665 		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7666 		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7667 
7668 	writeq(val64, &bar0->rts_ds_mem_ctrl);
7669 
7670 	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7671 				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7672 				     S2IO_BIT_RESET);
7673 }
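/*
 * Illustrative call (hypothetical values): rts_ds_steer(nic, 0x2e, 3)
 * would steer packets carrying DSCP 46 (Expedited Forwarding) to receive
 * ring 3, assuming that ring is configured; codepoints above 63 are
 * rejected because the DS field is only 6 bits wide.
 */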
7674 
7675 static const struct net_device_ops s2io_netdev_ops = {
7676 	.ndo_open	        = s2io_open,
7677 	.ndo_stop	        = s2io_close,
7678 	.ndo_get_stats	        = s2io_get_stats,
7679 	.ndo_start_xmit    	= s2io_xmit,
7680 	.ndo_validate_addr	= eth_validate_addr,
7681 	.ndo_set_rx_mode	= s2io_set_multicast,
7682 	.ndo_do_ioctl	   	= s2io_ioctl,
7683 	.ndo_set_mac_address    = s2io_set_mac_addr,
7684 	.ndo_change_mtu	   	= s2io_change_mtu,
7685 	.ndo_set_features	= s2io_set_features,
7686 	.ndo_tx_timeout	   	= s2io_tx_watchdog,
7687 #ifdef CONFIG_NET_POLL_CONTROLLER
7688 	.ndo_poll_controller    = s2io_netpoll,
7689 #endif
7690 };
7691 
7692 /**
7693  *  s2io_init_nic - Initialization of the adapter .
7694  *  @pdev : structure containing the PCI related information of the device.
7695  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7696  *  Description:
7697  *  The function initializes an adapter identified by the pci_dev structure.
7698  *  All OS related initialization, including memory and device structure and
7699  *  initialization of the device private variable, is done. Also the swapper
7700  *  control register is initialized to enable read and write into the I/O
7701  *  registers of the device.
7702  *  Return value:
7703  *  returns 0 on success and negative on failure.
7704  */
7705 
7706 static int __devinit
7707 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7708 {
7709 	struct s2io_nic *sp;
7710 	struct net_device *dev;
7711 	int i, j, ret;
7712 	int dma_flag = false;
7713 	u32 mac_up, mac_down;
7714 	u64 val64 = 0, tmp64 = 0;
7715 	struct XENA_dev_config __iomem *bar0 = NULL;
7716 	u16 subid;
7717 	struct config_param *config;
7718 	struct mac_info *mac_control;
7719 	int mode;
7720 	u8 dev_intr_type = intr_type;
7721 	u8 dev_multiq = 0;
7722 
7723 	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7724 	if (ret)
7725 		return ret;
7726 
7727 	ret = pci_enable_device(pdev);
7728 	if (ret) {
7729 		DBG_PRINT(ERR_DBG,
7730 			  "%s: pci_enable_device failed\n", __func__);
7731 		return ret;
7732 	}
7733 
7734 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7735 		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7736 		dma_flag = true;
7737 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7738 			DBG_PRINT(ERR_DBG,
7739 				  "Unable to obtain 64bit DMA "
7740 				  "for consistent allocations\n");
7741 			pci_disable_device(pdev);
7742 			return -ENOMEM;
7743 		}
7744 	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7745 		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7746 	} else {
7747 		pci_disable_device(pdev);
7748 		return -ENOMEM;
7749 	}
7750 	ret = pci_request_regions(pdev, s2io_driver_name);
7751 	if (ret) {
7752 		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7753 			  __func__, ret);
7754 		pci_disable_device(pdev);
7755 		return -ENODEV;
7756 	}
7757 	if (dev_multiq)
7758 		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7759 	else
7760 		dev = alloc_etherdev(sizeof(struct s2io_nic));
7761 	if (dev == NULL) {
7762 		pci_disable_device(pdev);
7763 		pci_release_regions(pdev);
7764 		return -ENODEV;
7765 	}
7766 
7767 	pci_set_master(pdev);
7768 	pci_set_drvdata(pdev, dev);
7769 	SET_NETDEV_DEV(dev, &pdev->dev);
7770 
7771 	/*  Private member variable initialized to s2io NIC structure */
7772 	sp = netdev_priv(dev);
7773 	sp->dev = dev;
7774 	sp->pdev = pdev;
7775 	sp->high_dma_flag = dma_flag;
7776 	sp->device_enabled_once = false;
7777 	if (rx_ring_mode == 1)
7778 		sp->rxd_mode = RXD_MODE_1;
7779 	if (rx_ring_mode == 2)
7780 		sp->rxd_mode = RXD_MODE_3B;
7781 
7782 	sp->config.intr_type = dev_intr_type;
7783 
7784 	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7785 	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7786 		sp->device_type = XFRAME_II_DEVICE;
7787 	else
7788 		sp->device_type = XFRAME_I_DEVICE;
7789 
7790 
7791 	/* Initialize some PCI/PCI-X fields of the NIC. */
7792 	s2io_init_pci(sp);
7793 
7794 	/*
7795 	 * Setting the device configuration parameters.
7796 	 * Most of these parameters can be specified by the user during
7797 	 * module insertion as they are module loadable parameters. If
7798 	 * these parameters are not specified during load time, they
7799 	 * are initialized with default values.
7800 	 */
7801 	config = &sp->config;
7802 	mac_control = &sp->mac_control;
7803 
7804 	config->napi = napi;
7805 	config->tx_steering_type = tx_steering_type;
7806 
7807 	/* Tx side parameters. */
7808 	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7809 		config->tx_fifo_num = MAX_TX_FIFOS;
7810 	else
7811 		config->tx_fifo_num = tx_fifo_num;
7812 
7813 	/* Initialize the fifos used for tx steering */
7814 	if (config->tx_fifo_num < 5) {
7815 		if (config->tx_fifo_num  == 1)
7816 			sp->total_tcp_fifos = 1;
7817 		else
7818 			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7819 		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7820 		sp->total_udp_fifos = 1;
7821 		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7822 	} else {
7823 		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7824 				       FIFO_OTHER_MAX_NUM);
7825 		sp->udp_fifo_idx = sp->total_tcp_fifos;
7826 		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7827 		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7828 	}
7829 
7830 	config->multiq = dev_multiq;
7831 	for (i = 0; i < config->tx_fifo_num; i++) {
7832 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7833 
7834 		tx_cfg->fifo_len = tx_fifo_len[i];
7835 		tx_cfg->fifo_priority = i;
7836 	}
7837 
7838 	/* mapping the QoS priority to the configured fifos */
7839 	for (i = 0; i < MAX_TX_FIFOS; i++)
7840 		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7841 
7842 	/* map the hashing selector table to the configured fifos */
7843 	for (i = 0; i < config->tx_fifo_num; i++)
7844 		sp->fifo_selector[i] = fifo_selector[i];
7845 
7846 
7847 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7848 	for (i = 0; i < config->tx_fifo_num; i++) {
7849 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7850 
7851 		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7852 		if (tx_cfg->fifo_len < 65) {
7853 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7854 			break;
7855 		}
7856 	}
7857 	/* + 2 because one Txd for skb->data and one Txd for UFO */
7858 	config->max_txds = MAX_SKB_FRAGS + 2;
7859 
7860 	/* Rx side parameters. */
7861 	config->rx_ring_num = rx_ring_num;
7862 	for (i = 0; i < config->rx_ring_num; i++) {
7863 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7864 		struct ring_info *ring = &mac_control->rings[i];
7865 
7866 		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7867 		rx_cfg->ring_priority = i;
7868 		ring->rx_bufs_left = 0;
7869 		ring->rxd_mode = sp->rxd_mode;
7870 		ring->rxd_count = rxd_count[sp->rxd_mode];
7871 		ring->pdev = sp->pdev;
7872 		ring->dev = sp->dev;
7873 	}
7874 
7875 	for (i = 0; i < rx_ring_num; i++) {
7876 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7877 
7878 		rx_cfg->ring_org = RING_ORG_BUFF1;
7879 		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7880 	}
7881 
7882 	/*  Setting Mac Control parameters */
7883 	mac_control->rmac_pause_time = rmac_pause_time;
7884 	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7885 	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7886 
7887 
7888 	/*  initialize the shared memory used by the NIC and the host */
7889 	if (init_shared_mem(sp)) {
7890 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7891 		ret = -ENOMEM;
7892 		goto mem_alloc_failed;
7893 	}
7894 
7895 	sp->bar0 = pci_ioremap_bar(pdev, 0);
7896 	if (!sp->bar0) {
7897 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7898 			  dev->name);
7899 		ret = -ENOMEM;
7900 		goto bar0_remap_failed;
7901 	}
7902 
7903 	sp->bar1 = pci_ioremap_bar(pdev, 2);
7904 	if (!sp->bar1) {
7905 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7906 			  dev->name);
7907 		ret = -ENOMEM;
7908 		goto bar1_remap_failed;
7909 	}
7910 
7911 	dev->irq = pdev->irq;
7912 	dev->base_addr = (unsigned long)sp->bar0;
7913 
7914 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
7915 	for (j = 0; j < MAX_TX_FIFOS; j++) {
7916 		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7917 	}
7918 
7919 	/*  Driver entry points */
7920 	dev->netdev_ops = &s2io_netdev_ops;
7921 	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7922 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7923 		NETIF_F_TSO | NETIF_F_TSO6 |
7924 		NETIF_F_RXCSUM | NETIF_F_LRO;
7925 	dev->features |= dev->hw_features |
7926 		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7927 	if (sp->device_type & XFRAME_II_DEVICE) {
7928 		dev->hw_features |= NETIF_F_UFO;
7929 		if (ufo)
7930 			dev->features |= NETIF_F_UFO;
7931 	}
7932 	if (sp->high_dma_flag == true)
7933 		dev->features |= NETIF_F_HIGHDMA;
7934 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7935 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7936 	INIT_WORK(&sp->set_link_task, s2io_set_link);
7937 
7938 	pci_save_state(sp->pdev);
7939 
7940 	/* Setting swapper control on the NIC, for proper reset operation */
7941 	if (s2io_set_swapper(sp)) {
7942 		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7943 			  dev->name);
7944 		ret = -EAGAIN;
7945 		goto set_swap_failed;
7946 	}
7947 
7948 	/* Verify if the Herc works on the slot its placed into */
7949 	if (sp->device_type & XFRAME_II_DEVICE) {
7950 		mode = s2io_verify_pci_mode(sp);
7951 		if (mode < 0) {
7952 			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7953 				  __func__);
7954 			ret = -EBADSLT;
7955 			goto set_swap_failed;
7956 		}
7957 	}
7958 
7959 	if (sp->config.intr_type == MSI_X) {
7960 		sp->num_entries = config->rx_ring_num + 1;
7961 		ret = s2io_enable_msi_x(sp);
7962 
7963 		if (!ret) {
7964 			ret = s2io_test_msi(sp);
7965 			/* rollback MSI-X, will re-enable during add_isr() */
7966 			remove_msix_isr(sp);
7967 		}
7968 		if (ret) {
7969 
7970 			DBG_PRINT(ERR_DBG,
7971 				  "MSI-X requested but failed to enable\n");
7972 			sp->config.intr_type = INTA;
7973 		}
7974 	}
7975 
7976 	if (config->intr_type ==  MSI_X) {
7977 		for (i = 0; i < config->rx_ring_num ; i++) {
7978 			struct ring_info *ring = &mac_control->rings[i];
7979 
7980 			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7981 		}
7982 	} else {
7983 		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7984 	}
7985 
7986 	/* Not needed for Herc */
7987 	if (sp->device_type & XFRAME_I_DEVICE) {
7988 		/*
7989 		 * Fix for all "FFs" MAC address problems observed on
7990 		 * Alpha platforms
7991 		 */
7992 		fix_mac_address(sp);
7993 		s2io_reset(sp);
7994 	}
7995 
7996 	/*
7997 	 * MAC address initialization.
7998 	 * For now only one mac address will be read and used.
7999 	 */
8000 	bar0 = sp->bar0;
8001 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8002 		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8003 	writeq(val64, &bar0->rmac_addr_cmd_mem);
8004 	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8005 			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
8006 			      S2IO_BIT_RESET);
8007 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
8008 	mac_down = (u32)tmp64;
8009 	mac_up = (u32) (tmp64 >> 32);
8010 
8011 	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8012 	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8013 	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8014 	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8015 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8016 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8017 
8018 	/*  Set the factory defined MAC address initially   */
8019 	dev->addr_len = ETH_ALEN;
8020 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8021 	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8022 
8023 	/* initialize number of multicast & unicast MAC entries variables */
8024 	if (sp->device_type == XFRAME_I_DEVICE) {
8025 		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8026 		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8027 		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8028 	} else if (sp->device_type == XFRAME_II_DEVICE) {
8029 		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8030 		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8031 		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8032 	}
8033 
8034 	/* store mac addresses from CAM to s2io_nic structure */
8035 	do_s2io_store_unicast_mc(sp);
8036 
8037 	/* Configure MSIX vector for number of rings configured plus one */
8038 	if ((sp->device_type == XFRAME_II_DEVICE) &&
8039 	    (config->intr_type == MSI_X))
8040 		sp->num_entries = config->rx_ring_num + 1;
8041 
8042 	/* Store the values of the MSIX table in the s2io_nic structure */
8043 	store_xmsi_data(sp);
8044 	/* reset Nic and bring it to known state */
8045 	s2io_reset(sp);
8046 
8047 	/*
8048 	 * Initialize link state flags
8049 	 * and the card state parameter
8050 	 */
8051 	sp->state = 0;
8052 
8053 	/* Initialize spinlocks */
8054 	for (i = 0; i < sp->config.tx_fifo_num; i++) {
8055 		struct fifo_info *fifo = &mac_control->fifos[i];
8056 
8057 		spin_lock_init(&fifo->tx_lock);
8058 	}
8059 
8060 	/*
8061 	 * SXE-002: Configure link and activity LED to init state
8062 	 * on driver load.
8063 	 */
8064 	subid = sp->pdev->subsystem_device;
8065 	if ((subid & 0xFF) >= 0x07) {
8066 		val64 = readq(&bar0->gpio_control);
8067 		val64 |= 0x0000800000000000ULL;
8068 		writeq(val64, &bar0->gpio_control);
8069 		val64 = 0x0411040400000000ULL;
8070 		writeq(val64, (void __iomem *)bar0 + 0x2700);
8071 		val64 = readq(&bar0->gpio_control);
8072 	}
8073 
8074 	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8075 
8076 	if (register_netdev(dev)) {
8077 		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8078 		ret = -ENODEV;
8079 		goto register_failed;
8080 	}
8081 	s2io_vpd_read(sp);
8082 	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8083 	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8084 		  sp->product_name, pdev->revision);
8085 	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8086 		  s2io_driver_version);
8087 	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8088 	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8089 	if (sp->device_type & XFRAME_II_DEVICE) {
8090 		mode = s2io_print_pci_mode(sp);
8091 		if (mode < 0) {
8092 			ret = -EBADSLT;
8093 			unregister_netdev(dev);
8094 			goto set_swap_failed;
8095 		}
8096 	}
8097 	switch (sp->rxd_mode) {
8098 	case RXD_MODE_1:
8099 		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8100 			  dev->name);
8101 		break;
8102 	case RXD_MODE_3B:
8103 		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8104 			  dev->name);
8105 		break;
8106 	}
8107 
8108 	switch (sp->config.napi) {
8109 	case 0:
8110 		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8111 		break;
8112 	case 1:
8113 		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8114 		break;
8115 	}
8116 
8117 	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8118 		  sp->config.tx_fifo_num);
8119 
8120 	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8121 		  sp->config.rx_ring_num);
8122 
8123 	switch (sp->config.intr_type) {
8124 	case INTA:
8125 		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8126 		break;
8127 	case MSI_X:
8128 		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8129 		break;
8130 	}
8131 	if (sp->config.multiq) {
8132 		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8133 			struct fifo_info *fifo = &mac_control->fifos[i];
8134 
8135 			fifo->multiq = config->multiq;
8136 		}
8137 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8138 			  dev->name);
8139 	} else
8140 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8141 			  dev->name);
8142 
8143 	switch (sp->config.tx_steering_type) {
8144 	case NO_STEERING:
8145 		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8146 			  dev->name);
8147 		break;
8148 	case TX_PRIORITY_STEERING:
8149 		DBG_PRINT(ERR_DBG,
8150 			  "%s: Priority steering enabled for transmit\n",
8151 			  dev->name);
8152 		break;
8153 	case TX_DEFAULT_STEERING:
8154 		DBG_PRINT(ERR_DBG,
8155 			  "%s: Default steering enabled for transmit\n",
8156 			  dev->name);
8157 	}
8158 
8159 	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8160 		  dev->name);
8161 	if (ufo)
8162 		DBG_PRINT(ERR_DBG,
8163 			  "%s: UDP Fragmentation Offload(UFO) enabled\n",
8164 			  dev->name);
8165 	/* Initialize device name */
8166 	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8167 
8168 	if (vlan_tag_strip)
8169 		sp->vlan_strip_flag = 1;
8170 	else
8171 		sp->vlan_strip_flag = 0;
8172 
8173 	/*
8174 	 * Make Link state as off at this point, when the Link change
8175 	 * interrupt comes the state will be automatically changed to
8176 	 * the right state.
8177 	 */
8178 	netif_carrier_off(dev);
8179 
8180 	return 0;
8181 
8182 register_failed:
8183 set_swap_failed:
8184 	iounmap(sp->bar1);
8185 bar1_remap_failed:
8186 	iounmap(sp->bar0);
8187 bar0_remap_failed:
8188 mem_alloc_failed:
8189 	free_shared_mem(sp);
8190 	pci_disable_device(pdev);
8191 	pci_release_regions(pdev);
8192 	pci_set_drvdata(pdev, NULL);
8193 	free_netdev(dev);
8194 
8195 	return ret;
8196 }
8197 
8198 /**
8199  * s2io_rem_nic - Free the PCI device
8200  * @pdev: structure containing the PCI related information of the device.
8201  * Description: This function is called by the PCI subsystem to release a
8202  * PCI device and free up all resource held up by the device. This could
8203  * be in response to a Hot plug event or when the driver is to be removed
8204  * from memory.
8205  */
8206 
8207 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8208 {
8209 	struct net_device *dev = pci_get_drvdata(pdev);
8210 	struct s2io_nic *sp;
8211 
8212 	if (dev == NULL) {
8213 		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8214 		return;
8215 	}
8216 
8217 	sp = netdev_priv(dev);
8218 
8219 	cancel_work_sync(&sp->rst_timer_task);
8220 	cancel_work_sync(&sp->set_link_task);
8221 
8222 	unregister_netdev(dev);
8223 
8224 	free_shared_mem(sp);
8225 	iounmap(sp->bar0);
8226 	iounmap(sp->bar1);
8227 	pci_release_regions(pdev);
8228 	pci_set_drvdata(pdev, NULL);
8229 	free_netdev(dev);
8230 	pci_disable_device(pdev);
8231 }
8232 
8233 /**
8234  * s2io_starter - Entry point for the driver
8235  * Description: This function is the entry point for the driver. It verifies
8236  * the module loadable parameters and initializes PCI configuration space.
8237  */
8238 
8239 static int __init s2io_starter(void)
8240 {
8241 	return pci_register_driver(&s2io_driver);
8242 }
8243 
8244 /**
8245  * s2io_closer - Cleanup routine for the driver
8246  * Description: This function is the cleanup routine for the driver. It unregisters the driver.
8247  */
8248 
8249 static __exit void s2io_closer(void)
8250 {
8251 	pci_unregister_driver(&s2io_driver);
8252 	DBG_PRINT(INIT_DBG, "cleanup done\n");
8253 }
8254 
8255 module_init(s2io_starter);
8256 module_exit(s2io_closer);
8257 
8258 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8259 				struct tcphdr **tcp, struct RxD_t *rxdp,
8260 				struct s2io_nic *sp)
8261 {
8262 	int ip_off;
8263 	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8264 
8265 	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8266 		DBG_PRINT(INIT_DBG,
8267 			  "%s: Non-TCP frames not supported for LRO\n",
8268 			  __func__);
8269 		return -1;
8270 	}
8271 
8272 	/* Checking for DIX type or DIX type with VLAN */
8273 	if ((l2_type == 0) || (l2_type == 4)) {
8274 		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8275 		/*
8276 		 * If vlan stripping is disabled and the frame is VLAN tagged,
8277 		 * shift the offset by the VLAN header size bytes.
8278 		 */
8279 		if ((!sp->vlan_strip_flag) &&
8280 		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8281 			ip_off += HEADER_VLAN_SIZE;
8282 	} else {
8283 		/* LLC, SNAP etc are considered non-mergeable */
8284 		return -1;
8285 	}
8286 
8287 	*ip = (struct iphdr *)((u8 *)buffer + ip_off);
8288 	ip_len = (u8)((*ip)->ihl);
8289 	ip_len <<= 2;
8290 	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8291 
8292 	return 0;
8293 }
8294 
8295 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8296 				  struct tcphdr *tcp)
8297 {
8298 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8299 	if ((lro->iph->saddr != ip->saddr) ||
8300 	    (lro->iph->daddr != ip->daddr) ||
8301 	    (lro->tcph->source != tcp->source) ||
8302 	    (lro->tcph->dest != tcp->dest))
8303 		return -1;
8304 	return 0;
8305 }
8306 
8307 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8308 {
8309 	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8310 }
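/*
 * Worked example: a 1500 byte IP datagram with a 20 byte IP header
 * (ihl == 5) and a 32 byte TCP header (doff == 8, i.e. timestamps
 * present) yields 1500 - 20 - 32 = 1448 bytes of TCP payload.
 */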
8311 
8312 static void initiate_new_session(struct lro *lro, u8 *l2h,
8313 				 struct iphdr *ip, struct tcphdr *tcp,
8314 				 u32 tcp_pyld_len, u16 vlan_tag)
8315 {
8316 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8317 	lro->l2h = l2h;
8318 	lro->iph = ip;
8319 	lro->tcph = tcp;
8320 	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8321 	lro->tcp_ack = tcp->ack_seq;
8322 	lro->sg_num = 1;
8323 	lro->total_len = ntohs(ip->tot_len);
8324 	lro->frags_len = 0;
8325 	lro->vlan_tag = vlan_tag;
8326 	/*
8327 	 * Check if we saw TCP timestamp.
8328 	 * Other consistency checks have already been done.
8329 	 */
8330 	if (tcp->doff == 8) {
8331 		__be32 *ptr;
8332 		ptr = (__be32 *)(tcp+1);
8333 		lro->saw_ts = 1;
8334 		lro->cur_tsval = ntohl(*(ptr+1));
8335 		lro->cur_tsecr = *(ptr+2);
8336 	}
8337 	lro->in_use = 1;
8338 }
8339 
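/**
 * update_L3L4_header - Rewrite headers of an aggregated frame before flush
 * @sp: Device private structure
 * @lro: LRO session being flushed
 *
 * Updates the IP total length and checksum plus the TCP ack, window
 * and (if used) timestamp echo so the first frame's headers describe
 * the whole aggregate, and bumps the aggregation statistics.
 */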
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	__sum16 nchk;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	/* Update L3 header */
	ip->tot_len = htons(lro->total_len);
	ip->check = 0;
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	ip->check = nchk;

	/* Update L4 header */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		__be32 *ptr = (__be32 *)(tcp + 1);
		*(ptr + 2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	swstats->sum_avg_pkts_aggregated += lro->sg_num;
	swstats->num_aggregations++;
}

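/**
 * aggregate_new_rx - Account a new frame into an existing LRO session
 * @lro: Session the frame was matched to
 * @ip: IP header of the frame
 * @tcp: TCP header of the frame
 * @l4_pyld: Length of the frame's TCP payload
 *
 * Extends the session's lengths and expected sequence number and
 * caches the latest ack, window and timestamp values from this frame.
 */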
static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
			     struct tcphdr *tcp, u32 l4_pyld)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
	lro->total_len += l4_pyld;
	lro->frags_len += l4_pyld;
	lro->tcp_next_seq += l4_pyld;
	lro->sg_num++;

	/* Update the ack seq no. and window advertised (from this pkt)
	 * in the LRO object.
	 */
	lro->tcp_ack = tcp->ack_seq;
	lro->window = tcp->window;

	if (lro->saw_ts) {
		__be32 *ptr;
		/* Update tsecr and tsval from this packet */
		ptr = (__be32 *)(tcp + 1);
		lro->cur_tsval = ntohl(*(ptr + 1));
		lro->cur_tsecr = *(ptr + 2);
	}
}

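/**
 * verify_l3_l4_lro_capable - Check a frame's L3/L4 headers for LRO eligibility
 * @l_lro: Existing session the frame would join, or NULL for a new one
 * @ip: IP header of the frame
 * @tcp: TCP header of the frame
 * @tcp_pyld_len: Length of the frame's TCP payload
 *
 * Rejects pure acks, frames with IP options or an ECN CE mark, frames
 * with any TCP control flag besides ACK, and frames carrying any TCP
 * option other than a single valid timestamp. Returns 0 if the frame
 * is mergeable, -1 otherwise.
 */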
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst ||
	    tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently we recognize only the ack control word;
		 * any other control field being set results in
		 * flushing the LRO session.
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr + 1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr + 2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr + 6)) == 0)
			return -1;
	}

	return 0;
}

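/**
 * s2io_club_tcp_session - Classify a received frame for LRO handling
 * @ring_data: Rx ring the frame arrived on
 * @buffer: Start of the received frame
 * @tcp: Set to the frame's TCP header
 * @tcp_len: Set to the frame's TCP payload length
 * @lro: Set to the LRO session the frame maps to, if any
 * @rxdp: Rx descriptor that completed this frame
 * @sp: Device private structure
 *
 * Matches the frame against the ring's LRO sessions and tells the
 * caller what to do with it:
 * 0 - all sessions in use, send the frame up unaggregated;
 * 1 - frame was aggregated into an existing session;
 * 2 - out of sequence or not mergeable, flush both the matching
 *     session and this frame;
 * 3 - a new session was begun with this frame;
 * 4 - frame was aggregated and the session reached the per-session
 *     aggregation limit, so flush it;
 * 5 - frame is not L3/L4 aggregatable, send it up as is;
 * negative if the L2 framing rules out LRO altogether.
 */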
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Unexpected LRO decision %d\n",
			  __func__, ret);
		break;
	}

	return ret;
}

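/**
 * clear_lro_session - Reset an LRO object for reuse
 * @lro: Session to clear
 *
 * Zeroes the whole object, which also marks it as not in use.
 */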
static void clear_lro_session(struct lro *lro)
{
	memset(lro, 0, sizeof(struct lro));
}

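/**
 * queue_rx_frame - Pass a received frame up the network stack
 * @skb: Frame to deliver
 * @vlan_tag: VLAN tag to attach when stripping is enabled
 *
 * Sets the protocol, attaches the VLAN tag the hardware stripped,
 * and hands the skb to the stack via netif_receive_skb() (NAPI)
 * or netif_rx().
 */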
static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
{
	struct net_device *dev = skb->dev;
	struct s2io_nic *sp = netdev_priv(dev);

	skb->protocol = eth_type_trans(skb, dev);
	if (vlan_tag && sp->vlan_strip_flag)
		__vlan_hwaccel_put_tag(skb, vlan_tag);
	if (sp->config.napi)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
}

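/**
 * lro_append_pkt - Chain an aggregated frame onto the session's parent skb
 * @sp: Device private structure
 * @lro: Session the frame belongs to
 * @skb: Frame carrying the new TCP payload
 * @tcp_len: Length of the TCP payload to append
 *
 * Trims @skb down to its TCP payload and links it onto the parent
 * skb's frag_list, growing the parent's length and truesize to match.
 */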
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb, u32 tcp_len)
{
	struct sk_buff *first = lro->parent;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	first->len += tcp_len;
	first->data_len = lro->frags_len;
	skb_pull(skb, (skb->len - tcp_len));
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;
	else
		skb_shinfo(first)->frag_list = skb;
	first->truesize += skb->truesize;
	lro->last_frag = skb;
	swstats->clubbed_frms_cnt++;
}

/**
 * s2io_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * s2io_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * s2io_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void s2io_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (s2io_card_up(sp)) {
			pr_err("Can't bring device back up after reset.\n");
			return;
		}

		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
			s2io_card_down(sp);
			pr_err("Can't restore mac addr after reset.\n");
			return;
		}
	}

	netif_device_attach(netdev);
	netif_tx_wake_all_queues(netdev);
}