/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2007 Neterion Inc.

 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watchdog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     2(MSI_X). Default value is '2(MSI_X)'.
 * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
 *     Possible values '1' for enable, '0' for disable. Default is '0'.
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated as a single large packet.
 * napi: This parameter is used to enable/disable NAPI (polling Rx).
 *     Possible values '1' for enable and '0' for disable. Default is '1'.
 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload
 *      (UFO). Possible values '1' for enable and '0' for disable.
 *      Default is '0'.
 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
 *                 Possible values '1' for enable, '0' for disable.
 *                 Default is '2' - which means disable in promiscuous mode
 *                 and enable in non-promiscuous mode.
 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
 *      Possible values '1' for enable and '0' for disable. Default is '0'.
 ************************************************************************/
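
/*
 * Example (illustrative only, not part of the original source): loading
 * the driver with two Tx FIFOs, MSI-X interrupts and NAPI enabled:
 *
 *	modprobe s2io tx_fifo_num=2 intr_type=2 napi=1
 */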

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <asm/irq.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.26.25"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

static int rxd_size[2] = {32, 48};
static int rxd_count[2] = {127, 85};

static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
		(GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(dev_type == XFRAME_I_DEVICE) ?			\
		((((subid >= 0x600B) && (subid <= 0x600D)) || \
		 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
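
/*
 * For example (illustrative): an Xframe I card with subsystem id 0x600C
 * evaluates to 1 (faulty link indicator), while any Xframe II device
 * always yields 0.
 */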

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))

static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}

/* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};

static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};

static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};

#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)

#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
			init_timer(&timer);			\
			timer.function = handle;		\
			timer.data = (unsigned long) arg;	\
			mod_timer(&timer, (jiffies + exp))
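
/*
 * Typical use (illustrative): arm the alarm timer to fire in half a
 * second. Note the macro expands to multiple statements, so it must not
 * be used as the single body of an un-braced if/else:
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 */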

/* copy mac addr to def_mac_addr array */
static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
{
	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
}
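
/*
 * For example (illustrative): mac_addr = 0x000102030405ULL is stored
 * most significant byte first, i.e. mac_addr[0] = 0x00 ... mac_addr[5]
 * = 0x05, which is the on-wire byte order of the MAC address.
 */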

/* Add the vlan */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	int i;
	struct s2io_nic *nic = netdev_priv(dev);
	unsigned long flags[MAX_TX_FIFOS];
	struct mac_info *mac_control = &nic->mac_control;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++)
		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);

	nic->vlgrp = grp;
	for (i = config->tx_fifo_num - 1; i >= 0; i--)
		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
				       flags[i]);
}

/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	int i;
	struct s2io_nic *nic = netdev_priv(dev);
	unsigned long flags[MAX_TX_FIFOS];
	struct mac_info *mac_control = &nic->mac_control;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++)
		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);

	if (nic->vlgrp)
		vlan_group_set_device(nic->vlgrp, vid, NULL);

	for (i = config->tx_fifo_num - 1; i >= 0; i--)
		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
				       flags[i]);
}

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define	END_SIGN	0x0
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size (64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0};

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);

/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
	.err_handler = &s2io_err_handler,
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) ((len + per_each - 1) / per_each)
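
/*
 * E.g. (illustrative): TXD_MEM_PAGE_CNT(100, 30) = (100 + 30 - 1) / 30 = 4,
 * i.e. the macro is a ceiling division that rounds the page count up.
 */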

/* netqueue manipulation helper functions */
static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
	}
	netif_tx_stop_all_queues(sp->dev);
}

static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
{
	if (!sp->config.multiq)
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_STOP;

	netif_tx_stop_all_queues(sp->dev);
}

static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
	}
	netif_tx_start_all_queues(sp->dev);
}

static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
{
	if (!sp->config.multiq)
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_START;

	netif_tx_start_all_queues(sp->dev);
}

static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
	}
	netif_tx_wake_all_queues(sp->dev);
}

static inline void s2io_wake_tx_queue(
	struct fifo_info *fifo, int cnt, u8 multiq)
{
	if (multiq) {
		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(fifo->dev)) {
			fifo->queue_state = FIFO_QUEUE_START;
			netif_wake_queue(fifo->dev);
		}
	}
}

/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;

	struct mac_info *mac_control;
	struct config_param *config;
	unsigned long long mem_allocated = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;


	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
			DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
			DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
				" are 2 to 8192\n");
			return -EINVAL;
		}
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;
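
	/*
	 * Illustrative sizing: with 4 KiB pages and, say, a 256-byte TxD
	 * list (sizeof(struct TxD) * max_txds), 16 TxD lists fit per page.
	 */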

	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kzalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(INFO_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
		mac_control->fifos[i].dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(INFO_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address (can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					"%s: Zero DMA address for TxDL. ",
					dev->name);
				DBG_PRINT(INIT_DBG,
					"Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(INFO_DBG,
						  "failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		mac_control->fifos[i].ufo_in_band_v
			= kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!mac_control->fifos[i].ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;
		mac_control->rings[i].lro = lro_enable;

		blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; /* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was allocated until the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info) *
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated +=
				(sizeof(struct rxd_info) *
				 rxd_count[nic->rxd_mode]);
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
			    (unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
			    (u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
					GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			mem_allocated += (sizeof(struct buffAdd *) * blk_cnt);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						(rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				mem_allocated += (sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1));
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated +=
						(BUF0_LEN + ALIGN_SIZE);
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated
						+= (BUF1_LEN + ALIGN_SIZE);
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was allocated until the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}

/**
 * free_shared_mem - Free the allocated Memory
 * @nic:  Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;

	if (!nic)
		return;

	dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
					    lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
				 list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				"%s: Freeing TxDL with zero DMA addr. ",
				dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		kfree(mac_control->fifos[i].list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			(nic->config.tx_cfg[i].fifo_len *
			 sizeof(struct list_info_hold));
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
				block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
				block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed += size;
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				(sizeof(struct rxd_info) *
				 rxd_count[nic->rxd_mode]);
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba =
						&mac_control->rings[i].ba[j][k];
					kfree(ba->ba_0_org);
					nic->mac_control.stats_info->sw_stat.
					mem_freed += (BUF0_LEN + ALIGN_SIZE);
					kfree(ba->ba_1_org);
					nic->mac_control.stats_info->sw_stat.
					mem_freed += (BUF1_LEN + ALIGN_SIZE);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
				nic->mac_control.stats_info->sw_stat.mem_freed +=
					(sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1));
			}
			kfree(mac_control->rings[i].ba);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				(sizeof(struct buffAdd *) * blk_cnt);
		}
	}

	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		if (mac_control->fifos[i].ufo_in_band_v) {
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= (config->tx_cfg[i].fifo_len * sizeof(u64));
			kfree(mac_control->fifos[i].ufo_in_band_v);
		}
	}

	if (mac_control->stats_mem) {
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}

/**
 * s2io_verify_pci_mode - Verify the PCI bus mode the adapter operates in.
 */

static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;      /* Unknown PCI mode */
	return mode;
}

#define NEC_VENID   0x1033
#define NEC_DEVID   0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent) {
				pci_dev_put(tdev);
				return 1;
			}
		}
	}
	return 0;
}

static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
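
/*
 * bus_speed[] above is indexed by the PCI mode code read back via
 * GET_PCI_MODE(); s2io_print_pci_mode() below uses it to record
 * config->bus_speed in MHz (indexing inferred from that usage).
 */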
/**
 * s2io_print_pci_mode - Print the bus width and speed the adapter is on.
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
		break;
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
		break;
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		break;
	default:
		return -1;	/* Unsupported bus speed */
	}

	return mode;
}

/**
 *  init_tti - Initialization of the transmit traffic interrupt scheme
 *  @nic: device private variable
 *  @link: link status (UP/DOWN) used to enable/disable continuous
 *  transmit interrupts
 *  Description: The function configures the transmit traffic interrupts.
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config;

	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			int count = (nic->config.bus_speed * 125) / 2;
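			/*
			 * Illustrative arithmetic: with bus_speed = 133
			 * this gives count = (133 * 125) / 2 = 8312 as
			 * the Tx timer value.
			 */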
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
				TTI_DATA1_MEM_TX_URNG_B(0x10) |
				TTI_DATA1_MEM_TX_URNG_C(0x30) |
				TTI_DATA1_MEM_TX_TIMER_AC_EN;
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			if ((nic->config.tx_steering_type ==
				TX_DEFAULT_STEERING) &&
				(config->tx_fifo_num > 1) &&
				(i >= nic->udp_fifo_idx) &&
				(i < (nic->udp_fifo_idx +
				nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
				TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
			TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}

/**
 *  init_nic - Initialization of hardware
 *  @nic: device private variable
 *  Description: The function sequentially configures every block
 *  of the H/W from their reset values.
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	struct mac_info *mac_control;
	struct config_param *config;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

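	/*
	 * Each 64-bit tx_fifo_partition register describes two FIFOs, one
	 * per 32-bit half: a length field at bit offset (j * 32) + 19 and
	 * a priority field at (j * 32) + 5 (as encoded by the vBIT() calls
	 * in the loop below).
	 */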
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
				    ((j * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
		(nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
			 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

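	/*
	 * Illustrative arithmetic: with mem_size = 64 and three rings,
	 * ring 0 gets 64 / 3 + 64 % 3 = 22 shares while rings 1 and 2 get
	 * 21 each, i.e. the division remainder always lands on ring 0.
	 */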
	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority
	 */
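	/*
	 * The five tx_w_round_robin registers below hold 40 one-byte
	 * scheduling slots in total; e.g. (illustrative) with three FIFOs
	 * the slots repeat the cycle 0, 1, 2 so every FIFO is serviced
	 * equally often.
	 */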
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS with
	 * equal priority.
	 */
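	/*
	 * Likewise 40 one-byte Rx scheduling slots. The rts_qos_steering
	 * value appears to map each of the eight QoS queues (one byte per
	 * queue) to a ring bitmask; e.g. (illustrative) with two rings,
	 * 0x8080808040404040 steers four queues to ring 0 and four to
	 * ring 1. This reading is inferred from the values below, not
	 * stated in the original source.
	 */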
1556 	switch (config->rx_ring_num) {
1557 	case 1:
1558 		val64 = 0x0;
1559 		writeq(val64, &bar0->rx_w_round_robin_0);
1560 		writeq(val64, &bar0->rx_w_round_robin_1);
1561 		writeq(val64, &bar0->rx_w_round_robin_2);
1562 		writeq(val64, &bar0->rx_w_round_robin_3);
1563 		writeq(val64, &bar0->rx_w_round_robin_4);
1564 
1565 		val64 = 0x8080808080808080ULL;
1566 		writeq(val64, &bar0->rts_qos_steering);
1567 		break;
1568 	case 2:
1569 		val64 = 0x0001000100010001ULL;
1570 		writeq(val64, &bar0->rx_w_round_robin_0);
1571 		writeq(val64, &bar0->rx_w_round_robin_1);
1572 		writeq(val64, &bar0->rx_w_round_robin_2);
1573 		writeq(val64, &bar0->rx_w_round_robin_3);
1574 		val64 = 0x0001000100000000ULL;
1575 		writeq(val64, &bar0->rx_w_round_robin_4);
1576 
1577 		val64 = 0x8080808040404040ULL;
1578 		writeq(val64, &bar0->rts_qos_steering);
1579 		break;
1580 	case 3:
1581 		val64 = 0x0001020001020001ULL;
1582 		writeq(val64, &bar0->rx_w_round_robin_0);
1583 		val64 = 0x0200010200010200ULL;
1584 		writeq(val64, &bar0->rx_w_round_robin_1);
1585 		val64 = 0x0102000102000102ULL;
1586 		writeq(val64, &bar0->rx_w_round_robin_2);
1587 		val64 = 0x0001020001020001ULL;
1588 		writeq(val64, &bar0->rx_w_round_robin_3);
1589 		val64 = 0x0200010200000000ULL;
1590 		writeq(val64, &bar0->rx_w_round_robin_4);
1591 
1592 		val64 = 0x8080804040402020ULL;
1593 		writeq(val64, &bar0->rts_qos_steering);
1594 		break;
1595 	case 4:
1596 		val64 = 0x0001020300010203ULL;
1597 		writeq(val64, &bar0->rx_w_round_robin_0);
1598 		writeq(val64, &bar0->rx_w_round_robin_1);
1599 		writeq(val64, &bar0->rx_w_round_robin_2);
1600 		writeq(val64, &bar0->rx_w_round_robin_3);
1601 		val64 = 0x0001020300000000ULL;
1602 		writeq(val64, &bar0->rx_w_round_robin_4);
1603 
1604 		val64 = 0x8080404020201010ULL;
1605 		writeq(val64, &bar0->rts_qos_steering);
1606 		break;
1607 	case 5:
1608 		val64 = 0x0001020304000102ULL;
1609 		writeq(val64, &bar0->rx_w_round_robin_0);
1610 		val64 = 0x0304000102030400ULL;
1611 		writeq(val64, &bar0->rx_w_round_robin_1);
1612 		val64 = 0x0102030400010203ULL;
1613 		writeq(val64, &bar0->rx_w_round_robin_2);
1614 		val64 = 0x0400010203040001ULL;
1615 		writeq(val64, &bar0->rx_w_round_robin_3);
1616 		val64 = 0x0203040000000000ULL;
1617 		writeq(val64, &bar0->rx_w_round_robin_4);
1618 
1619 		val64 = 0x8080404020201008ULL;
1620 		writeq(val64, &bar0->rts_qos_steering);
1621 		break;
1622 	case 6:
1623 		val64 = 0x0001020304050001ULL;
1624 		writeq(val64, &bar0->rx_w_round_robin_0);
1625 		val64 = 0x0203040500010203ULL;
1626 		writeq(val64, &bar0->rx_w_round_robin_1);
1627 		val64 = 0x0405000102030405ULL;
1628 		writeq(val64, &bar0->rx_w_round_robin_2);
1629 		val64 = 0x0001020304050001ULL;
1630 		writeq(val64, &bar0->rx_w_round_robin_3);
1631 		val64 = 0x0203040500000000ULL;
1632 		writeq(val64, &bar0->rx_w_round_robin_4);
1633 
1634 		val64 = 0x8080404020100804ULL;
1635 		writeq(val64, &bar0->rts_qos_steering);
1636 		break;
1637 	case 7:
1638 		val64 = 0x0001020304050600ULL;
1639 		writeq(val64, &bar0->rx_w_round_robin_0);
1640 		val64 = 0x0102030405060001ULL;
1641 		writeq(val64, &bar0->rx_w_round_robin_1);
1642 		val64 = 0x0203040506000102ULL;
1643 		writeq(val64, &bar0->rx_w_round_robin_2);
1644 		val64 = 0x0304050600010203ULL;
1645 		writeq(val64, &bar0->rx_w_round_robin_3);
1646 		val64 = 0x0405060000000000ULL;
1647 		writeq(val64, &bar0->rx_w_round_robin_4);
1648 
1649 		val64 = 0x8080402010080402ULL;
1650 		writeq(val64, &bar0->rts_qos_steering);
1651 		break;
1652 	case 8:
1653 		val64 = 0x0001020304050607ULL;
1654 		writeq(val64, &bar0->rx_w_round_robin_0);
1655 		writeq(val64, &bar0->rx_w_round_robin_1);
1656 		writeq(val64, &bar0->rx_w_round_robin_2);
1657 		writeq(val64, &bar0->rx_w_round_robin_3);
1658 		val64 = 0x0001020300000000ULL;
1659 		writeq(val64, &bar0->rx_w_round_robin_4);
1660 
1661 		val64 = 0x8040201008040201ULL;
1662 		writeq(val64, &bar0->rts_qos_steering);
1663 		break;
1664 	}
1665 
1666 	/* UDP Fix */
1667 	val64 = 0;
1668 	for (i = 0; i < 8; i++)
1669 		writeq(val64, &bar0->rts_frm_len_n[i]);
1670 
1671 	/* Set the default rts frame length for the rings configured */
1672 	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1673 	for (i = 0 ; i < config->rx_ring_num ; i++)
1674 		writeq(val64, &bar0->rts_frm_len_n[i]);
1675 
1676 	/* Set the frame length desired by the user for the
1677 	 * configured rings
1678 	 */
1679 	for (i = 0; i < config->rx_ring_num; i++) {
1680 		/* If rts_frm_len[i] == 0 it is assumed that the user has not
1681 		 * specified frame length steering.
1682 		 * If the user provides a frame length then program
1683 		 * the rts_frm_len register with that value, otherwise
1684 		 * leave it as it is.
1685 		 */
1686 		if (rts_frm_len[i] != 0) {
1687 			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1688 				&bar0->rts_frm_len_n[i]);
1689 		}
1690 	}
1691 
1692 	/* Disable differentiated services steering logic */
1693 	for (i = 0; i < 64; i++) {
1694 		if (rts_ds_steer(nic, i, 0) == FAILURE) {
1695 			DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1696 				dev->name);
1697 			DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1698 			return -ENODEV;
1699 		}
1700 	}
1701 
1702 	/* Program statistics memory */
1703 	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1704 
1705 	if (nic->device_type == XFRAME_II_DEVICE) {
1706 		val64 = STAT_BC(0x320);
1707 		writeq(val64, &bar0->stat_byte_cnt);
1708 	}
1709 
1710 	/*
1711 	 * Initializing the sampling rate for the device to calculate the
1712 	 * bandwidth utilization.
1713 	 */
1714 	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1715 	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1716 	writeq(val64, &bar0->mac_link_util);
1717 
1718 	/*
1719 	 * Initializing the Transmit and Receive Traffic Interrupt
1720 	 * Scheme.
1721 	 */
1722 
1723 	/* Initialize TTI */
1724 	if (SUCCESS != init_tti(nic, nic->last_link_state))
1725 		return -ENODEV;
1726 
1727 	/* RTI Initialization */
1728 	if (nic->device_type == XFRAME_II_DEVICE) {
1729 		/*
1730 		 * Programmed to generate approximately 500
1731 		 * interrupts per second
1732 		 */
1733 		int count = (nic->config.bus_speed * 125)/4;
1734 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1735 	} else
1736 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1737 	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1738 		 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1739 		 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1740 
1741 	writeq(val64, &bar0->rti_data1_mem);
1742 
1743 	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1744 		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1745 	if (nic->config.intr_type == MSI_X)
1746 	    val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1747 			RTI_DATA2_MEM_RX_UFC_D(0x40));
1748 	else
1749 	    val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1750 			RTI_DATA2_MEM_RX_UFC_D(0x80));
1751 	writeq(val64, &bar0->rti_data2_mem);
1752 
1753 	for (i = 0; i < config->rx_ring_num; i++) {
1754 		val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1755 				| RTI_CMD_MEM_OFFSET(i);
1756 		writeq(val64, &bar0->rti_command_mem);
1757 
1758 		/*
1759 		 * Once the operation completes, the Strobe bit of the
1760 		 * command register will be reset. We poll for this
1761 		 * particular condition. We wait for a maximum of 500ms
1762 		 * for the operation to complete; if it's not complete
1763 		 * by then, we return an error.
1764 		 */
1765 		time = 0;
1766 		while (TRUE) {
1767 			val64 = readq(&bar0->rti_command_mem);
1768 			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1769 				break;
1770 
1771 			if (time > 10) {
1772 				DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1773 					  dev->name);
1774 				return -ENODEV;
1775 			}
1776 			time++;
1777 			msleep(50);
1778 		}
1779 	}
1780 
1781 	/*
1782 	 * Initializing proper Pause threshold values for all
1783 	 * the 8 Queues on the Rx side.
1784 	 */
1785 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1786 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1787 
1788 	/* Disable RMAC PAD STRIPPING */
1789 	add = &bar0->mac_cfg;
1790 	val64 = readq(&bar0->mac_cfg);
1791 	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1792 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1793 	writel((u32) (val64), add);
1794 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1795 	writel((u32) (val64 >> 32), (add + 4));
1796 	val64 = readq(&bar0->mac_cfg);
1797 
1798 	/* Enable FCS stripping by adapter */
1799 	add = &bar0->mac_cfg;
1800 	val64 = readq(&bar0->mac_cfg);
1801 	val64 |= MAC_CFG_RMAC_STRIP_FCS;
1802 	if (nic->device_type == XFRAME_II_DEVICE)
1803 		writeq(val64, &bar0->mac_cfg);
1804 	else {
1805 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1806 		writel((u32) (val64), add);
1807 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1808 		writel((u32) (val64 >> 32), (add + 4));
1809 	}
1810 
1811 	/*
1812 	 * Set the time value to be inserted in the pause frame
1813 	 * generated by xena.
1814 	 */
1815 	val64 = readq(&bar0->rmac_pause_cfg);
1816 	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1817 	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1818 	writeq(val64, &bar0->rmac_pause_cfg);
1819 
1820 	/*
1821 	 * Set the threshold limit for generating the pause frame.
1822 	 * If the amount of data in any queue exceeds the ratio
1823 	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256, a
1824 	 * pause frame is generated.
1825 	 */
1826 	val64 = 0;
1827 	for (i = 0; i < 4; i++) {
1828 		val64 |=
1829 		    (((u64) 0xFF00 | nic->mac_control.
1830 		      mc_pause_threshold_q0q3)
1831 		     << (i * 2 * 8));
1832 	}
1833 	writeq(val64, &bar0->mc_pause_thresh_q0q3);
1834 
1835 	val64 = 0;
1836 	for (i = 0; i < 4; i++) {
1837 		val64 |=
1838 		    (((u64) 0xFF00 | nic->mac_control.
1839 		      mc_pause_threshold_q4q7)
1840 		     << (i * 2 * 8));
1841 	}
1842 	writeq(val64, &bar0->mc_pause_thresh_q4q7);
1843 
1844 	/*
1845 	 * TxDMA will stop the read request if the number of read splits
1846 	 * has exceeded the limit pointed to by shared_splits.
1847 	 */
1848 	val64 = readq(&bar0->pic_control);
1849 	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1850 	writeq(val64, &bar0->pic_control);
1851 
1852 	if (nic->config.bus_speed == 266) {
1853 		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1854 		writeq(0x0, &bar0->read_retry_delay);
1855 		writeq(0x0, &bar0->write_retry_delay);
1856 	}
1857 
1858 	/*
1859 	 * Programming the Herc to split every write transaction
1860 	 * that does not start on an ADB to reduce disconnects.
1861 	 */
1862 	if (nic->device_type == XFRAME_II_DEVICE) {
1863 		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1864 			MISC_LINK_STABILITY_PRD(3);
1865 		writeq(val64, &bar0->misc_control);
1866 		val64 = readq(&bar0->pic_control2);
1867 		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1868 		writeq(val64, &bar0->pic_control2);
1869 	}
1870 	if (strstr(nic->product_name, "CX4")) {
1871 		val64 = TMAC_AVG_IPG(0x17);
1872 		writeq(val64, &bar0->tmac_avg_ipg);
1873 	}
1874 
1875 	return SUCCESS;
1876 }
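
/*
 * Illustrative sketch (not compiled into the driver): the strobe-poll
 * pattern used above when programming the RTI command memory. A command
 * is issued with the strobe bit set; the hardware clears the bit once
 * the command has completed, so the driver polls until the bit clears
 * or a budget of roughly 500ms (50ms x ~10 iterations) is exhausted.
 * The helper name is hypothetical.
 */
#if 0
static int example_wait_strobe_clear(void __iomem *cmd_reg, u64 strobe_bit)
{
	int time;

	for (time = 0; time <= 10; time++) {
		/* Hardware clears the strobe bit when the command is done */
		if (!(readq(cmd_reg) & strobe_bit))
			return SUCCESS;
		msleep(50);
	}
	return FAILURE;
}
#endif
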
1877 #define LINK_UP_DOWN_INTERRUPT		1
1878 #define MAC_RMAC_ERR_TIMER		2
1879 
1880 static int s2io_link_fault_indication(struct s2io_nic *nic)
1881 {
1882 	if (nic->device_type == XFRAME_II_DEVICE)
1883 		return LINK_UP_DOWN_INTERRUPT;
1884 	else
1885 		return MAC_RMAC_ERR_TIMER;
1886 }
1887 
1888 /**
1889  *  do_s2io_write_bits -  update alarm bits in alarm register
1890  *  @value: alarm bits
1891  *  @flag: ENABLE_INTRS or DISABLE_INTRS
1892  *  @addr: address value
1893  *  Description: update alarm bits in alarm register
1894  *  Return Value:
1895  *  NONE.
1896  */
1897 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1898 {
1899 	u64 temp64;
1900 
1901 	temp64 = readq(addr);
1902 
1903 	if(flag == ENABLE_INTRS)
1904 		temp64 &= ~((u64) value);
1905 	else
1906 		temp64 |= ((u64) value);
1907 	writeq(temp64, addr);
1908 }
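
/*
 * Illustrative sketch (not compiled into the driver): calling
 * do_s2io_write_bits(). Note the inverted flag semantics - ENABLE_INTRS
 * *clears* the given bits in the mask register (unmasking the alarms),
 * while any other flag sets them (masking the alarms). The wrapper
 * function below is hypothetical; the register and bit names are real.
 */
#if 0
static void example_toggle_pfc_alarm(struct s2io_nic *nic, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;

	/* flag == ENABLE_INTRS unmasks, flag == DISABLE_INTRS masks */
	do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM, flag,
			&bar0->pfc_err_mask);
}
#endif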
1909 
1910 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1911 {
1912 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1913 	register u64 gen_int_mask = 0;
1914 	u64 interruptible;
1915 
1916 	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1917 	if (mask & TX_DMA_INTR) {
1918 
1919 		gen_int_mask |= TXDMA_INT_M;
1920 
1921 		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1922 				TXDMA_PCC_INT | TXDMA_TTI_INT |
1923 				TXDMA_LSO_INT | TXDMA_TPA_INT |
1924 				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1925 
1926 		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1927 				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1928 				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1929 				&bar0->pfc_err_mask);
1930 
1931 		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1932 				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1933 				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1934 
1935 		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1936 				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1937 				PCC_N_SERR | PCC_6_COF_OV_ERR |
1938 				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1939 				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1940 				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1941 
1942 		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1943 				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1944 
1945 		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1946 				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1947 				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1948 				flag, &bar0->lso_err_mask);
1949 
1950 		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1951 				flag, &bar0->tpa_err_mask);
1952 
1953 		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1954 
1955 	}
1956 
1957 	if (mask & TX_MAC_INTR) {
1958 		gen_int_mask |= TXMAC_INT_M;
1959 		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1960 				&bar0->mac_int_mask);
1961 		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1962 				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1963 				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1964 				flag, &bar0->mac_tmac_err_mask);
1965 	}
1966 
1967 	if (mask & TX_XGXS_INTR) {
1968 		gen_int_mask |= TXXGXS_INT_M;
1969 		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1970 				&bar0->xgxs_int_mask);
1971 		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1972 				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1973 				flag, &bar0->xgxs_txgxs_err_mask);
1974 	}
1975 
1976 	if (mask & RX_DMA_INTR) {
1977 		gen_int_mask |= RXDMA_INT_M;
1978 		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1979 				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1980 				flag, &bar0->rxdma_int_mask);
1981 		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1982 				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1983 				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1984 				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1985 		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1986 				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1987 				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1988 				&bar0->prc_pcix_err_mask);
1989 		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1990 				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1991 				&bar0->rpa_err_mask);
1992 		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1993 				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1994 				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1995 				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
1996 				flag, &bar0->rda_err_mask);
1997 		do_s2io_write_bits(RTI_SM_ERR_ALARM |
1998 				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1999 				flag, &bar0->rti_err_mask);
2000 	}
2001 
2002 	if (mask & RX_MAC_INTR) {
2003 		gen_int_mask |= RXMAC_INT_M;
2004 		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2005 				&bar0->mac_int_mask);
2006 		interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2007 				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2008 				RMAC_DOUBLE_ECC_ERR;
2009 		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
2010 			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
2011 		do_s2io_write_bits(interruptible,
2012 				flag, &bar0->mac_rmac_err_mask);
2013 	}
2014 
2015 	if (mask & RX_XGXS_INTR)
2016 	{
2017 		gen_int_mask |= RXXGXS_INT_M;
2018 		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2019 				&bar0->xgxs_int_mask);
2020 		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2021 				&bar0->xgxs_rxgxs_err_mask);
2022 	}
2023 
2024 	if (mask & MC_INTR) {
2025 		gen_int_mask |= MC_INT_M;
2026 		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
2027 		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2028 				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2029 				&bar0->mc_err_mask);
2030 	}
2031 	nic->general_int_mask = gen_int_mask;
2032 
2033 	/* Remove this line when alarm interrupts are enabled */
2034 	nic->general_int_mask = 0;
2035 }
2036 /**
2037  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
2038  *  @nic: device private variable,
2039  *  @mask: A mask indicating which Intr block must be modified and,
2040  *  @flag: A flag indicating whether to enable or disable the Intrs.
2041  *  Description: This function will either disable or enable the interrupts
2042  *  depending on the flag argument. The mask argument can be used to
2043  *  enable/disable any Intr block.
2044  *  Return Value: NONE.
2045  */
2046 
2047 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2048 {
2049 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2050 	register u64 temp64 = 0, intr_mask = 0;
2051 
2052 	intr_mask = nic->general_int_mask;
2053 
2054 	/*  Top level interrupt classification */
2055 	/*  PIC Interrupts */
2056 	if (mask & TX_PIC_INTR) {
2057 		/*  Enable PIC Intrs in the general intr mask register */
2058 		intr_mask |= TXPIC_INT_M;
2059 		if (flag == ENABLE_INTRS) {
2060 			/*
2061 			 * If it's a Hercules adapter, enable GPIO; otherwise
2062 			 * disable all PCIX, Flash, MDIO, IIC and GPIO
2063 			 * interrupts for now.
2064 			 * TODO
2065 			 */
2066 			if (s2io_link_fault_indication(nic) ==
2067 					LINK_UP_DOWN_INTERRUPT ) {
2068 				do_s2io_write_bits(PIC_INT_GPIO, flag,
2069 						&bar0->pic_int_mask);
2070 				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2071 						&bar0->gpio_int_mask);
2072 			} else
2073 				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2074 		} else if (flag == DISABLE_INTRS) {
2075 			/*
2076 			 * Disable PIC Intrs in the general
2077 			 * intr mask register
2078 			 */
2079 			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2080 		}
2081 	}
2082 
2083 	/*  Tx traffic interrupts */
2084 	if (mask & TX_TRAFFIC_INTR) {
2085 		intr_mask |= TXTRAFFIC_INT_M;
2086 		if (flag == ENABLE_INTRS) {
2087 			/*
2088 			 * Enable all the Tx side interrupts;
2089 			 * writing 0 enables all 64 Tx interrupt levels.
2090 			 */
2091 			writeq(0x0, &bar0->tx_traffic_mask);
2092 		} else if (flag == DISABLE_INTRS) {
2093 			/*
2094 			 * Disable Tx Traffic Intrs in the general intr mask
2095 			 * register.
2096 			 */
2097 			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2098 		}
2099 	}
2100 
2101 	/*  Rx traffic interrupts */
2102 	if (mask & RX_TRAFFIC_INTR) {
2103 		intr_mask |= RXTRAFFIC_INT_M;
2104 		if (flag == ENABLE_INTRS) {
2105 			/* writing 0 enables all 8 Rx interrupt levels */
2106 			writeq(0x0, &bar0->rx_traffic_mask);
2107 		} else if (flag == DISABLE_INTRS) {
2108 			/*
2109 			 * Disable Rx Traffic Intrs in the general intr mask
2110 			 * register.
2111 			 */
2112 			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2113 		}
2114 	}
2115 
2116 	temp64 = readq(&bar0->general_int_mask);
2117 	if (flag == ENABLE_INTRS)
2118 		temp64 &= ~((u64) intr_mask);
2119 	else
2120 		temp64 = DISABLE_ALL_INTRS;
2121 	writeq(temp64, &bar0->general_int_mask);
2122 
2123 	nic->general_int_mask = readq(&bar0->general_int_mask);
2124 }
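
/*
 * Illustrative sketch (not compiled into the driver): how the driver
 * uses en_dis_able_nic_intrs(), mirroring the calls made in stop_nic()
 * further down. The wrapper function below is hypothetical.
 */
#if 0
static void example_toggle_traffic_intrs(struct s2io_nic *nic, int on)
{
	u16 mask = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_PIC_INTR;

	en_dis_able_nic_intrs(nic, mask,
			on ? ENABLE_INTRS : DISABLE_INTRS);
}
#endif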
2125 
2126 /**
2127  *  verify_pcc_quiescent - Checks for PCC quiescent state
2128  *  Return: 1 if the PCC is quiescent
2129  *          0 if the PCC is not quiescent
2130  */
2131 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2132 {
2133 	int ret = 0, herc;
2134 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2135 	u64 val64 = readq(&bar0->adapter_status);
2136 
2137 	herc = (sp->device_type == XFRAME_II_DEVICE);
2138 
2139 	if (flag == FALSE) {
2140 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2141 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2142 				ret = 1;
2143 		} else {
2144 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2145 				ret = 1;
2146 		}
2147 	} else {
2148 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2149 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2150 			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2151 				ret = 1;
2152 		} else {
2153 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2154 			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2155 				ret = 1;
2156 		}
2157 	}
2158 
2159 	return ret;
2160 }
2161 /**
2162  *  verify_xena_quiescence - Checks whether the H/W is ready
2163  *  Description: Returns whether the H/W is ready to go or not. Depending
2164  *  on whether adapter enable bit was written or not the comparison
2165  *  differs and the calling function passes the input argument flag to
2166  *  indicate this.
2167  *  Return: 1 if Xena is quiescent
2168  *          0 if Xena is not quiescent
2169  */
2170 
2171 static int verify_xena_quiescence(struct s2io_nic *sp)
2172 {
2173 	int  mode;
2174 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2175 	u64 val64 = readq(&bar0->adapter_status);
2176 	mode = s2io_verify_pci_mode(sp);
2177 
2178 	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2179 		DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2180 		return 0;
2181 	}
2182 	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2183 		DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2184 		return 0;
2185 	}
2186 	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2187 		DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2188 		return 0;
2189 	}
2190 	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2191 		DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2192 		return 0;
2193 	}
2194 	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2195 		DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2196 		return 0;
2197 	}
2198 	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2199 		DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2200 		return 0;
2201 	}
2202 	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2203 		DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2204 		return 0;
2205 	}
2206 	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2207 		DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2208 		return 0;
2209 	}
2210 
2211 	/*
2212 	 * In PCI 33 mode, the P_PLL is not used, and therefore,
2213 	 * the P_PLL_LOCK bit in the adapter_status register will
2214 	 * not be asserted.
2215 	 */
2216 	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2217 		sp->device_type == XFRAME_II_DEVICE && mode !=
2218 		PCI_MODE_PCI_33) {
2219 		DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2220 		return 0;
2221 	}
2222 	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2223 			ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2224 		DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2225 		return 0;
2226 	}
2227 	return 1;
2228 }
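
/*
 * Illustrative sketch (not compiled into the driver): the readiness
 * checks above all share one shape - test a field of adapter_status and
 * bail out with a message if it is not in the expected state. A table-
 * driven variant might look like this; the table name is hypothetical.
 */
#if 0
static const struct {
	u64 bit;
	const char *msg;
} example_ready_bits[] = {
	{ ADAPTER_STATUS_TDMA_READY,     "TDMA is not ready!" },
	{ ADAPTER_STATUS_RDMA_READY,     "RDMA is not ready!" },
	{ ADAPTER_STATUS_PFC_READY,      "PFC is not ready!" },
	{ ADAPTER_STATUS_TMAC_BUF_EMPTY, "TMAC BUF is not empty!" },
};
#endif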
2229 
2230 /**
2231  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2232  * @sp: Pointer to device specific structure
2233  * Description :
2234  * New procedure to clear MAC address reading problems on Alpha platforms
2235  *
2236  */
2237 
2238 static void fix_mac_address(struct s2io_nic * sp)
2239 {
2240 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2241 	u64 val64;
2242 	int i = 0;
2243 
2244 	while (fix_mac[i] != END_SIGN) {
2245 		writeq(fix_mac[i++], &bar0->gpio_control);
2246 		udelay(10);
2247 		val64 = readq(&bar0->gpio_control);
2248 	}
2249 }
2250 
2251 /**
2252  *  start_nic - Turns the device on
2253  *  @nic : device private variable.
2254  *  Description:
2255  *  This function actually turns the device on. Before this function is
2256  *  called, all registers are configured from their reset states
2257  *  and shared memory is allocated but the NIC is still quiescent. On
2258  *  calling this function, the device interrupts are cleared and the NIC is
2259  *  literally switched on by writing into the adapter control register.
2260  *  Return Value:
2261  *  SUCCESS on success and -1 on failure.
2262  */
2263 
2264 static int start_nic(struct s2io_nic *nic)
2265 {
2266 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2267 	struct net_device *dev = nic->dev;
2268 	register u64 val64 = 0;
2269 	u16 subid, i;
2270 	struct mac_info *mac_control;
2271 	struct config_param *config;
2272 
2273 	mac_control = &nic->mac_control;
2274 	config = &nic->config;
2275 
2276 	/*  PRC Initialization and configuration */
2277 	for (i = 0; i < config->rx_ring_num; i++) {
2278 		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2279 		       &bar0->prc_rxd0_n[i]);
2280 
2281 		val64 = readq(&bar0->prc_ctrl_n[i]);
2282 		if (nic->rxd_mode == RXD_MODE_1)
2283 			val64 |= PRC_CTRL_RC_ENABLED;
2284 		else
2285 			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2286 		if (nic->device_type == XFRAME_II_DEVICE)
2287 			val64 |= PRC_CTRL_GROUP_READS;
2288 		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2289 		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2290 		writeq(val64, &bar0->prc_ctrl_n[i]);
2291 	}
2292 
2293 	if (nic->rxd_mode == RXD_MODE_3B) {
2294 		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2295 		val64 = readq(&bar0->rx_pa_cfg);
2296 		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2297 		writeq(val64, &bar0->rx_pa_cfg);
2298 	}
2299 
2300 	if (vlan_tag_strip == 0) {
2301 		val64 = readq(&bar0->rx_pa_cfg);
2302 		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2303 		writeq(val64, &bar0->rx_pa_cfg);
2304 		nic->vlan_strip_flag = 0;
2305 	}
2306 
2307 	/*
2308 	 * Enabling MC-RLDRAM. After enabling the device, we wait
2309 	 * for around 100ms, which is approximately the time required
2310 	 * for the device to be ready for operation.
2311 	 */
2312 	val64 = readq(&bar0->mc_rldram_mrs);
2313 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2314 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2315 	val64 = readq(&bar0->mc_rldram_mrs);
2316 
2317 	msleep(100);	/* Delay by around 100 ms. */
2318 
2319 	/* Enabling ECC Protection. */
2320 	val64 = readq(&bar0->adapter_control);
2321 	val64 &= ~ADAPTER_ECC_EN;
2322 	writeq(val64, &bar0->adapter_control);
2323 
2324 	/*
2325 	 * Verify if the device is ready to be enabled, if so enable
2326 	 * it.
2327 	 */
2328 	val64 = readq(&bar0->adapter_status);
2329 	if (!verify_xena_quiescence(nic)) {
2330 		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2331 		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2332 			  (unsigned long long) val64);
2333 		return FAILURE;
2334 	}
2335 
2336 	/*
2337 	 * With some switches, link might be already up at this point.
2338 	 * Because of this weird behavior, when we enable laser,
2339 	 * we may not get link. We need to handle this. We cannot
2340 	 * figure out which switch is misbehaving. So we are forced to
2341 	 * make a global change.
2342 	 */
2343 
2344 	/* Enabling Laser. */
2345 	val64 = readq(&bar0->adapter_control);
2346 	val64 |= ADAPTER_EOI_TX_ON;
2347 	writeq(val64, &bar0->adapter_control);
2348 
2349 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2350 		/*
2351 		 * Don't see link state interrupts initially on some switches,
2352 		 * so directly scheduling the link state task here.
2353 		 */
2354 		schedule_work(&nic->set_link_task);
2355 	}
2356 	/* SXE-002: Initialize link and activity LED */
2357 	subid = nic->pdev->subsystem_device;
2358 	if (((subid & 0xFF) >= 0x07) &&
2359 	    (nic->device_type == XFRAME_I_DEVICE)) {
2360 		val64 = readq(&bar0->gpio_control);
2361 		val64 |= 0x0000800000000000ULL;
2362 		writeq(val64, &bar0->gpio_control);
2363 		val64 = 0x0411040400000000ULL;
2364 		writeq(val64, (void __iomem *)bar0 + 0x2700);
2365 	}
2366 
2367 	return SUCCESS;
2368 }
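
/*
 * Illustrative sketch (not compiled into the driver): the per-ring PRC
 * programming done in start_nic() above, isolated for clarity. The
 * descriptor base address is written first, then the ring is enabled
 * with a receive-descriptor backoff interval. The helper name is
 * hypothetical; the registers and bits are the ones used above.
 */
#if 0
static void example_enable_prc_ring(struct s2io_nic *nic, int i)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64;

	writeq((u64) nic->mac_control.rings[i].rx_blocks[0].block_dma_addr,
	       &bar0->prc_rxd0_n[i]);

	val64 = readq(&bar0->prc_ctrl_n[i]);
	val64 |= PRC_CTRL_RC_ENABLED;
	val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
	val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
	writeq(val64, &bar0->prc_ctrl_n[i]);
}
#endif
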
2369 /**
2370  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2371  */
2372 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2373 					TxD *txdlp, int get_off)
2374 {
2375 	struct s2io_nic *nic = fifo_data->nic;
2376 	struct sk_buff *skb;
2377 	struct TxD *txds;
2378 	u16 j, frg_cnt;
2379 
2380 	txds = txdlp;
2381 	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2382 		pci_unmap_single(nic->pdev, (dma_addr_t)
2383 			txds->Buffer_Pointer, sizeof(u64),
2384 			PCI_DMA_TODEVICE);
2385 		txds++;
2386 	}
2387 
2388 	skb = (struct sk_buff *) ((unsigned long)
2389 			txds->Host_Control);
2390 	if (!skb) {
2391 		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2392 		return NULL;
2393 	}
2394 	pci_unmap_single(nic->pdev, (dma_addr_t)
2395 			 txds->Buffer_Pointer,
2396 			 skb->len - skb->data_len,
2397 			 PCI_DMA_TODEVICE);
2398 	frg_cnt = skb_shinfo(skb)->nr_frags;
2399 	if (frg_cnt) {
2400 		txds++;
2401 		for (j = 0; j < frg_cnt; j++, txds++) {
2402 			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2403 			if (!txds->Buffer_Pointer)
2404 				break;
2405 			pci_unmap_page(nic->pdev, (dma_addr_t)
2406 					txds->Buffer_Pointer,
2407 				       frag->size, PCI_DMA_TODEVICE);
2408 		}
2409 	}
2410 	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2411 	return skb;
2412 }
2413 
2414 /**
2415  *  free_tx_buffers - Free all queued Tx buffers
2416  *  @nic : device private variable.
2417  *  Description:
2418  *  Free all queued Tx buffers.
2419  *  Return Value: void
2420 */
2421 
2422 static void free_tx_buffers(struct s2io_nic *nic)
2423 {
2424 	struct net_device *dev = nic->dev;
2425 	struct sk_buff *skb;
2426 	struct TxD *txdp;
2427 	int i, j;
2428 	struct mac_info *mac_control;
2429 	struct config_param *config;
2430 	int cnt = 0;
2431 
2432 	mac_control = &nic->mac_control;
2433 	config = &nic->config;
2434 
2435 	for (i = 0; i < config->tx_fifo_num; i++) {
2436 		unsigned long flags;
2437 		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2438 		for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2439 			txdp = (struct TxD *) \
2440 			mac_control->fifos[i].list_info[j].list_virt_addr;
2441 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2442 			if (skb) {
2443 				nic->mac_control.stats_info->sw_stat.mem_freed
2444 					+= skb->truesize;
2445 				dev_kfree_skb(skb);
2446 				cnt++;
2447 			}
2448 		}
2449 		DBG_PRINT(INTR_DBG,
2450 			  "%s:forcibly freeing %d skbs on FIFO%d\n",
2451 			  dev->name, cnt, i);
2452 		mac_control->fifos[i].tx_curr_get_info.offset = 0;
2453 		mac_control->fifos[i].tx_curr_put_info.offset = 0;
2454 		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2455 	}
2456 }
2457 
2458 /**
2459  *   stop_nic -  To stop the nic
2460  *   @nic : device private variable.
2461  *   Description:
2462  *   This function does exactly the opposite of what the start_nic()
2463  *   function does. This function is called to stop the device.
2464  *   Return Value:
2465  *   void.
2466  */
2467 
2468 static void stop_nic(struct s2io_nic *nic)
2469 {
2470 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2471 	register u64 val64 = 0;
2472 	u16 interruptible;
2473 	struct mac_info *mac_control;
2474 	struct config_param *config;
2475 
2476 	mac_control = &nic->mac_control;
2477 	config = &nic->config;
2478 
2479 	/*  Disable all interrupts */
2480 	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2481 	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2482 	interruptible |= TX_PIC_INTR;
2483 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2484 
2485 	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2486 	val64 = readq(&bar0->adapter_control);
2487 	val64 &= ~(ADAPTER_CNTL_EN);
2488 	writeq(val64, &bar0->adapter_control);
2489 }
2490 
2491 /**
2492  *  fill_rx_buffers - Allocates the Rx side skbs
2493  *  @ring_info: per ring structure
2494  *  @from_card_up: If this is true, we will map the buffer to get
2495  *     the dma address for buf0 and buf1 to give it to the card.
2496  *     Else we will sync the already mapped buffer to give it to the card.
2497  *  Description:
2498  *  The function allocates Rx side skbs and puts the physical
2499  *  address of these buffers into the RxD buffer pointers, so that the NIC
2500  *  can DMA the received frame into these locations.
2501  *  The NIC supports 3 receive modes, viz
2502  *  1. single buffer,
2503  *  2. three buffer and
2504  *  3. five buffer modes.
2505  *  Each mode defines how many fragments the received frame will be split
2506  *  up into by the NIC. The frame is split into L3 header, L4 header and
2507  *  L4 payload in three buffer mode, and in five buffer mode the L4
2508  *  payload itself is split into 3 fragments. As of now the single
2509  *  buffer and two buffer modes are supported.
2510  *   Return Value:
2511  *  SUCCESS on success or an appropriate -ve value on failure.
2512  */
2513 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2514 				int from_card_up)
2515 {
2516 	struct sk_buff *skb;
2517 	struct RxD_t *rxdp;
2518 	int off, size, block_no, block_no1;
2519 	u32 alloc_tab = 0;
2520 	u32 alloc_cnt;
2521 	u64 tmp;
2522 	struct buffAdd *ba;
2523 	struct RxD_t *first_rxdp = NULL;
2524 	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2525 	int rxd_index = 0;
2526 	struct RxD1 *rxdp1;
2527 	struct RxD3 *rxdp3;
2528 	struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;
2529 
2530 	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2531 
2532 	block_no1 = ring->rx_curr_get_info.block_index;
2533 	while (alloc_tab < alloc_cnt) {
2534 		block_no = ring->rx_curr_put_info.block_index;
2535 
2536 		off = ring->rx_curr_put_info.offset;
2537 
2538 		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2539 
2540 		rxd_index = off + 1;
2541 		if (block_no)
2542 			rxd_index += (block_no * ring->rxd_count);
2543 
2544 		if ((block_no == block_no1) &&
2545 			(off == ring->rx_curr_get_info.offset) &&
2546 			(rxdp->Host_Control)) {
2547 			DBG_PRINT(INTR_DBG, "%s: Get and Put",
2548 				ring->dev->name);
2549 			DBG_PRINT(INTR_DBG, " info equated\n");
2550 			goto end;
2551 		}
2552 		if (off && (off == ring->rxd_count)) {
2553 			ring->rx_curr_put_info.block_index++;
2554 			if (ring->rx_curr_put_info.block_index ==
2555 							ring->block_count)
2556 				ring->rx_curr_put_info.block_index = 0;
2557 			block_no = ring->rx_curr_put_info.block_index;
2558 			off = 0;
2559 			ring->rx_curr_put_info.offset = off;
2560 			rxdp = ring->rx_blocks[block_no].block_virt_addr;
2561 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2562 				  ring->dev->name, rxdp);
2563 
2564 		}
2565 
2566 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2567 			((ring->rxd_mode == RXD_MODE_3B) &&
2568 				(rxdp->Control_2 & s2BIT(0)))) {
2569 			ring->rx_curr_put_info.offset = off;
2570 			goto end;
2571 		}
2572 		/* calculate size of skb based on ring mode */
2573 		size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2574 				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2575 		if (ring->rxd_mode == RXD_MODE_1)
2576 			size += NET_IP_ALIGN;
2577 		else
2578 			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2579 
2580 		/* allocate skb */
2581 		skb = dev_alloc_skb(size);
2582 		if(!skb) {
2583 			DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
2584 			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2585 			if (first_rxdp) {
2586 				wmb();
2587 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2588 			}
2589 			stats->mem_alloc_fail_cnt++;
2590 
2591 			return -ENOMEM ;
2592 		}
2593 		stats->mem_allocated += skb->truesize;
2594 
2595 		if (ring->rxd_mode == RXD_MODE_1) {
2596 			/* 1 buffer mode - normal operation mode */
2597 			rxdp1 = (struct RxD1*)rxdp;
2598 			memset(rxdp, 0, sizeof(struct RxD1));
2599 			skb_reserve(skb, NET_IP_ALIGN);
2600 			rxdp1->Buffer0_ptr = pci_map_single
2601 			    (ring->pdev, skb->data, size - NET_IP_ALIGN,
2602 				PCI_DMA_FROMDEVICE);
2603 			if (pci_dma_mapping_error(nic->pdev,
2604 						rxdp1->Buffer0_ptr))
2605 				goto pci_map_failed;
2606 
2607 			rxdp->Control_2 =
2608 				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2609 			rxdp->Host_Control = (unsigned long) (skb);
2610 		} else if (ring->rxd_mode == RXD_MODE_3B) {
2611 			/*
2612 			 * 2 buffer mode -
2613 			 * 2 buffer mode provides 128
2614 			 * byte aligned receive buffers.
2615 			 */
2616 
2617 			rxdp3 = (struct RxD3*)rxdp;
2618 			/* save buffer pointers to avoid frequent dma mapping */
2619 			Buffer0_ptr = rxdp3->Buffer0_ptr;
2620 			Buffer1_ptr = rxdp3->Buffer1_ptr;
2621 			memset(rxdp, 0, sizeof(struct RxD3));
2622 			/* restore the buffer pointers for dma sync*/
2623 			rxdp3->Buffer0_ptr = Buffer0_ptr;
2624 			rxdp3->Buffer1_ptr = Buffer1_ptr;
2625 
2626 			ba = &ring->ba[block_no][off];
2627 			skb_reserve(skb, BUF0_LEN);
2628 			tmp = (u64)(unsigned long) skb->data;
2629 			tmp += ALIGN_SIZE;
2630 			tmp &= ~ALIGN_SIZE;
2631 			skb->data = (void *) (unsigned long)tmp;
2632 			skb_reset_tail_pointer(skb);
2633 
2634 			if (from_card_up) {
2635 				rxdp3->Buffer0_ptr =
2636 				   pci_map_single(ring->pdev, ba->ba_0,
2637 					BUF0_LEN, PCI_DMA_FROMDEVICE);
2638 				if (pci_dma_mapping_error(nic->pdev,
2639 						rxdp3->Buffer0_ptr))
2640 					goto pci_map_failed;
2641 			} else
2642 				pci_dma_sync_single_for_device(ring->pdev,
2643 				(dma_addr_t) rxdp3->Buffer0_ptr,
2644 				    BUF0_LEN, PCI_DMA_FROMDEVICE);
2645 
2646 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2647 			if (ring->rxd_mode == RXD_MODE_3B) {
2648 				/* Two buffer mode */
2649 
2650 				/*
2651 				 * Buffer2 will have L3/L4 header plus
2652 				 * L4 payload
2653 				 */
2654 				rxdp3->Buffer2_ptr = pci_map_single
2655 				(ring->pdev, skb->data, ring->mtu + 4,
2656 						PCI_DMA_FROMDEVICE);
2657 
2658 				if (pci_dma_mapping_error(nic->pdev,
2659 							rxdp3->Buffer2_ptr))
2660 					goto pci_map_failed;
2661 
2662 				if (from_card_up) {
2663 					rxdp3->Buffer1_ptr =
2664 						pci_map_single(ring->pdev,
2665 						ba->ba_1, BUF1_LEN,
2666 						PCI_DMA_FROMDEVICE);
2667 
2668 					if (pci_dma_mapping_error(nic->pdev,
2669 						rxdp3->Buffer1_ptr)) {
2670 						pci_unmap_single
2671 							(ring->pdev,
2672 						    (dma_addr_t)(unsigned long)
2673 							skb->data,
2674 							ring->mtu + 4,
2675 							PCI_DMA_FROMDEVICE);
2676 						goto pci_map_failed;
2677 					}
2678 				}
2679 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2680 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2681 								(ring->mtu + 4);
2682 			}
2683 			rxdp->Control_2 |= s2BIT(0);
2684 			rxdp->Host_Control = (unsigned long) (skb);
2685 		}
2686 		if (alloc_tab & ((1 << rxsync_frequency) - 1))
2687 			rxdp->Control_1 |= RXD_OWN_XENA;
2688 		off++;
2689 		if (off == (ring->rxd_count + 1))
2690 			off = 0;
2691 		ring->rx_curr_put_info.offset = off;
2692 
2693 		rxdp->Control_2 |= SET_RXD_MARKER;
2694 		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2695 			if (first_rxdp) {
2696 				wmb();
2697 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2698 			}
2699 			first_rxdp = rxdp;
2700 		}
2701 		ring->rx_bufs_left += 1;
2702 		alloc_tab++;
2703 	}
2704 
2705       end:
2706 	/* Transfer ownership of first descriptor to adapter just before
2707 	 * exiting. Before that, use memory barrier so that ownership
2708 	 * and other fields are seen by adapter correctly.
2709 	 */
2710 	if (first_rxdp) {
2711 		wmb();
2712 		first_rxdp->Control_1 |= RXD_OWN_XENA;
2713 	}
2714 
2715 	return SUCCESS;
2716 pci_map_failed:
2717 	stats->pci_map_fail_cnt++;
2718 	stats->mem_freed += skb->truesize;
2719 	dev_kfree_skb_irq(skb);
2720 	return -ENOMEM;
2721 }
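
/*
 * Note on the ownership hand-off in fill_rx_buffers() above: all
 * descriptors in a batch are filled first, and only then is the first
 * descriptor's RXD_OWN_XENA bit set, behind a write barrier, so the
 * adapter never sees a partially initialized descriptor chain. The
 * minimal shape of that pattern (not compiled here) is:
 */
#if 0
	/* ... fill every rxdp in the batch ... */
	if (first_rxdp) {
		wmb();	/* publish descriptor fields before ownership */
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}
#endif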
2722 
2723 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2724 {
2725 	struct net_device *dev = sp->dev;
2726 	int j;
2727 	struct sk_buff *skb;
2728 	struct RxD_t *rxdp;
2729 	struct mac_info *mac_control;
2730 	struct buffAdd *ba;
2731 	struct RxD1 *rxdp1;
2732 	struct RxD3 *rxdp3;
2733 
2734 	mac_control = &sp->mac_control;
2735 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2736 		rxdp = mac_control->rings[ring_no].
2737                                 rx_blocks[blk].rxds[j].virt_addr;
2738 		skb = (struct sk_buff *)
2739 			((unsigned long) rxdp->Host_Control);
2740 		if (!skb) {
2741 			continue;
2742 		}
2743 		if (sp->rxd_mode == RXD_MODE_1) {
2744 			rxdp1 = (struct RxD1*)rxdp;
2745 			pci_unmap_single(sp->pdev, (dma_addr_t)
2746 				rxdp1->Buffer0_ptr,
2747 				dev->mtu +
2748 				HEADER_ETHERNET_II_802_3_SIZE
2749 				+ HEADER_802_2_SIZE +
2750 				HEADER_SNAP_SIZE,
2751 				PCI_DMA_FROMDEVICE);
2752 			memset(rxdp, 0, sizeof(struct RxD1));
2753 		} else if(sp->rxd_mode == RXD_MODE_3B) {
2754 			rxdp3 = (struct RxD3*)rxdp;
2755 			ba = &mac_control->rings[ring_no].
2756 				ba[blk][j];
2757 			pci_unmap_single(sp->pdev, (dma_addr_t)
2758 				rxdp3->Buffer0_ptr,
2759 				BUF0_LEN,
2760 				PCI_DMA_FROMDEVICE);
2761 			pci_unmap_single(sp->pdev, (dma_addr_t)
2762 				rxdp3->Buffer1_ptr,
2763 				BUF1_LEN,
2764 				PCI_DMA_FROMDEVICE);
2765 			pci_unmap_single(sp->pdev, (dma_addr_t)
2766 				rxdp3->Buffer2_ptr,
2767 				dev->mtu + 4,
2768 				PCI_DMA_FROMDEVICE);
2769 			memset(rxdp, 0, sizeof(struct RxD3));
2770 		}
2771 		sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2772 		dev_kfree_skb(skb);
2773 		mac_control->rings[ring_no].rx_bufs_left -= 1;
2774 	}
2775 }
2776 
2777 /**
2778  *  free_rx_buffers - Frees all Rx buffers
2779  *  @sp: device private variable.
2780  *  Description:
2781  *  This function will free all Rx buffers allocated by host.
2782  *  Return Value:
2783  *  NONE.
2784  */
2785 
2786 static void free_rx_buffers(struct s2io_nic *sp)
2787 {
2788 	struct net_device *dev = sp->dev;
2789 	int i, blk = 0, buf_cnt = 0;
2790 	struct mac_info *mac_control;
2791 	struct config_param *config;
2792 
2793 	mac_control = &sp->mac_control;
2794 	config = &sp->config;
2795 
2796 	for (i = 0; i < config->rx_ring_num; i++) {
2797 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2798 			free_rxd_blk(sp,i,blk);
2799 
2800 		mac_control->rings[i].rx_curr_put_info.block_index = 0;
2801 		mac_control->rings[i].rx_curr_get_info.block_index = 0;
2802 		mac_control->rings[i].rx_curr_put_info.offset = 0;
2803 		mac_control->rings[i].rx_curr_get_info.offset = 0;
2804 		mac_control->rings[i].rx_bufs_left = 0;
2805 		DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2806 			  dev->name, buf_cnt, i);
2807 	}
2808 }
2809 
2810 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2811 {
2812 	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2813 		DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2814 		DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2815 	}
2816 	return 0;
2817 }
2818 
2819 /**
2820  * s2io_poll_msix - Rx interrupt handler for NAPI support
2821  * @napi : pointer to the napi structure.
2822  * @budget : The number of packets that were budgeted to be processed
2823  * during one pass through the "Poll" function.
2824  * Description:
2825  * Comes into picture only if NAPI support has been incorporated. It does
2826  * the same thing that rx_intr_handler does, but not in an interrupt
2827  * context. It will also process only a given number of packets.
2828  * Return value:
2829  * 0 on success and 1 if there are No Rx packets to be processed.
2830  */
2831 
2832 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2833 {
2834 	struct ring_info *ring = container_of(napi, struct ring_info, napi);
2835 	struct net_device *dev = ring->dev;
2836 	struct config_param *config;
2837 	struct mac_info *mac_control;
2838 	int pkts_processed = 0;
2839 	u8 __iomem *addr = NULL;
2840 	u8 val8 = 0;
2841 	struct s2io_nic *nic = netdev_priv(dev);
2842 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2843 	int budget_org = budget;
2844 
2845 	config = &nic->config;
2846 	mac_control = &nic->mac_control;
2847 
2848 	if (unlikely(!is_s2io_card_up(nic)))
2849 		return 0;
2850 
2851 	pkts_processed = rx_intr_handler(ring, budget);
2852 	s2io_chk_rx_buffers(nic, ring);
2853 
2854 	if (pkts_processed < budget_org) {
2855 		netif_rx_complete(napi);
2856 		/* Re-enable the MSI Rx vector */
2857 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2858 		addr += 7 - ring->ring_no;
2859 		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2860 		writeb(val8, addr);
2861 		val8 = readb(addr);
2862 	}
2863 	return pkts_processed;
2864 }
2865 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2866 {
2867 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2868 	struct ring_info *ring;
2869 	struct config_param *config;
2870 	struct mac_info *mac_control;
2871 	int pkts_processed = 0;
2872 	int ring_pkts_processed, i;
2873 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2874 	int budget_org = budget;
2875 
2876 	config = &nic->config;
2877 	mac_control = &nic->mac_control;
2878 
2879 	if (unlikely(!is_s2io_card_up(nic)))
2880 		return 0;
2881 
2882 	for (i = 0; i < config->rx_ring_num; i++) {
2883 		ring = &mac_control->rings[i];
2884 		ring_pkts_processed = rx_intr_handler(ring, budget);
2885 		s2io_chk_rx_buffers(nic, ring);
2886 		pkts_processed += ring_pkts_processed;
2887 		budget -= ring_pkts_processed;
2888 		if (budget <= 0)
2889 			break;
2890 	}
2891 	if (pkts_processed < budget_org) {
2892 		netif_rx_complete(napi);
2893 		/* Re-enable the Rx interrupts for the ring */
2894 		writeq(0, &bar0->rx_traffic_mask);
2895 		readl(&bar0->rx_traffic_mask);
2896 	}
2897 	return pkts_processed;
2898 }
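
/*
 * Illustrative sketch (not compiled into the driver): the NAPI contract
 * both poll handlers above follow. If fewer packets than the budget
 * were processed, the poll is completed and the device's Rx interrupt
 * is re-armed; otherwise NAPI will invoke the handler again. The helper
 * names process_rx() and reenable_rx_intr() are hypothetical.
 */
#if 0
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int done = process_rx(napi, budget);

	if (done < budget) {
		netif_rx_complete(napi);
		reenable_rx_intr(napi);
	}
	return done;
}
#endif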
2899 
2900 #ifdef CONFIG_NET_POLL_CONTROLLER
2901 /**
2902  * s2io_netpoll - netpoll event handler entry point
2903  * @dev : pointer to the device structure.
2904  * Description:
2905  * 	This function will be called by the upper layer to check for events
2906  * on the interface in situations where interrupts are disabled. It is used
2907  * for specific in-kernel networking tasks, such as remote consoles and
2908  * kernel debugging over the network (for example netdump in Red Hat).
2909  */
2910 static void s2io_netpoll(struct net_device *dev)
2911 {
2912 	struct s2io_nic *nic = netdev_priv(dev);
2913 	struct mac_info *mac_control;
2914 	struct config_param *config;
2915 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2916 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2917 	int i;
2918 
2919 	if (pci_channel_offline(nic->pdev))
2920 		return;
2921 
2922 	disable_irq(dev->irq);
2923 
2924 	mac_control = &nic->mac_control;
2925 	config = &nic->config;
2926 
2927 	writeq(val64, &bar0->rx_traffic_int);
2928 	writeq(val64, &bar0->tx_traffic_int);
2929 
2930 	/* We need to free up the transmitted skbs, or else netpoll will
2931 	 * run out of skbs and fail, and eventually a netpoll application
2932 	 * such as netdump will fail too.
2933 	 */
2934 	for (i = 0; i < config->tx_fifo_num; i++)
2935 		tx_intr_handler(&mac_control->fifos[i]);
2936 
2937 	/* check for received packet and indicate up to network */
2938 	for (i = 0; i < config->rx_ring_num; i++)
2939 		rx_intr_handler(&mac_control->rings[i], 0);
2940 
2941 	for (i = 0; i < config->rx_ring_num; i++) {
2942 		if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
2943 				-ENOMEM) {
2944 			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2945 			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2946 			break;
2947 		}
2948 	}
2949 	enable_irq(dev->irq);
2950 	return;
2951 }
2952 #endif
2953 
2954 /**
2955  *  rx_intr_handler - Rx interrupt handler
2956  *  @ring_info: per ring structure.
2957  *  @budget: budget for napi processing.
2958  *  Description:
2959  *  If the interrupt is because of a received frame or if the
2960  *  receive ring contains fresh, as yet unprocessed frames, this function is
2961  *  called. It picks out the RxD at which place the last Rx processing had
2962  *  stopped and sends the skb to the OSM's Rx handler and then increments
2963  *  the offset.
2964  *  Return Value:
2965  *  No. of napi packets processed.
2966  */
2967 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2968 {
2969 	int get_block, put_block;
2970 	struct rx_curr_get_info get_info, put_info;
2971 	struct RxD_t *rxdp;
2972 	struct sk_buff *skb;
2973 	int pkt_cnt = 0, napi_pkts = 0;
2974 	int i;
2975 	struct RxD1* rxdp1;
2976 	struct RxD3* rxdp3;
2977 
2978 	get_info = ring_data->rx_curr_get_info;
2979 	get_block = get_info.block_index;
2980 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2981 	put_block = put_info.block_index;
2982 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2983 
2984 	while (RXD_IS_UP2DT(rxdp)) {
2985 		/*
2986 		 * If we are next to the put index then it's
2987 		 * a FIFO full condition
2988 		 */
2989 		if ((get_block == put_block) &&
2990 		    (get_info.offset + 1) == put_info.offset) {
2991 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2992 				ring_data->dev->name);
2993 			break;
2994 		}
2995 		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2996 		if (skb == NULL) {
2997 			DBG_PRINT(ERR_DBG, "%s: The skb is ",
2998 				  ring_data->dev->name);
2999 			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
3000 			return 0;
3001 		}
3002 		if (ring_data->rxd_mode == RXD_MODE_1) {
3003 			rxdp1 = (struct RxD1*)rxdp;
3004 			pci_unmap_single(ring_data->pdev, (dma_addr_t)
3005 				rxdp1->Buffer0_ptr,
3006 				ring_data->mtu +
3007 				HEADER_ETHERNET_II_802_3_SIZE +
3008 				HEADER_802_2_SIZE +
3009 				HEADER_SNAP_SIZE,
3010 				PCI_DMA_FROMDEVICE);
3011 		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
3012 			rxdp3 = (struct RxD3*)rxdp;
3013 			pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
3014 				rxdp3->Buffer0_ptr,
3015 				BUF0_LEN, PCI_DMA_FROMDEVICE);
3016 			pci_unmap_single(ring_data->pdev, (dma_addr_t)
3017 				rxdp3->Buffer2_ptr,
3018 				ring_data->mtu + 4,
3019 				PCI_DMA_FROMDEVICE);
3020 		}
3021 		prefetch(skb->data);
3022 		rx_osm_handler(ring_data, rxdp);
3023 		get_info.offset++;
3024 		ring_data->rx_curr_get_info.offset = get_info.offset;
3025 		rxdp = ring_data->rx_blocks[get_block].
3026 				rxds[get_info.offset].virt_addr;
3027 		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
3028 			get_info.offset = 0;
3029 			ring_data->rx_curr_get_info.offset = get_info.offset;
3030 			get_block++;
3031 			if (get_block == ring_data->block_count)
3032 				get_block = 0;
3033 			ring_data->rx_curr_get_info.block_index = get_block;
3034 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3035 		}
3036 
3037 		if (ring_data->nic->config.napi) {
3038 			budget--;
3039 			napi_pkts++;
3040 			if (!budget)
3041 				break;
3042 		}
3043 		pkt_cnt++;
3044 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3045 			break;
3046 	}
3047 	if (ring_data->lro) {
3048 		/* Clear all LRO sessions before exiting */
3049 		for (i=0; i<MAX_LRO_SESSIONS; i++) {
3050 			struct lro *lro = &ring_data->lro0_n[i];
3051 			if (lro->in_use) {
3052 				update_L3L4_header(ring_data->nic, lro);
3053 				queue_rx_frame(lro->parent, lro->vlan_tag);
3054 				clear_lro_session(lro);
3055 			}
3056 		}
3057 	}
3058 	return(napi_pkts);
3059 }
3060 
3061 /**
3062  *  tx_intr_handler - Transmit interrupt handler
3063  *  @nic : device private variable
3064  *  Description:
3065  *  If an interrupt was raised to indicate DMA complete of the
3066  *  Tx packet, this function is called. It identifies the last TxD
3067  *  whose buffer was freed and frees all skbs whose data have already
3068  *  been DMA'ed into the NIC's internal memory.
3069  *  Return Value:
3070  *  NONE
3071  */
3072 
3073 static void tx_intr_handler(struct fifo_info *fifo_data)
3074 {
3075 	struct s2io_nic *nic = fifo_data->nic;
3076 	struct tx_curr_get_info get_info, put_info;
3077 	struct sk_buff *skb = NULL;
3078 	struct TxD *txdlp;
3079 	int pkt_cnt = 0;
3080 	unsigned long flags = 0;
3081 	u8 err_mask;
3082 
3083 	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3084 			return;
3085 
3086 	get_info = fifo_data->tx_curr_get_info;
3087 	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3088 	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
3089 	    list_virt_addr;
3090 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3091 	       (get_info.offset != put_info.offset) &&
3092 	       (txdlp->Host_Control)) {
3093 		/* Check for TxD errors */
3094 		if (txdlp->Control_1 & TXD_T_CODE) {
3095 			unsigned long long err;
3096 			err = txdlp->Control_1 & TXD_T_CODE;
3097 			if (err & 0x1) {
3098 				nic->mac_control.stats_info->sw_stat.
3099 						parity_err_cnt++;
3100 			}
3101 
3102 			/* update t_code statistics */
3103 			err_mask = err >> 48;
3104 			switch(err_mask) {
3105 				case 2:
3106 					nic->mac_control.stats_info->sw_stat.
3107 							tx_buf_abort_cnt++;
3108 				break;
3109 
3110 				case 3:
3111 					nic->mac_control.stats_info->sw_stat.
3112 							tx_desc_abort_cnt++;
3113 				break;
3114 
3115 				case 7:
3116 					nic->mac_control.stats_info->sw_stat.
3117 							tx_parity_err_cnt++;
3118 				break;
3119 
3120 				case 10:
3121 					nic->mac_control.stats_info->sw_stat.
3122 							tx_link_loss_cnt++;
3123 				break;
3124 
3125 				case 15:
3126 					nic->mac_control.stats_info->sw_stat.
3127 							tx_list_proc_err_cnt++;
3128 				break;
3129 			}
3130 		}
3131 
3132 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3133 		if (skb == NULL) {
3134 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3135 			DBG_PRINT(ERR_DBG, "%s: Null skb ",
3136 			__func__);
3137 			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3138 			return;
3139 		}
3140 		pkt_cnt++;
3141 
3142 		/* Updating the statistics block */
3143 		nic->dev->stats.tx_bytes += skb->len;
3144 		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3145 		dev_kfree_skb_irq(skb);
3146 
3147 		get_info.offset++;
3148 		if (get_info.offset == get_info.fifo_len + 1)
3149 			get_info.offset = 0;
3150 		txdlp = (struct TxD *) fifo_data->list_info
3151 		    [get_info.offset].list_virt_addr;
3152 		fifo_data->tx_curr_get_info.offset =
3153 		    get_info.offset;
3154 	}
3155 
3156 	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3157 
3158 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3159 }
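
/*
 * For reference, the T_CODE values handled in tx_intr_handler() above
 * map to software statistics as follows (taken directly from the
 * switch statement):
 *   2  -> tx_buf_abort_cnt
 *   3  -> tx_desc_abort_cnt
 *   7  -> tx_parity_err_cnt
 *   10 -> tx_link_loss_cnt
 *   15 -> tx_list_proc_err_cnt
 */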
3160 
3161 /**
3162  *  s2io_mdio_write - Function to write into the MDIO registers
3163  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3164  *  @addr     : address value
3165  *  @value    : data value
3166  *  @dev      : pointer to net_device structure
3167  *  Description:
3168  *  This function is used to write values into the MDIO registers.
3169  *  Return Value: NONE
3170  */
3171 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3172 {
3173 	u64 val64 = 0x0;
3174 	struct s2io_nic *sp = netdev_priv(dev);
3175 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3176 
3177 	/* address transaction */
3178 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3179 			| MDIO_MMD_DEV_ADDR(mmd_type)
3180 			| MDIO_MMS_PRT_ADDR(0x0);
3181 	writeq(val64, &bar0->mdio_control);
3182 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3183 	writeq(val64, &bar0->mdio_control);
3184 	udelay(100);
3185 
3186 	/* Data transaction */
3187 	val64 = 0x0;
3188 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3189 			| MDIO_MMD_DEV_ADDR(mmd_type)
3190 			| MDIO_MMS_PRT_ADDR(0x0)
3191 			| MDIO_MDIO_DATA(value)
3192 			| MDIO_OP(MDIO_OP_WRITE_TRANS);
3193 	writeq(val64, &bar0->mdio_control);
3194 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3195 	writeq(val64, &bar0->mdio_control);
3196 	udelay(100);
3197 
3198 	val64 = 0x0;
3199 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3200 	| MDIO_MMD_DEV_ADDR(mmd_type)
3201 	| MDIO_MMS_PRT_ADDR(0x0)
3202 	| MDIO_OP(MDIO_OP_READ_TRANS);
3203 	writeq(val64, &bar0->mdio_control);
3204 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3205 	writeq(val64, &bar0->mdio_control);
3206 	udelay(100);
3207 
3208 }
3209 
3210 /**
3211  *  s2io_mdio_read - Function to read from the MDIO registers
3212  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3213  *  @addr     : address value
3214  *  @dev      : pointer to net_device structure
3215  *  Description:
3216  *  This function is used to read values from the MDIO registers.
3217  *  Return Value: the value read from the register
3218  */
3219 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3220 {
3221 	u64 val64 = 0x0;
3222 	u64 rval64 = 0x0;
3223 	struct s2io_nic *sp = netdev_priv(dev);
3224 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3225 
3226 	/* address transaction */
3227 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3228 			| MDIO_MMD_DEV_ADDR(mmd_type)
3229 			| MDIO_MMS_PRT_ADDR(0x0);
3230 	writeq(val64, &bar0->mdio_control);
3231 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3232 	writeq(val64, &bar0->mdio_control);
3233 	udelay(100);
3234 
3235 	/* Data transaction */
3236 	val64 = 0x0;
3237 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3238 			| MDIO_MMD_DEV_ADDR(mmd_type)
3239 			| MDIO_MMS_PRT_ADDR(0x0)
3240 			| MDIO_OP(MDIO_OP_READ_TRANS);
3241 	writeq(val64, &bar0->mdio_control);
3242 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3243 	writeq(val64, &bar0->mdio_control);
3244 	udelay(100);
3245 
3246 	/* Read the value from regs */
3247 	rval64 = readq(&bar0->mdio_control);
3248 	rval64 = rval64 & 0xFFFF0000;
3249 	rval64 = rval64 >> 16;
3250 	return rval64;
3251 }
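
/*
 * Illustrative sketch (not compiled into the driver): using the two
 * MDIO helpers above. Each access is a two-phase transaction - an
 * address frame followed by a data frame - each started by writing
 * MDIO_CTRL_START_TRANS(0xE) to the mdio_control register. The
 * register address 0xA100 below is the DOM address also used by
 * s2io_updt_xpak_counter() further down.
 */
#if 0
	u64 readback;

	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, 0xA100, 0x0, dev);
	readback = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, 0xA100, dev);
#endif
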
3252 /**
3253  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3254  *  @counter      : counter value to be updated
3255  *  @flag         : flag to indicate the status
3256  *  @type         : counter type
3257  *  Description:
3258  *  This function checks the status of the XPAK counter values.
3259  *  Return Value: NONE
3260  */
3261 
3262 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3263 {
3264 	u64 mask = 0x3;
3265 	u64 val64;
3266 	int i;
3267 	for (i = 0; i < index; i++)
3268 		mask = mask << 0x2;
3269 
3270 	if(flag > 0)
3271 	{
3272 		*counter = *counter + 1;
3273 		val64 = *regs_stat & mask;
3274 		val64 = val64 >> (index * 0x2);
3275 		val64 = val64 + 1;
3276 		if(val64 == 3)
3277 		{
3278 			switch(type)
3279 			{
3280 			case 1:
3281 				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3282 					  "service. Excessive temperatures may "
3283 					  "result in premature transceiver "
3284 					  "failure\n");
3285 			break;
3286 			case 2:
3287 				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3288 					  "service Excessive bias currents may "
3289 					  "indicate imminent laser diode "
3290 					  "failure \n");
3291 			break;
3292 			case 3:
3293 				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3294 					  "service Excessive laser output "
3295 					  "power may saturate far-end "
3296 					  "receiver\n");
3297 			break;
3298 			default:
3299 				DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3300 					  "type \n");
3301 			}
3302 			val64 = 0x0;
3303 		}
3304 		val64 = val64 << (index * 0x2);
3305 		*regs_stat = (*regs_stat & (~mask)) | (val64);
3306 
3307 	} else {
3308 		*regs_stat = *regs_stat & (~mask);
3309 	}
3310 }
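
/*
 * Illustrative sketch (not compiled into the driver): the packed 2-bit
 * saturating counters maintained by s2io_chk_xpak_counter() above. Each
 * alarm index owns two bits of *regs_stat; three consecutive hits
 * (value 3) trigger the warning and reset the field. The helpers below
 * are hypothetical reformulations of the same bit manipulation.
 */
#if 0
static u64 example_get_2bit(u64 regs_stat, u32 index)
{
	return (regs_stat >> (index * 2)) & 0x3;
}

static u64 example_set_2bit(u64 regs_stat, u32 index, u64 val)
{
	u64 mask = (u64)0x3 << (index * 2);

	return (regs_stat & ~mask) | ((val & 0x3) << (index * 2));
}
#endif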
3311 
3312 /**
3313  *  s2io_updt_xpak_counter - Function to update the xpak counters
3314  *  @dev         : pointer to net_device struct
3315  *  Description:
3316  *  This function updates the status of the XPAK counter values.
3317  *  Return Value: NONE
3318  */
3319 static void s2io_updt_xpak_counter(struct net_device *dev)
3320 {
3321 	u16 flag  = 0x0;
3322 	u16 type  = 0x0;
3323 	u16 val16 = 0x0;
3324 	u64 val64 = 0x0;
3325 	u64 addr  = 0x0;
3326 
3327 	struct s2io_nic *sp = netdev_priv(dev);
3328 	struct stat_block *stat_info = sp->mac_control.stats_info;
3329 
3330 	/* Check the communication with the MDIO slave */
3331 	addr = 0x0000;
3332 	val64 = 0x0;
3333 	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3334 	if((val64 == 0xFFFF) || (val64 == 0x0000))
3335 	{
3336 		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3337 			  "Returned %llx\n", (unsigned long long)val64);
3338 		return;
3339 	}
3340 
3341 	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
3342 	if(val64 != 0x2040)
3343 	{
3344 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3345 		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3346 			  (unsigned long long)val64);
3347 		return;
3348 	}
3349 
3350 	/* Load the DOM (Digital Optical Monitoring) register through MDIO */
3351 	addr = 0xA100;
3352 	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3353 	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3354 
3355 	/* Reading the Alarm flags */
3356 	addr = 0xA070;
3357 	val64 = 0x0;
3358 	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3359 
3360 	flag = CHECKBIT(val64, 0x7);
3361 	type = 1;
3362 	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3363 				&stat_info->xpak_stat.xpak_regs_stat,
3364 				0x0, flag, type);
3365 
3366 	if(CHECKBIT(val64, 0x6))
3367 		stat_info->xpak_stat.alarm_transceiver_temp_low++;
3368 
3369 	flag = CHECKBIT(val64, 0x3);
3370 	type = 2;
3371 	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3372 				&stat_info->xpak_stat.xpak_regs_stat,
3373 				0x2, flag, type);
3374 
3375 	if(CHECKBIT(val64, 0x2))
3376 		stat_info->xpak_stat.alarm_laser_bias_current_low++;
3377 
3378 	flag = CHECKBIT(val64, 0x1);
3379 	type = 3;
3380 	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3381 				&stat_info->xpak_stat.xpak_regs_stat,
3382 				0x4, flag, type);
3383 
3384 	if(CHECKBIT(val64, 0x0))
3385 		stat_info->xpak_stat.alarm_laser_output_power_low++;
3386 
3387 	/* Reading the Warning flags */
3388 	addr = 0xA074;
3389 	val64 = 0x0;
3390 	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3391 
3392 	if(CHECKBIT(val64, 0x7))
3393 		stat_info->xpak_stat.warn_transceiver_temp_high++;
3394 
3395 	if(CHECKBIT(val64, 0x6))
3396 		stat_info->xpak_stat.warn_transceiver_temp_low++;
3397 
3398 	if(CHECKBIT(val64, 0x3))
3399 		stat_info->xpak_stat.warn_laser_bias_current_high++;
3400 
3401 	if(CHECKBIT(val64, 0x2))
3402 		stat_info->xpak_stat.warn_laser_bias_current_low++;
3403 
3404 	if(CHECKBIT(val64, 0x1))
3405 		stat_info->xpak_stat.warn_laser_output_power_high++;
3406 
3407 	if(CHECKBIT(val64, 0x0))
3408 		stat_info->xpak_stat.warn_laser_output_power_low++;
3409 }
3410 
3411 /**
3412  *  wait_for_cmd_complete - waits for a command to complete.
3413  *  @addr : register to poll, @busy_bit : busy bit(s) in that register,
3414  *  @bit_state : whether completion is signalled by the bit resetting or setting.
3415  *  Description: Waits for a command written to the RMAC ADDR/DATA
3416  *  registers to complete and returns either success or error depending
3417  *  on whether the command completed within the timeout.
3418  *  Return value:
3419  *   SUCCESS on success and FAILURE on failure.
3420  */
3421 
3422 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3423 				int bit_state)
3424 {
3425 	int ret = FAILURE, cnt = 0, delay = 1;
3426 	u64 val64;
3427 
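	/*
	 * Polling budget: up to 20 polls, 1 ms apart for the first ten and
	 * 50 ms apart after that, i.e. roughly half a second worst case.
	 */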
3428 	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3429 		return FAILURE;
3430 
3431 	do {
3432 		val64 = readq(addr);
3433 		if (bit_state == S2IO_BIT_RESET) {
3434 			if (!(val64 & busy_bit)) {
3435 				ret = SUCCESS;
3436 				break;
3437 			}
3438 		} else {
3439 			if (val64 & busy_bit) {
3440 				ret = SUCCESS;
3441 				break;
3442 			}
3443 		}
3444 
3445 		if(in_interrupt())
3446 			mdelay(delay);
3447 		else
3448 			msleep(delay);
3449 
3450 		if (++cnt >= 10)
3451 			delay = 50;
3452 	} while (cnt < 20);
3453 	return ret;
3454 }
3455 /*
3456  * check_pci_device_id - Checks if the device id is supported
3457  * @id : device id
3458  * Description: Function to check if the pci device id is supported by driver.
3459  * Return value: Actual device id if supported else PCI_ANY_ID
3460  */
3461 static u16 check_pci_device_id(u16 id)
3462 {
3463 	switch (id) {
3464 	case PCI_DEVICE_ID_HERC_WIN:
3465 	case PCI_DEVICE_ID_HERC_UNI:
3466 		return XFRAME_II_DEVICE;
3467 	case PCI_DEVICE_ID_S2IO_UNI:
3468 	case PCI_DEVICE_ID_S2IO_WIN:
3469 		return XFRAME_I_DEVICE;
3470 	default:
3471 		return PCI_ANY_ID;
3472 	}
3473 }
3474 
3475 /**
3476  *  s2io_reset - Resets the card.
3477  *  @sp : private member of the device structure.
3478  *  Description: Function to reset the card. It also restores the
3479  *  previously saved PCI configuration space registers, since the card
3480  *  reset also resets the configuration space.
3481  *  Return value:
3482  *  void.
3483  */
3484 
3485 static void s2io_reset(struct s2io_nic * sp)
3486 {
3487 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3488 	u64 val64;
3489 	u16 subid, pci_cmd;
3490 	int i;
3491 	u16 val16;
3492 	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3493 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3494 
3495 	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3496 			__func__, sp->dev->name);
3497 
3498 	/* Back up the PCI-X CMD reg, we don't want to lose MMRBC, OST settings */
3499 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3500 
3501 	val64 = SW_RESET_ALL;
3502 	writeq(val64, &bar0->sw_reset);
3503 	if (strstr(sp->product_name, "CX4")) {
3504 		msleep(750);
3505 	}
3506 	msleep(250);
3507 	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3508 
3509 		/* Restore the PCI state saved during initialization. */
3510 		pci_restore_state(sp->pdev);
3511 		pci_read_config_word(sp->pdev, 0x2, &val16);
3512 		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3513 			break;
3514 		msleep(200);
3515 	}
3516 
3517 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3518 		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __func__);
3519 	}
3520 
3521 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3522 
3523 	s2io_init_pci(sp);
3524 
3525 	/* Set swapper to enable I/O register access */
3526 	s2io_set_swapper(sp);
3527 
3528 	/* restore mac_addr entries */
3529 	do_s2io_restore_unicast_mc(sp);
3530 
3531 	/* Restore the MSIX table entries from local variables */
3532 	restore_xmsi_data(sp);
3533 
3534 	/* Clear certain PCI/PCI-X fields after reset */
3535 	if (sp->device_type == XFRAME_II_DEVICE) {
3536 		/* Clear "detected parity error" bit */
3537 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3538 
3539 		/* Clearing the PCI-X ECC status register */
3540 		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3541 
3542 		/* Clearing PCI_STATUS error reflected here */
3543 		writeq(s2BIT(62), &bar0->txpic_int_reg);
3544 	}
3545 
3546 	/* Reset device statistics maintained by OS */
3547 	memset(&sp->stats, 0, sizeof (struct net_device_stats));
3548 
3549 	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3550 	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3551 	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3552 	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3553 	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3554 	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3555 	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3556 	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3557 	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3558 	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3559 	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3560 	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3561 	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3562 	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3563 	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3564 	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3565 	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3566 	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3567 	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3568 
3569 	/* SXE-002: Configure link and activity LED to turn it off */
3570 	subid = sp->pdev->subsystem_device;
3571 	if (((subid & 0xFF) >= 0x07) &&
3572 	    (sp->device_type == XFRAME_I_DEVICE)) {
3573 		val64 = readq(&bar0->gpio_control);
3574 		val64 |= 0x0000800000000000ULL;
3575 		writeq(val64, &bar0->gpio_control);
3576 		val64 = 0x0411040400000000ULL;
3577 		writeq(val64, (void __iomem *)bar0 + 0x2700);
3578 	}
3579 
3580 	/*
3581 	 * Clear spurious ECC interrupts that would have occurred on
3582 	 * XFRAME II cards after reset.
3583 	 */
3584 	if (sp->device_type == XFRAME_II_DEVICE) {
3585 		val64 = readq(&bar0->pcc_err_reg);
3586 		writeq(val64, &bar0->pcc_err_reg);
3587 	}
3588 
3589 	sp->device_enabled_once = FALSE;
3590 }
3591 
3592 /**
3593  *  s2io_set_swapper - to set the swapper control on the card
3594  *  @sp : private member of the device structure,
3595  *  pointer to the s2io_nic structure.
3596  *  Description: Function to set the swapper control on the card
3597  *  correctly depending on the 'endianness' of the system.
3598  *  Return value:
3599  *  SUCCESS on success and FAILURE on failure.
3600  */
3601 
3602 static int s2io_set_swapper(struct s2io_nic * sp)
3603 {
3604 	struct net_device *dev = sp->dev;
3605 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3606 	u64 val64, valt, valr;
3607 
3608 	/*
3609 	 * Set proper endian settings and verify the same by reading
3610 	 * the PIF Feed-back register.
3611 	 */
3612 
3613 	val64 = readq(&bar0->pif_rd_swapper_fb);
3614 	if (val64 != 0x0123456789ABCDEFULL) {
3615 		int i = 0;
3616 		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
3617 				0x8100008181000081ULL,  /* FE=1, SE=0 */
3618 				0x4200004242000042ULL,  /* FE=0, SE=1 */
3619 				0};                     /* FE=0, SE=0 */
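		/*
		 * Probe each flip/swap-enable combination until the PIF
		 * feedback register returns the expected signature
		 * 0x0123456789ABCDEF.
		 */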
3620 
3621 		while(i<4) {
3622 			writeq(value[i], &bar0->swapper_ctrl);
3623 			val64 = readq(&bar0->pif_rd_swapper_fb);
3624 			if (val64 == 0x0123456789ABCDEFULL)
3625 				break;
3626 			i++;
3627 		}
3628 		if (i == 4) {
3629 			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3630 				dev->name);
3631 			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3632 				(unsigned long long) val64);
3633 			return FAILURE;
3634 		}
3635 		valr = value[i];
3636 	} else {
3637 		valr = readq(&bar0->swapper_ctrl);
3638 	}
3639 
3640 	valt = 0x0123456789ABCDEFULL;
3641 	writeq(valt, &bar0->xmsi_address);
3642 	val64 = readq(&bar0->xmsi_address);
3643 
3644 	if(val64 != valt) {
3645 		int i = 0;
3646 		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
3647 				0x0081810000818100ULL,  /* FE=1, SE=0 */
3648 				0x0042420000424200ULL,  /* FE=0, SE=1 */
3649 				0};                     /* FE=0, SE=0 */
3650 
3651 		while(i<4) {
3652 			writeq((value[i] | valr), &bar0->swapper_ctrl);
3653 			writeq(valt, &bar0->xmsi_address);
3654 			val64 = readq(&bar0->xmsi_address);
3655 			if(val64 == valt)
3656 				break;
3657 			i++;
3658 		}
3659 		if(i == 4) {
3660 			unsigned long long x = val64;
3661 			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3662 			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3663 			return FAILURE;
3664 		}
3665 	}
3666 	val64 = readq(&bar0->swapper_ctrl);
3667 	val64 &= 0xFFFF000000000000ULL;
3668 
3669 #ifdef  __BIG_ENDIAN
3670 	/*
3671 	 * The device is set to a big endian format by default, so a
3672 	 * big endian driver need not change anything.
3673 	 */
3674 	val64 |= (SWAPPER_CTRL_TXP_FE |
3675 		 SWAPPER_CTRL_TXP_SE |
3676 		 SWAPPER_CTRL_TXD_R_FE |
3677 		 SWAPPER_CTRL_TXD_W_FE |
3678 		 SWAPPER_CTRL_TXF_R_FE |
3679 		 SWAPPER_CTRL_RXD_R_FE |
3680 		 SWAPPER_CTRL_RXD_W_FE |
3681 		 SWAPPER_CTRL_RXF_W_FE |
3682 		 SWAPPER_CTRL_XMSI_FE |
3683 		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3684 	if (sp->config.intr_type == INTA)
3685 		val64 |= SWAPPER_CTRL_XMSI_SE;
3686 	writeq(val64, &bar0->swapper_ctrl);
3687 #else
3688 	/*
3689 	 * Initially we enable all bits to make it accessible by the
3690 	 * driver, then we selectively enable only those bits that
3691 	 * we want to set.
3692 	 */
3693 	val64 |= (SWAPPER_CTRL_TXP_FE |
3694 		 SWAPPER_CTRL_TXP_SE |
3695 		 SWAPPER_CTRL_TXD_R_FE |
3696 		 SWAPPER_CTRL_TXD_R_SE |
3697 		 SWAPPER_CTRL_TXD_W_FE |
3698 		 SWAPPER_CTRL_TXD_W_SE |
3699 		 SWAPPER_CTRL_TXF_R_FE |
3700 		 SWAPPER_CTRL_RXD_R_FE |
3701 		 SWAPPER_CTRL_RXD_R_SE |
3702 		 SWAPPER_CTRL_RXD_W_FE |
3703 		 SWAPPER_CTRL_RXD_W_SE |
3704 		 SWAPPER_CTRL_RXF_W_FE |
3705 		 SWAPPER_CTRL_XMSI_FE |
3706 		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3707 	if (sp->config.intr_type == INTA)
3708 		val64 |= SWAPPER_CTRL_XMSI_SE;
3709 	writeq(val64, &bar0->swapper_ctrl);
3710 #endif
3711 	val64 = readq(&bar0->swapper_ctrl);
3712 
3713 	/*
3714 	 * Verifying if endian settings are accurate by reading a
3715 	 * feedback register.
3716 	 */
3717 	val64 = readq(&bar0->pif_rd_swapper_fb);
3718 	if (val64 != 0x0123456789ABCDEFULL) {
3719 		/* Endian settings are still incorrect; report the failure. */
3720 		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3721 			  dev->name);
3722 		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3723 			  (unsigned long long) val64);
3724 		return FAILURE;
3725 	}
3726 
3727 	return SUCCESS;
3728 }
3729 
3730 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3731 {
3732 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3733 	u64 val64;
3734 	int ret = 0, cnt = 0;
3735 
3736 	do {
3737 		val64 = readq(&bar0->xmsi_access);
3738 		if (!(val64 & s2BIT(15)))
3739 			break;
3740 		mdelay(1);
3741 		cnt++;
3742 	} while(cnt < 5);
3743 	if (cnt == 5) {
3744 		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3745 		ret = 1;
3746 	}
3747 
3748 	return ret;
3749 }
3750 
3751 static void restore_xmsi_data(struct s2io_nic *nic)
3752 {
3753 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3754 	u64 val64;
3755 	int i, msix_index;
3756 
3757 
3758 	if (nic->device_type == XFRAME_I_DEVICE)
3759 		return;
3760 
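	/*
	 * Vector 0 is the alarm vector; ring vectors were registered at
	 * MSI-X table index ((i - 1) * 8) + 1, hence the msix_index
	 * mapping below.
	 */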
3761 	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3762 		msix_index = (i) ? ((i-1) * 8 + 1): 0;
3763 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3764 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3765 		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3766 		writeq(val64, &bar0->xmsi_access);
3767 		if (wait_for_msix_trans(nic, msix_index)) {
3768 			DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3769 			continue;
3770 		}
3771 	}
3772 }
3773 
3774 static void store_xmsi_data(struct s2io_nic *nic)
3775 {
3776 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3777 	u64 val64, addr, data;
3778 	int i, msix_index;
3779 
3780 	if (nic->device_type == XFRAME_I_DEVICE)
3781 		return;
3782 
3783 	/* Store and display */
3784 	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3785 		msix_index = (i) ? ((i-1) * 8 + 1): 0;
3786 		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3787 		writeq(val64, &bar0->xmsi_access);
3788 		if (wait_for_msix_trans(nic, msix_index)) {
3789 			DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3790 			continue;
3791 		}
3792 		addr = readq(&bar0->xmsi_address);
3793 		data = readq(&bar0->xmsi_data);
3794 		if (addr && data) {
3795 			nic->msix_info[i].addr = addr;
3796 			nic->msix_info[i].data = data;
3797 		}
3798 	}
3799 }
3800 
3801 static int s2io_enable_msi_x(struct s2io_nic *nic)
3802 {
3803 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3804 	u64 rx_mat;
3805 	u16 msi_control; /* Temp variable */
3806 	int ret, i, j, msix_indx = 1;
3807 
3808 	nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3809 			       GFP_KERNEL);
3810 	if (!nic->entries) {
3811 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3812 			__func__);
3813 		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3814 		return -ENOMEM;
3815 	}
3816 	nic->mac_control.stats_info->sw_stat.mem_allocated
3817 		+= (nic->num_entries * sizeof(struct msix_entry));
3818 
3819 	memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3820 
3821 	nic->s2io_entries =
3822 		kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3823 				   GFP_KERNEL);
3824 	if (!nic->s2io_entries) {
3825 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3826 			__func__);
3827 		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3828 		kfree(nic->entries);
3829 		nic->mac_control.stats_info->sw_stat.mem_freed
3830 			+= (nic->num_entries * sizeof(struct msix_entry));
3831 		return -ENOMEM;
3832 	}
3833 	nic->mac_control.stats_info->sw_stat.mem_allocated
3834 		+= (nic->num_entries * sizeof(struct s2io_msix_entry));
3835 	memset(nic->s2io_entries, 0,
3836 		nic->num_entries * sizeof(struct s2io_msix_entry));
3837 
3838 	nic->entries[0].entry = 0;
3839 	nic->s2io_entries[0].entry = 0;
3840 	nic->s2io_entries[0].in_use = MSIX_FLG;
3841 	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3842 	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3843 
3844 	for (i = 1; i < nic->num_entries; i++) {
3845 		nic->entries[i].entry = ((i - 1) * 8) + 1;
3846 		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3847 		nic->s2io_entries[i].arg = NULL;
3848 		nic->s2io_entries[i].in_use = 0;
3849 	}
3850 
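	/*
	 * Steer each Rx ring's interrupt to its MSI-X vector through the
	 * RX_MAT register; ring j maps to vector (j * 8) + 1, leaving
	 * vector 0 for alarms.
	 */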
3851 	rx_mat = readq(&bar0->rx_mat);
3852 	for (j = 0; j < nic->config.rx_ring_num; j++) {
3853 		rx_mat |= RX_MAT_SET(j, msix_indx);
3854 		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3855 		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3856 		nic->s2io_entries[j+1].in_use = MSIX_FLG;
3857 		msix_indx += 8;
3858 	}
3859 	writeq(rx_mat, &bar0->rx_mat);
3860 	readq(&bar0->rx_mat);
3861 
3862 	ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3863 	/* Fail init on error or if we get fewer vectors than required */
3864 	if (ret) {
3865 		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3866 		kfree(nic->entries);
3867 		nic->mac_control.stats_info->sw_stat.mem_freed
3868 			+= (nic->num_entries * sizeof(struct msix_entry));
3869 		kfree(nic->s2io_entries);
3870 		nic->mac_control.stats_info->sw_stat.mem_freed
3871 			+= (nic->num_entries * sizeof(struct s2io_msix_entry));
3872 		nic->entries = NULL;
3873 		nic->s2io_entries = NULL;
3874 		return -ENOMEM;
3875 	}
3876 
3877 	/*
3878 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3879 	 * in the herc NIC. (Temp change, needs to be removed later)
3880 	 */
3881 	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3882 	msi_control |= 0x1; /* Enable MSI */
3883 	pci_write_config_word(nic->pdev, 0x42, msi_control);
3884 
3885 	return 0;
3886 }
3887 
3888 /* Handle software interrupt used during MSI(X) test */
3889 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3890 {
3891 	struct s2io_nic *sp = dev_id;
3892 
3893 	sp->msi_detected = 1;
3894 	wake_up(&sp->msi_wait);
3895 
3896 	return IRQ_HANDLED;
3897 }
3898 
3899 /* Test the interrupt path by forcing a software IRQ */
3900 static int s2io_test_msi(struct s2io_nic *sp)
3901 {
3902 	struct pci_dev *pdev = sp->pdev;
3903 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3904 	int err;
3905 	u64 val64, saved64;
3906 
3907 	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3908 			sp->name, sp);
3909 	if (err) {
3910 		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3911 		       sp->dev->name, pci_name(pdev), pdev->irq);
3912 		return err;
3913 	}
3914 
3915 	init_waitqueue_head (&sp->msi_wait);
3916 	sp->msi_detected = 0;
3917 
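	/*
	 * Arm the scheduler timer to fire a one-shot interrupt through
	 * MSI-X vector 1; s2io_test_intr() sets msi_detected when (and if)
	 * that interrupt arrives.
	 */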
3918 	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3919 	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3920 	val64 |= SCHED_INT_CTRL_TIMER_EN;
3921 	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3922 	writeq(val64, &bar0->scheduled_int_ctrl);
3923 
3924 	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3925 
3926 	if (!sp->msi_detected) {
3927 		/* MSI(X) test failed, go back to INTx mode */
3928 		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3929 			"using MSI(X) during test\n", sp->dev->name,
3930 			pci_name(pdev));
3931 
3932 		err = -EOPNOTSUPP;
3933 	}
3934 
3935 	free_irq(sp->entries[1].vector, sp);
3936 
3937 	writeq(saved64, &bar0->scheduled_int_ctrl);
3938 
3939 	return err;
3940 }
3941 
3942 static void remove_msix_isr(struct s2io_nic *sp)
3943 {
3944 	int i;
3945 	u16 msi_control;
3946 
3947 	for (i = 0; i < sp->num_entries; i++) {
3948 		if (sp->s2io_entries[i].in_use ==
3949 			MSIX_REGISTERED_SUCCESS) {
3950 			int vector = sp->entries[i].vector;
3951 			void *arg = sp->s2io_entries[i].arg;
3952 			free_irq(vector, arg);
3953 		}
3954 	}
3955 
3956 	kfree(sp->entries);
3957 	kfree(sp->s2io_entries);
3958 	sp->entries = NULL;
3959 	sp->s2io_entries = NULL;
3960 
3961 	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3962 	msi_control &= 0xFFFE; /* Disable MSI */
3963 	pci_write_config_word(sp->pdev, 0x42, msi_control);
3964 
3965 	pci_disable_msix(sp->pdev);
3966 }
3967 
3968 static void remove_inta_isr(struct s2io_nic *sp)
3969 {
3970 	struct net_device *dev = sp->dev;
3971 
3972 	free_irq(sp->pdev->irq, dev);
3973 }
3974 
3975 /* ********************************************************* *
3976  * Functions defined below concern the OS part of the driver *
3977  * ********************************************************* */
3978 
3979 /**
3980  *  s2io_open - open entry point of the driver
3981  *  @dev : pointer to the device structure.
3982  *  Description:
3983  *  This function is the open entry point of the driver. It mainly calls a
3984  *  function to allocate Rx buffers and inserts them into the buffer
3985  *  descriptors and then enables the Rx part of the NIC.
3986  *  Return value:
3987  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3988  *   file on failure.
3989  */
3990 
3991 static int s2io_open(struct net_device *dev)
3992 {
3993 	struct s2io_nic *sp = netdev_priv(dev);
3994 	int err = 0;
3995 
3996 	/*
3997 	 * Make sure the link is off by default every time the
3998 	 * NIC is initialized
3999 	 */
4000 	netif_carrier_off(dev);
4001 	sp->last_link_state = 0;
4002 
4003 	/* Initialize H/W and enable interrupts */
4004 	err = s2io_card_up(sp);
4005 	if (err) {
4006 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4007 			  dev->name);
4008 		goto hw_init_failed;
4009 	}
4010 
4011 	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4012 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4013 		s2io_card_down(sp);
4014 		err = -ENODEV;
4015 		goto hw_init_failed;
4016 	}
4017 	s2io_start_all_tx_queue(sp);
4018 	return 0;
4019 
4020 hw_init_failed:
4021 	if (sp->config.intr_type == MSI_X) {
4022 		if (sp->entries) {
4023 			kfree(sp->entries);
4024 			sp->mac_control.stats_info->sw_stat.mem_freed
4025 			+= (sp->num_entries * sizeof(struct msix_entry));
4026 		}
4027 		if (sp->s2io_entries) {
4028 			kfree(sp->s2io_entries);
4029 			sp->mac_control.stats_info->sw_stat.mem_freed
4030 			+= (sp->num_entries * sizeof(struct s2io_msix_entry));
4031 		}
4032 	}
4033 	return err;
4034 }
4035 
4036 /**
4037  *  s2io_close -close entry point of the driver
4038  *  @dev : device pointer.
4039  *  Description:
4040  *  This is the stop entry point of the driver. It needs to undo exactly
4041  *  whatever was done by the open entry point, thus it's usually referred to
4042  *  as the close function. Among other things this function mainly stops the
4043  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4044  *  Return value:
4045  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4046  *  file on failure.
4047  */
4048 
4049 static int s2io_close(struct net_device *dev)
4050 {
4051 	struct s2io_nic *sp = netdev_priv(dev);
4052 	struct config_param *config = &sp->config;
4053 	u64 tmp64;
4054 	int offset;
4055 
4056 	/* Return if the device is already closed.
4057 	 * This can happen when s2io_card_up failed in change_mtu.
4058 	 */
4059 	if (!is_s2io_card_up(sp))
4060 		return 0;
4061 
4062 	s2io_stop_all_tx_queue(sp);
4063 	/* delete all populated mac entries */
4064 	for (offset = 1; offset < config->max_mc_addr; offset++) {
4065 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
4066 		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4067 			do_s2io_delete_unicast_mc(sp, tmp64);
4068 	}
4069 
4070 	s2io_card_down(sp);
4071 
4072 	return 0;
4073 }
4074 
4075 /**
4076  *  s2io_xmit - Tx entry point of the driver
4077  *  @skb : the socket buffer containing the Tx data.
4078  *  @dev : device pointer.
4079  *  Description :
4080  *  This function is the Tx entry point of the driver. S2IO NIC supports
4081  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
4082  *  NOTE: when the device can't queue the packet, just the trans_start
4083  *  variable will not be updated.
4084  *  Return value:
4085  *  0 on success & 1 on failure.
4086  */
4087 
4088 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4089 {
4090 	struct s2io_nic *sp = netdev_priv(dev);
4091 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4092 	register u64 val64;
4093 	struct TxD *txdp;
4094 	struct TxFIFO_element __iomem *tx_fifo;
4095 	unsigned long flags = 0;
4096 	u16 vlan_tag = 0;
4097 	struct fifo_info *fifo = NULL;
4098 	struct mac_info *mac_control;
4099 	struct config_param *config;
4100 	int do_spin_lock = 1;
4101 	int offload_type;
4102 	int enable_per_list_interrupt = 0;
4103 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4104 
4105 	mac_control = &sp->mac_control;
4106 	config = &sp->config;
4107 
4108 	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4109 
4110 	if (unlikely(skb->len <= 0)) {
4111 		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4112 		dev_kfree_skb_any(skb);
4113 		return 0;
4114 	}
4115 
4116 	if (!is_s2io_card_up(sp)) {
4117 		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4118 			  dev->name);
4119 		dev_kfree_skb(skb);
4120 		return 0;
4121 	}
4122 
4123 	queue = 0;
4124 	if (sp->vlgrp && vlan_tx_tag_present(skb))
4125 		vlan_tag = vlan_tx_tag_get(skb);
4126 	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4127 		if (skb->protocol == htons(ETH_P_IP)) {
4128 			struct iphdr *ip;
4129 			struct tcphdr *th;
4130 			ip = ip_hdr(skb);
4131 
4132 			if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4133 				th = (struct tcphdr *)(((unsigned char *)ip) +
4134 						ip->ihl*4);
4135 
4136 				if (ip->protocol == IPPROTO_TCP) {
4137 					queue_len = sp->total_tcp_fifos;
4138 					queue = (ntohs(th->source) +
4139 							ntohs(th->dest)) &
4140 					    sp->fifo_selector[queue_len - 1];
4141 					if (queue >= queue_len)
4142 						queue = queue_len - 1;
4143 				} else if (ip->protocol == IPPROTO_UDP) {
4144 					queue_len = sp->total_udp_fifos;
4145 					queue = (ntohs(th->source) +
4146 							ntohs(th->dest)) &
4147 					    sp->fifo_selector[queue_len - 1];
4148 					if (queue >= queue_len)
4149 						queue = queue_len - 1;
4150 					queue += sp->udp_fifo_idx;
4151 					if (skb->len > 1024)
4152 						enable_per_list_interrupt = 1;
4153 					do_spin_lock = 0;
4154 				}
4155 			}
4156 		}
4157 	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4158 		/* get fifo number based on skb->priority value */
4159 		queue = config->fifo_mapping
4160 					[skb->priority & (MAX_TX_FIFOS - 1)];
4161 	fifo = &mac_control->fifos[queue];
4162 
4163 	if (do_spin_lock)
4164 		spin_lock_irqsave(&fifo->tx_lock, flags);
4165 	else {
4166 		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4167 			return NETDEV_TX_LOCKED;
4168 	}
4169 
4170 	if (sp->config.multiq) {
4171 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4172 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4173 			return NETDEV_TX_BUSY;
4174 		}
4175 	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4176 		if (netif_queue_stopped(dev)) {
4177 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4178 			return NETDEV_TX_BUSY;
4179 		}
4180 	}
4181 
4182 	put_off = (u16) fifo->tx_curr_put_info.offset;
4183 	get_off = (u16) fifo->tx_curr_get_info.offset;
4184 	txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
4185 
4186 	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4187 	/* Avoid "put" pointer going beyond "get" pointer */
4188 	if (txdp->Host_Control ||
4189 		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4190 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4191 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4192 		dev_kfree_skb(skb);
4193 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4194 		return 0;
4195 	}
4196 
4197 	offload_type = s2io_offload_type(skb);
4198 	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4199 		txdp->Control_1 |= TXD_TCP_LSO_EN;
4200 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4201 	}
4202 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4203 		txdp->Control_2 |=
4204 		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4205 		     TXD_TX_CKO_UDP_EN);
4206 	}
4207 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4208 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4209 	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4210 	if (enable_per_list_interrupt)
4211 		if (put_off & (queue_len >> 5))
4212 			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4213 	if (vlan_tag) {
4214 		txdp->Control_2 |= TXD_VLAN_ENABLE;
4215 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4216 	}
4217 
4218 	frg_len = skb->len - skb->data_len;
4219 	if (offload_type == SKB_GSO_UDP) {
4220 		int ufo_size;
4221 
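		/*
		 * TxD0 carries the 8-byte UFO in-band header, so the MSS is
		 * rounded down to a multiple of 8 and the real frame data
		 * starts in the next descriptor.
		 */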
4222 		ufo_size = s2io_udp_mss(skb);
4223 		ufo_size &= ~7;
4224 		txdp->Control_1 |= TXD_UFO_EN;
4225 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4226 		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4227 #ifdef __BIG_ENDIAN
4228 		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
4229 		fifo->ufo_in_band_v[put_off] =
4230 				(__force u64)skb_shinfo(skb)->ip6_frag_id;
4231 #else
4232 		fifo->ufo_in_band_v[put_off] =
4233 				(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4234 #endif
4235 		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4236 		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4237 					fifo->ufo_in_band_v,
4238 					sizeof(u64), PCI_DMA_TODEVICE);
4239 		if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4240 			goto pci_map_failed;
4241 		txdp++;
4242 	}
4243 
4244 	txdp->Buffer_Pointer = pci_map_single
4245 	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4246 	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4247 		goto pci_map_failed;
4248 
4249 	txdp->Host_Control = (unsigned long) skb;
4250 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4251 	if (offload_type == SKB_GSO_UDP)
4252 		txdp->Control_1 |= TXD_UFO_EN;
4253 
4254 	frg_cnt = skb_shinfo(skb)->nr_frags;
4255 	/* For fragmented SKB. */
4256 	for (i = 0; i < frg_cnt; i++) {
4257 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4258 		/* A '0' length fragment will be ignored */
4259 		if (!frag->size)
4260 			continue;
4261 		txdp++;
4262 		txdp->Buffer_Pointer = (u64) pci_map_page
4263 		    (sp->pdev, frag->page, frag->page_offset,
4264 		     frag->size, PCI_DMA_TODEVICE);
4265 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4266 		if (offload_type == SKB_GSO_UDP)
4267 			txdp->Control_1 |= TXD_UFO_EN;
4268 	}
4269 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4270 
4271 	if (offload_type == SKB_GSO_UDP)
4272 		frg_cnt++; /* as Txd0 was used for inband header */
4273 
4274 	tx_fifo = mac_control->tx_FIFO_start[queue];
4275 	val64 = fifo->list_info[put_off].list_phy_addr;
4276 	writeq(val64, &tx_fifo->TxDL_Pointer);
4277 
4278 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4279 		 TX_FIFO_LAST_LIST);
4280 	if (offload_type)
4281 		val64 |= TX_FIFO_SPECIAL_FUNC;
4282 
4283 	writeq(val64, &tx_fifo->List_Control);
4284 
4285 	mmiowb();
4286 
4287 	put_off++;
4288 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4289 		put_off = 0;
4290 	fifo->tx_curr_put_info.offset = put_off;
4291 
4292 	/* Avoid "put" pointer going beyond "get" pointer */
4293 	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4294 		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4295 		DBG_PRINT(TX_DBG,
4296 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4297 			  put_off, get_off);
4298 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4299 	}
4300 	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4301 	dev->trans_start = jiffies;
4302 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4303 
4304 	if (sp->config.intr_type == MSI_X)
4305 		tx_intr_handler(fifo);
4306 
4307 	return 0;
4308 pci_map_failed:
4309 	stats->pci_map_fail_cnt++;
4310 	s2io_stop_tx_queue(sp, fifo->fifo_no);
4311 	stats->mem_freed += skb->truesize;
4312 	dev_kfree_skb(skb);
4313 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4314 	return 0;
4315 }
4316 
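/*
 * Timer callback: poll the error registers via s2io_handle_errors() and
 * re-arm ourselves every HZ/2, i.e. twice a second.
 */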
4317 static void
4318 s2io_alarm_handle(unsigned long data)
4319 {
4320 	struct s2io_nic *sp = (struct s2io_nic *)data;
4321 	struct net_device *dev = sp->dev;
4322 
4323 	s2io_handle_errors(dev);
4324 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4325 }
4326 
4327 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4328 {
4329 	struct ring_info *ring = (struct ring_info *)dev_id;
4330 	struct s2io_nic *sp = ring->nic;
4331 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4332 
4333 	if (unlikely(!is_s2io_card_up(sp)))
4334 		return IRQ_HANDLED;
4335 
4336 	if (sp->config.napi) {
4337 		u8 __iomem *addr = NULL;
4338 		u8 val8 = 0;
4339 
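		/*
		 * Mask this ring's MSI-X vector by writing its byte in
		 * xmsi_mask_reg before scheduling NAPI; ring 0 writes 0x7f,
		 * presumably keeping the alarm vector's bit unmasked.
		 */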
4340 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4341 		addr += (7 - ring->ring_no);
4342 		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4343 		writeb(val8, addr);
4344 		val8 = readb(addr);
4345 		netif_rx_schedule(&ring->napi);
4346 	} else {
4347 		rx_intr_handler(ring, 0);
4348 		s2io_chk_rx_buffers(sp, ring);
4349 	}
4350 
4351 	return IRQ_HANDLED;
4352 }
4353 
4354 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4355 {
4356 	int i;
4357 	struct fifo_info *fifos = (struct fifo_info *)dev_id;
4358 	struct s2io_nic *sp = fifos->nic;
4359 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4360 	struct config_param *config  = &sp->config;
4361 	u64 reason;
4362 
4363 	if (unlikely(!is_s2io_card_up(sp)))
4364 		return IRQ_NONE;
4365 
4366 	reason = readq(&bar0->general_int_status);
4367 	if (unlikely(reason == S2IO_MINUS_ONE))
4368 		/* Nothing much can be done. Get out */
4369 		return IRQ_HANDLED;
4370 
4371 	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4372 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4373 
4374 		if (reason & GEN_INTR_TXPIC)
4375 			s2io_txpic_intr_handle(sp);
4376 
4377 		if (reason & GEN_INTR_TXTRAFFIC)
4378 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4379 
4380 		for (i = 0; i < config->tx_fifo_num; i++)
4381 			tx_intr_handler(&fifos[i]);
4382 
4383 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4384 		readl(&bar0->general_int_status);
4385 		return IRQ_HANDLED;
4386 	}
4387 	/* The interrupt was not raised by us */
4388 	return IRQ_NONE;
4389 }
4390 
4391 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4392 {
4393 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4394 	u64 val64;
4395 
4396 	val64 = readq(&bar0->pic_int_status);
4397 	if (val64 & PIC_INT_GPIO) {
4398 		val64 = readq(&bar0->gpio_int_reg);
4399 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4400 		    (val64 & GPIO_INT_REG_LINK_UP)) {
4401 			/*
4402 			 * This is an unstable state, so clear both up/down
4403 			 * interrupts and let the adapter re-evaluate the link state.
4404 			 */
4405 			val64 |=  GPIO_INT_REG_LINK_DOWN;
4406 			val64 |= GPIO_INT_REG_LINK_UP;
4407 			writeq(val64, &bar0->gpio_int_reg);
4408 			val64 = readq(&bar0->gpio_int_mask);
4409 			val64 &= ~(GPIO_INT_MASK_LINK_UP |
4410 				   GPIO_INT_MASK_LINK_DOWN);
4411 			writeq(val64, &bar0->gpio_int_mask);
4412 		}
4413 		else if (val64 & GPIO_INT_REG_LINK_UP) {
4414 			val64 = readq(&bar0->adapter_status);
4415 			/* Enable Adapter */
4416 			val64 = readq(&bar0->adapter_control);
4417 			val64 |= ADAPTER_CNTL_EN;
4418 			writeq(val64, &bar0->adapter_control);
4419 			val64 |= ADAPTER_LED_ON;
4420 			writeq(val64, &bar0->adapter_control);
4421 			if (!sp->device_enabled_once)
4422 				sp->device_enabled_once = 1;
4423 
4424 			s2io_link(sp, LINK_UP);
4425 			/*
4426 			 * unmask link down interrupt and mask link-up
4427 			 * intr
4428 			 */
4429 			val64 = readq(&bar0->gpio_int_mask);
4430 			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4431 			val64 |= GPIO_INT_MASK_LINK_UP;
4432 			writeq(val64, &bar0->gpio_int_mask);
4433 
4434 		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4435 			val64 = readq(&bar0->adapter_status);
4436 			s2io_link(sp, LINK_DOWN);
4437 			/* Link is down so unmask the link-up interrupt */
4438 			val64 = readq(&bar0->gpio_int_mask);
4439 			val64 &= ~GPIO_INT_MASK_LINK_UP;
4440 			val64 |= GPIO_INT_MASK_LINK_DOWN;
4441 			writeq(val64, &bar0->gpio_int_mask);
4442 
4443 			/* turn off LED */
4444 			val64 = readq(&bar0->adapter_control);
4445 			val64 = val64 &(~ADAPTER_LED_ON);
4446 			writeq(val64, &bar0->adapter_control);
4447 		}
4448 	}
4449 	val64 = readq(&bar0->gpio_int_mask);
4450 }
4451 
4452 /**
4453  *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4454  *  @value: alarm bits
4455  *  @addr: address value
4456  *  @cnt: counter variable
4457  *  Description: Check for alarm and increment the counter
4458  *  Return Value:
4459  *  1 - if alarm bit set
4460  *  0 - if alarm bit is not set
4461  */
4462 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4463 			  unsigned long long *cnt)
4464 {
4465 	u64 val64;
4466 	val64 = readq(addr);
4467 	if (val64 & value) {
4468 		writeq(val64, addr);
4469 		(*cnt)++;
4470 		return 1;
4471 	}
4472 	return 0;
4473 
4474 }
4475 
4476 /**
4477  *  s2io_handle_errors - Xframe error indication handler
4478  *  @nic: device private variable
4479  *  Description: Handle alarms such as loss of link, single or
4480  *  double ECC errors, critical and serious errors.
4481  *  Return Value:
4482  *  NONE
4483  */
4484 static void s2io_handle_errors(void * dev_id)
4485 {
4486 	struct net_device *dev = (struct net_device *) dev_id;
4487 	struct s2io_nic *sp = netdev_priv(dev);
4488 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4489 	u64 temp64 = 0, val64 = 0;
4490 	int i = 0;
4491 
4492 	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4493 	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4494 
4495 	if (!is_s2io_card_up(sp))
4496 		return;
4497 
4498 	if (pci_channel_offline(sp->pdev))
4499 		return;
4500 
4501 	memset(&sw_stat->ring_full_cnt, 0,
4502 		sizeof(sw_stat->ring_full_cnt));
4503 
4504 	/* Handling the XPAK counters update */
4505 	if(stats->xpak_timer_count < 72000) {
4506 		/* wait for 72000 alarm-timer polls between refreshes */
4507 		stats->xpak_timer_count++;
4508 	} else {
4509 		s2io_updt_xpak_counter(dev);
4510 		/* reset the count to zero */
4511 		stats->xpak_timer_count = 0;
4512 	}
4513 
4514 	/* Handling link status change error Intr */
4515 	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4516 		val64 = readq(&bar0->mac_rmac_err_reg);
4517 		writeq(val64, &bar0->mac_rmac_err_reg);
4518 		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4519 			schedule_work(&sp->set_link_task);
4520 	}
4521 
4522 	/* In case of a serious error, the device will be Reset. */
4523 	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4524 				&sw_stat->serious_err_cnt))
4525 		goto reset;
4526 
4527 	/* Check for data parity error */
4528 	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4529 				&sw_stat->parity_err_cnt))
4530 		goto reset;
4531 
4532 	/* Check for ring full counter */
4533 	if (sp->device_type == XFRAME_II_DEVICE) {
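		/*
		 * Each 64-bit bump counter packs four 16-bit per-ring
		 * overflow counts; shift each field down before accumulating.
		 */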
4534 		val64 = readq(&bar0->ring_bump_counter1);
4535 		for (i=0; i<4; i++) {
4536 			temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4537 			temp64 >>= 64 - ((i+1)*16);
4538 			sw_stat->ring_full_cnt[i] += temp64;
4539 		}
4540 
4541 		val64 = readq(&bar0->ring_bump_counter2);
4542 		for (i=0; i<4; i++) {
4543 			temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4544 			temp64 >>= 64 - ((i+1)*16);
4545 			 sw_stat->ring_full_cnt[i+4] += temp64;
4546 		}
4547 	}
4548 
4549 	val64 = readq(&bar0->txdma_int_status);
4550 	/*check for pfc_err*/
4551 	if (val64 & TXDMA_PFC_INT) {
4552 		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4553 				PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4554 				PFC_PCIX_ERR, &bar0->pfc_err_reg,
4555 				&sw_stat->pfc_err_cnt))
4556 			goto reset;
4557 		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4558 				&sw_stat->pfc_err_cnt);
4559 	}
4560 
4561 	/*check for tda_err*/
4562 	if (val64 & TXDMA_TDA_INT) {
4563 		if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4564 				TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4565 				&sw_stat->tda_err_cnt))
4566 			goto reset;
4567 		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4568 				&bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4569 	}
4570 	/*check for pcc_err*/
4571 	if (val64 & TXDMA_PCC_INT) {
4572 		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4573 				| PCC_N_SERR | PCC_6_COF_OV_ERR
4574 				| PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4575 				| PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4576 				| PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4577 				&sw_stat->pcc_err_cnt))
4578 			goto reset;
4579 		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4580 				&bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4581 	}
4582 
4583 	/*check for tti_err*/
4584 	if (val64 & TXDMA_TTI_INT) {
4585 		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4586 				&sw_stat->tti_err_cnt))
4587 			goto reset;
4588 		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4589 				&bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4590 	}
4591 
4592 	/*check for lso_err*/
4593 	if (val64 & TXDMA_LSO_INT) {
4594 		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4595 				| LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4596 				&bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4597 			goto reset;
4598 		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4599 				&bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4600 	}
4601 
4602 	/*check for tpa_err*/
4603 	if (val64 & TXDMA_TPA_INT) {
4604 		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4605 			&sw_stat->tpa_err_cnt))
4606 			goto reset;
4607 		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4608 			&sw_stat->tpa_err_cnt);
4609 	}
4610 
4611 	/*check for sm_err*/
4612 	if (val64 & TXDMA_SM_INT) {
4613 		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4614 			&sw_stat->sm_err_cnt))
4615 			goto reset;
4616 	}
4617 
4618 	val64 = readq(&bar0->mac_int_status);
4619 	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4620 		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4621 				&bar0->mac_tmac_err_reg,
4622 				&sw_stat->mac_tmac_err_cnt))
4623 			goto reset;
4624 		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4625 				| TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4626 				&bar0->mac_tmac_err_reg,
4627 				&sw_stat->mac_tmac_err_cnt);
4628 	}
4629 
4630 	val64 = readq(&bar0->xgxs_int_status);
4631 	if (val64 & XGXS_INT_STATUS_TXGXS) {
4632 		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4633 				&bar0->xgxs_txgxs_err_reg,
4634 				&sw_stat->xgxs_txgxs_err_cnt))
4635 			goto reset;
4636 		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4637 				&bar0->xgxs_txgxs_err_reg,
4638 				&sw_stat->xgxs_txgxs_err_cnt);
4639 	}
4640 
4641 	val64 = readq(&bar0->rxdma_int_status);
4642 	if (val64 & RXDMA_INT_RC_INT_M) {
4643 		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4644 				| RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4645 				&bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4646 			goto reset;
4647 		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4648 				| RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4649 				&sw_stat->rc_err_cnt);
4650 		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4651 				| PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4652 				&sw_stat->prc_pcix_err_cnt))
4653 			goto reset;
4654 		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4655 				| PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4656 				&sw_stat->prc_pcix_err_cnt);
4657 	}
4658 
4659 	if (val64 & RXDMA_INT_RPA_INT_M) {
4660 		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4661 				&bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4662 			goto reset;
4663 		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4664 				&bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4665 	}
4666 
4667 	if (val64 & RXDMA_INT_RDA_INT_M) {
4668 		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4669 				| RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4670 				| RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4671 				&bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4672 			goto reset;
4673 		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4674 				| RDA_MISC_ERR | RDA_PCIX_ERR,
4675 				&bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4676 	}
4677 
4678 	if (val64 & RXDMA_INT_RTI_INT_M) {
4679 		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4680 				&sw_stat->rti_err_cnt))
4681 			goto reset;
4682 		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4683 				&bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4684 	}
4685 
4686 	val64 = readq(&bar0->mac_int_status);
4687 	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4688 		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4689 				&bar0->mac_rmac_err_reg,
4690 				&sw_stat->mac_rmac_err_cnt))
4691 			goto reset;
4692 		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4693 				RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4694 				&sw_stat->mac_rmac_err_cnt);
4695 	}
4696 
4697 	val64 = readq(&bar0->xgxs_int_status);
4698 	if (val64 & XGXS_INT_STATUS_RXGXS) {
4699 		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4700 				&bar0->xgxs_rxgxs_err_reg,
4701 				&sw_stat->xgxs_rxgxs_err_cnt))
4702 			goto reset;
4703 	}
4704 
4705 	val64 = readq(&bar0->mc_int_status);
4706 	if(val64 & MC_INT_STATUS_MC_INT) {
4707 		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4708 				&sw_stat->mc_err_cnt))
4709 			goto reset;
4710 
4711 		/* Handling ECC errors */
4712 		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4713 			writeq(val64, &bar0->mc_err_reg);
4714 			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4715 				sw_stat->double_ecc_errs++;
4716 				if (sp->device_type != XFRAME_II_DEVICE) {
4717 					/*
4718 					 * Reset XframeI only if critical error
4719 					 */
4720 					if (val64 &
4721 						(MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4722 						MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4723 								goto reset;
4724 					}
4725 			} else
4726 				sw_stat->single_ecc_errs++;
4727 		}
4728 	}
4729 	return;
4730 
4731 reset:
4732 	s2io_stop_all_tx_queue(sp);
4733 	schedule_work(&sp->rst_timer_task);
4734 	sw_stat->soft_reset_cnt++;
4735 	return;
4736 }
4737 
4738 /**
4739  *  s2io_isr - ISR handler of the device.
4740  *  @irq: the irq of the device.
4741  *  @dev_id: a void pointer to the dev structure of the NIC.
4742  *  Description:  This function is the ISR handler of the device. It
4743  *  identifies the reason for the interrupt and calls the relevant
4744  *  service routines. As a contingency measure, this ISR allocates the
4745  *  recv buffers, if their numbers are below the panic value which is
4746  *  presently set to 25% of the original number of rcv buffers allocated.
4747  *  Return value:
4748  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4749  *   IRQ_NONE: will be returned if interrupt is not from our device
4750  */
4751 static irqreturn_t s2io_isr(int irq, void *dev_id)
4752 {
4753 	struct net_device *dev = (struct net_device *) dev_id;
4754 	struct s2io_nic *sp = netdev_priv(dev);
4755 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4756 	int i;
4757 	u64 reason = 0;
4758 	struct mac_info *mac_control;
4759 	struct config_param *config;
4760 
4761 	/* Pretend we handled any irq's from a disconnected card */
4762 	if (pci_channel_offline(sp->pdev))
4763 		return IRQ_NONE;
4764 
4765 	if (!is_s2io_card_up(sp))
4766 		return IRQ_NONE;
4767 
4768 	mac_control = &sp->mac_control;
4769 	config = &sp->config;
4770 
4771 	/*
4772 	 * Identify the cause for interrupt and call the appropriate
4773 	 * interrupt handler. Causes for the interrupt could be;
4774 	 * 1. Rx of packet.
4775 	 * 2. Tx complete.
4776 	 * 3. Link down.
4777 	 */
4778 	reason = readq(&bar0->general_int_status);
4779 
4780 	if (unlikely(reason == S2IO_MINUS_ONE) ) {
4781 		/* Nothing much can be done. Get out */
4782 		return IRQ_HANDLED;
4783 	}
4784 
4785 	if (reason & (GEN_INTR_RXTRAFFIC |
4786 		GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4787 	{
4788 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4789 
4790 		if (config->napi) {
4791 			if (reason & GEN_INTR_RXTRAFFIC) {
4792 				netif_rx_schedule(&sp->napi);
4793 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4794 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4795 				readl(&bar0->rx_traffic_int);
4796 			}
4797 		} else {
4798 			/*
4799 			 * rx_traffic_int reg is an R1 register, writing all 1's
4800 			 * will ensure that the actual interrupt causing bit
4801 			 * gets cleared and hence a read can be avoided.
4802 			 */
4803 			if (reason & GEN_INTR_RXTRAFFIC)
4804 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4805 
4806 			for (i = 0; i < config->rx_ring_num; i++)
4807 				rx_intr_handler(&mac_control->rings[i], 0);
4808 		}
4809 
4810 		/*
4811 		 * tx_traffic_int reg is an R1 register, writing all 1's
4812 		 * will ensure that the actual interrupt causing bit gets
4813 		 * cleared and hence a read can be avoided.
4814 		 */
4815 		if (reason & GEN_INTR_TXTRAFFIC)
4816 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4817 
4818 		for (i = 0; i < config->tx_fifo_num; i++)
4819 			tx_intr_handler(&mac_control->fifos[i]);
4820 
4821 		if (reason & GEN_INTR_TXPIC)
4822 			s2io_txpic_intr_handle(sp);
4823 
4824 		/*
4825 		 * Reallocate the buffers from the interrupt handler itself.
4826 		 */
4827 		if (!config->napi) {
4828 			for (i = 0; i < config->rx_ring_num; i++)
4829 				s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
4830 		}
4831 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4832 		readl(&bar0->general_int_status);
4833 
4834 		return IRQ_HANDLED;
4835 
4836 	}
4837 	else if (!reason) {
4838 		/* The interrupt was not raised by us */
4839 		return IRQ_NONE;
4840 	}
4841 
4842 	return IRQ_HANDLED;
4843 }
4844 
4845 /**
4846  * s2io_updt_stats - Trigger a hardware statistics update and wait for it.
4847  */
4848 static void s2io_updt_stats(struct s2io_nic *sp)
4849 {
4850 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4851 	u64 val64;
4852 	int cnt = 0;
4853 
4854 	if (is_s2io_card_up(sp)) {
4855 		/* Approx 30 us on a 133 MHz bus */
4856 		val64 = SET_UPDT_CLICKS(10) |
4857 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4858 		writeq(val64, &bar0->stat_cfg);
4859 		do {
4860 			udelay(100);
4861 			val64 = readq(&bar0->stat_cfg);
4862 			if (!(val64 & s2BIT(0)))
4863 				break;
4864 			cnt++;
4865 			if (cnt == 5)
4866 				break; /* Updt failed */
4867 		} while(1);
4868 	}
4869 }
4870 
4871 /**
4872  *  s2io_get_stats - Updates the device statistics structure.
4873  *  @dev : pointer to the device structure.
4874  *  Description:
4875  *  This function updates the device statistics structure in the s2io_nic
4876  *  structure and returns a pointer to the same.
4877  *  Return value:
4878  *  pointer to the updated net_device_stats structure.
4879  */
4880 
4881 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4882 {
4883 	struct s2io_nic *sp = netdev_priv(dev);
4884 	struct mac_info *mac_control;
4885 	struct config_param *config;
4886 	int i;
4887 
4888 
4889 	mac_control = &sp->mac_control;
4890 	config = &sp->config;
4891 
4892 	/* Configure Stats for immediate updt */
4893 	s2io_updt_stats(sp);
4894 
4895 	/* Using sp->stats as a staging area, because reset (due to mtu
4896 	   change, for example) will clear some hardware counters */
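	/* The pattern for each counter is: add (hw_now - hw_at_last_read) to
	   the dev total, then remember hw_now.  s2io_reset() clears sp->stats
	   together with the hardware block, so the deltas stay consistent
	   across resets. */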
4897 	dev->stats.tx_packets +=
4898 		le32_to_cpu(mac_control->stats_info->tmac_frms) -
4899 		sp->stats.tx_packets;
4900 	sp->stats.tx_packets =
4901 		le32_to_cpu(mac_control->stats_info->tmac_frms);
4902 	dev->stats.tx_errors +=
4903 		le32_to_cpu(mac_control->stats_info->tmac_any_err_frms) -
4904 		sp->stats.tx_errors;
4905 	sp->stats.tx_errors =
4906 		le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4907 	dev->stats.rx_errors +=
4908 		le64_to_cpu(mac_control->stats_info->rmac_drop_frms) -
4909 		sp->stats.rx_errors;
4910 	sp->stats.rx_errors =
4911 		le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4912 	dev->stats.multicast =
4913 		le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms) -
4914 		sp->stats.multicast;
4915 	sp->stats.multicast =
4916 		le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4917 	dev->stats.rx_length_errors =
4918 		le64_to_cpu(mac_control->stats_info->rmac_long_frms) -
4919 		sp->stats.rx_length_errors;
4920 	sp->stats.rx_length_errors =
4921 		le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4922 
4923 	/* collect per-ring rx_packets and rx_bytes */
4924 	dev->stats.rx_packets = dev->stats.rx_bytes = 0;
4925 	for (i = 0; i < config->rx_ring_num; i++) {
4926 		dev->stats.rx_packets += mac_control->rings[i].rx_packets;
4927 		dev->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4928 	}
4929 
4930 	return (&dev->stats);
4931 }
4932 
4933 /**
4934  *  s2io_set_multicast - entry point for multicast address enable/disable.
4935  *  @dev : pointer to the device structure
4936  *  Description:
4937  *  This function is a driver entry point which gets called by the kernel
4938  *  whenever multicast addresses must be enabled/disabled. This also gets
4939  *  called to set/reset promiscuous mode. Depending on the device flags, we
4940  *  determine whether multicast addresses must be enabled or promiscuous mode
4941  *  is to be disabled, etc.
4942  *  Return value:
4943  *  void.
4944  */
4945 
4946 static void s2io_set_multicast(struct net_device *dev)
4947 {
4948 	int i, j, prev_cnt;
4949 	struct dev_mc_list *mclist;
4950 	struct s2io_nic *sp = netdev_priv(dev);
4951 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4952 	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4953 	    0xfeffffffffffULL;
4954 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4955 	void __iomem *add;
4956 	struct config_param *config = &sp->config;
4957 
4958 	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4959 		/*  Enable all Multicast addresses */
4960 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4961 		       &bar0->rmac_addr_data0_mem);
4962 		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4963 		       &bar0->rmac_addr_data1_mem);
4964 		val64 = RMAC_ADDR_CMD_MEM_WE |
4965 		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4966 		    RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4967 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4968 		/* Wait till command completes */
4969 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4970 					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4971 					S2IO_BIT_RESET);
4972 
4973 		sp->m_cast_flg = 1;
4974 		sp->all_multi_pos = config->max_mc_addr - 1;
4975 	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4976 		/*  Disable all Multicast addresses */
4977 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4978 		       &bar0->rmac_addr_data0_mem);
4979 		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4980 		       &bar0->rmac_addr_data1_mem);
4981 		val64 = RMAC_ADDR_CMD_MEM_WE |
4982 		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4983 		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4984 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4985 		/* Wait till command completes */
4986 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4987 					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4988 					S2IO_BIT_RESET);
4989 
4990 		sp->m_cast_flg = 0;
4991 		sp->all_multi_pos = 0;
4992 	}
4993 
4994 	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4995 		/*  Put the NIC into promiscuous mode */
4996 		add = &bar0->mac_cfg;
4997 		val64 = readq(&bar0->mac_cfg);
4998 		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4999 
5000 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5001 		writel((u32) val64, add);
5002 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5003 		writel((u32) (val64 >> 32), (add + 4));
5004 
5005 		if (vlan_tag_strip != 1) {
5006 			val64 = readq(&bar0->rx_pa_cfg);
5007 			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5008 			writeq(val64, &bar0->rx_pa_cfg);
5009 			sp->vlan_strip_flag = 0;
5010 		}
5011 
5012 		val64 = readq(&bar0->mac_cfg);
5013 		sp->promisc_flg = 1;
5014 		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5015 			  dev->name);
5016 	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5017 		/*  Remove the NIC from promiscuous mode */
5018 		add = &bar0->mac_cfg;
5019 		val64 = readq(&bar0->mac_cfg);
5020 		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5021 
5022 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5023 		writel((u32) val64, add);
5024 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5025 		writel((u32) (val64 >> 32), (add + 4));
5026 
5027 		if (vlan_tag_strip != 0) {
5028 			val64 = readq(&bar0->rx_pa_cfg);
5029 			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5030 			writeq(val64, &bar0->rx_pa_cfg);
5031 			sp->vlan_strip_flag = 1;
5032 		}
5033 
5034 		val64 = readq(&bar0->mac_cfg);
5035 		sp->promisc_flg = 0;
5036 		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
5037 			  dev->name);
5038 	}
5039 
5040 	/*  Update individual M_CAST address list */
5041 	if ((!sp->m_cast_flg) && dev->mc_count) {
5042 		if (dev->mc_count >
5043 		    (config->max_mc_addr - config->max_mac_addr)) {
5044 			DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5045 				  dev->name);
5046 			DBG_PRINT(ERR_DBG, "can be added, please enable ");
5047 			DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5048 			return;
5049 		}
5050 
5051 		prev_cnt = sp->mc_addr_count;
5052 		sp->mc_addr_count = dev->mc_count;
5053 
5054 		/* Clear out the previous list of Mc in the H/W. */
5055 		for (i = 0; i < prev_cnt; i++) {
5056 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5057 			       &bar0->rmac_addr_data0_mem);
5058 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5059 				&bar0->rmac_addr_data1_mem);
5060 			val64 = RMAC_ADDR_CMD_MEM_WE |
5061 			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5062 			    RMAC_ADDR_CMD_MEM_OFFSET
5063 			    (config->mc_start_offset + i);
5064 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5065 
5066 			/* Wait till command completes */
5067 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5068 					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5069 					S2IO_BIT_RESET)) {
5070 				DBG_PRINT(ERR_DBG, "%s: Adding ",
5071 					  dev->name);
5072 				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5073 				return;
5074 			}
5075 		}
5076 
5077 		/* Create the new Rx filter list and update the same in H/W. */
5078 		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5079 		     i++, mclist = mclist->next) {
5080 			memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
5081 			       ETH_ALEN);
5082 			mac_addr = 0;
5083 			for (j = 0; j < ETH_ALEN; j++) {
5084 				mac_addr |= mclist->dmi_addr[j];
5085 				mac_addr <<= 8;
5086 			}
5087 			mac_addr >>= 8;
5088 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5089 			       &bar0->rmac_addr_data0_mem);
5090 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5091 				&bar0->rmac_addr_data1_mem);
5092 			val64 = RMAC_ADDR_CMD_MEM_WE |
5093 			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5094 			    RMAC_ADDR_CMD_MEM_OFFSET
5095 			    (i + config->mc_start_offset);
5096 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5097 
5098 			/* Wait till command completes */
5099 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5100 					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5101 					S2IO_BIT_RESET)) {
5102 				DBG_PRINT(ERR_DBG, "%s: Adding ",
5103 					  dev->name);
5104 				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5105 				return;
5106 			}
5107 		}
5108 	}
5109 }
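
/*
 * Illustrative sketch (not part of the driver): the CAM helpers in this
 * file fold a 6-byte Ethernet address into the low 48 bits of a u64,
 * most significant byte first. For the (hypothetical) address
 * 00:01:02:03:04:05:
 *
 *	u64 mac = 0;
 *	for (i = 0; i < ETH_ALEN; i++) {
 *		mac <<= 8;
 *		mac |= addr[i];
 *	}
 *
 * mac now holds 0x000102030405ULL.
 */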
5110 
5111 /* read unicast & multicast addresses from the CAM and store them in the
5112  * def_mac_addr structure
5113  */
5114 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5115 {
5116 	int offset;
5117 	u64 mac_addr = 0x0;
5118 	struct config_param *config = &sp->config;
5119 
5120 	/* store unicast & multicast mac addresses */
5121 	for (offset = 0; offset < config->max_mc_addr; offset++) {
5122 		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5123 		/* if read fails disable the entry */
5124 		if (mac_addr == FAILURE)
5125 			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5126 		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5127 	}
5128 }
5129 
5130 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5131 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5132 {
5133 	int offset;
5134 	struct config_param *config = &sp->config;
5135 	/* restore unicast mac address */
5136 	for (offset = 0; offset < config->max_mac_addr; offset++)
5137 		do_s2io_prog_unicast(sp->dev,
5138 			sp->def_mac_addr[offset].mac_addr);
5139 
5140 	/* restore multicast mac address */
5141 	for (offset = config->mc_start_offset;
5142 		offset < config->max_mc_addr; offset++)
5143 		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5144 }
5145 
5146 /* add a multicast MAC address to CAM */
5147 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5148 {
5149 	int i;
5150 	u64 mac_addr = 0;
5151 	struct config_param *config = &sp->config;
5152 
5153 	for (i = 0; i < ETH_ALEN; i++) {
5154 		mac_addr <<= 8;
5155 		mac_addr |= addr[i];
5156 	}
5157 	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5158 		return SUCCESS;
5159 
5160 	/* check if the multicast mac is already present in the CAM */
5161 	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5162 		u64 tmp64;
5163 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5164 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5165 			break;
5166 
5167 		if (tmp64 == mac_addr)
5168 			return SUCCESS;
5169 	}
5170 	if (i == config->max_mc_addr) {
5171 		DBG_PRINT(ERR_DBG,
5172 			"CAM full no space left for multicast MAC\n");
5173 		return FAILURE;
5174 	}
5175 	/* Update the internal structure with this new mac address */
5176 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5177 
5178 	return (do_s2io_add_mac(sp, mac_addr, i));
5179 }
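
/*
 * CAM layout assumed by the helpers above and below (a sketch; the exact
 * bounds come from struct config_param at initialization):
 *
 *	offset 0                        : primary unicast (device) address
 *	offsets 1 .. max_mac_addr-1     : additional unicast entries
 *	offsets mc_start_offset ..
 *		max_mc_addr-1           : multicast entries
 *
 * An offset holding S2IO_DISABLE_MAC_ENTRY (all ones in the 48 address
 * bits) is treated as empty.
 */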
5180 
5181 /* add MAC address to CAM */
5182 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5183 {
5184 	u64 val64;
5185 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5186 
5187 	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5188 		&bar0->rmac_addr_data0_mem);
5189 
5190 	val64 =
5191 		RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5192 		RMAC_ADDR_CMD_MEM_OFFSET(off);
5193 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5194 
5195 	/* Wait till command completes */
5196 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5197 		RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5198 		S2IO_BIT_RESET)) {
5199 		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5200 		return FAILURE;
5201 	}
5202 	return SUCCESS;
5203 }
5204 /* deletes a specified unicast/multicast mac entry from CAM */
5205 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5206 {
5207 	int offset;
5208 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5209 	struct config_param *config = &sp->config;
5210 
5211 	for (offset = 1; offset < config->max_mc_addr; offset++) {
5213 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5214 		if (tmp64 == addr) {
5215 			/* disable the entry by writing  0xffffffffffffULL */
5216 			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5217 				return FAILURE;
5218 			/* store the new mac list from CAM */
5219 			do_s2io_store_unicast_mc(sp);
5220 			return SUCCESS;
5221 		}
5222 	}
5223 	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5224 			(unsigned long long)addr);
5225 	return FAILURE;
5226 }
5227 
5228 /* read mac entries from CAM */
5229 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5230 {
5231 	u64 tmp64 = 0xffffffffffff0000ULL, val64;
5232 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5233 
5234 	/* read mac addr */
5235 	val64 =
5236 		RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5237 		RMAC_ADDR_CMD_MEM_OFFSET(offset);
5238 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5239 
5240 	/* Wait till command completes */
5241 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5242 		RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5243 		S2IO_BIT_RESET)) {
5244 		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5245 		return FAILURE;
5246 	}
5247 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
5248 	return (tmp64 >> 16);
5249 }
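
/*
 * Note: the rmac_addr_data0_mem register keeps the 48-bit address
 * left-justified in the 64-bit word, which is why the value read back
 * above is shifted right by 16 before being returned.
 */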
5250 
5251 /**
5252  * s2io_set_mac_addr - driver entry point to change the MAC address
5253  */
5254 
5255 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5256 {
5257 	struct sockaddr *addr = p;
5258 
5259 	if (!is_valid_ether_addr(addr->sa_data))
5260 		return -EINVAL;
5261 
5262 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5263 
5264 	/* store the MAC address in CAM */
5265 	return (do_s2io_prog_unicast(dev, dev->dev_addr));
5266 }
5267 /**
5268  *  do_s2io_prog_unicast - Programs the Xframe mac address
5269  *  @dev : pointer to the device structure.
5270  *  @addr: a uchar pointer to the new mac address which is to be set.
5271  *  Description : This procedure will program the Xframe to receive
5272  *  frames with the new MAC address.
5273  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5274  *  as defined in errno.h file on failure.
5275  */
5276 
5277 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5278 {
5279 	struct s2io_nic *sp = netdev_priv(dev);
5280 	register u64 mac_addr = 0, perm_addr = 0;
5281 	int i;
5282 	u64 tmp64;
5283 	struct config_param *config = &sp->config;
5284 
5285 	/*
5286 	* Set the new MAC address as the new unicast filter and reflect this
5287 	* change on the device address registered with the OS. It will be
5288 	* at offset 0.
5289 	*/
5290 	for (i = 0; i < ETH_ALEN; i++) {
5291 		mac_addr <<= 8;
5292 		mac_addr |= addr[i];
5293 		perm_addr <<= 8;
5294 		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5295 	}
5296 
5297 	/* check if the dev_addr is different from perm_addr */
5298 	if (mac_addr == perm_addr)
5299 		return SUCCESS;
5300 
5301 	/* check if the mac is already present in the CAM */
5302 	for (i = 1; i < config->max_mac_addr; i++) {
5303 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5304 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5305 			break;
5306 
5307 		if (tmp64 == mac_addr) {
5308 			DBG_PRINT(INFO_DBG,
5309 			"MAC addr:0x%llx already present in CAM\n",
5310 			(unsigned long long)mac_addr);
5311 			return SUCCESS;
5312 		}
5313 	}
5314 	if (i == config->max_mac_addr) {
5315 		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5316 		return FAILURE;
5317 	}
5318 	/* Update the internal structure with this new mac address */
5319 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5320 	return (do_s2io_add_mac(sp, mac_addr, i));
5321 }
5322 
5323 /**
5324  * s2io_ethtool_sset - Sets different link parameters.
5325  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5326  * @info: pointer to the structure with parameters given by ethtool to set
5327  * link information.
5328  * Description:
5329  * The function sets different link parameters provided by the user onto
5330  * the NIC.
5331  * Return value:
5332  * 0 on success.
5333 */
5334 
5335 static int s2io_ethtool_sset(struct net_device *dev,
5336 			     struct ethtool_cmd *info)
5337 {
5338 	struct s2io_nic *sp = netdev_priv(dev);
5339 	if ((info->autoneg == AUTONEG_ENABLE) ||
5340 	    (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5341 		return -EINVAL;
5342 	else {
5343 		s2io_close(sp->dev);
5344 		s2io_open(sp->dev);
5345 	}
5346 
5347 	return 0;
5348 }
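
/*
 * The Xframe link is fixed at 10 Gb/s full duplex with autonegotiation
 * off, so the only settings s2io_ethtool_sset() accepts are the ones the
 * hardware already runs at; anything else returns -EINVAL. From user
 * space that corresponds to (assuming the standard ethtool utility):
 *
 *	ethtool -s ethX speed 10000 duplex full autoneg off
 */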
5349 
5350 /**
5351  * s2io_ethtool_gset - Return link specific information.
5352  * @sp : private member of the device structure, pointer to the
5353  *      s2io_nic structure.
5354  * @info : pointer to the structure with parameters given by ethtool
5355  * to return link information.
5356  * Description:
5357  * Returns link specific information like speed, duplex etc.. to ethtool.
5358  * Return value :
5359  * return 0 on success.
5360  */
5361 
5362 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5363 {
5364 	struct s2io_nic *sp = netdev_priv(dev);
5365 	info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5366 	info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
5367 	info->port = PORT_FIBRE;
5368 
5369 	/* info->transceiver */
5370 	info->transceiver = XCVR_EXTERNAL;
5371 
5372 	if (netif_carrier_ok(sp->dev)) {
5373 		info->speed = 10000;
5374 		info->duplex = DUPLEX_FULL;
5375 	} else {
5376 		info->speed = -1;
5377 		info->duplex = -1;
5378 	}
5379 
5380 	info->autoneg = AUTONEG_DISABLE;
5381 	return 0;
5382 }
5383 
5384 /**
5385  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5386  * @sp : private member of the device structure, which is a pointer to the
5387  * s2io_nic structure.
5388  * @info : pointer to the structure with parameters given by ethtool to
5389  * return driver information.
5390  * Description:
5391  * Returns driver specific information like name, version, etc. to ethtool.
5392  * Return value:
5393  *  void
5394  */
5395 
5396 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5397 				  struct ethtool_drvinfo *info)
5398 {
5399 	struct s2io_nic *sp = netdev_priv(dev);
5400 
5401 	strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5402 	strncpy(info->version, s2io_driver_version, sizeof(info->version));
5403 	strncpy(info->fw_version, "", sizeof(info->fw_version));
5404 	strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5405 	info->regdump_len = XENA_REG_SPACE;
5406 	info->eedump_len = XENA_EEPROM_SPACE;
5407 }
5408 
5409 /**
5410  *  s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
5411  *  @sp: private member of the device structure, which is a pointer to the
5412  *  s2io_nic structure.
5413  *  @regs : pointer to the structure with parameters given by ethtool for
5414  *  dumping the registers.
5415  *  @reg_space: The buffer into which all the registers are dumped.
5416  *  Description:
5417  *  Dumps the entire register space of the Xframe NIC into the user given
5418  *  buffer area.
5419  * Return value :
5420  * void .
5421 */
5422 
5423 static void s2io_ethtool_gregs(struct net_device *dev,
5424 			       struct ethtool_regs *regs, void *space)
5425 {
5426 	int i;
5427 	u64 reg;
5428 	u8 *reg_space = (u8 *) space;
5429 	struct s2io_nic *sp = netdev_priv(dev);
5430 
5431 	regs->len = XENA_REG_SPACE;
5432 	regs->version = sp->pdev->subsystem_device;
5433 
5434 	for (i = 0; i < regs->len; i += 8) {
5435 		reg = readq(sp->bar0 + i);
5436 		memcpy((reg_space + i), &reg, 8);
5437 	}
5438 }
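
/*
 * From user space the register dump above is reached via the standard
 * ethtool utility, e.g. (sketch):
 *
 *	ethtool -d ethX
 *
 * which returns regs->len (XENA_REG_SPACE) bytes, read 8 bytes at a time
 * from BAR0.
 */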
5439 
5440 /**
5441  *  s2io_phy_id  - timer function that alternates adapter LED.
5442  *  @data : address of the private member of the device structure, which
5443  *  is a pointer to the s2io_nic structure, provided as an u32.
5444  * Description: This is the timer function that toggles the adapter LED
5445  * bit in the adapter control register on every invocation. The timer is
5446  * set to fire every 1/2 second, hence the NIC's LED blinks
5447  * once every second.
5448 */
5449 static void s2io_phy_id(unsigned long data)
5450 {
5451 	struct s2io_nic *sp = (struct s2io_nic *) data;
5452 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5453 	u64 val64 = 0;
5454 	u16 subid;
5455 
5456 	subid = sp->pdev->subsystem_device;
5457 	if ((sp->device_type == XFRAME_II_DEVICE) ||
5458 		   ((subid & 0xFF) >= 0x07)) {
5459 		val64 = readq(&bar0->gpio_control);
5460 		val64 ^= GPIO_CTRL_GPIO_0;
5461 		writeq(val64, &bar0->gpio_control);
5462 	} else {
5463 		val64 = readq(&bar0->adapter_control);
5464 		val64 ^= ADAPTER_LED_ON;
5465 		writeq(val64, &bar0->adapter_control);
5466 	}
5467 
5468 	mod_timer(&sp->id_timer, jiffies + HZ / 2);
5469 }
5470 
5471 /**
5472  * s2io_ethtool_idnic - To physically identify the nic on the system.
5473  * @sp : private member of the device structure, which is a pointer to the
5474  * s2io_nic structure.
5475  * @id : pointer to the structure with identification parameters given by
5476  * ethtool.
5477  * Description: Used to physically identify the NIC on the system.
5478  * The Link LED will blink for a time specified by the user for
5479  * identification.
5480  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5481  * identification is possible only if its link is up.
5482  * Return value:
5483  * int , returns 0 on success
5484  */
5485 
5486 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5487 {
5488 	u64 val64 = 0, last_gpio_ctrl_val;
5489 	struct s2io_nic *sp = netdev_priv(dev);
5490 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5491 	u16 subid;
5492 
5493 	subid = sp->pdev->subsystem_device;
5494 	last_gpio_ctrl_val = readq(&bar0->gpio_control);
5495 	if ((sp->device_type == XFRAME_I_DEVICE) &&
5496 		((subid & 0xFF) < 0x07)) {
5497 		val64 = readq(&bar0->adapter_control);
5498 		if (!(val64 & ADAPTER_CNTL_EN)) {
5499 			printk(KERN_ERR
5500 			       "Adapter Link down, cannot blink LED\n");
5501 			return -EFAULT;
5502 		}
5503 	}
5504 	if (sp->id_timer.function == NULL) {
5505 		init_timer(&sp->id_timer);
5506 		sp->id_timer.function = s2io_phy_id;
5507 		sp->id_timer.data = (unsigned long) sp;
5508 	}
5509 	mod_timer(&sp->id_timer, jiffies);
5510 	if (data)
5511 		msleep_interruptible(data * HZ);
5512 	else
5513 		msleep_interruptible(MAX_FLICKER_TIME);
5514 	del_timer_sync(&sp->id_timer);
5515 
5516 	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5517 		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5518 		last_gpio_ctrl_val = readq(&bar0->gpio_control);
5519 	}
5520 
5521 	return 0;
5522 }
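
/*
 * Porting note: init_timer() with a manually assigned .function/.data
 * pair, as used above, is the pre-4.15 kernel timer API; on modern
 * kernels the same pattern is written with timer_setup() and
 * from_timer().
 */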
5523 
5524 static void s2io_ethtool_gringparam(struct net_device *dev,
5525 				    struct ethtool_ringparam *ering)
5526 {
5527 	struct s2io_nic *sp = netdev_priv(dev);
5528 	int i, tx_desc_count = 0, rx_desc_count = 0;
5529 
5530 	if (sp->rxd_mode == RXD_MODE_1)
5531 		ering->rx_max_pending = MAX_RX_DESC_1;
5532 	else if (sp->rxd_mode == RXD_MODE_3B)
5533 		ering->rx_max_pending = MAX_RX_DESC_2;
5534 
5535 	ering->tx_max_pending = MAX_TX_DESC;
5536 	for (i = 0; i < sp->config.tx_fifo_num; i++)
5537 		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5538 
5539 	DBG_PRINT(INFO_DBG, "\nmax txds : %d\n", sp->config.max_txds);
5540 	ering->tx_pending = tx_desc_count;
5541 	rx_desc_count = 0;
5542 	for (i = 0; i < sp->config.rx_ring_num; i++)
5543 		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5544 
5545 	ering->rx_pending = rx_desc_count;
5546 
5547 	ering->rx_mini_max_pending = 0;
5548 	ering->rx_mini_pending = 0;
5549 	if (sp->rxd_mode == RXD_MODE_1)
5550 		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5551 	else if (sp->rxd_mode == RXD_MODE_3B)
5552 		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5553 	ering->rx_jumbo_pending = rx_desc_count;
5554 }
5555 
5556 /**
5557  * s2io_ethtool_getpause_data - Pause frame generation and reception.
5558  * @sp : private member of the device structure, which is a pointer to the
5559  *	s2io_nic structure.
5560  * @ep : pointer to the structure with pause parameters given by ethtool.
5561  * Description:
5562  * Returns the Pause frame generation and reception capability of the NIC.
5563  * Return value:
5564  *  void
5565  */
5566 static void s2io_ethtool_getpause_data(struct net_device *dev,
5567 				       struct ethtool_pauseparam *ep)
5568 {
5569 	u64 val64;
5570 	struct s2io_nic *sp = netdev_priv(dev);
5571 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5572 
5573 	val64 = readq(&bar0->rmac_pause_cfg);
5574 	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5575 		ep->tx_pause = TRUE;
5576 	if (val64 & RMAC_PAUSE_RX_ENABLE)
5577 		ep->rx_pause = TRUE;
5578 	ep->autoneg = FALSE;
5579 }
5580 
5581 /**
5582  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5583  * @sp : private member of the device structure, which is a pointer to the
5584  *      s2io_nic structure.
5585  * @ep : pointer to the structure with pause parameters given by ethtool.
5586  * Description:
5587  * It can be used to set or reset Pause frame generation or reception
5588  * support of the NIC.
5589  * Return value:
5590  * int, returns 0 on Success
5591  */
5592 
5593 static int s2io_ethtool_setpause_data(struct net_device *dev,
5594 			       struct ethtool_pauseparam *ep)
5595 {
5596 	u64 val64;
5597 	struct s2io_nic *sp = netdev_priv(dev);
5598 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5599 
5600 	val64 = readq(&bar0->rmac_pause_cfg);
5601 	if (ep->tx_pause)
5602 		val64 |= RMAC_PAUSE_GEN_ENABLE;
5603 	else
5604 		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5605 	if (ep->rx_pause)
5606 		val64 |= RMAC_PAUSE_RX_ENABLE;
5607 	else
5608 		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5609 	writeq(val64, &bar0->rmac_pause_cfg);
5610 	return 0;
5611 }
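
/*
 * From user space these two handlers back the standard pause-parameter
 * ethtool commands, e.g. (sketch):
 *
 *	ethtool -a ethX			# query, via s2io_ethtool_getpause_data()
 *	ethtool -A ethX rx on tx on	# set, via s2io_ethtool_setpause_data()
 */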
5612 
5613 /**
5614  * read_eeprom - reads 4 bytes of data from user given offset.
5615  * @sp : private member of the device structure, which is a pointer to the
5616  *      s2io_nic structure.
5617  * @off : offset from which the data is to be read
5618  * @data : It's an output parameter where the data read at the given
5619  *	offset is stored.
5620  * Description:
5621  * Will read 4 bytes of data from the user given offset and return the
5622  * read data.
5623  * NOTE: Only the part of the EEPROM visible through the I2C bus can
5624  *   be read.
5625  * Return value:
5626  *  -1 on failure and 0 on success.
5627  */
5628 
5629 #define S2IO_DEV_ID		5
5630 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5631 {
5632 	int ret = -1;
5633 	u32 exit_cnt = 0;
5634 	u64 val64;
5635 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5636 
5637 	if (sp->device_type == XFRAME_I_DEVICE) {
5638 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5639 		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5640 		    I2C_CONTROL_CNTL_START;
5641 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5642 
5643 		while (exit_cnt < 5) {
5644 			val64 = readq(&bar0->i2c_control);
5645 			if (I2C_CONTROL_CNTL_END(val64)) {
5646 				*data = I2C_CONTROL_GET_DATA(val64);
5647 				ret = 0;
5648 				break;
5649 			}
5650 			msleep(50);
5651 			exit_cnt++;
5652 		}
5653 	}
5654 
5655 	if (sp->device_type == XFRAME_II_DEVICE) {
5656 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5657 			SPI_CONTROL_BYTECNT(0x3) |
5658 			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5659 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5660 		val64 |= SPI_CONTROL_REQ;
5661 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5662 		while (exit_cnt < 5) {
5663 			val64 = readq(&bar0->spi_control);
5664 			if (val64 & SPI_CONTROL_NACK) {
5665 				ret = 1;
5666 				break;
5667 			} else if (val64 & SPI_CONTROL_DONE) {
5668 				*data = readq(&bar0->spi_data);
5669 				*data &= 0xffffff;
5670 				ret = 0;
5671 				break;
5672 			}
5673 			msleep(50);
5674 			exit_cnt++;
5675 		}
5676 	}
5677 	return ret;
5678 }
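
/*
 * Usage sketch (hypothetical, for illustration only): dump the first 16
 * bytes of the EEPROM in 4-byte steps, the granularity read_eeprom()
 * provides.
 *
 *	u64 word;
 *	int off;
 *
 *	for (off = 0; off < 16; off += 4)
 *		if (!read_eeprom(sp, off, &word))
 *			pr_info("eeprom[0x%02x] = 0x%llx\n",
 *				off, (unsigned long long)word);
 */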
5679 
5680 /**
5681  *  write_eeprom - actually writes the relevant part of the data value.
5682  *  @sp : private member of the device structure, which is a pointer to the
5683  *       s2io_nic structure.
5684  *  @off : offset at which the data must be written
5685  *  @data : The data that is to be written
5686  *  @cnt : Number of bytes of the data that are actually to be written into
5687  *  the Eeprom. (max of 3)
5688  * Description:
5689  *  Actually writes the relevant part of the data value into the Eeprom
5690  *  through the I2C bus.
5691  * Return value:
5692  *  0 on success, -1 on failure.
5693  */
5694 
5695 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5696 {
5697 	int exit_cnt = 0, ret = -1;
5698 	u64 val64;
5699 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5700 
5701 	if (sp->device_type == XFRAME_I_DEVICE) {
5702 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5703 		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5704 		    I2C_CONTROL_CNTL_START;
5705 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5706 
5707 		while (exit_cnt < 5) {
5708 			val64 = readq(&bar0->i2c_control);
5709 			if (I2C_CONTROL_CNTL_END(val64)) {
5710 				if (!(val64 & I2C_CONTROL_NACK))
5711 					ret = 0;
5712 				break;
5713 			}
5714 			msleep(50);
5715 			exit_cnt++;
5716 		}
5717 	}
5718 
5719 	if (sp->device_type == XFRAME_II_DEVICE) {
5720 		int write_cnt = (cnt == 8) ? 0 : cnt;
5721 		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5722 
5723 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5724 			SPI_CONTROL_BYTECNT(write_cnt) |
5725 			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5726 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5727 		val64 |= SPI_CONTROL_REQ;
5728 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5729 		while (exit_cnt < 5) {
5730 			val64 = readq(&bar0->spi_control);
5731 			if (val64 & SPI_CONTROL_NACK) {
5732 				ret = 1;
5733 				break;
5734 			} else if (val64 & SPI_CONTROL_DONE) {
5735 				ret = 0;
5736 				break;
5737 			}
5738 			msleep(50);
5739 			exit_cnt++;
5740 		}
5741 	}
5742 	return ret;
5743 }
5744 static void s2io_vpd_read(struct s2io_nic *nic)
5745 {
5746 	u8 *vpd_data;
5747 	u8 data;
5748 	int i = 0, cnt, fail = 0;
5749 	int vpd_addr = 0x80;
5750 
5751 	if (nic->device_type == XFRAME_II_DEVICE) {
5752 		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5753 		vpd_addr = 0x80;
5754 	} else {
5756 		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5757 		vpd_addr = 0x50;
5758 	}
5759 	strcpy(nic->serial_num, "NOT AVAILABLE");
5760 
5761 	vpd_data = kmalloc(256, GFP_KERNEL);
5762 	if (!vpd_data) {
5763 		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5764 		return;
5765 	}
5766 	nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5767 
5768 	for (i = 0; i < 256; i += 4) {
5769 		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5770 		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5771 		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5772 		for (cnt = 0; cnt < 5; cnt++) {
5773 			msleep(2);
5774 			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5775 			if (data == 0x80)
5776 				break;
5777 		}
5778 		if (cnt >= 5) {
5779 			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5780 			fail = 1;
5781 			break;
5782 		}
5783 		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5784 				      (u32 *)&vpd_data[i]);
5785 	}
5786 
5787 	if (!fail) {
5788 		/* read serial number of adapter */
5789 		for (cnt = 0; cnt < 256; cnt++) {
5790 			if ((vpd_data[cnt] == 'S') &&
5791 			    (vpd_data[cnt + 1] == 'N') &&
5792 			    (vpd_data[cnt + 2] < VPD_STRING_LEN)) {
5793 				memset(nic->serial_num, 0, VPD_STRING_LEN);
5794 				memcpy(nic->serial_num, &vpd_data[cnt + 3],
5795 					vpd_data[cnt + 2]);
5796 				break;
5797 			}
5798 		}
5799 	}
5800 
5801 	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5802 		memset(nic->product_name, 0, vpd_data[1]);
5803 		memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5804 	}
5805 	kfree(vpd_data);
5806 	nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5807 }
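
/*
 * The scan above relies on the PCI VPD keyword format: a two-character
 * keyword ('S' 'N' for the serial number) followed by a one-byte length
 * and then that many bytes of data, i.e.
 *
 *	'S' 'N' <len> <len bytes of serial number>
 *
 * which is why vpd_data[cnt+2] is used both as a bounds check and as the
 * copy length.
 */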
5808 
5809 /**
5810  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5811  *  @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5812  *  @eeprom : pointer to the user level structure provided by ethtool,
5813  *  containing all relevant information.
5814  *  @data_buf : buffer into which the EEPROM contents are read.
5815  *  Description: Reads the values stored in the Eeprom at given offset
5816  *  for a given length. Stores these values in the input argument data
5817  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5818  *  Return value:
5819  *  int  0 on success
5820  */
5821 
5822 static int s2io_ethtool_geeprom(struct net_device *dev,
5823 			 struct ethtool_eeprom *eeprom, u8 * data_buf)
5824 {
5825 	u32 i, valid;
5826 	u64 data;
5827 	struct s2io_nic *sp = netdev_priv(dev);
5828 
5829 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5830 
5831 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5832 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5833 
5834 	for (i = 0; i < eeprom->len; i += 4) {
5835 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5836 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5837 			return -EFAULT;
5838 		}
5839 		valid = INV(data);
5840 		memcpy((data_buf + i), &valid, 4);
5841 	}
5842 	return 0;
5843 }
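
/*
 * From user space the EEPROM read path corresponds to the standard
 * ethtool command, e.g. (sketch):
 *
 *	ethtool -e ethX offset 0 length 16
 */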
5844 
5845 /**
5846  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5847  *  @sp : private member of the device structure, which is a pointer to the
5848  *  s2io_nic structure.
5849  *  @eeprom : pointer to the user level structure provided by ethtool,
5850  *  containing all relevant information.
5851  *  @data_buf : user defined value to be written into Eeprom.
5852  *  Description:
5853  *  Tries to write the user provided value in the Eeprom, at the offset
5854  *  given by the user.
5855  *  Return value:
5856  *  0 on success, -EFAULT on failure.
5857  */
5858 
5859 static int s2io_ethtool_seeprom(struct net_device *dev,
5860 				struct ethtool_eeprom *eeprom,
5861 				u8 * data_buf)
5862 {
5863 	int len = eeprom->len, cnt = 0;
5864 	u64 valid = 0, data;
5865 	struct s2io_nic *sp = netdev_priv(dev);
5866 
5867 	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5868 		DBG_PRINT(ERR_DBG,
5869 			  "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5870 		DBG_PRINT(ERR_DBG, "is wrong, it's not 0x%x\n",
5871 			  eeprom->magic);
5872 		return -EFAULT;
5873 	}
5874 
5875 	while (len) {
5876 		data = (u32) data_buf[cnt] & 0x000000FF;
5877 		if (data)
5878 			valid = (u32) (data << 24);
5879 		else
5880 			valid = data;
5881 
5882 		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5883 			DBG_PRINT(ERR_DBG,
5884 				  "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5885 			DBG_PRINT(ERR_DBG,
5886 				  "write into the specified offset\n");
5887 			return -EFAULT;
5888 		}
5889 		cnt++;
5890 		len--;
5891 	}
5892 
5893 	return 0;
5894 }
5895 
5896 /**
5897  * s2io_register_test - reads and writes into all clock domains.
5898  * @sp : private member of the device structure, which is a pointer to the
5899  * s2io_nic structure.
5900  * @data : variable that returns the result of each of the tests conducted
5901  * by the driver.
5902  * Description:
5903  * Read and write into all clock domains. The NIC has 3 clock domains;
5904  * verify that registers in all the three regions are accessible.
5905  * Return value:
5906  * 0 on success.
5907  */
5908 
5909 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5910 {
5911 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5912 	u64 val64 = 0, exp_val;
5913 	int fail = 0;
5914 
5915 	val64 = readq(&bar0->pif_rd_swapper_fb);
5916 	if (val64 != 0x123456789abcdefULL) {
5917 		fail = 1;
5918 		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5919 	}
5920 
5921 	val64 = readq(&bar0->rmac_pause_cfg);
5922 	if (val64 != 0xc000ffff00000000ULL) {
5923 		fail = 1;
5924 		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5925 	}
5926 
5927 	val64 = readq(&bar0->rx_queue_cfg);
5928 	if (sp->device_type == XFRAME_II_DEVICE)
5929 		exp_val = 0x0404040404040404ULL;
5930 	else
5931 		exp_val = 0x0808080808080808ULL;
5932 	if (val64 != exp_val) {
5933 		fail = 1;
5934 		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5935 	}
5936 
5937 	val64 = readq(&bar0->xgxs_efifo_cfg);
5938 	if (val64 != 0x000000001923141EULL) {
5939 		fail = 1;
5940 		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5941 	}
5942 
5943 	val64 = 0x5A5A5A5A5A5A5A5AULL;
5944 	writeq(val64, &bar0->xmsi_data);
5945 	val64 = readq(&bar0->xmsi_data);
5946 	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5947 		fail = 1;
5948 		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5949 	}
5950 
5951 	val64 = 0xA5A5A5A5A5A5A5A5ULL;
5952 	writeq(val64, &bar0->xmsi_data);
5953 	val64 = readq(&bar0->xmsi_data);
5954 	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5955 		fail = 1;
5956 		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5957 	}
5958 
5959 	*data = fail;
5960 	return fail;
5961 }
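
/*
 * Background note: pif_rd_swapper_fb is a read-only feedback register
 * that returns the fixed signature 0x0123456789ABCDEF when the PCI byte
 * swapper has been programmed correctly, which is why it makes a good
 * first read test.
 */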
5962 
5963 /**
5964  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5965  * @sp : private member of the device structure, which is a pointer to the
5966  * s2io_nic structure.
5967  * @data:variable that returns the result of each of the test conducted by
5968  * the driver.
5969  * Description:
5970  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5971  * register.
5972  * Return value:
5973  * 0 on success.
5974  */
5975 
5976 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5977 {
5978 	int fail = 0;
5979 	u64 ret_data, org_4F0, org_7F0;
5980 	u8 saved_4F0 = 0, saved_7F0 = 0;
5981 	struct net_device *dev = sp->dev;
5982 
5983 	/* Test Write Error at offset 0 */
5984 	/* Note that SPI interface allows write access to all areas
5985 	 * of EEPROM. Hence doing all negative testing only for Xframe I.
5986 	 */
5987 	if (sp->device_type == XFRAME_I_DEVICE)
5988 		if (!write_eeprom(sp, 0, 0, 3))
5989 			fail = 1;
5990 
5991 	/* Save current values at offsets 0x4F0 and 0x7F0 */
5992 	if (!read_eeprom(sp, 0x4F0, &org_4F0))
5993 		saved_4F0 = 1;
5994 	if (!read_eeprom(sp, 0x7F0, &org_7F0))
5995 		saved_7F0 = 1;
5996 
5997 	/* Test Write at offset 4f0 */
5998 	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5999 		fail = 1;
6000 	if (read_eeprom(sp, 0x4F0, &ret_data))
6001 		fail = 1;
6002 
6003 	if (ret_data != 0x012345) {
6004 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
6005 			"Data written %llx Data read %llx\n",
6006 			dev->name, (unsigned long long)0x12345,
6007 			(unsigned long long)ret_data);
6008 		fail = 1;
6009 	}
6010 
6011 	/* Reset the EEPROM data to FFFF */
6012 	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6013 
6014 	/* Test Write Request Error at offset 0x7c */
6015 	if (sp->device_type == XFRAME_I_DEVICE)
6016 		if (!write_eeprom(sp, 0x07C, 0, 3))
6017 			fail = 1;
6018 
6019 	/* Test Write Request at offset 0x7f0 */
6020 	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6021 		fail = 1;
6022 	if (read_eeprom(sp, 0x7F0, &ret_data))
6023 		fail = 1;
6024 
6025 	if (ret_data != 0x012345) {
6026 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6027 			"Data written %llx Data read %llx\n",
6028 			dev->name, (unsigned long long)0x12345,
6029 			(unsigned long long)ret_data);
6030 		fail = 1;
6031 	}
6032 
6033 	/* Reset the EEPROM data to FFFF */
6034 	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6035 
6036 	if (sp->device_type == XFRAME_I_DEVICE) {
6037 		/* Test Write Error at offset 0x80 */
6038 		if (!write_eeprom(sp, 0x080, 0, 3))
6039 			fail = 1;
6040 
6041 		/* Test Write Error at offset 0xfc */
6042 		if (!write_eeprom(sp, 0x0FC, 0, 3))
6043 			fail = 1;
6044 
6045 		/* Test Write Error at offset 0x100 */
6046 		if (!write_eeprom(sp, 0x100, 0, 3))
6047 			fail = 1;
6048 
6049 		/* Test Write Error at offset 4ec */
6050 		if (!write_eeprom(sp, 0x4EC, 0, 3))
6051 			fail = 1;
6052 	}
6053 
6054 	/* Restore values at offsets 0x4F0 and 0x7F0 */
6055 	if (saved_4F0)
6056 		write_eeprom(sp, 0x4F0, org_4F0, 3);
6057 	if (saved_7F0)
6058 		write_eeprom(sp, 0x7F0, org_7F0, 3);
6059 
6060 	*data = fail;
6061 	return fail;
6062 }
6063 
6064 /**
6065  * s2io_bist_test - invokes the MemBist test of the card.
6066  * @sp : private member of the device structure, which is a pointer to the
6067  * s2io_nic structure.
6068  * @data:variable that returns the result of each of the test conducted by
6069  * the driver.
6070  * Description:
6071  * This invokes the MemBist test of the card. We give around
6072  * 2 seconds for the test to complete. If it's still not complete
6073  * within this period, we consider that the test failed.
6074  * Return value:
6075  * 0 on success and -1 on failure.
6076  */
6077 
6078 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6079 {
6080 	u8 bist = 0;
6081 	int cnt = 0, ret = -1;
6082 
6083 	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6084 	bist |= PCI_BIST_START;
6085 	pci_write_config_byte(sp->pdev, PCI_BIST, bist);
6086 
6087 	while (cnt < 20) {
6088 		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6089 		if (!(bist & PCI_BIST_START)) {
6090 			*data = (bist & PCI_BIST_CODE_MASK);
6091 			ret = 0;
6092 			break;
6093 		}
6094 		msleep(100);
6095 		cnt++;
6096 	}
6097 
6098 	return ret;
6099 }
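
/*
 * Note: per the PCI specification, PCI_BIST_START is cleared by the
 * device when the built-in self test finishes, and the low four bits
 * (PCI_BIST_CODE_MASK) then hold the completion code, 0 meaning pass;
 * that code is what gets returned through *data above.
 */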
6100 
6101 /**
6102  * s2io_link_test - verifies the link state of the nic
6103  * @sp : private member of the device structure, which is a pointer to the
6104  * s2io_nic structure.
6105  * @data: variable that returns the result of each of the test conducted by
6106  * the driver.
6107  * Description:
6108  * The function verifies the link state of the NIC and updates the input
6109  * argument 'data' appropriately.
6110  * Return value:
6111  * 0 on success.
6112  */
6113 
6114 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6115 {
6116 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6117 	u64 val64;
6118 
6119 	val64 = readq(&bar0->adapter_status);
6120 	if (!LINK_IS_UP(val64))
6121 		*data = 1;
6122 	else
6123 		*data = 0;
6124 
6125 	return *data;
6126 }
6127 
6128 /**
6129  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6130  * @sp : private member of the device structure, which is a pointer to the
6131  * s2io_nic structure.
6132  * @data : variable that returns the result of each of the tests
6133  * conducted by the driver.
6134  * Description:
6135  *  This is one of the offline tests that checks the read and write
6136  *  access to the RldRam chip on the NIC.
6137  * Return value:
6138  *  0 on success.
6139  */
6140 
6141 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6142 {
6143 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6144 	u64 val64;
6145 	int cnt, iteration = 0, test_fail = 0;
6146 
6147 	val64 = readq(&bar0->adapter_control);
6148 	val64 &= ~ADAPTER_ECC_EN;
6149 	writeq(val64, &bar0->adapter_control);
6150 
6151 	val64 = readq(&bar0->mc_rldram_test_ctrl);
6152 	val64 |= MC_RLDRAM_TEST_MODE;
6153 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6154 
6155 	val64 = readq(&bar0->mc_rldram_mrs);
6156 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6157 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6158 
6159 	val64 |= MC_RLDRAM_MRS_ENABLE;
6160 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6161 
6162 	while (iteration < 2) {
6163 		val64 = 0x55555555aaaa0000ULL;
6164 		if (iteration == 1) {
6165 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6166 		}
6167 		writeq(val64, &bar0->mc_rldram_test_d0);
6168 
6169 		val64 = 0xaaaa5a5555550000ULL;
6170 		if (iteration == 1) {
6171 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6172 		}
6173 		writeq(val64, &bar0->mc_rldram_test_d1);
6174 
6175 		val64 = 0x55aaaaaaaa5a0000ULL;
6176 		if (iteration == 1) {
6177 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6178 		}
6179 		writeq(val64, &bar0->mc_rldram_test_d2);
6180 
6181 		val64 = 0x0000003ffffe0100ULL;
6182 		writeq(val64, &bar0->mc_rldram_test_add);
6183 
6184 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
6185 		    	MC_RLDRAM_TEST_GO;
6186 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6187 
6188 		for (cnt = 0; cnt < 5; cnt++) {
6189 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6190 			if (val64 & MC_RLDRAM_TEST_DONE)
6191 				break;
6192 			msleep(200);
6193 		}
6194 
6195 		if (cnt == 5)
6196 			break;
6197 
6198 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6199 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6200 
6201 		for (cnt = 0; cnt < 5; cnt++) {
6202 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6203 			if (val64 & MC_RLDRAM_TEST_DONE)
6204 				break;
6205 			msleep(500);
6206 		}
6207 
6208 		if (cnt == 5)
6209 			break;
6210 
6211 		val64 = readq(&bar0->mc_rldram_test_ctrl);
6212 		if (!(val64 & MC_RLDRAM_TEST_PASS))
6213 			test_fail = 1;
6214 
6215 		iteration++;
6216 	}
6217 
6218 	*data = test_fail;
6219 
6220 	/* Bring the adapter out of test mode */
6221 	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6222 
6223 	return test_fail;
6224 }
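
/*
 * Summary of the loop above: two passes are run, pass 0 with a fixed
 * test pattern in the three test-data registers and pass 1 with that
 * pattern's complement (the XOR with 0xFFFFFFFFFFFF0000ULL); each pass
 * triggers a write cycle and then a read-back cycle via
 * MC_RLDRAM_TEST_GO, polling for MC_RLDRAM_TEST_DONE before checking
 * MC_RLDRAM_TEST_PASS.
 */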
6225 
6226 /**
6227  *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
6228  *  @sp : private member of the device structure, which is a pointer to the
6229  *  s2io_nic structure.
6230  *  @ethtest : pointer to a ethtool command specific structure that will be
6231  *  returned to the user.
6232  *  @data : variable that returns the result of each of the test
6233  * conducted by the driver.
6234  * Description:
6235  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6236  *  the health of the card.
6237  * Return value:
6238  *  void
6239  */
6240 
6241 static void s2io_ethtool_test(struct net_device *dev,
6242 			      struct ethtool_test *ethtest,
6243 			      uint64_t * data)
6244 {
6245 	struct s2io_nic *sp = netdev_priv(dev);
6246 	int orig_state = netif_running(sp->dev);
6247 
6248 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6249 		/* Offline Tests. */
6250 		if (orig_state)
6251 			s2io_close(sp->dev);
6252 
6253 		if (s2io_register_test(sp, &data[0]))
6254 			ethtest->flags |= ETH_TEST_FL_FAILED;
6255 
6256 		s2io_reset(sp);
6257 
6258 		if (s2io_rldram_test(sp, &data[3]))
6259 			ethtest->flags |= ETH_TEST_FL_FAILED;
6260 
6261 		s2io_reset(sp);
6262 
6263 		if (s2io_eeprom_test(sp, &data[1]))
6264 			ethtest->flags |= ETH_TEST_FL_FAILED;
6265 
6266 		if (s2io_bist_test(sp, &data[4]))
6267 			ethtest->flags |= ETH_TEST_FL_FAILED;
6268 
6269 		if (orig_state)
6270 			s2io_open(sp->dev);
6271 
6272 		data[2] = 0;
6273 	} else {
6274 		/* Online Tests. */
6275 		if (!orig_state) {
6276 			DBG_PRINT(ERR_DBG,
6277 				  "%s: is not up, cannot run test\n",
6278 				  dev->name);
6279 			data[0] = -1;
6280 			data[1] = -1;
6281 			data[2] = -1;
6282 			data[3] = -1;
6283 			data[4] = -1;
			/* interface is down; don't run or overwrite results */
			return;
6284 		}
6285 
6286 		if (s2io_link_test(sp, &data[2]))
6287 			ethtest->flags |= ETH_TEST_FL_FAILED;
6288 
6289 		data[0] = 0;
6290 		data[1] = 0;
6291 		data[3] = 0;
6292 		data[4] = 0;
6293 	}
6294 }
6295 
6296 static void s2io_get_ethtool_stats(struct net_device *dev,
6297 				   struct ethtool_stats *estats,
6298 				   u64 * tmp_stats)
6299 {
6300 	int i = 0, k;
6301 	struct s2io_nic *sp = netdev_priv(dev);
6302 	struct stat_block *stat_info = sp->mac_control.stats_info;
6303 
6304 	s2io_updt_stats(sp);
6305 	tmp_stats[i++] =
6306 		(u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
6307 		le32_to_cpu(stat_info->tmac_frms);
6308 	tmp_stats[i++] =
6309 		(u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6310 		le32_to_cpu(stat_info->tmac_data_octets);
6311 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
6312 	tmp_stats[i++] =
6313 		(u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6314 		le32_to_cpu(stat_info->tmac_mcst_frms);
6315 	tmp_stats[i++] =
6316 		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6317 		le32_to_cpu(stat_info->tmac_bcst_frms);
6318 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
6319         tmp_stats[i++] =
6320                 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6321                 le32_to_cpu(stat_info->tmac_ttl_octets);
6322 	tmp_stats[i++] =
6323                 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6324                 le32_to_cpu(stat_info->tmac_ucst_frms);
6325 	tmp_stats[i++] =
6326                 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6327                 le32_to_cpu(stat_info->tmac_nucst_frms);
6328 	tmp_stats[i++] =
6329 		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6330 		le32_to_cpu(stat_info->tmac_any_err_frms);
6331         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
6332 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
6333 	tmp_stats[i++] =
6334 		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6335 		le32_to_cpu(stat_info->tmac_vld_ip);
6336 	tmp_stats[i++] =
6337 		(u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6338 		le32_to_cpu(stat_info->tmac_drop_ip);
6339 	tmp_stats[i++] =
6340 		(u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6341 		le32_to_cpu(stat_info->tmac_icmp);
6342 	tmp_stats[i++] =
6343 		(u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6344 		le32_to_cpu(stat_info->tmac_rst_tcp);
6345 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
6346 	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6347 		le32_to_cpu(stat_info->tmac_udp);
6348 	tmp_stats[i++] =
6349 		(u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6350 		le32_to_cpu(stat_info->rmac_vld_frms);
6351 	tmp_stats[i++] =
6352 		(u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6353 		le32_to_cpu(stat_info->rmac_data_octets);
6354 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6355 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
6356 	tmp_stats[i++] =
6357 		(u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6358 		le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6359 	tmp_stats[i++] =
6360 		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6361 		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6362 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6363 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6364 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6365 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6366 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6367         tmp_stats[i++] =
6368                 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6369 		le32_to_cpu(stat_info->rmac_ttl_octets);
6370         tmp_stats[i++] =
6371                 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6372 		<< 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6373 	tmp_stats[i++] =
6374                 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6375                  << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6376 	tmp_stats[i++] =
6377 		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6378 		le32_to_cpu(stat_info->rmac_discarded_frms);
6379         tmp_stats[i++] =
6380                 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6381                  << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6382         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6383         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6384 	tmp_stats[i++] =
6385 		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6386 		le32_to_cpu(stat_info->rmac_usized_frms);
6387 	tmp_stats[i++] =
6388 		(u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6389 		le32_to_cpu(stat_info->rmac_osized_frms);
6390 	tmp_stats[i++] =
6391 		(u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6392 		le32_to_cpu(stat_info->rmac_frag_frms);
6393 	tmp_stats[i++] =
6394 		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6395 		le32_to_cpu(stat_info->rmac_jabber_frms);
6396 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6397         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6398         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6399         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6400         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6401         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6402 	tmp_stats[i++] =
6403 		(u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6404 		le32_to_cpu(stat_info->rmac_ip);
6405 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6406 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6407 	tmp_stats[i++] =
6408 		(u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6409 		le32_to_cpu(stat_info->rmac_drop_ip);
6410 	tmp_stats[i++] =
6411 		(u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6412 		le32_to_cpu(stat_info->rmac_icmp);
6413 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6414 	tmp_stats[i++] =
6415 		(u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6416 		le32_to_cpu(stat_info->rmac_udp);
6417 	tmp_stats[i++] =
6418 		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6419 		le32_to_cpu(stat_info->rmac_err_drp_udp);
6420 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6421         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6422         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6423         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6424         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6425         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6426         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6427         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6428         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6429         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6430         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6431         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6432         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6433         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6434         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6435         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6436         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6437 	tmp_stats[i++] =
6438 		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6439 		le32_to_cpu(stat_info->rmac_pause_cnt);
6440 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6441         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6442 	tmp_stats[i++] =
6443 		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6444 		le32_to_cpu(stat_info->rmac_accepted_ip);
6445 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6446 	tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6447 	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6448 	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6449 	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6450 	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6451 	tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6452 	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6453 	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6454 	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6455 	tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6456 	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6457 	tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6458 	tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6459 	tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6460 	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6461 	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6462 	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6463 	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6464 
6465 	/* Enhanced statistics exist only for Hercules */
6466 	if (sp->device_type == XFRAME_II_DEVICE) {
6467 		tmp_stats[i++] =
6468 				le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6469 		tmp_stats[i++] =
6470 				le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6471 		tmp_stats[i++] =
6472 				le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6473 		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6474 		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6475 		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6476 		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6477 		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6478 		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6479 		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6480 		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6481 		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6482 		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6483 		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6484 		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6485 		tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6486 	}
6487 
6488 	tmp_stats[i++] = 0;
6489 	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6490 	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6491 	tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6492 	tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6493 	tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6494 	tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6495 	for (k = 0; k < MAX_RX_RINGS; k++)
6496 		tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6497 	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6498 	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6499 	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6500 	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6501 	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6502 	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6503 	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6504 	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6505 	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6506 	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6507 	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6508 	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6509 	tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6510 	tmp_stats[i++] = stat_info->sw_stat.sending_both;
6511 	tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6512 	tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6513 	if (stat_info->sw_stat.num_aggregations) {
6514 		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6515 		int count = 0;
6516 		/*
6517 		 * Since 64-bit divide does not work on all platforms,
6518 		 * do repeated subtraction.
6519 		 */
6520 		while (tmp >= stat_info->sw_stat.num_aggregations) {
6521 			tmp -= stat_info->sw_stat.num_aggregations;
6522 			count++;
6523 		}
6524 		tmp_stats[i++] = count;
6525 	}
6526 	else
6527 		tmp_stats[i++] = 0;
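	/*
	 * Note: a minimal alternative sketch of the division above, assuming
	 * the kernel's do_div() helper (asm/div64.h) is acceptable here and
	 * that num_aggregations fits in 32 bits:
	 *
	 *	u64 avg = stat_info->sw_stat.sum_avg_pkts_aggregated;
	 *	do_div(avg, stat_info->sw_stat.num_aggregations);
	 *	tmp_stats[i++] = avg;
	 */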
6528 	tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6529 	tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6530 	tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6531 	tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6532 	tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6533 	tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6534 	tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6535 	tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6536 	tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6537 
6538 	tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6539 	tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6540 	tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6541 	tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6542 	tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6543 
6544 	tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6545 	tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6546 	tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6547 	tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6548 	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6549 	tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6550 	tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6551 	tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6552 	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6553 	tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6554 	tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6555 	tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6556 	tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6557 	tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6558 	tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6559 	tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6560 	tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6561 	tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6562 	tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6563 	tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6564 	tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6565 	tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6566 	tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6567 	tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6568 	tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6569 	tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6570 }
6571 
6572 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6573 {
6574 	return (XENA_REG_SPACE);
6575 }
6576 
6577 
6578 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6579 {
6580 	struct s2io_nic *sp = netdev_priv(dev);
6581 
6582 	return (sp->rx_csum);
6583 }
6584 
6585 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6586 {
6587 	struct s2io_nic *sp = netdev_priv(dev);
6588 
6589 	if (data)
6590 		sp->rx_csum = 1;
6591 	else
6592 		sp->rx_csum = 0;
6593 
6594 	return 0;
6595 }
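
/*
 * Usage sketch (hypothetical invocation, not driver code): the handler above
 * is reached from user space through the ETHTOOL_SRXCSUM ioctl, e.g.
 * "ethtool -K ethX rx on" ends up toggling sp->rx_csum for this device.
 */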
6596 
6597 static int s2io_get_eeprom_len(struct net_device *dev)
6598 {
6599 	return (XENA_EEPROM_SPACE);
6600 }
6601 
6602 static int s2io_get_sset_count(struct net_device *dev, int sset)
6603 {
6604 	struct s2io_nic *sp = netdev_priv(dev);
6605 
6606 	switch (sset) {
6607 	case ETH_SS_TEST:
6608 		return S2IO_TEST_LEN;
6609 	case ETH_SS_STATS:
6610 		switch(sp->device_type) {
6611 		case XFRAME_I_DEVICE:
6612 			return XFRAME_I_STAT_LEN;
6613 		case XFRAME_II_DEVICE:
6614 			return XFRAME_II_STAT_LEN;
6615 		default:
6616 			return 0;
6617 		}
6618 	default:
6619 		return -EOPNOTSUPP;
6620 	}
6621 }
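
/*
 * Sketch of the user-space path (assuming standard ethtool behaviour):
 * "ethtool -S ethX" first queries ETH_SS_STATS through this handler to
 * size its buffer, then invokes the .get_strings and .get_ethtool_stats
 * handlers with the returned count.
 */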
6622 
6623 static void s2io_ethtool_get_strings(struct net_device *dev,
6624 				     u32 stringset, u8 * data)
6625 {
6626 	int stat_size = 0;
6627 	struct s2io_nic *sp = netdev_priv(dev);
6628 
6629 	switch (stringset) {
6630 	case ETH_SS_TEST:
6631 		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6632 		break;
6633 	case ETH_SS_STATS:
6634 		stat_size = sizeof(ethtool_xena_stats_keys);
6635 		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6636 		if(sp->device_type == XFRAME_II_DEVICE) {
6637 			memcpy(data + stat_size,
6638 				&ethtool_enhanced_stats_keys,
6639 				sizeof(ethtool_enhanced_stats_keys));
6640 			stat_size += sizeof(ethtool_enhanced_stats_keys);
6641 		}
6642 
6643 		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6644 			sizeof(ethtool_driver_stats_keys));
6645 	}
6646 }
6647 
6648 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6649 {
6650 	if (data)
6651 		dev->features |= NETIF_F_IP_CSUM;
6652 	else
6653 		dev->features &= ~NETIF_F_IP_CSUM;
6654 
6655 	return 0;
6656 }
6657 
6658 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6659 {
6660 	return (dev->features & NETIF_F_TSO) != 0;
6661 }
6662 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6663 {
6664 	if (data)
6665 		dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6666 	else
6667 		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6668 
6669 	return 0;
6670 }
6671 
6672 static const struct ethtool_ops netdev_ethtool_ops = {
6673 	.get_settings = s2io_ethtool_gset,
6674 	.set_settings = s2io_ethtool_sset,
6675 	.get_drvinfo = s2io_ethtool_gdrvinfo,
6676 	.get_regs_len = s2io_ethtool_get_regs_len,
6677 	.get_regs = s2io_ethtool_gregs,
6678 	.get_link = ethtool_op_get_link,
6679 	.get_eeprom_len = s2io_get_eeprom_len,
6680 	.get_eeprom = s2io_ethtool_geeprom,
6681 	.set_eeprom = s2io_ethtool_seeprom,
6682 	.get_ringparam = s2io_ethtool_gringparam,
6683 	.get_pauseparam = s2io_ethtool_getpause_data,
6684 	.set_pauseparam = s2io_ethtool_setpause_data,
6685 	.get_rx_csum = s2io_ethtool_get_rx_csum,
6686 	.set_rx_csum = s2io_ethtool_set_rx_csum,
6687 	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
6688 	.set_sg = ethtool_op_set_sg,
6689 	.get_tso = s2io_ethtool_op_get_tso,
6690 	.set_tso = s2io_ethtool_op_set_tso,
6691 	.set_ufo = ethtool_op_set_ufo,
6692 	.self_test = s2io_ethtool_test,
6693 	.get_strings = s2io_ethtool_get_strings,
6694 	.phys_id = s2io_ethtool_idnic,
6695 	.get_ethtool_stats = s2io_get_ethtool_stats,
6696 	.get_sset_count = s2io_get_sset_count,
6697 };
6698 
6699 /**
6700  *  s2io_ioctl - Entry point for the Ioctl
6701  *  @dev :  Device pointer.
6702  *  @ifr :  An IOCTL specific structure that can contain a pointer to
6703  *  a proprietary structure used to pass information to the driver.
6704  *  @cmd :  This is used to distinguish between the different commands that
6705  *  can be passed to the IOCTL functions.
6706  *  Description:
6707  *  Currently no special functionality is supported in IOCTL, hence the
6708  *  function always returns -EOPNOTSUPP.
6709  */
6710 
6711 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6712 {
6713 	return -EOPNOTSUPP;
6714 }
6715 
6716 /**
6717  *  s2io_change_mtu - entry point to change MTU size for the device.
6718  *   @dev : device pointer.
6719  *   @new_mtu : the new MTU size for the device.
6720  *   Description: A driver entry point to change MTU size for the device.
6721  *   Before changing the MTU the device must be stopped.
6722  *  Return value:
6723  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6724  *   file on failure.
6725  */
6726 
6727 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6728 {
6729 	struct s2io_nic *sp = netdev_priv(dev);
6730 	int ret = 0;
6731 
6732 	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6733 		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6734 			  dev->name);
6735 		return -EPERM;
6736 	}
6737 
6738 	dev->mtu = new_mtu;
6739 	if (netif_running(dev)) {
6740 		s2io_stop_all_tx_queue(sp);
6741 		s2io_card_down(sp);
6742 		ret = s2io_card_up(sp);
6743 		if (ret) {
6744 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6745 				  __func__);
6746 			return ret;
6747 		}
6748 		s2io_wake_all_tx_queue(sp);
6749 	} else { /* Device is down */
6750 		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6751 		u64 val64 = new_mtu;
6752 
6753 		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6754 	}
6755 
6756 	return ret;
6757 }
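
/*
 * A minimal sketch of how the stack reaches s2io_change_mtu() (assuming a
 * context where taking the RTNL lock is allowed); dev_set_mtu() dispatches
 * to the ndo_change_mtu hook:
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 */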
6758 
6759 /**
6760  * s2io_set_link - Set the Link status
6761  * @data: long pointer to device private structure
6762  * Description: Sets the link status for the adapter
6763  */
6764 
6765 static void s2io_set_link(struct work_struct *work)
6766 {
6767 	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6768 	struct net_device *dev = nic->dev;
6769 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
6770 	register u64 val64;
6771 	u16 subid;
6772 
6773 	rtnl_lock();
6774 
6775 	if (!netif_running(dev))
6776 		goto out_unlock;
6777 
6778 	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6779 		/* The card is being reset, no point doing anything */
6780 		goto out_unlock;
6781 	}
6782 
6783 	subid = nic->pdev->subsystem_device;
6784 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6785 		/*
6786 		 * Allow a small delay for the NIC's self-initiated
6787 		 * cleanup to complete.
6788 		 */
6789 		msleep(100);
6790 	}
6791 
6792 	val64 = readq(&bar0->adapter_status);
6793 	if (LINK_IS_UP(val64)) {
6794 		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6795 			if (verify_xena_quiescence(nic)) {
6796 				val64 = readq(&bar0->adapter_control);
6797 				val64 |= ADAPTER_CNTL_EN;
6798 				writeq(val64, &bar0->adapter_control);
6799 				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6800 					nic->device_type, subid)) {
6801 					val64 = readq(&bar0->gpio_control);
6802 					val64 |= GPIO_CTRL_GPIO_0;
6803 					writeq(val64, &bar0->gpio_control);
6804 					val64 = readq(&bar0->gpio_control);
6805 				} else {
6806 					val64 |= ADAPTER_LED_ON;
6807 					writeq(val64, &bar0->adapter_control);
6808 				}
6809 				nic->device_enabled_once = TRUE;
6810 			} else {
6811 				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6812 				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6813 				s2io_stop_all_tx_queue(nic);
6814 			}
6815 		}
6816 		val64 = readq(&bar0->adapter_control);
6817 		val64 |= ADAPTER_LED_ON;
6818 		writeq(val64, &bar0->adapter_control);
6819 		s2io_link(nic, LINK_UP);
6820 	} else {
6821 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6822 						      subid)) {
6823 			val64 = readq(&bar0->gpio_control);
6824 			val64 &= ~GPIO_CTRL_GPIO_0;
6825 			writeq(val64, &bar0->gpio_control);
6826 			val64 = readq(&bar0->gpio_control);
6827 		}
6828 		/* turn off LED */
6829 		val64 = readq(&bar0->adapter_control);
6830 		val64 &= ~ADAPTER_LED_ON;
6831 		writeq(val64, &bar0->adapter_control);
6832 		s2io_link(nic, LINK_DOWN);
6833 	}
6834 	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6835 
6836 out_unlock:
6837 	rtnl_unlock();
6838 }
6839 
6840 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6841 				struct buffAdd *ba,
6842 				struct sk_buff **skb, u64 *temp0, u64 *temp1,
6843 				u64 *temp2, int size)
6844 {
6845 	struct net_device *dev = sp->dev;
6846 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6847 
6848 	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6849 		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6850 		/* allocate skb */
6851 		if (*skb) {
6852 			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6853 			/*
6854 			 * As Rx frames are not going to be processed,
6855 			 * reuse the same mapped address for the RxD
6856 			 * buffer pointer
6857 			 */
6858 			rxdp1->Buffer0_ptr = *temp0;
6859 		} else {
6860 			*skb = dev_alloc_skb(size);
6861 			if (!(*skb)) {
6862 				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6863 				DBG_PRINT(INFO_DBG, "memory to allocate ");
6864 				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6865 				sp->mac_control.stats_info->sw_stat.
6866 					mem_alloc_fail_cnt++;
6867 				return -ENOMEM;
6868 			}
6869 			sp->mac_control.stats_info->sw_stat.mem_allocated
6870 				+= (*skb)->truesize;
6871 			/* store the mapped addr in a temp variable
6872 			 * such that it can be used for the next rxd whose
6873 			 * Host Control is NULL
6874 			 */
6875 			rxdp1->Buffer0_ptr = *temp0 =
6876 				pci_map_single( sp->pdev, (*skb)->data,
6877 					size - NET_IP_ALIGN,
6878 					PCI_DMA_FROMDEVICE);
6879 			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6880 				goto memalloc_failed;
6881 			rxdp->Host_Control = (unsigned long) (*skb);
6882 		}
6883 	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6884 		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6885 		/* Two buffer Mode */
6886 		if (*skb) {
6887 			rxdp3->Buffer2_ptr = *temp2;
6888 			rxdp3->Buffer0_ptr = *temp0;
6889 			rxdp3->Buffer1_ptr = *temp1;
6890 		} else {
6891 			*skb = dev_alloc_skb(size);
6892 			if (!(*skb)) {
6893 				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6894 				DBG_PRINT(INFO_DBG, "memory to allocate ");
6895 				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6896 				sp->mac_control.stats_info->sw_stat.
6897 					mem_alloc_fail_cnt++;
6898 				return -ENOMEM;
6899 			}
6900 			sp->mac_control.stats_info->sw_stat.mem_allocated
6901 				+= (*skb)->truesize;
6902 			rxdp3->Buffer2_ptr = *temp2 =
6903 				pci_map_single(sp->pdev, (*skb)->data,
6904 					       dev->mtu + 4,
6905 					       PCI_DMA_FROMDEVICE);
6906 			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6907 				goto memalloc_failed;
6908 			rxdp3->Buffer0_ptr = *temp0 =
6909 				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6910 						PCI_DMA_FROMDEVICE);
6911 			if (pci_dma_mapping_error(sp->pdev,
6912 						rxdp3->Buffer0_ptr)) {
6913 				pci_unmap_single (sp->pdev,
6914 					(dma_addr_t)rxdp3->Buffer2_ptr,
6915 					dev->mtu + 4, PCI_DMA_FROMDEVICE);
6916 				goto memalloc_failed;
6917 			}
6918 			rxdp->Host_Control = (unsigned long) (*skb);
6919 
6920 			/* Buffer-1 will be dummy buffer not used */
6921 			rxdp3->Buffer1_ptr = *temp1 =
6922 				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6923 						PCI_DMA_FROMDEVICE);
6924 			if (pci_dma_mapping_error(sp->pdev,
6925 						rxdp3->Buffer1_ptr)) {
6926 				pci_unmap_single (sp->pdev,
6927 					(dma_addr_t)rxdp3->Buffer0_ptr,
6928 					BUF0_LEN, PCI_DMA_FROMDEVICE);
6929 				pci_unmap_single (sp->pdev,
6930 					(dma_addr_t)rxdp3->Buffer2_ptr,
6931 					dev->mtu + 4, PCI_DMA_FROMDEVICE);
6932 				goto memalloc_failed;
6933 			}
6934 		}
6935 	}
6936 	return 0;
6937 	memalloc_failed:
6938 		stats->pci_map_fail_cnt++;
6939 		stats->mem_freed += (*skb)->truesize;
6940 		dev_kfree_skb(*skb);
6941 		return -ENOMEM;
6942 }
6943 
6944 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6945 				int size)
6946 {
6947 	struct net_device *dev = sp->dev;
6948 	if (sp->rxd_mode == RXD_MODE_1) {
6949 		rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6950 	} else if (sp->rxd_mode == RXD_MODE_3B) {
6951 		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6952 		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6953 		rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6954 	}
6955 }
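
/*
 * Buffer layout implied by the sizing above: RXD_MODE_1 places the whole
 * frame in a single buffer, while RXD_MODE_3B splits it into buffer 0
 * (BUF0_LEN bytes for headers), a 1-byte dummy buffer 1 and buffer 2
 * (dev->mtu + 4 bytes) for the payload.
 */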
6956 
6957 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6958 {
6959 	int i, j, k, blk_cnt = 0, size;
6960 	struct mac_info * mac_control = &sp->mac_control;
6961 	struct config_param *config = &sp->config;
6962 	struct net_device *dev = sp->dev;
6963 	struct RxD_t *rxdp = NULL;
6964 	struct sk_buff *skb = NULL;
6965 	struct buffAdd *ba = NULL;
6966 	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6967 
6968 	/* Calculate the size based on ring mode */
6969 	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6970 		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6971 	if (sp->rxd_mode == RXD_MODE_1)
6972 		size += NET_IP_ALIGN;
6973 	else if (sp->rxd_mode == RXD_MODE_3B)
6974 		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6975 
6976 	for (i = 0; i < config->rx_ring_num; i++) {
6977 		blk_cnt = config->rx_cfg[i].num_rxd /
6978 			(rxd_count[sp->rxd_mode] +1);
6979 
6980 		for (j = 0; j < blk_cnt; j++) {
6981 			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6982 				rxdp = mac_control->rings[i].
6983 					rx_blocks[j].rxds[k].virt_addr;
6984 				if(sp->rxd_mode == RXD_MODE_3B)
6985 					ba = &mac_control->rings[i].ba[j][k];
6986 				if (set_rxd_buffer_pointer(sp, rxdp, ba,
6987 						       &skb,(u64 *)&temp0_64,
6988 						       (u64 *)&temp1_64,
6989 						       (u64 *)&temp2_64,
6990 							size) == -ENOMEM) {
6991 					return 0;
6992 				}
6993 
6994 				set_rxd_buffer_size(sp, rxdp, size);
6995 				wmb();
6996 				/* flip the Ownership bit to Hardware */
6997 				rxdp->Control_1 |= RXD_OWN_XENA;
6998 			}
6999 		}
7000 	}
7001 	return 0;
7002 
7003 }
7004 
7005 static int s2io_add_isr(struct s2io_nic * sp)
7006 {
7007 	int ret = 0;
7008 	struct net_device *dev = sp->dev;
7009 	int err = 0;
7010 
7011 	if (sp->config.intr_type == MSI_X)
7012 		ret = s2io_enable_msi_x(sp);
7013 	if (ret) {
7014 		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
7015 		sp->config.intr_type = INTA;
7016 	}
7017 
7018 	/* Store the values of the MSIX table in the struct s2io_nic structure */
7019 	store_xmsi_data(sp);
7020 
7021 	/* After proper initialization of H/W, register ISR */
7022 	if (sp->config.intr_type == MSI_X) {
7023 		int i, msix_rx_cnt = 0;
7024 
7025 		for (i = 0; i < sp->num_entries; i++) {
7026 			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
7027 				if (sp->s2io_entries[i].type ==
7028 					MSIX_RING_TYPE) {
7029 					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7030 						dev->name, i);
7031 					err = request_irq(sp->entries[i].vector,
7032 						s2io_msix_ring_handle, 0,
7033 						sp->desc[i],
7034 						sp->s2io_entries[i].arg);
7035 				} else if (sp->s2io_entries[i].type ==
7036 					MSIX_ALARM_TYPE) {
7037 					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
7038 					dev->name, i);
7039 					err = request_irq(sp->entries[i].vector,
7040 						s2io_msix_fifo_handle, 0,
7041 						sp->desc[i],
7042 						sp->s2io_entries[i].arg);
7043 
7044 				}
7045 				/* if either data or addr is zero print it. */
7046 				if (!(sp->msix_info[i].addr &&
7047 					sp->msix_info[i].data)) {
7048 					DBG_PRINT(ERR_DBG,
7049 						"%s @Addr:0x%llx Data:0x%llx\n",
7050 						sp->desc[i],
7051 						(unsigned long long)
7052 						sp->msix_info[i].addr,
7053 						(unsigned long long)
7054 						ntohl(sp->msix_info[i].data));
7055 				} else
7056 					msix_rx_cnt++;
7057 				if (err) {
7058 					remove_msix_isr(sp);
7059 
7060 					DBG_PRINT(ERR_DBG,
7061 						"%s:MSI-X-%d registration "
7062 						"failed\n", dev->name, i);
7063 
7064 					DBG_PRINT(ERR_DBG,
7065 						"%s: Defaulting to INTA\n",
7066 						dev->name);
7067 					sp->config.intr_type = INTA;
7068 					break;
7069 				}
7070 				sp->s2io_entries[i].in_use =
7071 					MSIX_REGISTERED_SUCCESS;
7072 			}
7073 		}
7074 		if (!err) {
7075 			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
7076 				--msix_rx_cnt);
7077 			DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
7078 						" through alarm vector\n");
7079 		}
7080 	}
7081 	if (sp->config.intr_type == INTA) {
7082 		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
7083 				sp->name, dev);
7084 		if (err) {
7085 			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7086 				  dev->name);
7087 			return -1;
7088 		}
7089 	}
7090 	return 0;
7091 }
7092 static void s2io_rem_isr(struct s2io_nic * sp)
7093 {
7094 	if (sp->config.intr_type == MSI_X)
7095 		remove_msix_isr(sp);
7096 	else
7097 		remove_inta_isr(sp);
7098 }
7099 
7100 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
7101 {
7102 	int cnt = 0;
7103 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
7104 	register u64 val64 = 0;
7105 	struct config_param *config;
7106 	config = &sp->config;
7107 
7108 	if (!is_s2io_card_up(sp))
7109 		return;
7110 
7111 	del_timer_sync(&sp->alarm_timer);
7112 	/* If s2io_set_link task is executing, wait till it completes. */
7113 	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
7114 		msleep(50);
7115 	}
7116 	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7117 
7118 	/* Disable napi */
7119 	if (sp->config.napi) {
7120 		int off = 0;
7121 		if (config->intr_type == MSI_X) {
7122 			for (; off < sp->config.rx_ring_num; off++)
7123 				napi_disable(&sp->mac_control.rings[off].napi);
7124 		} else {
7125 			napi_disable(&sp->napi);
7126 		}
7127 	}
7128 
7129 	/* disable Tx and Rx traffic on the NIC */
7130 	if (do_io)
7131 		stop_nic(sp);
7132 
7133 	s2io_rem_isr(sp);
7134 
7135 	/* stop the tx queue, indicate link down */
7136 	s2io_link(sp, LINK_DOWN);
7137 
7138 	/* Check if the device is Quiescent and then Reset the NIC */
7139 	while(do_io) {
7140 		/* As per the HW requirement we need to replenish the
7141 		 * receive buffer to avoid the ring bump. Since there is
7142 		 * no intention of processing the Rx frame at this point we
7143 		 * are just setting the ownership bit of the rxd in each Rx
7144 		 * ring to HW and setting the appropriate buffer size
7145 		 * based on the ring mode
7146 		 */
7147 		rxd_owner_bit_reset(sp);
7148 
7149 		val64 = readq(&bar0->adapter_status);
7150 		if (verify_xena_quiescence(sp)) {
7151 			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7152 				break;
7153 		}
7154 
7155 		msleep(50);
7156 		cnt++;
7157 		if (cnt == 10) {
7158 			DBG_PRINT(ERR_DBG,
7159 				  "s2io_close:Device not Quiescent ");
7160 			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
7161 				  (unsigned long long) val64);
7162 			break;
7163 		}
7164 	}
7165 	if (do_io)
7166 		s2io_reset(sp);
7167 
7168 	/* Free all Tx buffers */
7169 	free_tx_buffers(sp);
7170 
7171 	/* Free all Rx buffers */
7172 	free_rx_buffers(sp);
7173 
7174 	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7175 }
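
/*
 * Teardown order used above: stop the alarm timer, wait for a running link
 * task to finish, disable NAPI, stop the NIC, free the IRQ(s), mark the
 * link down, poll for quiescence (replenishing RxDs to avoid a ring bump),
 * then reset the adapter and free the Tx/Rx buffers.
 */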
7176 
7177 static void s2io_card_down(struct s2io_nic * sp)
7178 {
7179 	do_s2io_card_down(sp, 1);
7180 }
7181 
7182 static int s2io_card_up(struct s2io_nic * sp)
7183 {
7184 	int i, ret = 0;
7185 	struct mac_info *mac_control;
7186 	struct config_param *config;
7187 	struct net_device *dev = (struct net_device *) sp->dev;
7188 	u16 interruptible;
7189 
7190 	/* Initialize the H/W I/O registers */
7191 	ret = init_nic(sp);
7192 	if (ret != 0) {
7193 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7194 			  dev->name);
7195 		if (ret != -EIO)
7196 			s2io_reset(sp);
7197 		return ret;
7198 	}
7199 
7200 	/*
7201 	 * Initializing the Rx buffers. For now we are considering only 1
7202 	 * Rx ring and initializing buffers into 30 Rx blocks
7203 	 */
7204 	mac_control = &sp->mac_control;
7205 	config = &sp->config;
7206 
7207 	for (i = 0; i < config->rx_ring_num; i++) {
7208 		mac_control->rings[i].mtu = dev->mtu;
7209 		ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
7210 		if (ret) {
7211 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7212 				  dev->name);
7213 			s2io_reset(sp);
7214 			free_rx_buffers(sp);
7215 			return -ENOMEM;
7216 		}
7217 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7218 			  mac_control->rings[i].rx_bufs_left);
7219 	}
7220 
7221 	/* Initialise napi */
7222 	if (config->napi) {
7223 		int i;
7224 		if (config->intr_type ==  MSI_X) {
7225 			for (i = 0; i < sp->config.rx_ring_num; i++)
7226 				napi_enable(&sp->mac_control.rings[i].napi);
7227 		} else {
7228 			napi_enable(&sp->napi);
7229 		}
7230 	}
7231 
7232 	/* Maintain the state prior to the open */
7233 	if (sp->promisc_flg)
7234 		sp->promisc_flg = 0;
7235 	if (sp->m_cast_flg) {
7236 		sp->m_cast_flg = 0;
7237 		sp->all_multi_pos= 0;
7238 	}
7239 
7240 	/* Setting its receive mode */
7241 	s2io_set_multicast(dev);
7242 
7243 	if (sp->lro) {
7244 		/* Initialize max aggregatable pkts per session based on MTU */
7245 		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7246 		/* Check if we can use(if specified) user provided value */
7247 		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7248 			sp->lro_max_aggr_per_sess = lro_max_pkts;
7249 	}
7250 
7251 	/* Enable Rx Traffic and interrupts on the NIC */
7252 	if (start_nic(sp)) {
7253 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7254 		s2io_reset(sp);
7255 		free_rx_buffers(sp);
7256 		return -ENODEV;
7257 	}
7258 
7259 	/* Add interrupt service routine */
7260 	if (s2io_add_isr(sp) != 0) {
7261 		if (sp->config.intr_type == MSI_X)
7262 			s2io_rem_isr(sp);
7263 		s2io_reset(sp);
7264 		free_rx_buffers(sp);
7265 		return -ENODEV;
7266 	}
7267 
7268 	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7269 
7270 	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7271 
7272 	/*  Enable select interrupts */
7273 	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7274 	if (sp->config.intr_type != INTA) {
7275 		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7276 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7277 	} else {
7278 		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7279 		interruptible |= TX_PIC_INTR;
7280 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7281 	}
7282 
7283 	return 0;
7284 }
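
/*
 * Bring-up order used above: init_nic() programs the H/W registers, the Rx
 * rings are filled, NAPI is enabled, the pre-open Rx mode is restored, the
 * LRO limit is derived from the MTU, the NIC is started, the ISR is
 * registered, the alarm timer is armed and finally the interrupt sources
 * are unmasked.
 */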
7285 
7286 /**
7287  * s2io_restart_nic - Resets the NIC.
7288  * @data : long pointer to the device private structure
7289  * Description:
7290  * This function is scheduled to be run by the s2io_tx_watchdog
7291  * function after 0.5 secs to reset the NIC. The idea is to reduce
7292  * the run time of the watch dog routine which is run holding a
7293  * spin lock.
7294  */
7295 
7296 static void s2io_restart_nic(struct work_struct *work)
7297 {
7298 	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7299 	struct net_device *dev = sp->dev;
7300 
7301 	rtnl_lock();
7302 
7303 	if (!netif_running(dev))
7304 		goto out_unlock;
7305 
7306 	s2io_card_down(sp);
7307 	if (s2io_card_up(sp)) {
7308 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7309 			  dev->name);
7310 	}
7311 	s2io_wake_all_tx_queue(sp);
7312 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7313 		  dev->name);
7314 out_unlock:
7315 	rtnl_unlock();
7316 }
7317 
7318 /**
7319  *  s2io_tx_watchdog - Watchdog for transmit side.
7320  *  @dev : Pointer to net device structure
7321  *  Description:
7322  *  This function is triggered if the Tx Queue is stopped
7323  *  for a pre-defined amount of time when the Interface is still up.
7324  *  If the Interface is jammed in such a situation, the hardware is
7325  *  reset (by s2io_close) and restarted again (by s2io_open) to
7326  *  overcome any problem that might have been caused in the hardware.
7327  *  Return value:
7328  *  void
7329  */
7330 
7331 static void s2io_tx_watchdog(struct net_device *dev)
7332 {
7333 	struct s2io_nic *sp = netdev_priv(dev);
7334 
7335 	if (netif_carrier_ok(dev)) {
7336 		sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7337 		schedule_work(&sp->rst_timer_task);
7338 		sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7339 	}
7340 }
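
/*
 * Wiring note: this handler is invoked by the networking core via the
 * .ndo_tx_timeout hook (see s2io_netdev_ops below) once the Tx queue has
 * been stopped for longer than dev->watchdog_timeo, which s2io_init_nic()
 * sets to WATCH_DOG_TIMEOUT.
 */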
7341 
7342 /**
7343  *   rx_osm_handler - To perform some OS related operations on SKB.
7344  *   @sp: private member of the device structure, pointer to s2io_nic structure.
7345  *   @skb : the socket buffer pointer.
7346  *   @len : length of the packet
7347  *   @cksum : FCS checksum of the frame.
7348  *   @ring_no : the ring from which this RxD was extracted.
7349  *   Description:
7350  *   This function is called by the Rx interrupt service routine to perform
7351  *   some OS related operations on the SKB before passing it to the upper
7352  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7353  *   SKB's cksum variable, increments the Rx packet count and passes the SKB
7354  *   to the upper layer. If the checksum is wrong, it increments the Rx
7355  *   packet error count, frees the SKB and returns error.
7356  *   Return value:
7357  *   SUCCESS on success and -1 on failure.
7358  */
7359 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7360 {
7361 	struct s2io_nic *sp = ring_data->nic;
7362 	struct net_device *dev = (struct net_device *) ring_data->dev;
7363 	struct sk_buff *skb = (struct sk_buff *)
7364 		((unsigned long) rxdp->Host_Control);
7365 	int ring_no = ring_data->ring_no;
7366 	u16 l3_csum, l4_csum;
7367 	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7368 	struct lro *uninitialized_var(lro);
7369 	u8 err_mask;
7370 
7371 	skb->dev = dev;
7372 
7373 	if (err) {
7374 		/* Check for parity error */
7375 		if (err & 0x1) {
7376 			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
7377 		}
7378 		err_mask = err >> 48;
7379 		switch(err_mask) {
7380 			case 1:
7381 				sp->mac_control.stats_info->sw_stat.
7382 				rx_parity_err_cnt++;
7383 			break;
7384 
7385 			case 2:
7386 				sp->mac_control.stats_info->sw_stat.
7387 				rx_abort_cnt++;
7388 			break;
7389 
7390 			case 3:
7391 				sp->mac_control.stats_info->sw_stat.
7392 				rx_parity_abort_cnt++;
7393 			break;
7394 
7395 			case 4:
7396 				sp->mac_control.stats_info->sw_stat.
7397 				rx_rda_fail_cnt++;
7398 			break;
7399 
7400 			case 5:
7401 				sp->mac_control.stats_info->sw_stat.
7402 				rx_unkn_prot_cnt++;
7403 			break;
7404 
7405 			case 6:
7406 				sp->mac_control.stats_info->sw_stat.
7407 				rx_fcs_err_cnt++;
7408 			break;
7409 
7410 			case 7:
7411 				sp->mac_control.stats_info->sw_stat.
7412 				rx_buf_size_err_cnt++;
7413 			break;
7414 
7415 			case 8:
7416 				sp->mac_control.stats_info->sw_stat.
7417 				rx_rxd_corrupt_cnt++;
7418 			break;
7419 
7420 			case 15:
7421 				sp->mac_control.stats_info->sw_stat.
7422 				rx_unkn_err_cnt++;
7423 			break;
7424 		}
7425 		/*
7426 		* Drop the packet if bad transfer code. Exception being
7427 		* 0x5, which could be due to unsupported IPv6 extension header.
7428 		* In this case, we let stack handle the packet.
7429 		* Note that in this case, since checksum will be incorrect,
7430 		* stack will validate the same.
7431 		*/
7432 		if (err_mask != 0x5) {
7433 			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7434 				dev->name, err_mask);
7435 			dev->stats.rx_crc_errors++;
7436 			sp->mac_control.stats_info->sw_stat.mem_freed
7437 				+= skb->truesize;
7438 			dev_kfree_skb(skb);
7439 			ring_data->rx_bufs_left -= 1;
7440 			rxdp->Host_Control = 0;
7441 			return 0;
7442 		}
7443 	}
7444 
7445 	/* Updating statistics */
7446 	ring_data->rx_packets++;
7447 	rxdp->Host_Control = 0;
7448 	if (sp->rxd_mode == RXD_MODE_1) {
7449 		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7450 
7451 		ring_data->rx_bytes += len;
7452 		skb_put(skb, len);
7453 
7454 	} else if (sp->rxd_mode == RXD_MODE_3B) {
7455 		int get_block = ring_data->rx_curr_get_info.block_index;
7456 		int get_off = ring_data->rx_curr_get_info.offset;
7457 		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7458 		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7459 		unsigned char *buff = skb_push(skb, buf0_len);
7460 
7461 		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7462 		ring_data->rx_bytes += buf0_len + buf2_len;
7463 		memcpy(buff, ba->ba_0, buf0_len);
7464 		skb_put(skb, buf2_len);
7465 	}
7466 
7467 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
7468 	    (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7469 	    (sp->rx_csum)) {
7470 		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7471 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7472 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7473 			/*
7474 			 * NIC verifies if the Checksum of the received
7475 			 * frame is Ok or not and accordingly returns
7476 			 * a flag in the RxD.
7477 			 */
7478 			skb->ip_summed = CHECKSUM_UNNECESSARY;
7479 			if (ring_data->lro) {
7480 				u32 tcp_len;
7481 				u8 *tcp;
7482 				int ret = 0;
7483 
7484 				ret = s2io_club_tcp_session(ring_data,
7485 					skb->data, &tcp, &tcp_len, &lro,
7486 					rxdp, sp);
7487 				switch (ret) {
7488 					case 3: /* Begin anew */
7489 						lro->parent = skb;
7490 						goto aggregate;
7491 					case 1: /* Aggregate */
7492 					{
7493 						lro_append_pkt(sp, lro,
7494 							skb, tcp_len);
7495 						goto aggregate;
7496 					}
7497 					case 4: /* Flush session */
7498 					{
7499 						lro_append_pkt(sp, lro,
7500 							skb, tcp_len);
7501 						queue_rx_frame(lro->parent,
7502 							lro->vlan_tag);
7503 						clear_lro_session(lro);
7504 						sp->mac_control.stats_info->
7505 						    sw_stat.flush_max_pkts++;
7506 						goto aggregate;
7507 					}
7508 					case 2: /* Flush both */
7509 						lro->parent->data_len =
7510 							lro->frags_len;
7511 						sp->mac_control.stats_info->
7512 						     sw_stat.sending_both++;
7513 						queue_rx_frame(lro->parent,
7514 							lro->vlan_tag);
7515 						clear_lro_session(lro);
7516 						goto send_up;
7517 					case 0: /* sessions exceeded */
7518 					case -1: /* non-TCP or not
7519 						  * L2 aggregatable
7520 						  */
7521 					case 5: /*
7522 						 * First pkt in session not
7523 						 * L3/L4 aggregatable
7524 						 */
7525 						break;
7526 					default:
7527 						DBG_PRINT(ERR_DBG,
7528 							"%s: Samadhana!!\n",
7529 							 __func__);
7530 						BUG();
7531 				}
7532 			}
7533 		} else {
7534 			/*
7535 			 * Packet with erroneous checksum, let the
7536 			 * upper layers deal with it.
7537 			 */
7538 			skb->ip_summed = CHECKSUM_NONE;
7539 		}
7540 	} else
7541 		skb->ip_summed = CHECKSUM_NONE;
7542 
7543 	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7544 send_up:
7545 	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7546 aggregate:
7547 	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7548 	return SUCCESS;
7549 }
7550 
7551 /**
7552  *  s2io_link - stops/starts the Tx queue.
7553  *  @sp : private member of the device structure, which is a pointer to the
7554  *  s2io_nic structure.
7555  *  @link : indicates whether link is UP/DOWN.
7556  *  Description:
7557  *  This function stops/starts the Tx queue depending on whether the link
7558  *  status of the NIC is down or up. This is called by the Alarm
7559  *  interrupt handler whenever a link change interrupt comes up.
7560  *  Return value:
7561  *  void.
7562  */
7563 
7564 static void s2io_link(struct s2io_nic * sp, int link)
7565 {
7566 	struct net_device *dev = (struct net_device *) sp->dev;
7567 
7568 	if (link != sp->last_link_state) {
7569 		init_tti(sp, link);
7570 		if (link == LINK_DOWN) {
7571 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7572 			s2io_stop_all_tx_queue(sp);
7573 			netif_carrier_off(dev);
7574 			if (sp->mac_control.stats_info->sw_stat.link_up_cnt)
7575 				sp->mac_control.stats_info->sw_stat.link_up_time =
7576 					jiffies - sp->start_time;
7577 			sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7578 		} else {
7579 			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7580 			if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7581 				sp->mac_control.stats_info->sw_stat.link_down_time =
7582 					jiffies - sp->start_time;
7583 			sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7584 			netif_carrier_on(dev);
7585 			s2io_wake_all_tx_queue(sp);
7586 		}
7587 	}
7588 	sp->last_link_state = link;
7589 	sp->start_time = jiffies;
7590 }
7591 
7592 /**
7593  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7594  *  @sp : private member of the device structure, which is a pointer to the
7595  *  s2io_nic structure.
7596  *  Description:
7597  *  This function initializes a few of the PCI and PCI-X configuration registers
7598  *  with recommended values.
7599  *  Return value:
7600  *  void
7601  */
7602 
7603 static void s2io_init_pci(struct s2io_nic * sp)
7604 {
7605 	u16 pci_cmd = 0, pcix_cmd = 0;
7606 
7607 	/* Enable Data Parity Error Recovery in PCI-X command register. */
7608 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7609 			     &(pcix_cmd));
7610 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7611 			      (pcix_cmd | 1));
7612 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7613 			     &(pcix_cmd));
7614 
7615 	/* Set the PErr Response bit in PCI command register. */
7616 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7617 	pci_write_config_word(sp->pdev, PCI_COMMAND,
7618 			      (pci_cmd | PCI_COMMAND_PARITY));
7619 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7620 }
7621 
7622 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7623 	u8 *dev_multiq)
7624 {
7625 	if ((tx_fifo_num > MAX_TX_FIFOS) ||
7626 		(tx_fifo_num < 1)) {
7627 		DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7628 			"(%d) not supported\n", tx_fifo_num);
7629 
7630 		if (tx_fifo_num < 1)
7631 			tx_fifo_num = 1;
7632 		else
7633 			tx_fifo_num = MAX_TX_FIFOS;
7634 
7635 		DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7636 		DBG_PRINT(ERR_DBG, "tx fifos\n");
7637 	}
7638 
7639 	if (multiq)
7640 		*dev_multiq = multiq;
7641 
7642 	if (tx_steering_type && (1 == tx_fifo_num)) {
7643 		if (tx_steering_type != TX_DEFAULT_STEERING)
7644 			DBG_PRINT(ERR_DBG,
7645 				"s2io: Tx steering is not supported with "
7646 				"one fifo. Disabling Tx steering.\n");
7647 		tx_steering_type = NO_STEERING;
7648 	}
7649 
7650 	if ((tx_steering_type < NO_STEERING) ||
7651 		(tx_steering_type > TX_DEFAULT_STEERING)) {
7652 		DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7653 			 "supported\n");
7654 		DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7655 		tx_steering_type = NO_STEERING;
7656 	}
7657 
7658 	if (rx_ring_num > MAX_RX_RINGS) {
7659 		DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7660 			 "supported\n");
7661 		DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7662 			MAX_RX_RINGS);
7663 		rx_ring_num = MAX_RX_RINGS;
7664 	}
7665 
7666 	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7667 		DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7668 			  "Defaulting to INTA\n");
7669 		*dev_intr_type = INTA;
7670 	}
7671 
7672 	if ((*dev_intr_type == MSI_X) &&
7673 			((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7674 			(pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7675 		DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7676 					"Defaulting to INTA\n");
7677 		*dev_intr_type = INTA;
7678 	}
7679 
7680 	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7681 		DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7682 		DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7683 		rx_ring_mode = 1;
7684 	}
7685 	return SUCCESS;
7686 }
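
/*
 * Example (hypothetical) module load that passes the checks above without
 * adjustment; out-of-range values are clamped or defaulted with the
 * warnings printed by s2io_verify_parm():
 *
 *	modprobe s2io tx_fifo_num=4 rx_ring_num=2 rx_ring_mode=2 intr_type=2
 */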
7687 
7688 /**
7689  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7690  * or Traffic class respectively.
7691  * @nic: device private variable
7692  * Description: The function configures the receive steering to
7693  * desired receive ring.
7694  * Return Value:  SUCCESS on success and
7695  * '-1' on failure (endian settings incorrect).
7696  */
7697 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7698 {
7699 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7700 	register u64 val64 = 0;
7701 
7702 	if (ds_codepoint > 63)
7703 		return FAILURE;
7704 
7705 	val64 = RTS_DS_MEM_DATA(ring);
7706 	writeq(val64, &bar0->rts_ds_mem_data);
7707 
7708 	val64 = RTS_DS_MEM_CTRL_WE |
7709 		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7710 		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7711 
7712 	writeq(val64, &bar0->rts_ds_mem_ctrl);
7713 
7714 	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7715 				RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7716 				S2IO_BIT_RESET);
7717 }
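
/*
 * Usage sketch (hypothetical values): steer frames carrying DS codepoint
 * 0x2e (DSCP EF) to receive ring 1; codepoints above 63 are rejected:
 *
 *	if (rts_ds_steer(nic, 0x2e, 1) != SUCCESS)
 *		DBG_PRINT(ERR_DBG, "DS steering configuration failed\n");
 */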
7718 
7719 static const struct net_device_ops s2io_netdev_ops = {
7720 	.ndo_open	        = s2io_open,
7721 	.ndo_stop	        = s2io_close,
7722 	.ndo_get_stats	        = s2io_get_stats,
7723 	.ndo_start_xmit    	= s2io_xmit,
7724 	.ndo_validate_addr	= eth_validate_addr,
7725 	.ndo_set_multicast_list = s2io_set_multicast,
7726 	.ndo_do_ioctl	   	= s2io_ioctl,
7727 	.ndo_set_mac_address    = s2io_set_mac_addr,
7728 	.ndo_change_mtu	   	= s2io_change_mtu,
7729 	.ndo_vlan_rx_register   = s2io_vlan_rx_register,
7730 	.ndo_vlan_rx_kill_vid   = s2io_vlan_rx_kill_vid,
7731 	.ndo_tx_timeout	   	= s2io_tx_watchdog,
7732 #ifdef CONFIG_NET_POLL_CONTROLLER
7733 	.ndo_poll_controller    = s2io_netpoll,
7734 #endif
7735 };
7736 
7737 /**
7738  *  s2io_init_nic - Initialization of the adapter .
7739  *  @pdev : structure containing the PCI related information of the device.
7740  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7741  *  Description:
7742  *  The function initializes an adapter identified by the pci_dev structure.
7743  *  All OS related initialization, including memory and device structure setup
7744  *  and initialization of the device private variables, is done. Also the swapper
7745  *  control register is initialized to enable read and write into the I/O
7746  *  registers of the device.
7747  *  Return value:
7748  *  returns 0 on success and negative on failure.
7749  */
7750 
7751 static int __devinit
7752 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7753 {
7754 	struct s2io_nic *sp;
7755 	struct net_device *dev;
7756 	int i, j, ret;
7757 	int dma_flag = FALSE;
7758 	u32 mac_up, mac_down;
7759 	u64 val64 = 0, tmp64 = 0;
7760 	struct XENA_dev_config __iomem *bar0 = NULL;
7761 	u16 subid;
7762 	struct mac_info *mac_control;
7763 	struct config_param *config;
7764 	int mode;
7765 	u8 dev_intr_type = intr_type;
7766 	u8 dev_multiq = 0;
7767 
7768 	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7769 	if (ret)
7770 		return ret;
7771 
7772 	if ((ret = pci_enable_device(pdev))) {
7773 		DBG_PRINT(ERR_DBG,
7774 			  "s2io_init_nic: pci_enable_device failed\n");
7775 		return ret;
7776 	}
7777 
7778 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7779 		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7780 		dma_flag = TRUE;
7781 		if (pci_set_consistent_dma_mask
7782 		    (pdev, DMA_64BIT_MASK)) {
7783 			DBG_PRINT(ERR_DBG,
7784 				  "Unable to obtain 64bit DMA for \
7785 					consistent allocations\n");
7786 			pci_disable_device(pdev);
7787 			return -ENOMEM;
7788 		}
7789 	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7790 		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7791 	} else {
7792 		pci_disable_device(pdev);
7793 		return -ENOMEM;
7794 	}
7795 	if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7796 		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __func__, ret);
7797 		pci_disable_device(pdev);
7798 		return -ENODEV;
7799 	}
7800 	if (dev_multiq)
7801 		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7802 	else
7803 		dev = alloc_etherdev(sizeof(struct s2io_nic));
7804 	if (dev == NULL) {
7805 		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7806 		pci_disable_device(pdev);
7807 		pci_release_regions(pdev);
7808 		return -ENODEV;
7809 	}
7810 
7811 	pci_set_master(pdev);
7812 	pci_set_drvdata(pdev, dev);
7813 	SET_NETDEV_DEV(dev, &pdev->dev);
7814 
7815 	/*  Private member variable initialized to s2io NIC structure */
7816 	sp = netdev_priv(dev);
7817 	memset(sp, 0, sizeof(struct s2io_nic));
7818 	sp->dev = dev;
7819 	sp->pdev = pdev;
7820 	sp->high_dma_flag = dma_flag;
7821 	sp->device_enabled_once = FALSE;
7822 	if (rx_ring_mode == 1)
7823 		sp->rxd_mode = RXD_MODE_1;
7824 	if (rx_ring_mode == 2)
7825 		sp->rxd_mode = RXD_MODE_3B;
7826 
7827 	sp->config.intr_type = dev_intr_type;
7828 
7829 	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7830 		(pdev->device == PCI_DEVICE_ID_HERC_UNI))
7831 		sp->device_type = XFRAME_II_DEVICE;
7832 	else
7833 		sp->device_type = XFRAME_I_DEVICE;
7834 
7835 	sp->lro = lro_enable;
7836 
7837 	/* Initialize some PCI/PCI-X fields of the NIC. */
7838 	s2io_init_pci(sp);
7839 
7840 	/*
7841 	 * Setting the device configuration parameters.
7842 	 * Most of these parameters can be specified by the user during
7843 	 * module insertion as they are module loadable parameters. If
7844 	 * these parameters are not specified during load time, they
7845 	 * are initialized with default values.
7846 	 */
7847 	mac_control = &sp->mac_control;
7848 	config = &sp->config;
7849 
7850 	config->napi = napi;
7851 	config->tx_steering_type = tx_steering_type;
7852 
7853 	/* Tx side parameters. */
7854 	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7855 		config->tx_fifo_num = MAX_TX_FIFOS;
7856 	else
7857 		config->tx_fifo_num = tx_fifo_num;
7858 
7859 	/* Initialize the fifos used for tx steering */
7860 	if (config->tx_fifo_num < 5) {
7861 			if (config->tx_fifo_num  == 1)
7862 				sp->total_tcp_fifos = 1;
7863 			else
7864 				sp->total_tcp_fifos = config->tx_fifo_num - 1;
7865 			sp->udp_fifo_idx = config->tx_fifo_num - 1;
7866 			sp->total_udp_fifos = 1;
7867 			sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7868 	} else {
7869 		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7870 						FIFO_OTHER_MAX_NUM);
7871 		sp->udp_fifo_idx = sp->total_tcp_fifos;
7872 		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7873 		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7874 	}
7875 
7876 	config->multiq = dev_multiq;
7877 	for (i = 0; i < config->tx_fifo_num; i++) {
7878 		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7879 		config->tx_cfg[i].fifo_priority = i;
7880 	}
7881 
7882 	/* mapping the QoS priority to the configured fifos */
7883 	for (i = 0; i < MAX_TX_FIFOS; i++)
7884 		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7885 
7886 	/* map the hashing selector table to the configured fifos */
7887 	for (i = 0; i < config->tx_fifo_num; i++)
7888 		sp->fifo_selector[i] = fifo_selector[i];
7889 
7890 
7891 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7892 	for (i = 0; i < config->tx_fifo_num; i++) {
7893 		config->tx_cfg[i].f_no_snoop =
7894 		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7895 		if (config->tx_cfg[i].fifo_len < 65) {
7896 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7897 			break;
7898 		}
7899 	}
7900 	/* + 2 because one Txd for skb->data and one Txd for UFO */
7901 	config->max_txds = MAX_SKB_FRAGS + 2;
7902 
7903 	/* Rx side parameters. */
7904 	config->rx_ring_num = rx_ring_num;
7905 	for (i = 0; i < config->rx_ring_num; i++) {
7906 		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7907 		    (rxd_count[sp->rxd_mode] + 1);
7908 		config->rx_cfg[i].ring_priority = i;
7909 		mac_control->rings[i].rx_bufs_left = 0;
7910 		mac_control->rings[i].rxd_mode = sp->rxd_mode;
7911 		mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
7912 		mac_control->rings[i].pdev = sp->pdev;
7913 		mac_control->rings[i].dev = sp->dev;
7914 	}
7915 
7916 	for (i = 0; i < rx_ring_num; i++) {
7917 		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7918 		config->rx_cfg[i].f_no_snoop =
7919 		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7920 	}
7921 
7922 	/*  Setting Mac Control parameters */
7923 	mac_control->rmac_pause_time = rmac_pause_time;
7924 	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7925 	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7926 
7927 
7928 	/*  initialize the shared memory used by the NIC and the host */
7929 	if (init_shared_mem(sp)) {
7930 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7931 			  dev->name);
7932 		ret = -ENOMEM;
7933 		goto mem_alloc_failed;
7934 	}
7935 
7936 	sp->bar0 = pci_ioremap_bar(pdev, 0);
7937 	if (!sp->bar0) {
7938 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7939 			  dev->name);
7940 		ret = -ENOMEM;
7941 		goto bar0_remap_failed;
7942 	}
7943 
7944 	sp->bar1 = pci_ioremap_bar(pdev, 2);
7945 	if (!sp->bar1) {
7946 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7947 			  dev->name);
7948 		ret = -ENOMEM;
7949 		goto bar1_remap_failed;
7950 	}
7951 
7952 	dev->irq = pdev->irq;
7953 	dev->base_addr = (unsigned long) sp->bar0;
7954 
7955 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
7956 	for (j = 0; j < MAX_TX_FIFOS; j++) {
7957 		mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7958 		    (sp->bar1 + (j * 0x00020000));
7959 	}
7960 
7961 	/*  Driver entry points */
7962 	dev->netdev_ops = &s2io_netdev_ops;
7963 	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7964 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7965 
7966 	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7967 	if (sp->high_dma_flag == TRUE)
7968 		dev->features |= NETIF_F_HIGHDMA;
7969 	dev->features |= NETIF_F_TSO;
7970 	dev->features |= NETIF_F_TSO6;
7971 	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
7972 		dev->features |= NETIF_F_UFO;
7973 		dev->features |= NETIF_F_HW_CSUM;
7974 	}
7975 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7976 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7977 	INIT_WORK(&sp->set_link_task, s2io_set_link);
7978 
7979 	pci_save_state(sp->pdev);
7980 
7981 	/* Setting swapper control on the NIC, for proper reset operation */
7982 	if (s2io_set_swapper(sp)) {
7983 		DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7984 			  dev->name);
7985 		ret = -EAGAIN;
7986 		goto set_swap_failed;
7987 	}
7988 
7989 	/* Verify if the Herc works on the slot its placed into */
7990 	if (sp->device_type & XFRAME_II_DEVICE) {
7991 		mode = s2io_verify_pci_mode(sp);
7992 		if (mode < 0) {
7993 			DBG_PRINT(ERR_DBG, "%s: ", __func__);
7994 			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7995 			ret = -EBADSLT;
7996 			goto set_swap_failed;
7997 		}
7998 	}
7999 
8000 	if (sp->config.intr_type == MSI_X) {
8001 		sp->num_entries = config->rx_ring_num + 1;
8002 		ret = s2io_enable_msi_x(sp);
8003 
8004 		if (!ret) {
8005 			ret = s2io_test_msi(sp);
8006 			/* rollback MSI-X, will re-enable during add_isr() */
8007 			remove_msix_isr(sp);
8008 		}
8009 		if (ret) {
8010 
8011 			DBG_PRINT(ERR_DBG,
8012 			  "%s: MSI-X requested but failed to enable\n",
8013 			  dev->name);
8014 			sp->config.intr_type = INTA;
8015 		}
8016 	}
8017 
8018 	if (config->intr_type ==  MSI_X) {
8019 		for (i = 0; i < config->rx_ring_num ; i++)
8020 			netif_napi_add(dev, &mac_control->rings[i].napi,
8021 				s2io_poll_msix, 64);
8022 	} else {
8023 		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8024 	}
8025 
8026 	/* Not needed for Herc */
8027 	if (sp->device_type & XFRAME_I_DEVICE) {
8028 		/*
8029 		 * Fix for all "FFs" MAC address problems observed on
8030 		 * Alpha platforms
8031 		 */
8032 		fix_mac_address(sp);
8033 		s2io_reset(sp);
8034 	}
8035 
8036 	/*
8037 	 * MAC address initialization.
8038 	 * For now only one mac address will be read and used.
8039 	 */
8040 	bar0 = sp->bar0;
8041 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8042 	    RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8043 	writeq(val64, &bar0->rmac_addr_cmd_mem);
8044 	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8045 		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
8046 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
8047 	mac_down = (u32) tmp64;
8048 	mac_up = (u32) (tmp64 >> 32);
8049 
8050 	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8051 	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8052 	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8053 	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8054 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8055 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8056 
8057 	/*  Set the factory defined MAC address initially   */
8058 	dev->addr_len = ETH_ALEN;
8059 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8060 	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8061 
8062 	/* initialize number of multicast & unicast MAC entries variables */
8063 	if (sp->device_type == XFRAME_I_DEVICE) {
8064 		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8065 		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8066 		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8067 	} else if (sp->device_type == XFRAME_II_DEVICE) {
8068 		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8069 		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8070 		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8071 	}
8072 
8073 	/* store mac addresses from CAM to s2io_nic structure */
8074 	do_s2io_store_unicast_mc(sp);
8075 
8076 	/* Configure MSIX vector for number of rings configured plus one */
8077 	if ((sp->device_type == XFRAME_II_DEVICE) &&
8078 		(config->intr_type == MSI_X))
8079 		sp->num_entries = config->rx_ring_num + 1;
8080 
8081 	 /* Store the values of the MSIX table in the s2io_nic structure */
8082 	store_xmsi_data(sp);
8083 	/* reset Nic and bring it to known state */
8084 	s2io_reset(sp);
8085 
8086 	/*
8087 	 * Initialize link state flags
8088 	 * and the card state parameter
8089 	 */
8090 	sp->state = 0;
8091 
8092 	/* Initialize spinlocks */
8093 	for (i = 0; i < sp->config.tx_fifo_num; i++)
8094 		spin_lock_init(&mac_control->fifos[i].tx_lock);
8095 
8096 	/*
8097 	 * SXE-002: Configure link and activity LED to init state
8098 	 * on driver load.
8099 	 */
8100 	subid = sp->pdev->subsystem_device;
8101 	if ((subid & 0xFF) >= 0x07) {
8102 		val64 = readq(&bar0->gpio_control);
8103 		val64 |= 0x0000800000000000ULL;
8104 		writeq(val64, &bar0->gpio_control);
8105 		val64 = 0x0411040400000000ULL;
8106 		writeq(val64, (void __iomem *) bar0 + 0x2700);
8107 		val64 = readq(&bar0->gpio_control);
8108 	}
8109 
8110 	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8111 
8112 	if (register_netdev(dev)) {
8113 		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8114 		ret = -ENODEV;
8115 		goto register_failed;
8116 	}
8117 	s2io_vpd_read(sp);
8118 	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8119 	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
8120 		  sp->product_name, pdev->revision);
8121 	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8122 		  s2io_driver_version);
8123 	DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %pM\n", dev->name, dev->dev_addr);
8124 	DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8125 	if (sp->device_type & XFRAME_II_DEVICE) {
8126 		mode = s2io_print_pci_mode(sp);
8127 		if (mode < 0) {
8128 			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8129 			ret = -EBADSLT;
8130 			unregister_netdev(dev);
8131 			goto set_swap_failed;
8132 		}
8133 	}
8134 	switch(sp->rxd_mode) {
8135 		case RXD_MODE_1:
8136 		    DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8137 						dev->name);
8138 		    break;
8139 		case RXD_MODE_3B:
8140 		    DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8141 						dev->name);
8142 		    break;
8143 	}
8144 
8145 	switch (sp->config.napi) {
8146 	case 0:
8147 		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8148 		break;
8149 	case 1:
8150 		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8151 		break;
8152 	}
8153 
8154 	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8155 		sp->config.tx_fifo_num);
8156 
8157 	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8158 		  sp->config.rx_ring_num);
8159 
	switch (sp->config.intr_type) {
	case INTA:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		break;
	case MSI_X:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		break;
	}
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			mac_control->fifos[i].multiq = config->multiq;
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
			  dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
			  dev->name);
	}

	switch (sp->config.tx_steering_type) {
	case NO_STEERING:
		DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
			  " transmit\n", dev->name);
		break;
	case TX_PRIORITY_STEERING:
		DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
			  " transmit\n", dev->name);
		break;
	case TX_DEFAULT_STEERING:
		DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
			  " transmit\n", dev->name);
		break;
	}

	if (sp->lro)
		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
			  dev->name);
	if (ufo)
		DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
			  " enabled\n", dev->name);
	/* Initialize device name */
	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

	if (vlan_tag_strip)
		sp->vlan_strip_flag = 1;
	else
		sp->vlan_strip_flag = 0;

	/*
	 * Mark the link as down at this point; when the link-change
	 * interrupt arrives, the state will be updated to the correct
	 * value automatically.
	 */
	netif_carrier_off(dev);

	return 0;

      register_failed:
      set_swap_failed:
	iounmap(sp->bar1);
      bar1_remap_failed:
	iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}

/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free all resources held by the device. This could
 * be in response to a hot-plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver data is NULL!\n");
		return;
	}

	flush_scheduled_work();

	sp = netdev_priv(dev);
	unregister_netdev(dev);

	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	pci_disable_device(pdev);
}

/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It
 * registers the driver with the PCI subsystem.
 */

static int __init s2io_starter(void)
{
	return pci_register_driver(&s2io_driver);
}

/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

static __exit void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);

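/*
 * What follows is the software LRO (Large Receive Offload) engine:
 * check_L2_lro_capable() and verify_l3_l4_lro_capable() decide whether a
 * frame may be merged, check_for_socket_match() locates an existing
 * session, initiate_new_session()/aggregate_new_rx() build up a session,
 * and update_L3L4_header() rewrites the headers before the merged frame
 * is handed to the stack.
 */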
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
		struct tcphdr **tcp, struct RxD_t *rxdp,
		struct s2io_nic *sp)
{
	int ip_off;
	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
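	/*
	 * Bits 37..39 of Control_1 carry the hardware-decoded L2 frame
	 * type; values 0 and 4 are treated as DIX (Ethernet II) below.
	 */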

	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
		DBG_PRINT(INIT_DBG, "%s: Non-TCP frames not supported for LRO\n",
			  __func__);
		return -1;
	}

	/* Checking for DIX type or DIX type with VLAN */
	if ((l2_type == 0) || (l2_type == 4)) {
		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
		/*
		 * If vlan stripping is disabled and the frame is VLAN tagged,
		 * shift the offset by the VLAN header size bytes.
		 */
		if ((!sp->vlan_strip_flag) &&
		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
			ip_off += HEADER_VLAN_SIZE;
	} else {
		/* LLC, SNAP etc. are considered non-mergeable */
		return -1;
	}

	*ip = (struct iphdr *)((u8 *)buffer + ip_off);
	ip_len = (u8)((*ip)->ihl);
	ip_len <<= 2;	/* ihl counts 32-bit words */
	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);

	return 0;
}

static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
				  struct tcphdr *tcp)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
	if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
	    (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
		return -1;
	return 0;
}

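/* TCP payload length = IP total length - IP header length - TCP header length */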
static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
{
	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
}

static void initiate_new_session(struct lro *lro, u8 *l2h,
	struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
	lro->l2h = l2h;
	lro->iph = ip;
	lro->tcph = tcp;
	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
	lro->tcp_ack = tcp->ack_seq;
	lro->sg_num = 1;
	lro->total_len = ntohs(ip->tot_len);
	lro->frags_len = 0;
	lro->vlan_tag = vlan_tag;
	/*
	 * Check whether we saw a TCP timestamp. Other consistency checks
	 * have already been done.
	 */
	if (tcp->doff == 8) {
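		/*
		 * doff == 8 means 12 bytes of options. The driver assumes
		 * the canonical NOP,NOP,TIMESTAMP layout (checked by
		 * verify_l3_l4_lro_capable()), so tsval is the second and
		 * tsecr the third 32-bit word of the option area.
		 */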
		__be32 *ptr;
		ptr = (__be32 *)(tcp+1);
		lro->saw_ts = 1;
		lro->cur_tsval = ntohl(*(ptr+1));
		lro->cur_tsecr = *(ptr+2);
	}
	lro->in_use = 1;
}

static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	__sum16 nchk;
	struct stat_block *statinfo = sp->mac_control.stats_info;
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	/* Update L3 header */
	ip->tot_len = htons(lro->total_len);
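	/* Zero the checksum field before recomputing it over the new header */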
	ip->check = 0;
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	ip->check = nchk;

	/* Update L4 header */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		__be32 *ptr = (__be32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
	statinfo->sw_stat.num_aggregations++;
}

static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
		struct tcphdr *tcp, u32 l4_pyld)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
	lro->total_len += l4_pyld;
	lro->frags_len += l4_pyld;
	lro->tcp_next_seq += l4_pyld;
	lro->sg_num++;

	/* Update ack seq no. and window advertisement (from this pkt) */
	lro->tcp_ack = tcp->ack_seq;
	lro->window = tcp->window;

	if (lro->saw_ts) {
		__be32 *ptr;
		/* Update tsecr and tsval from this packet */
		ptr = (__be32 *)(tcp+1);
		lro->cur_tsval = ntohl(*(ptr+1));
		lro->cur_tsecr = *(ptr + 2);
	}
}

static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently only the ack control word is recognized; any
		 * other control field being set results in flushing the
		 * LRO session.
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro && (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2)))))
			return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}

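/*
 * s2io_club_tcp_session - attempt to merge an incoming TCP segment into
 * an LRO session. Return codes, as consumed by the Rx path:
 *   0 - all sessions in use, frame not aggregated
 *   1 - segment appended to an existing session
 *   2 - out-of-order or unmergeable segment; flush the matched session
 *   3 - a new session was begun for this flow
 *   4 - segment appended and the session reached lro_max_aggr_per_sess;
 *       flush it
 *   5 - frame is TCP but not L3/L4 aggregatable; send it up as-is
 *  <0 - frame is not LRO capable at L2
 */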
static int
s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
	u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
	struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;

	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of order. Expected "
					  "0x%x, actual 0x%x\n", __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create a new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!\n",
			  __func__);
		break;
	}

	return ret;
}

static void clear_lro_session(struct lro *lro)
{
	memset(lro, 0, sizeof(struct lro));
}

static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
{
	struct net_device *dev = skb->dev;
	struct s2io_nic *sp = netdev_priv(dev);

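	/*
	 * In NAPI mode frames are handed to the stack from softirq context
	 * via netif_receive_skb()/vlan_hwaccel_receive_skb(); otherwise
	 * netif_rx()/vlan_hwaccel_rx() queue them from interrupt context.
	 */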
	skb->protocol = eth_type_trans(skb, dev);
	if (sp->vlgrp && vlan_tag && sp->vlan_strip_flag) {
		/* Queueing the vlan frame to the upper layer */
		if (sp->config.napi)
			vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
		else
			vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
	} else {
		if (sp->config.napi)
			netif_receive_skb(skb);
		else
			netif_rx(skb);
	}
}

static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb,
			   u32 tcp_len)
{
	struct sk_buff *first = lro->parent;

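	/*
	 * Account the new segment's payload in the head skb, then chain
	 * the skb onto the head's frag_list: the first appended fragment
	 * starts the list, later ones are linked through lro->last_frag.
	 */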
	first->len += tcp_len;
	first->data_len = lro->frags_len;
	skb_pull(skb, (skb->len - tcp_len));
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;
	else
		skb_shinfo(first)->frag_list = skb;
	first->truesize += skb->truesize;
	lro->last_frag = skb;
	sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
}

/**
 * s2io_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

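	/* The recovery core will reset the slot and call s2io_io_slot_reset() next */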
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * s2io_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "s2io: "
		       "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * s2io_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void s2io_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (s2io_card_up(sp)) {
			printk(KERN_ERR "s2io: "
			       "Can't bring device back up after reset.\n");
			return;
		}

		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
			s2io_card_down(sp);
			printk(KERN_ERR "s2io: "
			       "Can't restore MAC addr after reset.\n");
			return;
		}
	}

	netif_device_attach(netdev);
	netif_tx_wake_all_queues(netdev);
}
