1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2010 Exar Corp.
4 *
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all the code parts that were
22 * deprecated and also for styling-related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The loadable module parameters that are supported by the driver, and a brief
28 * explanation of each variable.
29 *
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2.
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro_max_pkts: This parameter defines the maximum number of packets that can
42 * be aggregated into a single large packet
43 * napi: This parameter is used to enable/disable NAPI (polling Rx)
44 * Possible values '1' for enable and '0' for disable. Default is '1'
45 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
46 * Possible values '1' for enable, '0' for disable.
47 * Default is '2' - which means disable in promisc mode
48 * and enable in non-promiscuous mode.
49 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
50 * Possible values '1' for enable and '0' for disable. Default is '0'
51 ************************************************************************/
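/*
 * Illustrative load example (values are hypothetical, not from this file):
 *
 *   modprobe s2io tx_fifo_num=4 rx_ring_num=2 intr_type=2 napi=1 \
 *           tx_fifo_len=1024,1024,1024,1024 rx_ring_sz=16,16
 *
 * Valid ranges for each parameter are described in the comments above.
 */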
52
53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/mdio.h>
65 #include <linux/skbuff.h>
66 #include <linux/init.h>
67 #include <linux/delay.h>
68 #include <linux/stddef.h>
69 #include <linux/ioctl.h>
70 #include <linux/timex.h>
71 #include <linux/ethtool.h>
72 #include <linux/workqueue.h>
73 #include <linux/if_vlan.h>
74 #include <linux/ip.h>
75 #include <linux/tcp.h>
76 #include <linux/uaccess.h>
77 #include <linux/io.h>
78 #include <linux/io-64-nonatomic-lo-hi.h>
79 #include <linux/slab.h>
80 #include <linux/prefetch.h>
81 #include <net/tcp.h>
82 #include <net/checksum.h>
83
84 #include <asm/div64.h>
85 #include <asm/irq.h>
86
87 /* local include */
88 #include "s2io.h"
89 #include "s2io-regs.h"
90
91 #define DRV_VERSION "2.0.26.28"
92
93 /* S2io Driver name & version. */
94 static const char s2io_driver_name[] = "Neterion";
95 static const char s2io_driver_version[] = DRV_VERSION;
96
97 static const int rxd_size[2] = {32, 48};
98 static const int rxd_count[2] = {127, 85};
99
100 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
101 {
102 int ret;
103
104 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
105 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
106
107 return ret;
108 }
109
110 /*
111 * Cards with the following subsystem_ids have a link state indication
112 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
113 * The macro below identifies these cards given the subsystem_id.
114 */
115 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
116 (dev_type == XFRAME_I_DEVICE) ? \
117 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
118 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
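/*
 * Illustrative example: an Xframe I card with subid 0x600C makes the macro
 * above evaluate to 1, while any Xframe II card evaluates to 0 regardless
 * of subid.
 */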
119
120 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
121 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
122
123 static inline int is_s2io_card_up(const struct s2io_nic *sp)
124 {
125 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126 }
127
128 /* Ethtool related variables and Macros. */
129 static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
130 "Register test\t(offline)",
131 "Eeprom test\t(offline)",
132 "Link test\t(online)",
133 "RLDRAM test\t(offline)",
134 "BIST Test\t(offline)"
135 };
136
137 static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
138 {"tmac_frms"},
139 {"tmac_data_octets"},
140 {"tmac_drop_frms"},
141 {"tmac_mcst_frms"},
142 {"tmac_bcst_frms"},
143 {"tmac_pause_ctrl_frms"},
144 {"tmac_ttl_octets"},
145 {"tmac_ucst_frms"},
146 {"tmac_nucst_frms"},
147 {"tmac_any_err_frms"},
148 {"tmac_ttl_less_fb_octets"},
149 {"tmac_vld_ip_octets"},
150 {"tmac_vld_ip"},
151 {"tmac_drop_ip"},
152 {"tmac_icmp"},
153 {"tmac_rst_tcp"},
154 {"tmac_tcp"},
155 {"tmac_udp"},
156 {"rmac_vld_frms"},
157 {"rmac_data_octets"},
158 {"rmac_fcs_err_frms"},
159 {"rmac_drop_frms"},
160 {"rmac_vld_mcst_frms"},
161 {"rmac_vld_bcst_frms"},
162 {"rmac_in_rng_len_err_frms"},
163 {"rmac_out_rng_len_err_frms"},
164 {"rmac_long_frms"},
165 {"rmac_pause_ctrl_frms"},
166 {"rmac_unsup_ctrl_frms"},
167 {"rmac_ttl_octets"},
168 {"rmac_accepted_ucst_frms"},
169 {"rmac_accepted_nucst_frms"},
170 {"rmac_discarded_frms"},
171 {"rmac_drop_events"},
172 {"rmac_ttl_less_fb_octets"},
173 {"rmac_ttl_frms"},
174 {"rmac_usized_frms"},
175 {"rmac_osized_frms"},
176 {"rmac_frag_frms"},
177 {"rmac_jabber_frms"},
178 {"rmac_ttl_64_frms"},
179 {"rmac_ttl_65_127_frms"},
180 {"rmac_ttl_128_255_frms"},
181 {"rmac_ttl_256_511_frms"},
182 {"rmac_ttl_512_1023_frms"},
183 {"rmac_ttl_1024_1518_frms"},
184 {"rmac_ip"},
185 {"rmac_ip_octets"},
186 {"rmac_hdr_err_ip"},
187 {"rmac_drop_ip"},
188 {"rmac_icmp"},
189 {"rmac_tcp"},
190 {"rmac_udp"},
191 {"rmac_err_drp_udp"},
192 {"rmac_xgmii_err_sym"},
193 {"rmac_frms_q0"},
194 {"rmac_frms_q1"},
195 {"rmac_frms_q2"},
196 {"rmac_frms_q3"},
197 {"rmac_frms_q4"},
198 {"rmac_frms_q5"},
199 {"rmac_frms_q6"},
200 {"rmac_frms_q7"},
201 {"rmac_full_q0"},
202 {"rmac_full_q1"},
203 {"rmac_full_q2"},
204 {"rmac_full_q3"},
205 {"rmac_full_q4"},
206 {"rmac_full_q5"},
207 {"rmac_full_q6"},
208 {"rmac_full_q7"},
209 {"rmac_pause_cnt"},
210 {"rmac_xgmii_data_err_cnt"},
211 {"rmac_xgmii_ctrl_err_cnt"},
212 {"rmac_accepted_ip"},
213 {"rmac_err_tcp"},
214 {"rd_req_cnt"},
215 {"new_rd_req_cnt"},
216 {"new_rd_req_rtry_cnt"},
217 {"rd_rtry_cnt"},
218 {"wr_rtry_rd_ack_cnt"},
219 {"wr_req_cnt"},
220 {"new_wr_req_cnt"},
221 {"new_wr_req_rtry_cnt"},
222 {"wr_rtry_cnt"},
223 {"wr_disc_cnt"},
224 {"rd_rtry_wr_ack_cnt"},
225 {"txp_wr_cnt"},
226 {"txd_rd_cnt"},
227 {"txd_wr_cnt"},
228 {"rxd_rd_cnt"},
229 {"rxd_wr_cnt"},
230 {"txf_rd_cnt"},
231 {"rxf_wr_cnt"}
232 };
233
234 static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
235 {"rmac_ttl_1519_4095_frms"},
236 {"rmac_ttl_4096_8191_frms"},
237 {"rmac_ttl_8192_max_frms"},
238 {"rmac_ttl_gt_max_frms"},
239 {"rmac_osized_alt_frms"},
240 {"rmac_jabber_alt_frms"},
241 {"rmac_gt_max_alt_frms"},
242 {"rmac_vlan_frms"},
243 {"rmac_len_discard"},
244 {"rmac_fcs_discard"},
245 {"rmac_pf_discard"},
246 {"rmac_da_discard"},
247 {"rmac_red_discard"},
248 {"rmac_rts_discard"},
249 {"rmac_ingm_full_discard"},
250 {"link_fault_cnt"}
251 };
252
253 static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
254 {"\n DRIVER STATISTICS"},
255 {"single_bit_ecc_errs"},
256 {"double_bit_ecc_errs"},
257 {"parity_err_cnt"},
258 {"serious_err_cnt"},
259 {"soft_reset_cnt"},
260 {"fifo_full_cnt"},
261 {"ring_0_full_cnt"},
262 {"ring_1_full_cnt"},
263 {"ring_2_full_cnt"},
264 {"ring_3_full_cnt"},
265 {"ring_4_full_cnt"},
266 {"ring_5_full_cnt"},
267 {"ring_6_full_cnt"},
268 {"ring_7_full_cnt"},
269 {"alarm_transceiver_temp_high"},
270 {"alarm_transceiver_temp_low"},
271 {"alarm_laser_bias_current_high"},
272 {"alarm_laser_bias_current_low"},
273 {"alarm_laser_output_power_high"},
274 {"alarm_laser_output_power_low"},
275 {"warn_transceiver_temp_high"},
276 {"warn_transceiver_temp_low"},
277 {"warn_laser_bias_current_high"},
278 {"warn_laser_bias_current_low"},
279 {"warn_laser_output_power_high"},
280 {"warn_laser_output_power_low"},
281 {"lro_aggregated_pkts"},
282 {"lro_flush_both_count"},
283 {"lro_out_of_sequence_pkts"},
284 {"lro_flush_due_to_max_pkts"},
285 {"lro_avg_aggr_pkts"},
286 {"mem_alloc_fail_cnt"},
287 {"pci_map_fail_cnt"},
288 {"watchdog_timer_cnt"},
289 {"mem_allocated"},
290 {"mem_freed"},
291 {"link_up_cnt"},
292 {"link_down_cnt"},
293 {"link_up_time"},
294 {"link_down_time"},
295 {"tx_tcode_buf_abort_cnt"},
296 {"tx_tcode_desc_abort_cnt"},
297 {"tx_tcode_parity_err_cnt"},
298 {"tx_tcode_link_loss_cnt"},
299 {"tx_tcode_list_proc_err_cnt"},
300 {"rx_tcode_parity_err_cnt"},
301 {"rx_tcode_abort_cnt"},
302 {"rx_tcode_parity_abort_cnt"},
303 {"rx_tcode_rda_fail_cnt"},
304 {"rx_tcode_unkn_prot_cnt"},
305 {"rx_tcode_fcs_err_cnt"},
306 {"rx_tcode_buf_size_err_cnt"},
307 {"rx_tcode_rxd_corrupt_cnt"},
308 {"rx_tcode_unkn_err_cnt"},
309 {"tda_err_cnt"},
310 {"pfc_err_cnt"},
311 {"pcc_err_cnt"},
312 {"tti_err_cnt"},
313 {"tpa_err_cnt"},
314 {"sm_err_cnt"},
315 {"lso_err_cnt"},
316 {"mac_tmac_err_cnt"},
317 {"mac_rmac_err_cnt"},
318 {"xgxs_txgxs_err_cnt"},
319 {"xgxs_rxgxs_err_cnt"},
320 {"rc_err_cnt"},
321 {"prc_pcix_err_cnt"},
322 {"rpa_err_cnt"},
323 {"rda_err_cnt"},
324 {"rti_err_cnt"},
325 {"mc_err_cnt"}
326 };
327
328 #define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
329 #define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
330 #define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
331
332 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
333 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
334
335 #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
336 #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
337
338 #define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
339 #define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
340
341 /* copy mac addr to def_mac_addr array */
342 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
343 {
344 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
345 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
346 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
347 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
348 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
349 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
350 }
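/*
 * Illustrative example: a mac_addr value of 0x001122334455ULL is stored by
 * the helper above as the bytes 00:11:22:33:44:55 in
 * def_mac_addr[offset].mac_addr[0..5].
 */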
351
352 /*
353 * Constants to be programmed into the Xena's registers, to configure
354 * the XAUI.
355 */
356
357 #define END_SIGN 0x0
358 static const u64 herc_act_dtx_cfg[] = {
359 /* Set address */
360 0x8000051536750000ULL, 0x80000515367500E0ULL,
361 /* Write data */
362 0x8000051536750004ULL, 0x80000515367500E4ULL,
363 /* Set address */
364 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
365 /* Write data */
366 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
367 /* Set address */
368 0x801205150D440000ULL, 0x801205150D4400E0ULL,
369 /* Write data */
370 0x801205150D440004ULL, 0x801205150D4400E4ULL,
371 /* Set address */
372 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
373 /* Write data */
374 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
375 /* Done */
376 END_SIGN
377 };
378
379 static const u64 xena_dtx_cfg[] = {
380 /* Set address */
381 0x8000051500000000ULL, 0x80000515000000E0ULL,
382 /* Write data */
383 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
384 /* Set address */
385 0x8001051500000000ULL, 0x80010515000000E0ULL,
386 /* Write data */
387 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
388 /* Set address */
389 0x8002051500000000ULL, 0x80020515000000E0ULL,
390 /* Write data */
391 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
392 END_SIGN
393 };
394
395 /*
396 * Constants for Fixing the MacAddress problem seen mostly on
397 * Alpha machines.
398 */
399 static const u64 fix_mac[] = {
400 0x0060000000000000ULL, 0x0060600000000000ULL,
401 0x0040600000000000ULL, 0x0000600000000000ULL,
402 0x0020600000000000ULL, 0x0060600000000000ULL,
403 0x0020600000000000ULL, 0x0060600000000000ULL,
404 0x0020600000000000ULL, 0x0060600000000000ULL,
405 0x0020600000000000ULL, 0x0060600000000000ULL,
406 0x0020600000000000ULL, 0x0060600000000000ULL,
407 0x0020600000000000ULL, 0x0060600000000000ULL,
408 0x0020600000000000ULL, 0x0060600000000000ULL,
409 0x0020600000000000ULL, 0x0060600000000000ULL,
410 0x0020600000000000ULL, 0x0060600000000000ULL,
411 0x0020600000000000ULL, 0x0060600000000000ULL,
412 0x0020600000000000ULL, 0x0000600000000000ULL,
413 0x0040600000000000ULL, 0x0060600000000000ULL,
414 END_SIGN
415 };
416
417 MODULE_LICENSE("GPL");
418 MODULE_VERSION(DRV_VERSION);
419
420
421 /* Module Loadable parameters. */
422 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
423 S2IO_PARM_INT(rx_ring_num, 1);
424 S2IO_PARM_INT(multiq, 0);
425 S2IO_PARM_INT(rx_ring_mode, 1);
426 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
427 S2IO_PARM_INT(rmac_pause_time, 0x100);
428 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
429 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
430 S2IO_PARM_INT(shared_splits, 0);
431 S2IO_PARM_INT(tmac_util_period, 5);
432 S2IO_PARM_INT(rmac_util_period, 5);
433 S2IO_PARM_INT(l3l4hdr_size, 128);
434 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
435 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
436 /* Frequency of Rx desc syncs expressed as power of 2 */
437 S2IO_PARM_INT(rxsync_frequency, 3);
438 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
439 S2IO_PARM_INT(intr_type, 2);
440 /* Large receive offload feature */
441
442 /* Max pkts to be aggregated by LRO at one time. If not specified,
443 * aggregation happens until we hit max IP pkt size(64K)
444 */
445 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
446 S2IO_PARM_INT(indicate_max_pkts, 0);
447
448 S2IO_PARM_INT(napi, 1);
449 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
450
451 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
452 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
453 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
454 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
455 static unsigned int rts_frm_len[MAX_RX_RINGS] =
456 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
457
458 module_param_array(tx_fifo_len, uint, NULL, 0);
459 module_param_array(rx_ring_sz, uint, NULL, 0);
460 module_param_array(rts_frm_len, uint, NULL, 0);
461
462 /*
463 * S2IO device table.
464 * This table lists all the devices that this driver supports.
465 */
466 static const struct pci_device_id s2io_tbl[] = {
467 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
468 PCI_ANY_ID, PCI_ANY_ID},
469 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
470 PCI_ANY_ID, PCI_ANY_ID},
471 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
472 PCI_ANY_ID, PCI_ANY_ID},
473 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
474 PCI_ANY_ID, PCI_ANY_ID},
475 {0,}
476 };
477
478 MODULE_DEVICE_TABLE(pci, s2io_tbl);
479
480 static const struct pci_error_handlers s2io_err_handler = {
481 .error_detected = s2io_io_error_detected,
482 .slot_reset = s2io_io_slot_reset,
483 .resume = s2io_io_resume,
484 };
485
486 static struct pci_driver s2io_driver = {
487 .name = "S2IO",
488 .id_table = s2io_tbl,
489 .probe = s2io_init_nic,
490 .remove = s2io_rem_nic,
491 .err_handler = &s2io_err_handler,
492 };
493
494 /* A simplifier macro used by both the init and free shared_mem functions. */
495 #define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
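/*
 * Illustrative example (hypothetical sizes): with lst_size = 256 bytes and
 * 4 KB pages, lst_per_page is 16, so TXD_MEM_PAGE_CNT(1000, 16) rounds
 * 1000/16 up to 63 pages.
 */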
496
497 /* netqueue manipulation helper functions */
498 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
499 {
500 if (!sp->config.multiq) {
501 int i;
502
503 for (i = 0; i < sp->config.tx_fifo_num; i++)
504 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
505 }
506 netif_tx_stop_all_queues(sp->dev);
507 }
508
509 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
510 {
511 if (!sp->config.multiq)
512 sp->mac_control.fifos[fifo_no].queue_state =
513 FIFO_QUEUE_STOP;
514
515 netif_tx_stop_all_queues(sp->dev);
516 }
517
518 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
519 {
520 if (!sp->config.multiq) {
521 int i;
522
523 for (i = 0; i < sp->config.tx_fifo_num; i++)
524 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
525 }
526 netif_tx_start_all_queues(sp->dev);
527 }
528
529 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
530 {
531 if (!sp->config.multiq) {
532 int i;
533
534 for (i = 0; i < sp->config.tx_fifo_num; i++)
535 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
536 }
537 netif_tx_wake_all_queues(sp->dev);
538 }
539
540 static inline void s2io_wake_tx_queue(
541 struct fifo_info *fifo, int cnt, u8 multiq)
542 {
543
544 if (multiq) {
545 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
546 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
547 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
548 if (netif_queue_stopped(fifo->dev)) {
549 fifo->queue_state = FIFO_QUEUE_START;
550 netif_wake_queue(fifo->dev);
551 }
552 }
553 }
554
555 /**
556 * init_shared_mem - Allocation and Initialization of Memory
557 * @nic: Device private variable.
558 * Description: The function allocates all the memory areas shared
559 * between the NIC and the driver. This includes Tx descriptors,
560 * Rx descriptors and the statistics block.
561 */
562
563 static int init_shared_mem(struct s2io_nic *nic)
564 {
565 u32 size;
566 void *tmp_v_addr, *tmp_v_addr_next;
567 dma_addr_t tmp_p_addr, tmp_p_addr_next;
568 struct RxD_block *pre_rxd_blk = NULL;
569 int i, j, blk_cnt;
570 int lst_size, lst_per_page;
571 struct net_device *dev = nic->dev;
572 unsigned long tmp;
573 struct buffAdd *ba;
574 struct config_param *config = &nic->config;
575 struct mac_info *mac_control = &nic->mac_control;
576 unsigned long long mem_allocated = 0;
577
578 /* Allocation and initialization of TXDLs in FIFOs */
579 size = 0;
580 for (i = 0; i < config->tx_fifo_num; i++) {
581 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
582
583 size += tx_cfg->fifo_len;
584 }
585 if (size > MAX_AVAILABLE_TXDS) {
586 DBG_PRINT(ERR_DBG,
587 "Too many TxDs requested: %d, max supported: %d\n",
588 size, MAX_AVAILABLE_TXDS);
589 return -EINVAL;
590 }
591
592 size = 0;
593 for (i = 0; i < config->tx_fifo_num; i++) {
594 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
595
596 size = tx_cfg->fifo_len;
597 /*
598 * Legal values are from 2 to 8192
599 */
600 if (size < 2) {
601 DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
602 "Valid lengths are 2 through 8192\n",
603 i, size);
604 return -EINVAL;
605 }
606 }
607
608 lst_size = (sizeof(struct TxD) * config->max_txds);
609 lst_per_page = PAGE_SIZE / lst_size;
610
611 for (i = 0; i < config->tx_fifo_num; i++) {
612 struct fifo_info *fifo = &mac_control->fifos[i];
613 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
614 int fifo_len = tx_cfg->fifo_len;
615 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
616
617 fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
618 if (!fifo->list_info) {
619 DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
620 return -ENOMEM;
621 }
622 mem_allocated += list_holder_size;
623 }
624 for (i = 0; i < config->tx_fifo_num; i++) {
625 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
626 lst_per_page);
627 struct fifo_info *fifo = &mac_control->fifos[i];
628 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
629
630 fifo->tx_curr_put_info.offset = 0;
631 fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
632 fifo->tx_curr_get_info.offset = 0;
633 fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
634 fifo->fifo_no = i;
635 fifo->nic = nic;
636 fifo->max_txds = MAX_SKB_FRAGS + 2;
637 fifo->dev = dev;
638
639 for (j = 0; j < page_num; j++) {
640 int k = 0;
641 dma_addr_t tmp_p;
642 void *tmp_v;
643 tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
644 &tmp_p, GFP_KERNEL);
645 if (!tmp_v) {
646 DBG_PRINT(INFO_DBG,
647 "dma_alloc_coherent failed for TxDL\n");
648 return -ENOMEM;
649 }
650 /* If we got a zero DMA address(can happen on
651 * certain platforms like PPC), reallocate.
652 * Store virtual address of page we don't want,
653 * to be freed later.
654 */
655 if (!tmp_p) {
656 mac_control->zerodma_virt_addr = tmp_v;
657 DBG_PRINT(INIT_DBG,
658 "%s: Zero DMA address for TxDL. "
659 "Virtual address %p\n",
660 dev->name, tmp_v);
661 tmp_v = dma_alloc_coherent(&nic->pdev->dev,
662 PAGE_SIZE, &tmp_p,
663 GFP_KERNEL);
664 if (!tmp_v) {
665 DBG_PRINT(INFO_DBG,
666 "dma_alloc_coherent failed for TxDL\n");
667 return -ENOMEM;
668 }
669 mem_allocated += PAGE_SIZE;
670 }
671 while (k < lst_per_page) {
672 int l = (j * lst_per_page) + k;
673 if (l == tx_cfg->fifo_len)
674 break;
675 fifo->list_info[l].list_virt_addr =
676 tmp_v + (k * lst_size);
677 fifo->list_info[l].list_phy_addr =
678 tmp_p + (k * lst_size);
679 k++;
680 }
681 }
682 }
683
684 for (i = 0; i < config->tx_fifo_num; i++) {
685 struct fifo_info *fifo = &mac_control->fifos[i];
686 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
687
688 size = tx_cfg->fifo_len;
689 fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
690 if (!fifo->ufo_in_band_v)
691 return -ENOMEM;
692 mem_allocated += (size * sizeof(u64));
693 }
694
695 /* Allocation and initialization of RXDs in Rings */
696 size = 0;
697 for (i = 0; i < config->rx_ring_num; i++) {
698 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
699 struct ring_info *ring = &mac_control->rings[i];
700
701 if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
702 DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
703 "multiple of RxDs per Block\n",
704 dev->name, i);
705 return FAILURE;
706 }
707 size += rx_cfg->num_rxd;
708 ring->block_count = rx_cfg->num_rxd /
709 (rxd_count[nic->rxd_mode] + 1);
710 ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
711 }
712 if (nic->rxd_mode == RXD_MODE_1)
713 size = (size * (sizeof(struct RxD1)));
714 else
715 size = (size * (sizeof(struct RxD3)));
716
717 for (i = 0; i < config->rx_ring_num; i++) {
718 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
719 struct ring_info *ring = &mac_control->rings[i];
720
721 ring->rx_curr_get_info.block_index = 0;
722 ring->rx_curr_get_info.offset = 0;
723 ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
724 ring->rx_curr_put_info.block_index = 0;
725 ring->rx_curr_put_info.offset = 0;
726 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
727 ring->nic = nic;
728 ring->ring_no = i;
729
730 blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
731 /* Allocating all the Rx blocks */
732 for (j = 0; j < blk_cnt; j++) {
733 struct rx_block_info *rx_blocks;
734 int l;
735
736 rx_blocks = &ring->rx_blocks[j];
737 size = SIZE_OF_BLOCK; /* size is always page size */
738 tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
739 &tmp_p_addr, GFP_KERNEL);
740 if (tmp_v_addr == NULL) {
741 /*
742 * In case of failure, free_shared_mem()
743 * is called, which should free any
744 * memory that was alloced till the
745 * failure happened.
746 */
747 rx_blocks->block_virt_addr = tmp_v_addr;
748 return -ENOMEM;
749 }
750 mem_allocated += size;
751
752 size = sizeof(struct rxd_info) *
753 rxd_count[nic->rxd_mode];
754 rx_blocks->block_virt_addr = tmp_v_addr;
755 rx_blocks->block_dma_addr = tmp_p_addr;
756 rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
757 if (!rx_blocks->rxds)
758 return -ENOMEM;
759 mem_allocated += size;
760 for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
761 rx_blocks->rxds[l].virt_addr =
762 rx_blocks->block_virt_addr +
763 (rxd_size[nic->rxd_mode] * l);
764 rx_blocks->rxds[l].dma_addr =
765 rx_blocks->block_dma_addr +
766 (rxd_size[nic->rxd_mode] * l);
767 }
768 }
769 /* Interlinking all Rx Blocks */
770 for (j = 0; j < blk_cnt; j++) {
771 int next = (j + 1) % blk_cnt;
772 tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
773 tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
774 tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
775 tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
776
777 pre_rxd_blk = tmp_v_addr;
778 pre_rxd_blk->reserved_2_pNext_RxD_block =
779 (unsigned long)tmp_v_addr_next;
780 pre_rxd_blk->pNext_RxD_Blk_physical =
781 (u64)tmp_p_addr_next;
782 }
783 }
784 if (nic->rxd_mode == RXD_MODE_3B) {
785 /*
786 * Allocation of storage for buffer addresses in 2BUFF mode
787 * and the buffers as well.
788 */
789 for (i = 0; i < config->rx_ring_num; i++) {
790 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
791 struct ring_info *ring = &mac_control->rings[i];
792
793 blk_cnt = rx_cfg->num_rxd /
794 (rxd_count[nic->rxd_mode] + 1);
795 size = sizeof(struct buffAdd *) * blk_cnt;
796 ring->ba = kmalloc(size, GFP_KERNEL);
797 if (!ring->ba)
798 return -ENOMEM;
799 mem_allocated += size;
800 for (j = 0; j < blk_cnt; j++) {
801 int k = 0;
802
803 size = sizeof(struct buffAdd) *
804 (rxd_count[nic->rxd_mode] + 1);
805 ring->ba[j] = kmalloc(size, GFP_KERNEL);
806 if (!ring->ba[j])
807 return -ENOMEM;
808 mem_allocated += size;
809 while (k != rxd_count[nic->rxd_mode]) {
810 ba = &ring->ba[j][k];
811 size = BUF0_LEN + ALIGN_SIZE;
812 ba->ba_0_org = kmalloc(size, GFP_KERNEL);
813 if (!ba->ba_0_org)
814 return -ENOMEM;
815 mem_allocated += size;
816 tmp = (unsigned long)ba->ba_0_org;
817 tmp += ALIGN_SIZE;
818 tmp &= ~((unsigned long)ALIGN_SIZE);
819 ba->ba_0 = (void *)tmp;
820
821 size = BUF1_LEN + ALIGN_SIZE;
822 ba->ba_1_org = kmalloc(size, GFP_KERNEL);
823 if (!ba->ba_1_org)
824 return -ENOMEM;
825 mem_allocated += size;
826 tmp = (unsigned long)ba->ba_1_org;
827 tmp += ALIGN_SIZE;
828 tmp &= ~((unsigned long)ALIGN_SIZE);
829 ba->ba_1 = (void *)tmp;
830 k++;
831 }
832 }
833 }
834 }
835
836 /* Allocation and initialization of Statistics block */
837 size = sizeof(struct stat_block);
838 mac_control->stats_mem =
839 dma_alloc_coherent(&nic->pdev->dev, size,
840 &mac_control->stats_mem_phy, GFP_KERNEL);
841
842 if (!mac_control->stats_mem) {
843 /*
844 * In case of failure, free_shared_mem() is called, which
845 * should free any memory that was alloced till the
846 * failure happened.
847 */
848 return -ENOMEM;
849 }
850 mem_allocated += size;
851 mac_control->stats_mem_sz = size;
852
853 tmp_v_addr = mac_control->stats_mem;
854 mac_control->stats_info = tmp_v_addr;
855 memset(tmp_v_addr, 0, size);
856 DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
857 dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
858 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
859 return SUCCESS;
860 }
861
862 /**
863 * free_shared_mem - Free the allocated Memory
864 * @nic: Device private variable.
865 * Description: This function frees all memory locations allocated by
866 * the init_shared_mem() function and returns it to the kernel.
867 */
868
869 static void free_shared_mem(struct s2io_nic *nic)
870 {
871 int i, j, blk_cnt, size;
872 void *tmp_v_addr;
873 dma_addr_t tmp_p_addr;
874 int lst_size, lst_per_page;
875 struct net_device *dev;
876 int page_num = 0;
877 struct config_param *config;
878 struct mac_info *mac_control;
879 struct stat_block *stats;
880 struct swStat *swstats;
881
882 if (!nic)
883 return;
884
885 dev = nic->dev;
886
887 config = &nic->config;
888 mac_control = &nic->mac_control;
889 stats = mac_control->stats_info;
890 swstats = &stats->sw_stat;
891
892 lst_size = sizeof(struct TxD) * config->max_txds;
893 lst_per_page = PAGE_SIZE / lst_size;
894
895 for (i = 0; i < config->tx_fifo_num; i++) {
896 struct fifo_info *fifo = &mac_control->fifos[i];
897 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
898
899 page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
900 for (j = 0; j < page_num; j++) {
901 int mem_blks = (j * lst_per_page);
902 struct list_info_hold *fli;
903
904 if (!fifo->list_info)
905 return;
906
907 fli = &fifo->list_info[mem_blks];
908 if (!fli->list_virt_addr)
909 break;
910 dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
911 fli->list_virt_addr,
912 fli->list_phy_addr);
913 swstats->mem_freed += PAGE_SIZE;
914 }
915 /* If we got a zero DMA address during allocation,
916 * free the page now
917 */
918 if (mac_control->zerodma_virt_addr) {
919 dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
920 mac_control->zerodma_virt_addr,
921 (dma_addr_t)0);
922 DBG_PRINT(INIT_DBG,
923 "%s: Freeing TxDL with zero DMA address. "
924 "Virtual address %p\n",
925 dev->name, mac_control->zerodma_virt_addr);
926 swstats->mem_freed += PAGE_SIZE;
927 }
928 kfree(fifo->list_info);
929 swstats->mem_freed += tx_cfg->fifo_len *
930 sizeof(struct list_info_hold);
931 }
932
933 size = SIZE_OF_BLOCK;
934 for (i = 0; i < config->rx_ring_num; i++) {
935 struct ring_info *ring = &mac_control->rings[i];
936
937 blk_cnt = ring->block_count;
938 for (j = 0; j < blk_cnt; j++) {
939 tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
940 tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
941 if (tmp_v_addr == NULL)
942 break;
943 dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
944 tmp_p_addr);
945 swstats->mem_freed += size;
946 kfree(ring->rx_blocks[j].rxds);
947 swstats->mem_freed += sizeof(struct rxd_info) *
948 rxd_count[nic->rxd_mode];
949 }
950 }
951
952 if (nic->rxd_mode == RXD_MODE_3B) {
953 /* Freeing buffer storage addresses in 2BUFF mode. */
954 for (i = 0; i < config->rx_ring_num; i++) {
955 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
956 struct ring_info *ring = &mac_control->rings[i];
957
958 blk_cnt = rx_cfg->num_rxd /
959 (rxd_count[nic->rxd_mode] + 1);
960 for (j = 0; j < blk_cnt; j++) {
961 int k = 0;
962 if (!ring->ba[j])
963 continue;
964 while (k != rxd_count[nic->rxd_mode]) {
965 struct buffAdd *ba = &ring->ba[j][k];
966 kfree(ba->ba_0_org);
967 swstats->mem_freed +=
968 BUF0_LEN + ALIGN_SIZE;
969 kfree(ba->ba_1_org);
970 swstats->mem_freed +=
971 BUF1_LEN + ALIGN_SIZE;
972 k++;
973 }
974 kfree(ring->ba[j]);
975 swstats->mem_freed += sizeof(struct buffAdd) *
976 (rxd_count[nic->rxd_mode] + 1);
977 }
978 kfree(ring->ba);
979 swstats->mem_freed += sizeof(struct buffAdd *) *
980 blk_cnt;
981 }
982 }
983
984 for (i = 0; i < nic->config.tx_fifo_num; i++) {
985 struct fifo_info *fifo = &mac_control->fifos[i];
986 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
987
988 if (fifo->ufo_in_band_v) {
989 swstats->mem_freed += tx_cfg->fifo_len *
990 sizeof(u64);
991 kfree(fifo->ufo_in_band_v);
992 }
993 }
994
995 if (mac_control->stats_mem) {
996 swstats->mem_freed += mac_control->stats_mem_sz;
997 dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
998 mac_control->stats_mem,
999 mac_control->stats_mem_phy);
1000 }
1001 }
1002
1003 /*
1004 * s2io_verify_pci_mode - Check the PCI/PCI-X bus mode reported by the adapter
1005 */
1006
1007 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1008 {
1009 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1010 register u64 val64 = 0;
1011 int mode;
1012
1013 val64 = readq(&bar0->pci_mode);
1014 mode = (u8)GET_PCI_MODE(val64);
1015
1016 if (val64 & PCI_MODE_UNKNOWN_MODE)
1017 return -1; /* Unknown PCI mode */
1018 return mode;
1019 }
1020
1021 #define NEC_VENID 0x1033
1022 #define NEC_DEVID 0x0125
1023 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1024 {
1025 struct pci_dev *tdev = NULL;
1026 for_each_pci_dev(tdev) {
1027 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1028 if (tdev->bus == s2io_pdev->bus->parent) {
1029 pci_dev_put(tdev);
1030 return 1;
1031 }
1032 }
1033 }
1034 return 0;
1035 }
1036
1037 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1038 /*
1039 * s2io_print_pci_mode - Print the bus width and PCI/PCI-X mode of the adapter
1040 */
1041 static int s2io_print_pci_mode(struct s2io_nic *nic)
1042 {
1043 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1044 register u64 val64 = 0;
1045 int mode;
1046 struct config_param *config = &nic->config;
1047 const char *pcimode;
1048
1049 val64 = readq(&bar0->pci_mode);
1050 mode = (u8)GET_PCI_MODE(val64);
1051
1052 if (val64 & PCI_MODE_UNKNOWN_MODE)
1053 return -1; /* Unknown PCI mode */
1054
1055 config->bus_speed = bus_speed[mode];
1056
1057 if (s2io_on_nec_bridge(nic->pdev)) {
1058 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1059 nic->dev->name);
1060 return mode;
1061 }
1062
1063 switch (mode) {
1064 case PCI_MODE_PCI_33:
1065 pcimode = "33MHz PCI bus";
1066 break;
1067 case PCI_MODE_PCI_66:
1068 pcimode = "66MHz PCI bus";
1069 break;
1070 case PCI_MODE_PCIX_M1_66:
1071 pcimode = "66MHz PCIX(M1) bus";
1072 break;
1073 case PCI_MODE_PCIX_M1_100:
1074 pcimode = "100MHz PCIX(M1) bus";
1075 break;
1076 case PCI_MODE_PCIX_M1_133:
1077 pcimode = "133MHz PCIX(M1) bus";
1078 break;
1079 case PCI_MODE_PCIX_M2_66:
1080 pcimode = "133MHz PCIX(M2) bus";
1081 break;
1082 case PCI_MODE_PCIX_M2_100:
1083 pcimode = "200MHz PCIX(M2) bus";
1084 break;
1085 case PCI_MODE_PCIX_M2_133:
1086 pcimode = "266MHz PCIX(M2) bus";
1087 break;
1088 default:
1089 pcimode = "unsupported bus!";
1090 mode = -1;
1091 }
1092
1093 DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1094 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1095
1096 return mode;
1097 }
1098
1099 /**
1100 * init_tti - Initialization of the transmit traffic interrupt scheme
1101 * @nic: device private variable
1102 * @link: link status (UP/DOWN) used to enable/disable continuous
1103 * transmit interrupts
1104 * Description: The function configures transmit traffic interrupts
1105 * Return Value: SUCCESS on success and
1106 * '-1' on failure
1107 */
1108
1109 static int init_tti(struct s2io_nic *nic, int link)
1110 {
1111 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1112 register u64 val64 = 0;
1113 int i;
1114 struct config_param *config = &nic->config;
1115
1116 for (i = 0; i < config->tx_fifo_num; i++) {
1117 /*
1118 * TTI Initialization. Default Tx timer gets us about
1119 * 250 interrupts per sec. Continuous interrupts are enabled
1120 * by default.
1121 */
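/*
 * Illustrative example (assumed bus speed): on a 133 MHz bus,
 * count = (133 * 125) / 2 = 8312 below, versus the fixed 0x2078
 * value used for Xframe I.
 */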
1122 if (nic->device_type == XFRAME_II_DEVICE) {
1123 int count = (nic->config.bus_speed * 125)/2;
1124 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1125 } else
1126 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1127
1128 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1129 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1130 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1131 TTI_DATA1_MEM_TX_TIMER_AC_EN;
1132 if (i == 0)
1133 if (use_continuous_tx_intrs && (link == LINK_UP))
1134 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1135 writeq(val64, &bar0->tti_data1_mem);
1136
1137 if (nic->config.intr_type == MSI_X) {
1138 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1139 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1140 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1141 TTI_DATA2_MEM_TX_UFC_D(0x300);
1142 } else {
1143 if ((nic->config.tx_steering_type ==
1144 TX_DEFAULT_STEERING) &&
1145 (config->tx_fifo_num > 1) &&
1146 (i >= nic->udp_fifo_idx) &&
1147 (i < (nic->udp_fifo_idx +
1148 nic->total_udp_fifos)))
1149 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1150 TTI_DATA2_MEM_TX_UFC_B(0x80) |
1151 TTI_DATA2_MEM_TX_UFC_C(0x100) |
1152 TTI_DATA2_MEM_TX_UFC_D(0x120);
1153 else
1154 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1155 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1156 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1157 TTI_DATA2_MEM_TX_UFC_D(0x80);
1158 }
1159
1160 writeq(val64, &bar0->tti_data2_mem);
1161
1162 val64 = TTI_CMD_MEM_WE |
1163 TTI_CMD_MEM_STROBE_NEW_CMD |
1164 TTI_CMD_MEM_OFFSET(i);
1165 writeq(val64, &bar0->tti_command_mem);
1166
1167 if (wait_for_cmd_complete(&bar0->tti_command_mem,
1168 TTI_CMD_MEM_STROBE_NEW_CMD,
1169 S2IO_BIT_RESET) != SUCCESS)
1170 return FAILURE;
1171 }
1172
1173 return SUCCESS;
1174 }
1175
1176 /**
1177 * init_nic - Initialization of hardware
1178 * @nic: device private variable
1179 * Description: The function sequentially configures every block
1180 * of the H/W from their reset values.
1181 * Return Value: SUCCESS on success and
1182 * '-1' on failure (endian settings incorrect).
1183 */
1184
1185 static int init_nic(struct s2io_nic *nic)
1186 {
1187 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1188 struct net_device *dev = nic->dev;
1189 register u64 val64 = 0;
1190 void __iomem *add;
1191 u32 time;
1192 int i, j;
1193 int dtx_cnt = 0;
1194 unsigned long long mem_share;
1195 int mem_size;
1196 struct config_param *config = &nic->config;
1197 struct mac_info *mac_control = &nic->mac_control;
1198
1199 /* Set the swapper control on the card */
1200 if (s2io_set_swapper(nic)) {
1201 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1202 return -EIO;
1203 }
1204
1205 /*
1206 * Herc requires EOI to be removed from reset before XGXS, so..
1207 */
1208 if (nic->device_type & XFRAME_II_DEVICE) {
1209 val64 = 0xA500000000ULL;
1210 writeq(val64, &bar0->sw_reset);
1211 msleep(500);
1212 val64 = readq(&bar0->sw_reset);
1213 }
1214
1215 /* Remove XGXS from reset state */
1216 val64 = 0;
1217 writeq(val64, &bar0->sw_reset);
1218 msleep(500);
1219 val64 = readq(&bar0->sw_reset);
1220
1221 /* Ensure that it's safe to access registers by checking that the
1222 * RIC_RUNNING bit is reset. The check is valid only for XframeII.
1223 */
1224 if (nic->device_type == XFRAME_II_DEVICE) {
1225 for (i = 0; i < 50; i++) {
1226 val64 = readq(&bar0->adapter_status);
1227 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1228 break;
1229 msleep(10);
1230 }
1231 if (i == 50)
1232 return -ENODEV;
1233 }
1234
1235 /* Enable Receiving broadcasts */
1236 add = &bar0->mac_cfg;
1237 val64 = readq(&bar0->mac_cfg);
1238 val64 |= MAC_RMAC_BCAST_ENABLE;
1239 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1240 writel((u32)val64, add);
1241 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1242 writel((u32) (val64 >> 32), (add + 4));
1243
1244 /* Read registers in all blocks */
1245 val64 = readq(&bar0->mac_int_mask);
1246 val64 = readq(&bar0->mc_int_mask);
1247 val64 = readq(&bar0->xgxs_int_mask);
1248
1249 /* Set MTU */
1250 val64 = dev->mtu;
1251 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1252
1253 if (nic->device_type & XFRAME_II_DEVICE) {
1254 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1255 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1256 &bar0->dtx_control, UF);
1257 if (dtx_cnt & 0x1)
1258 msleep(1); /* Necessary!! */
1259 dtx_cnt++;
1260 }
1261 } else {
1262 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1263 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1264 &bar0->dtx_control, UF);
1265 val64 = readq(&bar0->dtx_control);
1266 dtx_cnt++;
1267 }
1268 }
1269
1270 /* Tx DMA Initialization */
1271 val64 = 0;
1272 writeq(val64, &bar0->tx_fifo_partition_0);
1273 writeq(val64, &bar0->tx_fifo_partition_1);
1274 writeq(val64, &bar0->tx_fifo_partition_2);
1275 writeq(val64, &bar0->tx_fifo_partition_3);
1276
1277 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1278 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1279
1280 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1281 vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1282
1283 if (i == (config->tx_fifo_num - 1)) {
1284 if (i % 2 == 0)
1285 i++;
1286 }
1287
1288 switch (i) {
1289 case 1:
1290 writeq(val64, &bar0->tx_fifo_partition_0);
1291 val64 = 0;
1292 j = 0;
1293 break;
1294 case 3:
1295 writeq(val64, &bar0->tx_fifo_partition_1);
1296 val64 = 0;
1297 j = 0;
1298 break;
1299 case 5:
1300 writeq(val64, &bar0->tx_fifo_partition_2);
1301 val64 = 0;
1302 j = 0;
1303 break;
1304 case 7:
1305 writeq(val64, &bar0->tx_fifo_partition_3);
1306 val64 = 0;
1307 j = 0;
1308 break;
1309 default:
1310 j++;
1311 break;
1312 }
1313 }
1314
1315 /*
1316 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1317 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1318 */
1319 if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1320 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1321
1322 val64 = readq(&bar0->tx_fifo_partition_0);
1323 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1324 &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1325
1326 /*
1327 * Initialization of Tx_PA_CONFIG register to ignore packet
1328 * integrity checking.
1329 */
1330 val64 = readq(&bar0->tx_pa_cfg);
1331 val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1332 TX_PA_CFG_IGNORE_SNAP_OUI |
1333 TX_PA_CFG_IGNORE_LLC_CTRL |
1334 TX_PA_CFG_IGNORE_L2_ERR;
1335 writeq(val64, &bar0->tx_pa_cfg);
1336
1337 /* Rx DMA initialization. */
1338 val64 = 0;
1339 for (i = 0; i < config->rx_ring_num; i++) {
1340 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1341
1342 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1343 }
1344 writeq(val64, &bar0->rx_queue_priority);
1345
1346 /*
1347 * Allocating equal share of memory to all the
1348 * configured Rings.
1349 */
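/*
 * Illustrative example: with mem_size = 64 and rx_ring_num = 3, ring 0
 * below gets 64/3 + 64%3 = 22 units while rings 1 and 2 get 21 each.
 */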
1350 val64 = 0;
1351 if (nic->device_type & XFRAME_II_DEVICE)
1352 mem_size = 32;
1353 else
1354 mem_size = 64;
1355
1356 for (i = 0; i < config->rx_ring_num; i++) {
1357 switch (i) {
1358 case 0:
1359 mem_share = (mem_size / config->rx_ring_num +
1360 mem_size % config->rx_ring_num);
1361 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1362 continue;
1363 case 1:
1364 mem_share = (mem_size / config->rx_ring_num);
1365 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1366 continue;
1367 case 2:
1368 mem_share = (mem_size / config->rx_ring_num);
1369 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1370 continue;
1371 case 3:
1372 mem_share = (mem_size / config->rx_ring_num);
1373 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1374 continue;
1375 case 4:
1376 mem_share = (mem_size / config->rx_ring_num);
1377 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1378 continue;
1379 case 5:
1380 mem_share = (mem_size / config->rx_ring_num);
1381 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1382 continue;
1383 case 6:
1384 mem_share = (mem_size / config->rx_ring_num);
1385 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1386 continue;
1387 case 7:
1388 mem_share = (mem_size / config->rx_ring_num);
1389 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1390 continue;
1391 }
1392 }
1393 writeq(val64, &bar0->rx_queue_cfg);
1394
1395 /*
1396 * Filling Tx round robin registers
1397 * as per the number of FIFOs for equal scheduling priority
1398 */
1399 switch (config->tx_fifo_num) {
1400 case 1:
1401 val64 = 0x0;
1402 writeq(val64, &bar0->tx_w_round_robin_0);
1403 writeq(val64, &bar0->tx_w_round_robin_1);
1404 writeq(val64, &bar0->tx_w_round_robin_2);
1405 writeq(val64, &bar0->tx_w_round_robin_3);
1406 writeq(val64, &bar0->tx_w_round_robin_4);
1407 break;
1408 case 2:
1409 val64 = 0x0001000100010001ULL;
1410 writeq(val64, &bar0->tx_w_round_robin_0);
1411 writeq(val64, &bar0->tx_w_round_robin_1);
1412 writeq(val64, &bar0->tx_w_round_robin_2);
1413 writeq(val64, &bar0->tx_w_round_robin_3);
1414 val64 = 0x0001000100000000ULL;
1415 writeq(val64, &bar0->tx_w_round_robin_4);
1416 break;
1417 case 3:
1418 val64 = 0x0001020001020001ULL;
1419 writeq(val64, &bar0->tx_w_round_robin_0);
1420 val64 = 0x0200010200010200ULL;
1421 writeq(val64, &bar0->tx_w_round_robin_1);
1422 val64 = 0x0102000102000102ULL;
1423 writeq(val64, &bar0->tx_w_round_robin_2);
1424 val64 = 0x0001020001020001ULL;
1425 writeq(val64, &bar0->tx_w_round_robin_3);
1426 val64 = 0x0200010200000000ULL;
1427 writeq(val64, &bar0->tx_w_round_robin_4);
1428 break;
1429 case 4:
1430 val64 = 0x0001020300010203ULL;
1431 writeq(val64, &bar0->tx_w_round_robin_0);
1432 writeq(val64, &bar0->tx_w_round_robin_1);
1433 writeq(val64, &bar0->tx_w_round_robin_2);
1434 writeq(val64, &bar0->tx_w_round_robin_3);
1435 val64 = 0x0001020300000000ULL;
1436 writeq(val64, &bar0->tx_w_round_robin_4);
1437 break;
1438 case 5:
1439 val64 = 0x0001020304000102ULL;
1440 writeq(val64, &bar0->tx_w_round_robin_0);
1441 val64 = 0x0304000102030400ULL;
1442 writeq(val64, &bar0->tx_w_round_robin_1);
1443 val64 = 0x0102030400010203ULL;
1444 writeq(val64, &bar0->tx_w_round_robin_2);
1445 val64 = 0x0400010203040001ULL;
1446 writeq(val64, &bar0->tx_w_round_robin_3);
1447 val64 = 0x0203040000000000ULL;
1448 writeq(val64, &bar0->tx_w_round_robin_4);
1449 break;
1450 case 6:
1451 val64 = 0x0001020304050001ULL;
1452 writeq(val64, &bar0->tx_w_round_robin_0);
1453 val64 = 0x0203040500010203ULL;
1454 writeq(val64, &bar0->tx_w_round_robin_1);
1455 val64 = 0x0405000102030405ULL;
1456 writeq(val64, &bar0->tx_w_round_robin_2);
1457 val64 = 0x0001020304050001ULL;
1458 writeq(val64, &bar0->tx_w_round_robin_3);
1459 val64 = 0x0203040500000000ULL;
1460 writeq(val64, &bar0->tx_w_round_robin_4);
1461 break;
1462 case 7:
1463 val64 = 0x0001020304050600ULL;
1464 writeq(val64, &bar0->tx_w_round_robin_0);
1465 val64 = 0x0102030405060001ULL;
1466 writeq(val64, &bar0->tx_w_round_robin_1);
1467 val64 = 0x0203040506000102ULL;
1468 writeq(val64, &bar0->tx_w_round_robin_2);
1469 val64 = 0x0304050600010203ULL;
1470 writeq(val64, &bar0->tx_w_round_robin_3);
1471 val64 = 0x0405060000000000ULL;
1472 writeq(val64, &bar0->tx_w_round_robin_4);
1473 break;
1474 case 8:
1475 val64 = 0x0001020304050607ULL;
1476 writeq(val64, &bar0->tx_w_round_robin_0);
1477 writeq(val64, &bar0->tx_w_round_robin_1);
1478 writeq(val64, &bar0->tx_w_round_robin_2);
1479 writeq(val64, &bar0->tx_w_round_robin_3);
1480 val64 = 0x0001020300000000ULL;
1481 writeq(val64, &bar0->tx_w_round_robin_4);
1482 break;
1483 }
1484
1485 /* Enable all configured Tx FIFO partitions */
1486 val64 = readq(&bar0->tx_fifo_partition_0);
1487 val64 |= (TX_FIFO_PARTITION_EN);
1488 writeq(val64, &bar0->tx_fifo_partition_0);
1489
1490 /* Filling the Rx round robin registers as per the
1491 * number of Rings and steering based on QoS with
1492 * equal priority.
1493 */
1494 switch (config->rx_ring_num) {
1495 case 1:
1496 val64 = 0x0;
1497 writeq(val64, &bar0->rx_w_round_robin_0);
1498 writeq(val64, &bar0->rx_w_round_robin_1);
1499 writeq(val64, &bar0->rx_w_round_robin_2);
1500 writeq(val64, &bar0->rx_w_round_robin_3);
1501 writeq(val64, &bar0->rx_w_round_robin_4);
1502
1503 val64 = 0x8080808080808080ULL;
1504 writeq(val64, &bar0->rts_qos_steering);
1505 break;
1506 case 2:
1507 val64 = 0x0001000100010001ULL;
1508 writeq(val64, &bar0->rx_w_round_robin_0);
1509 writeq(val64, &bar0->rx_w_round_robin_1);
1510 writeq(val64, &bar0->rx_w_round_robin_2);
1511 writeq(val64, &bar0->rx_w_round_robin_3);
1512 val64 = 0x0001000100000000ULL;
1513 writeq(val64, &bar0->rx_w_round_robin_4);
1514
1515 val64 = 0x8080808040404040ULL;
1516 writeq(val64, &bar0->rts_qos_steering);
1517 break;
1518 case 3:
1519 val64 = 0x0001020001020001ULL;
1520 writeq(val64, &bar0->rx_w_round_robin_0);
1521 val64 = 0x0200010200010200ULL;
1522 writeq(val64, &bar0->rx_w_round_robin_1);
1523 val64 = 0x0102000102000102ULL;
1524 writeq(val64, &bar0->rx_w_round_robin_2);
1525 val64 = 0x0001020001020001ULL;
1526 writeq(val64, &bar0->rx_w_round_robin_3);
1527 val64 = 0x0200010200000000ULL;
1528 writeq(val64, &bar0->rx_w_round_robin_4);
1529
1530 val64 = 0x8080804040402020ULL;
1531 writeq(val64, &bar0->rts_qos_steering);
1532 break;
1533 case 4:
1534 val64 = 0x0001020300010203ULL;
1535 writeq(val64, &bar0->rx_w_round_robin_0);
1536 writeq(val64, &bar0->rx_w_round_robin_1);
1537 writeq(val64, &bar0->rx_w_round_robin_2);
1538 writeq(val64, &bar0->rx_w_round_robin_3);
1539 val64 = 0x0001020300000000ULL;
1540 writeq(val64, &bar0->rx_w_round_robin_4);
1541
1542 val64 = 0x8080404020201010ULL;
1543 writeq(val64, &bar0->rts_qos_steering);
1544 break;
1545 case 5:
1546 val64 = 0x0001020304000102ULL;
1547 writeq(val64, &bar0->rx_w_round_robin_0);
1548 val64 = 0x0304000102030400ULL;
1549 writeq(val64, &bar0->rx_w_round_robin_1);
1550 val64 = 0x0102030400010203ULL;
1551 writeq(val64, &bar0->rx_w_round_robin_2);
1552 val64 = 0x0400010203040001ULL;
1553 writeq(val64, &bar0->rx_w_round_robin_3);
1554 val64 = 0x0203040000000000ULL;
1555 writeq(val64, &bar0->rx_w_round_robin_4);
1556
1557 val64 = 0x8080404020201008ULL;
1558 writeq(val64, &bar0->rts_qos_steering);
1559 break;
1560 case 6:
1561 val64 = 0x0001020304050001ULL;
1562 writeq(val64, &bar0->rx_w_round_robin_0);
1563 val64 = 0x0203040500010203ULL;
1564 writeq(val64, &bar0->rx_w_round_robin_1);
1565 val64 = 0x0405000102030405ULL;
1566 writeq(val64, &bar0->rx_w_round_robin_2);
1567 val64 = 0x0001020304050001ULL;
1568 writeq(val64, &bar0->rx_w_round_robin_3);
1569 val64 = 0x0203040500000000ULL;
1570 writeq(val64, &bar0->rx_w_round_robin_4);
1571
1572 val64 = 0x8080404020100804ULL;
1573 writeq(val64, &bar0->rts_qos_steering);
1574 break;
1575 case 7:
1576 val64 = 0x0001020304050600ULL;
1577 writeq(val64, &bar0->rx_w_round_robin_0);
1578 val64 = 0x0102030405060001ULL;
1579 writeq(val64, &bar0->rx_w_round_robin_1);
1580 val64 = 0x0203040506000102ULL;
1581 writeq(val64, &bar0->rx_w_round_robin_2);
1582 val64 = 0x0304050600010203ULL;
1583 writeq(val64, &bar0->rx_w_round_robin_3);
1584 val64 = 0x0405060000000000ULL;
1585 writeq(val64, &bar0->rx_w_round_robin_4);
1586
1587 val64 = 0x8080402010080402ULL;
1588 writeq(val64, &bar0->rts_qos_steering);
1589 break;
1590 case 8:
1591 val64 = 0x0001020304050607ULL;
1592 writeq(val64, &bar0->rx_w_round_robin_0);
1593 writeq(val64, &bar0->rx_w_round_robin_1);
1594 writeq(val64, &bar0->rx_w_round_robin_2);
1595 writeq(val64, &bar0->rx_w_round_robin_3);
1596 val64 = 0x0001020300000000ULL;
1597 writeq(val64, &bar0->rx_w_round_robin_4);
1598
1599 val64 = 0x8040201008040201ULL;
1600 writeq(val64, &bar0->rts_qos_steering);
1601 break;
1602 }
1603
1604 /* UDP Fix */
1605 val64 = 0;
1606 for (i = 0; i < 8; i++)
1607 writeq(val64, &bar0->rts_frm_len_n[i]);
1608
1609 /* Set the default rts frame length for the rings configured */
1610 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1611 for (i = 0 ; i < config->rx_ring_num ; i++)
1612 writeq(val64, &bar0->rts_frm_len_n[i]);
1613
1614 /* Set the frame length for the configured rings
1615 * desired by the user
1616 */
1617 for (i = 0; i < config->rx_ring_num; i++) {
1618 /* If rts_frm_len[i] == 0 then it is assumed that the user did not
1619 * specify frame length steering.
1620 * If the user provides a frame length then program
1621 * the rts_frm_len register with that value, or else
1622 * leave it as it is.
1623 */
1624 if (rts_frm_len[i] != 0) {
1625 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1626 &bar0->rts_frm_len_n[i]);
1627 }
1628 }
1629
1630 /* Disable differentiated services steering logic */
1631 for (i = 0; i < 64; i++) {
1632 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1633 DBG_PRINT(ERR_DBG,
1634 "%s: rts_ds_steer failed on codepoint %d\n",
1635 dev->name, i);
1636 return -ENODEV;
1637 }
1638 }
1639
1640 /* Program statistics memory */
1641 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1642
1643 if (nic->device_type == XFRAME_II_DEVICE) {
1644 val64 = STAT_BC(0x320);
1645 writeq(val64, &bar0->stat_byte_cnt);
1646 }
1647
1648 /*
1649 * Initializing the sampling rate for the device to calculate the
1650 * bandwidth utilization.
1651 */
1652 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1653 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1654 writeq(val64, &bar0->mac_link_util);
1655
1656 /*
1657 * Initializing the Transmit and Receive Traffic Interrupt
1658 * Scheme.
1659 */
1660
1661 /* Initialize TTI */
1662 if (SUCCESS != init_tti(nic, nic->last_link_state))
1663 return -ENODEV;
1664
1665 /* RTI Initialization */
1666 if (nic->device_type == XFRAME_II_DEVICE) {
1667 /*
1668 * Programmed to generate approximately 500 interrupts per
1669 * second
1670 */
1671 int count = (nic->config.bus_speed * 125)/4;
1672 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1673 } else
1674 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1675 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1676 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1677 RTI_DATA1_MEM_RX_URNG_C(0x30) |
1678 RTI_DATA1_MEM_RX_TIMER_AC_EN;
1679
1680 writeq(val64, &bar0->rti_data1_mem);
1681
1682 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1683 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1684 if (nic->config.intr_type == MSI_X)
1685 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1686 RTI_DATA2_MEM_RX_UFC_D(0x40));
1687 else
1688 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1689 RTI_DATA2_MEM_RX_UFC_D(0x80));
1690 writeq(val64, &bar0->rti_data2_mem);
1691
1692 for (i = 0; i < config->rx_ring_num; i++) {
1693 val64 = RTI_CMD_MEM_WE |
1694 RTI_CMD_MEM_STROBE_NEW_CMD |
1695 RTI_CMD_MEM_OFFSET(i);
1696 writeq(val64, &bar0->rti_command_mem);
1697
1698 /*
1699 * Once the operation completes, the Strobe bit of the
1700 * command register will be reset. We poll for this
1701 * particular condition. We wait for a maximum of 500ms
1702 * for the operation to complete, if it's not complete
1703 * by then we return error.
1704 */
1705 time = 0;
1706 while (true) {
1707 val64 = readq(&bar0->rti_command_mem);
1708 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1709 break;
1710
1711 if (time > 10) {
1712 DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1713 dev->name);
1714 return -ENODEV;
1715 }
1716 time++;
1717 msleep(50);
1718 }
1719 }
1720
1721 /*
1722 * Initializing proper Pause threshold values for all
1723 * the 8 Queues on the Rx side.
1724 */
1725 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1726 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1727
1728 /* Disable RMAC PAD STRIPPING */
1729 add = &bar0->mac_cfg;
1730 val64 = readq(&bar0->mac_cfg);
1731 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1732 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1733 writel((u32) (val64), add);
1734 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1735 writel((u32) (val64 >> 32), (add + 4));
1736 val64 = readq(&bar0->mac_cfg);
1737
1738 /* Enable FCS stripping by adapter */
1739 add = &bar0->mac_cfg;
1740 val64 = readq(&bar0->mac_cfg);
1741 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1742 if (nic->device_type == XFRAME_II_DEVICE)
1743 writeq(val64, &bar0->mac_cfg);
1744 else {
1745 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1746 writel((u32) (val64), add);
1747 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1748 writel((u32) (val64 >> 32), (add + 4));
1749 }
1750
1751 /*
1752 * Set the time value to be inserted in the pause frame
1753 * generated by xena.
1754 */
1755 val64 = readq(&bar0->rmac_pause_cfg);
1756 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1757 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1758 writeq(val64, &bar0->rmac_pause_cfg);
1759
1760 /*
1761 * Set the threshold limit for generating pause frames.
1762 * If the amount of data in any queue exceeds the ratio
1763 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1764 * a pause frame is generated.
1765 */
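/*
 * Illustrative example: with the default mc_pause_threshold_q0q3 of 187
 * (0xBB), each 16-bit field below becomes 0xFFBB and the register is
 * written with 0xFFBBFFBBFFBBFFBB.
 */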
1766 val64 = 0;
1767 for (i = 0; i < 4; i++) {
1768 val64 |= (((u64)0xFF00 |
1769 nic->mac_control.mc_pause_threshold_q0q3)
1770 << (i * 2 * 8));
1771 }
1772 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1773
1774 val64 = 0;
1775 for (i = 0; i < 4; i++) {
1776 val64 |= (((u64)0xFF00 |
1777 nic->mac_control.mc_pause_threshold_q4q7)
1778 << (i * 2 * 8));
1779 }
1780 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1781
1782 /*
1783 * TxDMA will stop issuing Read requests if the number of read splits has
1784 * exceeded the limit pointed to by shared_splits
1785 */
1786 val64 = readq(&bar0->pic_control);
1787 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1788 writeq(val64, &bar0->pic_control);
1789
1790 if (nic->config.bus_speed == 266) {
1791 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1792 writeq(0x0, &bar0->read_retry_delay);
1793 writeq(0x0, &bar0->write_retry_delay);
1794 }
1795
1796 /*
1797 * Programming the Herc to split every write transaction
1798 * that does not start on an ADB to reduce disconnects.
1799 */
1800 if (nic->device_type == XFRAME_II_DEVICE) {
1801 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1802 MISC_LINK_STABILITY_PRD(3);
1803 writeq(val64, &bar0->misc_control);
1804 val64 = readq(&bar0->pic_control2);
1805 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1806 writeq(val64, &bar0->pic_control2);
1807 }
1808 if (strstr(nic->product_name, "CX4")) {
1809 val64 = TMAC_AVG_IPG(0x17);
1810 writeq(val64, &bar0->tmac_avg_ipg);
1811 }
1812
1813 return SUCCESS;
1814 }
1815 #define LINK_UP_DOWN_INTERRUPT 1
1816 #define MAC_RMAC_ERR_TIMER 2
1817
1818 static int s2io_link_fault_indication(struct s2io_nic *nic)
1819 {
1820 if (nic->device_type == XFRAME_II_DEVICE)
1821 return LINK_UP_DOWN_INTERRUPT;
1822 else
1823 return MAC_RMAC_ERR_TIMER;
1824 }
1825
1826 /**
1827 * do_s2io_write_bits - update alarm bits in alarm register
1828 * @value: alarm bits
1829 * @flag: interrupt status
1830 * @addr: address value
1831 * Description: update alarm bits in alarm register
1832 * Return Value:
1833 * NONE.
1834 */
1835 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1836 {
1837 u64 temp64;
1838
1839 temp64 = readq(addr);
1840
1841 if (flag == ENABLE_INTRS)
1842 temp64 &= ~((u64)value);
1843 else
1844 temp64 |= ((u64)value);
1845 writeq(temp64, addr);
1846 }
1847
1848 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1849 {
1850 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1851 register u64 gen_int_mask = 0;
1852 u64 interruptible;
1853
1854 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1855 if (mask & TX_DMA_INTR) {
1856 gen_int_mask |= TXDMA_INT_M;
1857
1858 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1859 TXDMA_PCC_INT | TXDMA_TTI_INT |
1860 TXDMA_LSO_INT | TXDMA_TPA_INT |
1861 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1862
1863 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1864 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1865 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1866 &bar0->pfc_err_mask);
1867
1868 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1869 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1870 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1871
1872 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1873 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1874 PCC_N_SERR | PCC_6_COF_OV_ERR |
1875 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1876 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1877 PCC_TXB_ECC_SG_ERR,
1878 flag, &bar0->pcc_err_mask);
1879
1880 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1881 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1882
1883 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1884 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1885 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1886 flag, &bar0->lso_err_mask);
1887
1888 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1889 flag, &bar0->tpa_err_mask);
1890
1891 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1892 }
1893
1894 if (mask & TX_MAC_INTR) {
1895 gen_int_mask |= TXMAC_INT_M;
1896 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1897 &bar0->mac_int_mask);
1898 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1899 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1900 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1901 flag, &bar0->mac_tmac_err_mask);
1902 }
1903
1904 if (mask & TX_XGXS_INTR) {
1905 gen_int_mask |= TXXGXS_INT_M;
1906 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1907 &bar0->xgxs_int_mask);
1908 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1909 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1910 flag, &bar0->xgxs_txgxs_err_mask);
1911 }
1912
1913 if (mask & RX_DMA_INTR) {
1914 gen_int_mask |= RXDMA_INT_M;
1915 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1916 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1917 flag, &bar0->rxdma_int_mask);
1918 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1919 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1920 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1921 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1922 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1923 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1924 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1925 &bar0->prc_pcix_err_mask);
1926 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1927 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1928 &bar0->rpa_err_mask);
1929 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1930 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1931 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1932 RDA_FRM_ECC_SG_ERR |
1933 RDA_MISC_ERR|RDA_PCIX_ERR,
1934 flag, &bar0->rda_err_mask);
1935 do_s2io_write_bits(RTI_SM_ERR_ALARM |
1936 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1937 flag, &bar0->rti_err_mask);
1938 }
1939
1940 if (mask & RX_MAC_INTR) {
1941 gen_int_mask |= RXMAC_INT_M;
1942 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1943 &bar0->mac_int_mask);
1944 interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1945 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1946 RMAC_DOUBLE_ECC_ERR);
1947 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1948 interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1949 do_s2io_write_bits(interruptible,
1950 flag, &bar0->mac_rmac_err_mask);
1951 }
1952
1953 if (mask & RX_XGXS_INTR) {
1954 gen_int_mask |= RXXGXS_INT_M;
1955 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1956 &bar0->xgxs_int_mask);
1957 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1958 &bar0->xgxs_rxgxs_err_mask);
1959 }
1960
1961 if (mask & MC_INTR) {
1962 gen_int_mask |= MC_INT_M;
1963 do_s2io_write_bits(MC_INT_MASK_MC_INT,
1964 flag, &bar0->mc_int_mask);
1965 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1966 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1967 &bar0->mc_err_mask);
1968 }
1969 nic->general_int_mask = gen_int_mask;
1970
1971 /* Remove this line when alarm interrupts are enabled */
1972 nic->general_int_mask = 0;
1973 }
1974
1975 /**
1976 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1977 * @nic: device private variable,
1978 * @mask: A mask indicating which Intr block must be modified and,
1979 * @flag: A flag indicating whether to enable or disable the Intrs.
1980 * Description: This function will either disable or enable the interrupts
1981 * depending on the flag argument. The mask argument can be used to
1982 * enable/disable any Intr block.
1983 * Return Value: NONE.
1984 */
1985
1986 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1987 {
1988 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1989 register u64 temp64 = 0, intr_mask = 0;
1990
1991 intr_mask = nic->general_int_mask;
1992
1993 /* Top level interrupt classification */
1994 /* PIC Interrupts */
1995 if (mask & TX_PIC_INTR) {
1996 /* Enable PIC Intrs in the general intr mask register */
1997 intr_mask |= TXPIC_INT_M;
1998 if (flag == ENABLE_INTRS) {
1999 /*
2000 * If this is a Hercules adapter, enable the GPIO interrupt;
2001 * otherwise disable all PCIX, Flash, MDIO, IIC and GPIO
2002 * interrupts for now.
2003 * TODO
2004 */
2005 if (s2io_link_fault_indication(nic) ==
2006 LINK_UP_DOWN_INTERRUPT) {
2007 do_s2io_write_bits(PIC_INT_GPIO, flag,
2008 &bar0->pic_int_mask);
2009 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2010 &bar0->gpio_int_mask);
2011 } else
2012 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2013 } else if (flag == DISABLE_INTRS) {
2014 /*
2015 * Disable PIC Intrs in the general
2016 * intr mask register
2017 */
2018 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2019 }
2020 }
2021
2022 /* Tx traffic interrupts */
2023 if (mask & TX_TRAFFIC_INTR) {
2024 intr_mask |= TXTRAFFIC_INT_M;
2025 if (flag == ENABLE_INTRS) {
2026 /*
2027 * Enable all the Tx side interrupts
2028 * writing 0 Enables all 64 TX interrupt levels
2029 */
2030 writeq(0x0, &bar0->tx_traffic_mask);
2031 } else if (flag == DISABLE_INTRS) {
2032 /*
2033 * Disable Tx Traffic Intrs in the general intr mask
2034 * register.
2035 */
2036 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2037 }
2038 }
2039
2040 /* Rx traffic interrupts */
2041 if (mask & RX_TRAFFIC_INTR) {
2042 intr_mask |= RXTRAFFIC_INT_M;
2043 if (flag == ENABLE_INTRS) {
2044 /* writing 0 Enables all 8 RX interrupt levels */
2045 writeq(0x0, &bar0->rx_traffic_mask);
2046 } else if (flag == DISABLE_INTRS) {
2047 /*
2048 * Disable Rx Traffic Intrs in the general intr mask
2049 * register.
2050 */
2051 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2052 }
2053 }
2054
2055 temp64 = readq(&bar0->general_int_mask);
2056 if (flag == ENABLE_INTRS)
2057 temp64 &= ~((u64)intr_mask);
2058 else
2059 temp64 = DISABLE_ALL_INTRS;
2060 writeq(temp64, &bar0->general_int_mask);
2061
2062 nic->general_int_mask = readq(&bar0->general_int_mask);
2063 }
2064
2065 /**
2066 * verify_pcc_quiescent - Checks for PCC quiescent state
2067 * @sp : private member of the device structure, which is a pointer to the
2068 * s2io_nic structure.
2069 * @flag: boolean controlling function path
2070 * Return: 1 if the PCC is quiescent
2071 * 0 if the PCC is not quiescent
2072 */
2073 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2074 {
2075 int ret = 0, herc;
2076 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2077 u64 val64 = readq(&bar0->adapter_status);
2078
2079 herc = (sp->device_type == XFRAME_II_DEVICE);
2080
2081 if (flag == false) {
2082 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2083 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2084 ret = 1;
2085 } else {
2086 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2087 ret = 1;
2088 }
2089 } else {
2090 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2091 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2092 ADAPTER_STATUS_RMAC_PCC_IDLE))
2093 ret = 1;
2094 } else {
2095 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2096 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2097 ret = 1;
2098 }
2099 }
2100
2101 return ret;
2102 }
2103 /**
2104 * verify_xena_quiescence - Checks whether the H/W is ready
2105 * @sp : private member of the device structure, which is a pointer to the
2106 * s2io_nic structure.
2107 * Description: Returns whether the H/W is ready to go or not. Depending
2108 * on whether the adapter enable bit has been written, the comparison
2109 * differs; the calling function passes an input flag to
2110 * indicate this.
2111 * Return: 1 if Xena is quiescent
2112 * 0 if Xena is not quiescent
2113 */
2114
2115 static int verify_xena_quiescence(struct s2io_nic *sp)
2116 {
2117 int mode;
2118 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2119 u64 val64 = readq(&bar0->adapter_status);
2120 mode = s2io_verify_pci_mode(sp);
2121
2122 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2123 DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2124 return 0;
2125 }
2126 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2127 DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2128 return 0;
2129 }
2130 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2131 DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2132 return 0;
2133 }
2134 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2135 DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2136 return 0;
2137 }
2138 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2139 DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2140 return 0;
2141 }
2142 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2143 DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2144 return 0;
2145 }
2146 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2147 DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2148 return 0;
2149 }
2150 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2151 DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2152 return 0;
2153 }
2154
2155 /*
2156 * In PCI 33 mode, the P_PLL is not used, and therefore,
2157 * the P_PLL_LOCK bit in the adapter_status register will
2158 * not be asserted.
2159 */
2160 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2161 sp->device_type == XFRAME_II_DEVICE &&
2162 mode != PCI_MODE_PCI_33) {
2163 DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2164 return 0;
2165 }
2166 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2167 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2168 DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2169 return 0;
2170 }
2171 return 1;
2172 }
2173
2174 /**
2175 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2176 * @sp: Pointer to device specific structure
2177 * Description :
2178 * New procedure to clear mac address reading problems on Alpha platforms
2179 *
2180 */
2181
2182 static void fix_mac_address(struct s2io_nic *sp)
2183 {
2184 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2185 int i = 0;
2186
2187 while (fix_mac[i] != END_SIGN) {
2188 writeq(fix_mac[i++], &bar0->gpio_control);
2189 udelay(10);
2190 (void) readq(&bar0->gpio_control);
2191 }
2192 }
2193
2194 /**
2195 * start_nic - Turns the device on
2196 * @nic : device private variable.
2197 * Description:
2198 * This function actually turns the device on. Before this function is
2199 * called, all registers are configured from their reset states
2200 * and shared memory is allocated but the NIC is still quiescent. On
2201 * calling this function, the device interrupts are cleared and the NIC is
2202 * literally switched on by writing into the adapter control register.
2203 * Return Value:
2204 * SUCCESS on success and -1 on failure.
2205 */
2206
2207 static int start_nic(struct s2io_nic *nic)
2208 {
2209 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2210 struct net_device *dev = nic->dev;
2211 register u64 val64 = 0;
2212 u16 subid, i;
2213 struct config_param *config = &nic->config;
2214 struct mac_info *mac_control = &nic->mac_control;
2215
2216 /* PRC Initialization and configuration */
2217 for (i = 0; i < config->rx_ring_num; i++) {
2218 struct ring_info *ring = &mac_control->rings[i];
2219
2220 writeq((u64)ring->rx_blocks[0].block_dma_addr,
2221 &bar0->prc_rxd0_n[i]);
2222
2223 val64 = readq(&bar0->prc_ctrl_n[i]);
2224 if (nic->rxd_mode == RXD_MODE_1)
2225 val64 |= PRC_CTRL_RC_ENABLED;
2226 else
2227 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2228 if (nic->device_type == XFRAME_II_DEVICE)
2229 val64 |= PRC_CTRL_GROUP_READS;
2230 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2231 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2232 writeq(val64, &bar0->prc_ctrl_n[i]);
2233 }
2234
2235 if (nic->rxd_mode == RXD_MODE_3B) {
2236 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2237 val64 = readq(&bar0->rx_pa_cfg);
2238 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2239 writeq(val64, &bar0->rx_pa_cfg);
2240 }
2241
2242 if (vlan_tag_strip == 0) {
2243 val64 = readq(&bar0->rx_pa_cfg);
2244 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2245 writeq(val64, &bar0->rx_pa_cfg);
2246 nic->vlan_strip_flag = 0;
2247 }
2248
2249 /*
2250 * Enabling MC-RLDRAM. After enabling the device, we timeout
2251 * for around 100ms, which is approximately the time required
2252 * for the device to be ready for operation.
2253 */
2254 val64 = readq(&bar0->mc_rldram_mrs);
2255 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2256 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2257 val64 = readq(&bar0->mc_rldram_mrs);
2258
2259 msleep(100); /* Delay by around 100 ms. */
2260
2261 /* Enabling ECC Protection. */
2262 val64 = readq(&bar0->adapter_control);
2263 val64 &= ~ADAPTER_ECC_EN;
2264 writeq(val64, &bar0->adapter_control);
2265
2266 /*
2267 * Verify if the device is ready to be enabled, if so enable
2268 * it.
2269 */
2270 val64 = readq(&bar0->adapter_status);
2271 if (!verify_xena_quiescence(nic)) {
2272 DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2273 "Adapter status reads: 0x%llx\n",
2274 dev->name, (unsigned long long)val64);
2275 return FAILURE;
2276 }
2277
2278 /*
2279 * With some switches, link might be already up at this point.
2280 * Because of this weird behavior, when we enable laser,
2281 * we may not get link. We need to handle this. We cannot
2282 * figure out which switch is misbehaving. So we are forced to
2283 * make a global change.
2284 */
2285
2286 /* Enabling Laser. */
2287 val64 = readq(&bar0->adapter_control);
2288 val64 |= ADAPTER_EOI_TX_ON;
2289 writeq(val64, &bar0->adapter_control);
2290
2291 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2292 /*
2293 * We don't see link state interrupts initially on some switches,
2294 * so directly scheduling the link state task here.
2295 */
2296 schedule_work(&nic->set_link_task);
2297 }
2298 /* SXE-002: Initialize link and activity LED */
2299 subid = nic->pdev->subsystem_device;
2300 if (((subid & 0xFF) >= 0x07) &&
2301 (nic->device_type == XFRAME_I_DEVICE)) {
2302 val64 = readq(&bar0->gpio_control);
2303 val64 |= 0x0000800000000000ULL;
2304 writeq(val64, &bar0->gpio_control);
2305 val64 = 0x0411040400000000ULL;
2306 writeq(val64, (void __iomem *)bar0 + 0x2700);
2307 }
2308
2309 return SUCCESS;
2310 }
2311 /**
2312 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2313 * @fifo_data: fifo data pointer
2314 * @txdlp: descriptor
2315 * @get_off: unused
2316 */
2317 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2318 struct TxD *txdlp, int get_off)
2319 {
2320 struct s2io_nic *nic = fifo_data->nic;
2321 struct sk_buff *skb;
2322 struct TxD *txds;
2323 u16 j, frg_cnt;
2324
2325 txds = txdlp;
2326 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2327 dma_unmap_single(&nic->pdev->dev,
2328 (dma_addr_t)txds->Buffer_Pointer,
2329 sizeof(u64), DMA_TO_DEVICE);
2330 txds++;
2331 }
2332
2333 skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2334 if (!skb) {
2335 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2336 return NULL;
2337 }
2338 dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
2339 skb_headlen(skb), DMA_TO_DEVICE);
2340 frg_cnt = skb_shinfo(skb)->nr_frags;
2341 if (frg_cnt) {
2342 txds++;
2343 for (j = 0; j < frg_cnt; j++, txds++) {
2344 const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2345 if (!txds->Buffer_Pointer)
2346 break;
2347 dma_unmap_page(&nic->pdev->dev,
2348 (dma_addr_t)txds->Buffer_Pointer,
2349 skb_frag_size(frag), DMA_TO_DEVICE);
2350 }
2351 }
2352 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2353 return skb;
2354 }
2355
2356 /**
2357 * free_tx_buffers - Free all queued Tx buffers
2358 * @nic : device private variable.
2359 * Description:
2360 * Free all queued Tx buffers.
2361 * Return Value: void
2362 */
2363
2364 static void free_tx_buffers(struct s2io_nic *nic)
2365 {
2366 struct net_device *dev = nic->dev;
2367 struct sk_buff *skb;
2368 struct TxD *txdp;
2369 int i, j;
2370 int cnt = 0;
2371 struct config_param *config = &nic->config;
2372 struct mac_info *mac_control = &nic->mac_control;
2373 struct stat_block *stats = mac_control->stats_info;
2374 struct swStat *swstats = &stats->sw_stat;
2375
2376 for (i = 0; i < config->tx_fifo_num; i++) {
2377 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2378 struct fifo_info *fifo = &mac_control->fifos[i];
2379 unsigned long flags;
2380
2381 spin_lock_irqsave(&fifo->tx_lock, flags);
2382 for (j = 0; j < tx_cfg->fifo_len; j++) {
2383 txdp = fifo->list_info[j].list_virt_addr;
2384 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2385 if (skb) {
2386 swstats->mem_freed += skb->truesize;
2387 dev_kfree_skb_irq(skb);
2388 cnt++;
2389 }
2390 }
2391 DBG_PRINT(INTR_DBG,
2392 "%s: forcibly freeing %d skbs on FIFO%d\n",
2393 dev->name, cnt, i);
2394 fifo->tx_curr_get_info.offset = 0;
2395 fifo->tx_curr_put_info.offset = 0;
2396 spin_unlock_irqrestore(&fifo->tx_lock, flags);
2397 }
2398 }
2399
2400 /**
2401 * stop_nic - To stop the nic
2402 * @nic : device private variable.
2403 * Description:
2404 * This function does exactly the opposite of what the start_nic()
2405 * function does. This function is called to stop the device.
2406 * Return Value:
2407 * void.
2408 */
2409
2410 static void stop_nic(struct s2io_nic *nic)
2411 {
2412 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2413 register u64 val64 = 0;
2414 u16 interruptible;
2415
2416 /* Disable all interrupts */
2417 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2418 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2419 interruptible |= TX_PIC_INTR;
2420 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2421
2422 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2423 val64 = readq(&bar0->adapter_control);
2424 val64 &= ~(ADAPTER_CNTL_EN);
2425 writeq(val64, &bar0->adapter_control);
2426 }
2427
2428 /**
2429 * fill_rx_buffers - Allocates the Rx side skbs
2430 * @nic : device private variable.
2431 * @ring: per ring structure
2432 * @from_card_up: If this is true, we will map the buffer to get
2433 * the dma address for buf0 and buf1 to give it to the card.
2434 * Else we will sync the already mapped buffer to give it to the card.
2435 * Description:
2436 * The function allocates Rx side skbs and puts the physical
2437 * address of these buffers into the RxD buffer pointers, so that the NIC
2438 * can DMA the received frame into these locations.
2439 * The NIC supports 3 receive modes, viz
2440 * 1. single buffer,
2441 * 2. three buffer and
2442 * 3. Five buffer modes.
2443 * Each mode defines how many fragments the received frame will be split
2444 * up into by the NIC. The frame is split into L3 header, L4 Header,
2445 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2446 * is split into 3 fragments. As of now only single buffer mode is
2447 * supported.
2448 * Return Value:
2449 * SUCCESS on success or an appropriate -ve value on failure.
2450 */
2451 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2452 int from_card_up)
2453 {
2454 struct sk_buff *skb;
2455 struct RxD_t *rxdp;
2456 int off, size, block_no, block_no1;
2457 u32 alloc_tab = 0;
2458 u32 alloc_cnt;
2459 u64 tmp;
2460 struct buffAdd *ba;
2461 struct RxD_t *first_rxdp = NULL;
2462 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2463 struct RxD1 *rxdp1;
2464 struct RxD3 *rxdp3;
2465 struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2466
2467 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2468
2469 block_no1 = ring->rx_curr_get_info.block_index;
2470 while (alloc_tab < alloc_cnt) {
2471 block_no = ring->rx_curr_put_info.block_index;
2472
2473 off = ring->rx_curr_put_info.offset;
2474
2475 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2476
2477 if ((block_no == block_no1) &&
2478 (off == ring->rx_curr_get_info.offset) &&
2479 (rxdp->Host_Control)) {
2480 DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2481 ring->dev->name);
2482 goto end;
2483 }
2484 if (off && (off == ring->rxd_count)) {
2485 ring->rx_curr_put_info.block_index++;
2486 if (ring->rx_curr_put_info.block_index ==
2487 ring->block_count)
2488 ring->rx_curr_put_info.block_index = 0;
2489 block_no = ring->rx_curr_put_info.block_index;
2490 off = 0;
2491 ring->rx_curr_put_info.offset = off;
2492 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2493 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2494 ring->dev->name, rxdp);
2495
2496 }
2497
2498 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2499 ((ring->rxd_mode == RXD_MODE_3B) &&
2500 (rxdp->Control_2 & s2BIT(0)))) {
2501 ring->rx_curr_put_info.offset = off;
2502 goto end;
2503 }
2504 /* calculate size of skb based on ring mode */
2505 size = ring->mtu +
2506 HEADER_ETHERNET_II_802_3_SIZE +
2507 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2508 if (ring->rxd_mode == RXD_MODE_1)
2509 size += NET_IP_ALIGN;
2510 else
2511 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2512
2513 /* allocate skb */
2514 skb = netdev_alloc_skb(nic->dev, size);
2515 if (!skb) {
2516 DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2517 ring->dev->name);
2518 if (first_rxdp) {
2519 dma_wmb();
2520 first_rxdp->Control_1 |= RXD_OWN_XENA;
2521 }
2522 swstats->mem_alloc_fail_cnt++;
2523
2524 return -ENOMEM ;
2525 }
2526 swstats->mem_allocated += skb->truesize;
2527
2528 if (ring->rxd_mode == RXD_MODE_1) {
2529 /* 1 buffer mode - normal operation mode */
2530 rxdp1 = (struct RxD1 *)rxdp;
2531 memset(rxdp, 0, sizeof(struct RxD1));
2532 skb_reserve(skb, NET_IP_ALIGN);
2533 rxdp1->Buffer0_ptr =
2534 dma_map_single(&ring->pdev->dev, skb->data,
2535 size - NET_IP_ALIGN,
2536 DMA_FROM_DEVICE);
2537 if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
2538 goto pci_map_failed;
2539
2540 rxdp->Control_2 =
2541 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2542 rxdp->Host_Control = (unsigned long)skb;
2543 } else if (ring->rxd_mode == RXD_MODE_3B) {
2544 /*
2545 * 2 buffer mode -
2546 * 2 buffer mode provides 128
2547 * byte aligned receive buffers.
2548 */
2549
2550 rxdp3 = (struct RxD3 *)rxdp;
2551 /* save buffer pointers to avoid frequent dma mapping */
2552 Buffer0_ptr = rxdp3->Buffer0_ptr;
2553 Buffer1_ptr = rxdp3->Buffer1_ptr;
2554 memset(rxdp, 0, sizeof(struct RxD3));
2555 /* restore the buffer pointers for dma sync*/
2556 rxdp3->Buffer0_ptr = Buffer0_ptr;
2557 rxdp3->Buffer1_ptr = Buffer1_ptr;
2558
2559 ba = &ring->ba[block_no][off];
2560 skb_reserve(skb, BUF0_LEN);
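/*
 * Round skb->data up to the next (ALIGN_SIZE + 1)-byte boundary;
 * this aligned pointer is what gets mapped as Buffer2 further down.
 */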
2561 tmp = (u64)(unsigned long)skb->data;
2562 tmp += ALIGN_SIZE;
2563 tmp &= ~ALIGN_SIZE;
2564 skb->data = (void *) (unsigned long)tmp;
2565 skb_reset_tail_pointer(skb);
2566
2567 if (from_card_up) {
2568 rxdp3->Buffer0_ptr =
2569 dma_map_single(&ring->pdev->dev,
2570 ba->ba_0, BUF0_LEN,
2571 DMA_FROM_DEVICE);
2572 if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
2573 goto pci_map_failed;
2574 } else
2575 dma_sync_single_for_device(&ring->pdev->dev,
2576 (dma_addr_t)rxdp3->Buffer0_ptr,
2577 BUF0_LEN,
2578 DMA_FROM_DEVICE);
2579
2580 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2581 if (ring->rxd_mode == RXD_MODE_3B) {
2582 /* Two buffer mode */
2583
2584 /*
2585 * Buffer2 will have L3/L4 header plus
2586 * L4 payload
2587 */
2588 rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
2589 skb->data,
2590 ring->mtu + 4,
2591 DMA_FROM_DEVICE);
2592
2593 if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
2594 goto pci_map_failed;
2595
2596 if (from_card_up) {
2597 rxdp3->Buffer1_ptr =
2598 dma_map_single(&ring->pdev->dev,
2599 ba->ba_1,
2600 BUF1_LEN,
2601 DMA_FROM_DEVICE);
2602
2603 if (dma_mapping_error(&nic->pdev->dev,
2604 rxdp3->Buffer1_ptr)) {
2605 dma_unmap_single(&ring->pdev->dev,
2606 (dma_addr_t)(unsigned long)
2607 skb->data,
2608 ring->mtu + 4,
2609 DMA_FROM_DEVICE);
2610 goto pci_map_failed;
2611 }
2612 }
2613 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2614 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2615 (ring->mtu + 4);
2616 }
2617 rxdp->Control_2 |= s2BIT(0);
2618 rxdp->Host_Control = (unsigned long) (skb);
2619 }
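/*
 * Hand the descriptor to the NIC right away, except for the first
 * descriptor of each batch of 2^rxsync_frequency descriptors; that
 * one (tracked as first_rxdp) is given to the NIC only after the
 * dma_wmb() below, so the NIC never sees a partially written batch.
 */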
2620 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2621 rxdp->Control_1 |= RXD_OWN_XENA;
2622 off++;
2623 if (off == (ring->rxd_count + 1))
2624 off = 0;
2625 ring->rx_curr_put_info.offset = off;
2626
2627 rxdp->Control_2 |= SET_RXD_MARKER;
2628 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2629 if (first_rxdp) {
2630 dma_wmb();
2631 first_rxdp->Control_1 |= RXD_OWN_XENA;
2632 }
2633 first_rxdp = rxdp;
2634 }
2635 ring->rx_bufs_left += 1;
2636 alloc_tab++;
2637 }
2638
2639 end:
2640 /* Transfer ownership of first descriptor to adapter just before
2641 * exiting. Before that, use memory barrier so that ownership
2642 * and other fields are seen by adapter correctly.
2643 */
2644 if (first_rxdp) {
2645 dma_wmb();
2646 first_rxdp->Control_1 |= RXD_OWN_XENA;
2647 }
2648
2649 return SUCCESS;
2650
2651 pci_map_failed:
2652 swstats->pci_map_fail_cnt++;
2653 swstats->mem_freed += skb->truesize;
2654 dev_kfree_skb_irq(skb);
2655 return -ENOMEM;
2656 }
2657
2658 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2659 {
2660 struct net_device *dev = sp->dev;
2661 int j;
2662 struct sk_buff *skb;
2663 struct RxD_t *rxdp;
2664 struct RxD1 *rxdp1;
2665 struct RxD3 *rxdp3;
2666 struct mac_info *mac_control = &sp->mac_control;
2667 struct stat_block *stats = mac_control->stats_info;
2668 struct swStat *swstats = &stats->sw_stat;
2669
2670 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2671 rxdp = mac_control->rings[ring_no].
2672 rx_blocks[blk].rxds[j].virt_addr;
2673 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2674 if (!skb)
2675 continue;
2676 if (sp->rxd_mode == RXD_MODE_1) {
2677 rxdp1 = (struct RxD1 *)rxdp;
2678 dma_unmap_single(&sp->pdev->dev,
2679 (dma_addr_t)rxdp1->Buffer0_ptr,
2680 dev->mtu +
2681 HEADER_ETHERNET_II_802_3_SIZE +
2682 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2683 DMA_FROM_DEVICE);
2684 memset(rxdp, 0, sizeof(struct RxD1));
2685 } else if (sp->rxd_mode == RXD_MODE_3B) {
2686 rxdp3 = (struct RxD3 *)rxdp;
2687 dma_unmap_single(&sp->pdev->dev,
2688 (dma_addr_t)rxdp3->Buffer0_ptr,
2689 BUF0_LEN, DMA_FROM_DEVICE);
2690 dma_unmap_single(&sp->pdev->dev,
2691 (dma_addr_t)rxdp3->Buffer1_ptr,
2692 BUF1_LEN, DMA_FROM_DEVICE);
2693 dma_unmap_single(&sp->pdev->dev,
2694 (dma_addr_t)rxdp3->Buffer2_ptr,
2695 dev->mtu + 4, DMA_FROM_DEVICE);
2696 memset(rxdp, 0, sizeof(struct RxD3));
2697 }
2698 swstats->mem_freed += skb->truesize;
2699 dev_kfree_skb(skb);
2700 mac_control->rings[ring_no].rx_bufs_left -= 1;
2701 }
2702 }
2703
2704 /**
2705 * free_rx_buffers - Frees all Rx buffers
2706 * @sp: device private variable.
2707 * Description:
2708 * This function will free all Rx buffers allocated by host.
2709 * Return Value:
2710 * NONE.
2711 */
2712
2713 static void free_rx_buffers(struct s2io_nic *sp)
2714 {
2715 struct net_device *dev = sp->dev;
2716 int i, blk = 0, buf_cnt = 0;
2717 struct config_param *config = &sp->config;
2718 struct mac_info *mac_control = &sp->mac_control;
2719
2720 for (i = 0; i < config->rx_ring_num; i++) {
2721 struct ring_info *ring = &mac_control->rings[i];
2722
2723 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2724 free_rxd_blk(sp, i, blk);
2725
2726 ring->rx_curr_put_info.block_index = 0;
2727 ring->rx_curr_get_info.block_index = 0;
2728 ring->rx_curr_put_info.offset = 0;
2729 ring->rx_curr_get_info.offset = 0;
2730 ring->rx_bufs_left = 0;
2731 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2732 dev->name, buf_cnt, i);
2733 }
2734 }
2735
2736 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2737 {
2738 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2739 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2740 ring->dev->name);
2741 }
2742 return 0;
2743 }
2744
2745 /**
2746 * s2io_poll - Rx interrupt handler for NAPI support
2747 * @napi : pointer to the napi structure.
2748 * @budget : The number of packets that were budgeted to be processed
2749 * during one pass through the 'Poll' function.
2750 * Description:
2751 * Comes into picture only if NAPI support has been incorporated. It does
2752 * the same thing that rx_intr_handler does, but not in an interrupt
2753 * context; also, it will process only a given number of packets.
2754 * Return value:
2755 * 0 on success and 1 if there are No Rx packets to be processed.
2756 */
2757
2758 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2759 {
2760 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2761 struct net_device *dev = ring->dev;
2762 int pkts_processed = 0;
2763 u8 __iomem *addr = NULL;
2764 u8 val8 = 0;
2765 struct s2io_nic *nic = netdev_priv(dev);
2766 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2767 int budget_org = budget;
2768
2769 if (unlikely(!is_s2io_card_up(nic)))
2770 return 0;
2771
2772 pkts_processed = rx_intr_handler(ring, budget);
2773 s2io_chk_rx_buffers(nic, ring);
2774
2775 if (pkts_processed < budget_org) {
2776 napi_complete_done(napi, pkts_processed);
2777 /*Re Enable MSI-Rx Vector*/
2778 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2779 addr += 7 - ring->ring_no;
2780 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2781 writeb(val8, addr);
2782 val8 = readb(addr);
2783 }
2784 return pkts_processed;
2785 }
2786
2787 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2788 {
2789 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2790 int pkts_processed = 0;
2791 int ring_pkts_processed, i;
2792 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2793 int budget_org = budget;
2794 struct config_param *config = &nic->config;
2795 struct mac_info *mac_control = &nic->mac_control;
2796
2797 if (unlikely(!is_s2io_card_up(nic)))
2798 return 0;
2799
2800 for (i = 0; i < config->rx_ring_num; i++) {
2801 struct ring_info *ring = &mac_control->rings[i];
2802 ring_pkts_processed = rx_intr_handler(ring, budget);
2803 s2io_chk_rx_buffers(nic, ring);
2804 pkts_processed += ring_pkts_processed;
2805 budget -= ring_pkts_processed;
2806 if (budget <= 0)
2807 break;
2808 }
2809 if (pkts_processed < budget_org) {
2810 napi_complete_done(napi, pkts_processed);
2811 /* Re enable the Rx interrupts for the ring */
2812 writeq(0, &bar0->rx_traffic_mask);
2813 readl(&bar0->rx_traffic_mask);
2814 }
2815 return pkts_processed;
2816 }
2817
2818 #ifdef CONFIG_NET_POLL_CONTROLLER
2819 /**
2820 * s2io_netpoll - netpoll event handler entry point
2821 * @dev : pointer to the device structure.
2822 * Description:
2823 * This function will be called by upper layer to check for events on the
2824 * interface in situations where interrupts are disabled. It is used for
2825 * specific in-kernel networking tasks, such as remote consoles and kernel
2826 * debugging over the network (for example, netdump in Red Hat).
2827 */
2828 static void s2io_netpoll(struct net_device *dev)
2829 {
2830 struct s2io_nic *nic = netdev_priv(dev);
2831 const int irq = nic->pdev->irq;
2832 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2833 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2834 int i;
2835 struct config_param *config = &nic->config;
2836 struct mac_info *mac_control = &nic->mac_control;
2837
2838 if (pci_channel_offline(nic->pdev))
2839 return;
2840
2841 disable_irq(irq);
2842
2843 writeq(val64, &bar0->rx_traffic_int);
2844 writeq(val64, &bar0->tx_traffic_int);
2845
2846 /* We need to free the transmitted skbs, or else netpoll will
2847 * run out of skbs and fail, and eventually a netpoll application such
2848 * as netdump will fail.
2849 */
2850 for (i = 0; i < config->tx_fifo_num; i++)
2851 tx_intr_handler(&mac_control->fifos[i]);
2852
2853 /* check for received packet and indicate up to network */
2854 for (i = 0; i < config->rx_ring_num; i++) {
2855 struct ring_info *ring = &mac_control->rings[i];
2856
2857 rx_intr_handler(ring, 0);
2858 }
2859
2860 for (i = 0; i < config->rx_ring_num; i++) {
2861 struct ring_info *ring = &mac_control->rings[i];
2862
2863 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2864 DBG_PRINT(INFO_DBG,
2865 "%s: Out of memory in Rx Netpoll!!\n",
2866 dev->name);
2867 break;
2868 }
2869 }
2870 enable_irq(irq);
2871 }
2872 #endif
2873
2874 /**
2875 * rx_intr_handler - Rx interrupt handler
2876 * @ring_data: per ring structure.
2877 * @budget: budget for napi processing.
2878 * Description:
2879 * If the interrupt is because of a received frame or if the
2880 * receive ring contains fresh, as yet unprocessed frames, this function is
2881 * called. It picks out the RxD at which place the last Rx processing had
2882 * stopped and sends the skb to the OSM's Rx handler and then increments
2883 * the offset.
2884 * Return Value:
2885 * No. of napi packets processed.
2886 */
2887 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2888 {
2889 int get_block, put_block;
2890 struct rx_curr_get_info get_info, put_info;
2891 struct RxD_t *rxdp;
2892 struct sk_buff *skb;
2893 int pkt_cnt = 0, napi_pkts = 0;
2894 int i;
2895 struct RxD1 *rxdp1;
2896 struct RxD3 *rxdp3;
2897
2898 if (budget <= 0)
2899 return napi_pkts;
2900
2901 get_info = ring_data->rx_curr_get_info;
2902 get_block = get_info.block_index;
2903 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2904 put_block = put_info.block_index;
2905 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2906
2907 while (RXD_IS_UP2DT(rxdp)) {
2908 /*
2909 * If we are next to the put index then it is a
2910 * FIFO full condition
2911 */
2912 if ((get_block == put_block) &&
2913 (get_info.offset + 1) == put_info.offset) {
2914 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2915 ring_data->dev->name);
2916 break;
2917 }
2918 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2919 if (skb == NULL) {
2920 DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2921 ring_data->dev->name);
2922 return 0;
2923 }
2924 if (ring_data->rxd_mode == RXD_MODE_1) {
2925 rxdp1 = (struct RxD1 *)rxdp;
2926 dma_unmap_single(&ring_data->pdev->dev,
2927 (dma_addr_t)rxdp1->Buffer0_ptr,
2928 ring_data->mtu +
2929 HEADER_ETHERNET_II_802_3_SIZE +
2930 HEADER_802_2_SIZE +
2931 HEADER_SNAP_SIZE,
2932 DMA_FROM_DEVICE);
2933 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
2934 rxdp3 = (struct RxD3 *)rxdp;
2935 dma_sync_single_for_cpu(&ring_data->pdev->dev,
2936 (dma_addr_t)rxdp3->Buffer0_ptr,
2937 BUF0_LEN, DMA_FROM_DEVICE);
2938 dma_unmap_single(&ring_data->pdev->dev,
2939 (dma_addr_t)rxdp3->Buffer2_ptr,
2940 ring_data->mtu + 4, DMA_FROM_DEVICE);
2941 }
2942 prefetch(skb->data);
2943 rx_osm_handler(ring_data, rxdp);
2944 get_info.offset++;
2945 ring_data->rx_curr_get_info.offset = get_info.offset;
2946 rxdp = ring_data->rx_blocks[get_block].
2947 rxds[get_info.offset].virt_addr;
2948 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2949 get_info.offset = 0;
2950 ring_data->rx_curr_get_info.offset = get_info.offset;
2951 get_block++;
2952 if (get_block == ring_data->block_count)
2953 get_block = 0;
2954 ring_data->rx_curr_get_info.block_index = get_block;
2955 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2956 }
2957
2958 if (ring_data->nic->config.napi) {
2959 budget--;
2960 napi_pkts++;
2961 if (!budget)
2962 break;
2963 }
2964 pkt_cnt++;
2965 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2966 break;
2967 }
2968 if (ring_data->lro) {
2969 /* Clear all LRO sessions before exiting */
2970 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2971 struct lro *lro = &ring_data->lro0_n[i];
2972 if (lro->in_use) {
2973 update_L3L4_header(ring_data->nic, lro);
2974 queue_rx_frame(lro->parent, lro->vlan_tag);
2975 clear_lro_session(lro);
2976 }
2977 }
2978 }
2979 return napi_pkts;
2980 }
2981
2982 /**
2983 * tx_intr_handler - Transmit interrupt handler
2984 * @fifo_data : fifo data pointer
2985 * Description:
2986 * If an interrupt was raised to indicate DMA complete of the
2987 * Tx packet, this function is called. It identifies the last TxD
2988 * whose buffer was freed and frees all skbs whose data have already
2989 * DMA'ed into the NICs internal memory.
2990 * Return Value:
2991 * NONE
2992 */
2993
2994 static void tx_intr_handler(struct fifo_info *fifo_data)
2995 {
2996 struct s2io_nic *nic = fifo_data->nic;
2997 struct tx_curr_get_info get_info, put_info;
2998 struct sk_buff *skb = NULL;
2999 struct TxD *txdlp;
3000 int pkt_cnt = 0;
3001 unsigned long flags = 0;
3002 u8 err_mask;
3003 struct stat_block *stats = nic->mac_control.stats_info;
3004 struct swStat *swstats = &stats->sw_stat;
3005
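/* Bail out if another context already holds this FIFO's tx_lock */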
3006 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3007 return;
3008
3009 get_info = fifo_data->tx_curr_get_info;
3010 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3011 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3012 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3013 (get_info.offset != put_info.offset) &&
3014 (txdlp->Host_Control)) {
3015 /* Check for TxD errors */
3016 if (txdlp->Control_1 & TXD_T_CODE) {
3017 unsigned long long err;
3018 err = txdlp->Control_1 & TXD_T_CODE;
3019 if (err & 0x1) {
3020 swstats->parity_err_cnt++;
3021 }
3022
3023 /* update t_code statistics */
3024 err_mask = err >> 48;
3025 switch (err_mask) {
3026 case 2:
3027 swstats->tx_buf_abort_cnt++;
3028 break;
3029
3030 case 3:
3031 swstats->tx_desc_abort_cnt++;
3032 break;
3033
3034 case 7:
3035 swstats->tx_parity_err_cnt++;
3036 break;
3037
3038 case 10:
3039 swstats->tx_link_loss_cnt++;
3040 break;
3041
3042 case 15:
3043 swstats->tx_list_proc_err_cnt++;
3044 break;
3045 }
3046 }
3047
3048 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3049 if (skb == NULL) {
3050 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3051 DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3052 __func__);
3053 return;
3054 }
3055 pkt_cnt++;
3056
3057 /* Updating the statistics block */
3058 swstats->mem_freed += skb->truesize;
3059 dev_consume_skb_irq(skb);
3060
3061 get_info.offset++;
3062 if (get_info.offset == get_info.fifo_len + 1)
3063 get_info.offset = 0;
3064 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3065 fifo_data->tx_curr_get_info.offset = get_info.offset;
3066 }
3067
3068 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3069
3070 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3071 }
3072
3073 /**
3074 * s2io_mdio_write - Function to write into MDIO registers
3075 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3076 * @addr : address value
3077 * @value : data value
3078 * @dev : pointer to net_device structure
3079 * Description:
3080 * This function is used to write values to the MDIO registers
3081 * NONE
3082 */
3083 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3084 struct net_device *dev)
3085 {
3086 u64 val64;
3087 struct s2io_nic *sp = netdev_priv(dev);
3088 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3089
3090 /* address transaction */
3091 val64 = MDIO_MMD_INDX_ADDR(addr) |
3092 MDIO_MMD_DEV_ADDR(mmd_type) |
3093 MDIO_MMS_PRT_ADDR(0x0);
3094 writeq(val64, &bar0->mdio_control);
3095 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3096 writeq(val64, &bar0->mdio_control);
3097 udelay(100);
3098
3099 /* Data transaction */
3100 val64 = MDIO_MMD_INDX_ADDR(addr) |
3101 MDIO_MMD_DEV_ADDR(mmd_type) |
3102 MDIO_MMS_PRT_ADDR(0x0) |
3103 MDIO_MDIO_DATA(value) |
3104 MDIO_OP(MDIO_OP_WRITE_TRANS);
3105 writeq(val64, &bar0->mdio_control);
3106 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3107 writeq(val64, &bar0->mdio_control);
3108 udelay(100);
3109
3110 val64 = MDIO_MMD_INDX_ADDR(addr) |
3111 MDIO_MMD_DEV_ADDR(mmd_type) |
3112 MDIO_MMS_PRT_ADDR(0x0) |
3113 MDIO_OP(MDIO_OP_READ_TRANS);
3114 writeq(val64, &bar0->mdio_control);
3115 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3116 writeq(val64, &bar0->mdio_control);
3117 udelay(100);
3118 }
3119
3120 /**
3121 * s2io_mdio_read - Function to read from MDIO registers
3122 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3123 * @addr : address value
3124 * @dev : pointer to net_device structure
3125 * Description:
3126 * This function is used to read values from the MDIO registers
3127 * NONE
3128 */
3129 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3130 {
3131 u64 val64 = 0x0;
3132 u64 rval64 = 0x0;
3133 struct s2io_nic *sp = netdev_priv(dev);
3134 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3135
3136 /* address transaction */
3137 val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3138 | MDIO_MMD_DEV_ADDR(mmd_type)
3139 | MDIO_MMS_PRT_ADDR(0x0));
3140 writeq(val64, &bar0->mdio_control);
3141 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3142 writeq(val64, &bar0->mdio_control);
3143 udelay(100);
3144
3145 /* Data transaction */
3146 val64 = MDIO_MMD_INDX_ADDR(addr) |
3147 MDIO_MMD_DEV_ADDR(mmd_type) |
3148 MDIO_MMS_PRT_ADDR(0x0) |
3149 MDIO_OP(MDIO_OP_READ_TRANS);
3150 writeq(val64, &bar0->mdio_control);
3151 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3152 writeq(val64, &bar0->mdio_control);
3153 udelay(100);
3154
3155 /* Read the value from regs */
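/* The read data is returned in bits 31:16 of mdio_control */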
3156 rval64 = readq(&bar0->mdio_control);
3157 rval64 = rval64 & 0xFFFF0000;
3158 rval64 = rval64 >> 16;
3159 return rval64;
3160 }
3161
3162 /**
3163 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3164 * @counter : counter value to be updated
3165 * @regs_stat : registers status
3166 * @index : index
3167 * @flag : flag to indicate the status
3168 * @type : counter type
3169 * Description:
3170 * This function checks the status of the XPAK counter values
3171 * NONE
3172 */
3173
3174 static void s2io_chk_xpak_counter(u64 *counter, u64 *regs_stat, u32 index,
3175 u16 flag, u16 type)
3176 {
3177 u64 mask = 0x3;
3178 u64 val64;
3179 int i;
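/* Build a 2-bit mask selecting this counter's field within *regs_stat */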
3180 for (i = 0; i < index; i++)
3181 mask = mask << 0x2;
3182
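/*
 * While the alarm is asserted, increment the 64-bit counter and the
 * 2-bit per-alarm field kept in *regs_stat; once that field reaches 3,
 * log a warning and reset it to 0.  When the alarm is clear, simply
 * clear the field.
 */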
3183 if (flag > 0) {
3184 *counter = *counter + 1;
3185 val64 = *regs_stat & mask;
3186 val64 = val64 >> (index * 0x2);
3187 val64 = val64 + 1;
3188 if (val64 == 3) {
3189 switch (type) {
3190 case 1:
3191 DBG_PRINT(ERR_DBG,
3192 "Take Xframe NIC out of service.\n");
3193 DBG_PRINT(ERR_DBG,
3194 "Excessive temperatures may result in premature transceiver failure.\n");
3195 break;
3196 case 2:
3197 DBG_PRINT(ERR_DBG,
3198 "Take Xframe NIC out of service.\n");
3199 DBG_PRINT(ERR_DBG,
3200 "Excessive bias currents may indicate imminent laser diode failure.\n");
3201 break;
3202 case 3:
3203 DBG_PRINT(ERR_DBG,
3204 "Take Xframe NIC out of service.\n");
3205 DBG_PRINT(ERR_DBG,
3206 "Excessive laser output power may saturate far-end receiver.\n");
3207 break;
3208 default:
3209 DBG_PRINT(ERR_DBG,
3210 "Incorrect XPAK Alarm type\n");
3211 }
3212 val64 = 0x0;
3213 }
3214 val64 = val64 << (index * 0x2);
3215 *regs_stat = (*regs_stat & (~mask)) | (val64);
3216
3217 } else {
3218 *regs_stat = *regs_stat & (~mask);
3219 }
3220 }
3221
3222 /**
3223 * s2io_updt_xpak_counter - Function to update the xpak counters
3224 * @dev : pointer to net_device struct
3225 * Description:
3226 * This function updates the status of the XPAK counter values
3227 * NONE
3228 */
3229 static void s2io_updt_xpak_counter(struct net_device *dev)
3230 {
3231 u16 flag = 0x0;
3232 u16 type = 0x0;
3233 u16 val16 = 0x0;
3234 u64 val64 = 0x0;
3235 u64 addr = 0x0;
3236
3237 struct s2io_nic *sp = netdev_priv(dev);
3238 struct stat_block *stats = sp->mac_control.stats_info;
3239 struct xpakStat *xstats = &stats->xpak_stat;
3240
3241 /* Check the communication with the MDIO slave */
3242 addr = MDIO_CTRL1;
3243 val64 = 0x0;
3244 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3245 if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3246 DBG_PRINT(ERR_DBG,
3247 "ERR: MDIO slave access failed - Returned %llx\n",
3248 (unsigned long long)val64);
3249 return;
3250 }
3251
3252 /* Check for the expected value of control reg 1 */
3253 if (val64 != MDIO_CTRL1_SPEED10G) {
3254 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3255 "Returned: %llx- Expected: 0x%x\n",
3256 (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3257 return;
3258 }
3259
3260 /* Loading the DOM register to MDIO register */
3261 addr = 0xA100;
3262 s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3263 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3264
3265 /* Reading the Alarm flags */
3266 addr = 0xA070;
3267 val64 = 0x0;
3268 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3269
3270 flag = CHECKBIT(val64, 0x7);
3271 type = 1;
3272 s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3273 &xstats->xpak_regs_stat,
3274 0x0, flag, type);
3275
3276 if (CHECKBIT(val64, 0x6))
3277 xstats->alarm_transceiver_temp_low++;
3278
3279 flag = CHECKBIT(val64, 0x3);
3280 type = 2;
3281 s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3282 &xstats->xpak_regs_stat,
3283 0x2, flag, type);
3284
3285 if (CHECKBIT(val64, 0x2))
3286 xstats->alarm_laser_bias_current_low++;
3287
3288 flag = CHECKBIT(val64, 0x1);
3289 type = 3;
3290 s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3291 &xstats->xpak_regs_stat,
3292 0x4, flag, type);
3293
3294 if (CHECKBIT(val64, 0x0))
3295 xstats->alarm_laser_output_power_low++;
3296
3297 /* Reading the Warning flags */
3298 addr = 0xA074;
3299 val64 = 0x0;
3300 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3301
3302 if (CHECKBIT(val64, 0x7))
3303 xstats->warn_transceiver_temp_high++;
3304
3305 if (CHECKBIT(val64, 0x6))
3306 xstats->warn_transceiver_temp_low++;
3307
3308 if (CHECKBIT(val64, 0x3))
3309 xstats->warn_laser_bias_current_high++;
3310
3311 if (CHECKBIT(val64, 0x2))
3312 xstats->warn_laser_bias_current_low++;
3313
3314 if (CHECKBIT(val64, 0x1))
3315 xstats->warn_laser_output_power_high++;
3316
3317 if (CHECKBIT(val64, 0x0))
3318 xstats->warn_laser_output_power_low++;
3319 }
3320
3321 /**
3322 * wait_for_cmd_complete - waits for a command to complete.
3323 * @addr: address
3324 * @busy_bit: bit to check for busy
3325 * @bit_state: state to check
3326 * Description: Function that waits for a command written to the RMAC
3327 * ADDR/DATA registers to complete, and returns either success or
3328 * error depending on whether the command completed in time.
3329 * Return value:
3330 * SUCCESS on success and FAILURE on failure.
3331 */
3332
3333 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3334 int bit_state)
3335 {
3336 int ret = FAILURE, cnt = 0, delay = 1;
3337 u64 val64;
3338
3339 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3340 return FAILURE;
3341
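/*
 * Poll the register up to 20 times: roughly 1 ms per check for the
 * first 10 iterations, then roughly 50 ms per check, i.e. around
 * half a second in total.
 */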
3342 do {
3343 val64 = readq(addr);
3344 if (bit_state == S2IO_BIT_RESET) {
3345 if (!(val64 & busy_bit)) {
3346 ret = SUCCESS;
3347 break;
3348 }
3349 } else {
3350 if (val64 & busy_bit) {
3351 ret = SUCCESS;
3352 break;
3353 }
3354 }
3355
3356 if (in_interrupt())
3357 mdelay(delay);
3358 else
3359 msleep(delay);
3360
3361 if (++cnt >= 10)
3362 delay = 50;
3363 } while (cnt < 20);
3364 return ret;
3365 }
3366 /**
3367 * check_pci_device_id - Checks if the device id is supported
3368 * @id : device id
3369 * Description: Function to check if the pci device id is supported by driver.
3370 * Return value: Actual device id if supported else PCI_ANY_ID
3371 */
3372 static u16 check_pci_device_id(u16 id)
3373 {
3374 switch (id) {
3375 case PCI_DEVICE_ID_HERC_WIN:
3376 case PCI_DEVICE_ID_HERC_UNI:
3377 return XFRAME_II_DEVICE;
3378 case PCI_DEVICE_ID_S2IO_UNI:
3379 case PCI_DEVICE_ID_S2IO_WIN:
3380 return XFRAME_I_DEVICE;
3381 default:
3382 return PCI_ANY_ID;
3383 }
3384 }
3385
3386 /**
3387 * s2io_reset - Resets the card.
3388 * @sp : private member of the device structure.
3389 * Description: Function to Reset the card. This function then also
3390 * restores the previously saved PCI configuration space registers as
3391 * the card reset also resets the configuration space.
3392 * Return value:
3393 * void.
3394 */
3395
3396 static void s2io_reset(struct s2io_nic *sp)
3397 {
3398 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3399 u64 val64;
3400 u16 subid, pci_cmd;
3401 int i;
3402 u16 val16;
3403 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3404 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3405 struct stat_block *stats;
3406 struct swStat *swstats;
3407
3408 DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3409 __func__, pci_name(sp->pdev));
3410
3411 /* Back up the PCI-X CMD reg; we don't want to lose the MMRBC and OST settings */
3412 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3413
3414 val64 = SW_RESET_ALL;
3415 writeq(val64, &bar0->sw_reset);
3416 if (strstr(sp->product_name, "CX4"))
3417 msleep(750);
3418 msleep(250);
3419 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3420
3421 /* Restore the PCI state saved during initialization. */
3422 pci_restore_state(sp->pdev);
3423 pci_save_state(sp->pdev);
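/* Offset 0x2 in PCI config space is the device ID; a recognised
 * value here means the device has come back from the reset. */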
3424 pci_read_config_word(sp->pdev, 0x2, &val16);
3425 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3426 break;
3427 msleep(200);
3428 }
3429
3430 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3431 DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3432
3433 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3434
3435 s2io_init_pci(sp);
3436
3437 /* Set swapper to enable I/O register access */
3438 s2io_set_swapper(sp);
3439
3440 /* restore mac_addr entries */
3441 do_s2io_restore_unicast_mc(sp);
3442
3443 /* Restore the MSIX table entries from local variables */
3444 restore_xmsi_data(sp);
3445
3446 /* Clear certain PCI/PCI-X fields after reset */
3447 if (sp->device_type == XFRAME_II_DEVICE) {
3448 /* Clear "detected parity error" bit */
3449 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3450
3451 /* Clearing PCIX Ecc status register */
3452 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3453
3454 /* Clearing PCI_STATUS error reflected here */
3455 writeq(s2BIT(62), &bar0->txpic_int_reg);
3456 }
3457
3458 /* Reset device statistics maintained by OS */
3459 memset(&sp->stats, 0, sizeof(struct net_device_stats));
3460
3461 stats = sp->mac_control.stats_info;
3462 swstats = &stats->sw_stat;
3463
3464 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3465 up_cnt = swstats->link_up_cnt;
3466 down_cnt = swstats->link_down_cnt;
3467 up_time = swstats->link_up_time;
3468 down_time = swstats->link_down_time;
3469 reset_cnt = swstats->soft_reset_cnt;
3470 mem_alloc_cnt = swstats->mem_allocated;
3471 mem_free_cnt = swstats->mem_freed;
3472 watchdog_cnt = swstats->watchdog_timer_cnt;
3473
3474 memset(stats, 0, sizeof(struct stat_block));
3475
3476 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3477 swstats->link_up_cnt = up_cnt;
3478 swstats->link_down_cnt = down_cnt;
3479 swstats->link_up_time = up_time;
3480 swstats->link_down_time = down_time;
3481 swstats->soft_reset_cnt = reset_cnt;
3482 swstats->mem_allocated = mem_alloc_cnt;
3483 swstats->mem_freed = mem_free_cnt;
3484 swstats->watchdog_timer_cnt = watchdog_cnt;
3485
3486 /* SXE-002: Configure link and activity LED to turn it off */
3487 subid = sp->pdev->subsystem_device;
3488 if (((subid & 0xFF) >= 0x07) &&
3489 (sp->device_type == XFRAME_I_DEVICE)) {
3490 val64 = readq(&bar0->gpio_control);
3491 val64 |= 0x0000800000000000ULL;
3492 writeq(val64, &bar0->gpio_control);
3493 val64 = 0x0411040400000000ULL;
3494 writeq(val64, (void __iomem *)bar0 + 0x2700);
3495 }
3496
3497 /*
3498 * Clear spurious ECC interrupts that would have occurred on
3499 * XFRAME II cards after reset.
3500 */
3501 if (sp->device_type == XFRAME_II_DEVICE) {
3502 val64 = readq(&bar0->pcc_err_reg);
3503 writeq(val64, &bar0->pcc_err_reg);
3504 }
3505
3506 sp->device_enabled_once = false;
3507 }
3508
3509 /**
3510 * s2io_set_swapper - to set the swapper control on the card
3511 * @sp : private member of the device structure,
3512 * pointer to the s2io_nic structure.
3513 * Description: Function to set the swapper control on the card
3514 * correctly depending on the 'endianness' of the system.
3515 * Return value:
3516 * SUCCESS on success and FAILURE on failure.
3517 */
3518
3519 static int s2io_set_swapper(struct s2io_nic *sp)
3520 {
3521 struct net_device *dev = sp->dev;
3522 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3523 u64 val64, valt, valr;
3524
3525 /*
3526 * Set proper endian settings and verify the same by reading
3527 * the PIF Feed-back register.
3528 */
3529
3530 val64 = readq(&bar0->pif_rd_swapper_fb);
3531 if (val64 != 0x0123456789ABCDEFULL) {
3532 int i = 0;
3533 static const u64 value[] = {
3534 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3535 0x8100008181000081ULL, /* FE=1, SE=0 */
3536 0x4200004242000042ULL, /* FE=0, SE=1 */
3537 0 /* FE=0, SE=0 */
3538 };
3539
3540 while (i < 4) {
3541 writeq(value[i], &bar0->swapper_ctrl);
3542 val64 = readq(&bar0->pif_rd_swapper_fb);
3543 if (val64 == 0x0123456789ABCDEFULL)
3544 break;
3545 i++;
3546 }
3547 if (i == 4) {
3548 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3549 "feedback read %llx\n",
3550 dev->name, (unsigned long long)val64);
3551 return FAILURE;
3552 }
3553 valr = value[i];
3554 } else {
3555 valr = readq(&bar0->swapper_ctrl);
3556 }
3557
3558 valt = 0x0123456789ABCDEFULL;
3559 writeq(valt, &bar0->xmsi_address);
3560 val64 = readq(&bar0->xmsi_address);
3561
3562 if (val64 != valt) {
3563 int i = 0;
3564 static const u64 value[] = {
3565 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3566 0x0081810000818100ULL, /* FE=1, SE=0 */
3567 0x0042420000424200ULL, /* FE=0, SE=1 */
3568 0 /* FE=0, SE=0 */
3569 };
3570
3571 while (i < 4) {
3572 writeq((value[i] | valr), &bar0->swapper_ctrl);
3573 writeq(valt, &bar0->xmsi_address);
3574 val64 = readq(&bar0->xmsi_address);
3575 if (val64 == valt)
3576 break;
3577 i++;
3578 }
3579 if (i == 4) {
3580 unsigned long long x = val64;
3581 DBG_PRINT(ERR_DBG,
3582 "Write failed, Xmsi_addr reads:0x%llx\n", x);
3583 return FAILURE;
3584 }
3585 }
3586 val64 = readq(&bar0->swapper_ctrl);
3587 val64 &= 0xFFFF000000000000ULL;
3588
3589 #ifdef __BIG_ENDIAN
3590 /*
3591 	 * The device defaults to a big endian format, so a
3592 * big endian driver need not set anything.
3593 */
3594 val64 |= (SWAPPER_CTRL_TXP_FE |
3595 SWAPPER_CTRL_TXP_SE |
3596 SWAPPER_CTRL_TXD_R_FE |
3597 SWAPPER_CTRL_TXD_W_FE |
3598 SWAPPER_CTRL_TXF_R_FE |
3599 SWAPPER_CTRL_RXD_R_FE |
3600 SWAPPER_CTRL_RXD_W_FE |
3601 SWAPPER_CTRL_RXF_W_FE |
3602 SWAPPER_CTRL_XMSI_FE |
3603 SWAPPER_CTRL_STATS_FE |
3604 SWAPPER_CTRL_STATS_SE);
3605 if (sp->config.intr_type == INTA)
3606 val64 |= SWAPPER_CTRL_XMSI_SE;
3607 writeq(val64, &bar0->swapper_ctrl);
3608 #else
3609 /*
3610 * Initially we enable all bits to make it accessible by the
3611 * driver, then we selectively enable only those bits that
3612 * we want to set.
3613 */
3614 val64 |= (SWAPPER_CTRL_TXP_FE |
3615 SWAPPER_CTRL_TXP_SE |
3616 SWAPPER_CTRL_TXD_R_FE |
3617 SWAPPER_CTRL_TXD_R_SE |
3618 SWAPPER_CTRL_TXD_W_FE |
3619 SWAPPER_CTRL_TXD_W_SE |
3620 SWAPPER_CTRL_TXF_R_FE |
3621 SWAPPER_CTRL_RXD_R_FE |
3622 SWAPPER_CTRL_RXD_R_SE |
3623 SWAPPER_CTRL_RXD_W_FE |
3624 SWAPPER_CTRL_RXD_W_SE |
3625 SWAPPER_CTRL_RXF_W_FE |
3626 SWAPPER_CTRL_XMSI_FE |
3627 SWAPPER_CTRL_STATS_FE |
3628 SWAPPER_CTRL_STATS_SE);
3629 if (sp->config.intr_type == INTA)
3630 val64 |= SWAPPER_CTRL_XMSI_SE;
3631 writeq(val64, &bar0->swapper_ctrl);
3632 #endif
3633 val64 = readq(&bar0->swapper_ctrl);
3634
3635 /*
3636 * Verifying if endian settings are accurate by reading a
3637 * feedback register.
3638 */
3639 val64 = readq(&bar0->pif_rd_swapper_fb);
3640 if (val64 != 0x0123456789ABCDEFULL) {
3641 /* Endian settings are incorrect, calls for another dekko. */
3642 DBG_PRINT(ERR_DBG,
3643 "%s: Endian settings are wrong, feedback read %llx\n",
3644 dev->name, (unsigned long long)val64);
3645 return FAILURE;
3646 }
3647
3648 return SUCCESS;
3649 }
3650
3651 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3652 {
3653 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3654 u64 val64;
3655 int ret = 0, cnt = 0;
3656
3657 do {
3658 val64 = readq(&bar0->xmsi_access);
3659 if (!(val64 & s2BIT(15)))
3660 break;
3661 mdelay(1);
3662 cnt++;
3663 } while (cnt < 5);
3664 if (cnt == 5) {
3665 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3666 ret = 1;
3667 }
3668
3669 return ret;
3670 }
3671
3672 static void restore_xmsi_data(struct s2io_nic *nic)
3673 {
3674 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3675 u64 val64;
3676 int i, msix_index;
3677
3678 if (nic->device_type == XFRAME_I_DEVICE)
3679 return;
3680
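	/*
	 * Vector 0 is the alarm vector; ring vectors sit at ((i - 1) * 8) + 1
	 * in the XMSI table, mirroring the layout set up in s2io_enable_msi_x().
	 */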
3681 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3682 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3683 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3684 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3685 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3686 writeq(val64, &bar0->xmsi_access);
3687 if (wait_for_msix_trans(nic, msix_index))
3688 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3689 __func__, msix_index);
3690 }
3691 }
3692
3693 static void store_xmsi_data(struct s2io_nic *nic)
3694 {
3695 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3696 u64 val64, addr, data;
3697 int i, msix_index;
3698
3699 if (nic->device_type == XFRAME_I_DEVICE)
3700 return;
3701
3702 /* Store and display */
3703 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3704 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3705 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3706 writeq(val64, &bar0->xmsi_access);
3707 if (wait_for_msix_trans(nic, msix_index)) {
3708 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3709 __func__, msix_index);
3710 continue;
3711 }
3712 addr = readq(&bar0->xmsi_address);
3713 data = readq(&bar0->xmsi_data);
3714 if (addr && data) {
3715 nic->msix_info[i].addr = addr;
3716 nic->msix_info[i].data = data;
3717 }
3718 }
3719 }
3720
3721 static int s2io_enable_msi_x(struct s2io_nic *nic)
3722 {
3723 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3724 u64 rx_mat;
3725 u16 msi_control; /* Temp variable */
3726 int ret, i, j, msix_indx = 1;
3727 int size;
3728 struct stat_block *stats = nic->mac_control.stats_info;
3729 struct swStat *swstats = &stats->sw_stat;
3730
3731 size = nic->num_entries * sizeof(struct msix_entry);
3732 nic->entries = kzalloc(size, GFP_KERNEL);
3733 if (!nic->entries) {
3734 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3735 __func__);
3736 swstats->mem_alloc_fail_cnt++;
3737 return -ENOMEM;
3738 }
3739 swstats->mem_allocated += size;
3740
3741 size = nic->num_entries * sizeof(struct s2io_msix_entry);
3742 nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3743 if (!nic->s2io_entries) {
3744 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3745 __func__);
3746 swstats->mem_alloc_fail_cnt++;
3747 kfree(nic->entries);
3748 swstats->mem_freed
3749 += (nic->num_entries * sizeof(struct msix_entry));
3750 return -ENOMEM;
3751 }
3752 swstats->mem_allocated += size;
3753
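	/*
	 * Entry 0 carries the alarm/Tx interrupts; entries 1..num_entries-1 are
	 * Rx ring vectors, spaced 8 apart to match the hardware XMSI indexing.
	 */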
3754 nic->entries[0].entry = 0;
3755 nic->s2io_entries[0].entry = 0;
3756 nic->s2io_entries[0].in_use = MSIX_FLG;
3757 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3758 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3759
3760 for (i = 1; i < nic->num_entries; i++) {
3761 nic->entries[i].entry = ((i - 1) * 8) + 1;
3762 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3763 nic->s2io_entries[i].arg = NULL;
3764 nic->s2io_entries[i].in_use = 0;
3765 }
3766
3767 rx_mat = readq(&bar0->rx_mat);
3768 for (j = 0; j < nic->config.rx_ring_num; j++) {
3769 rx_mat |= RX_MAT_SET(j, msix_indx);
3770 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3771 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3772 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3773 msix_indx += 8;
3774 }
3775 writeq(rx_mat, &bar0->rx_mat);
3776 readq(&bar0->rx_mat);
3777
3778 ret = pci_enable_msix_range(nic->pdev, nic->entries,
3779 nic->num_entries, nic->num_entries);
3780 	/* We fail init on error or if we get fewer vectors than the minimum required */
3781 if (ret < 0) {
3782 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3783 kfree(nic->entries);
3784 swstats->mem_freed += nic->num_entries *
3785 sizeof(struct msix_entry);
3786 kfree(nic->s2io_entries);
3787 swstats->mem_freed += nic->num_entries *
3788 sizeof(struct s2io_msix_entry);
3789 nic->entries = NULL;
3790 nic->s2io_entries = NULL;
3791 return -ENOMEM;
3792 }
3793
3794 /*
3795 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3796 * in the herc NIC. (Temp change, needs to be removed later)
3797 */
3798 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3799 msi_control |= 0x1; /* Enable MSI */
3800 pci_write_config_word(nic->pdev, 0x42, msi_control);
3801
3802 return 0;
3803 }
3804
3805 /* Handle software interrupt used during MSI(X) test */
3806 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3807 {
3808 struct s2io_nic *sp = dev_id;
3809
3810 sp->msi_detected = 1;
3811 wake_up(&sp->msi_wait);
3812
3813 return IRQ_HANDLED;
3814 }
3815
3816 /* Test interrupt path by forcing a software IRQ */
3817 static int s2io_test_msi(struct s2io_nic *sp)
3818 {
3819 struct pci_dev *pdev = sp->pdev;
3820 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3821 int err;
3822 u64 val64, saved64;
3823
3824 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3825 sp->name, sp);
3826 if (err) {
3827 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3828 sp->dev->name, pci_name(pdev), pdev->irq);
3829 return err;
3830 }
3831
3832 init_waitqueue_head(&sp->msi_wait);
3833 sp->msi_detected = 0;
3834
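	/*
	 * Arm the scheduled interrupt timer as a one-shot routed to MSI-X
	 * vector 1; if delivery works, s2io_test_intr() wakes us up below.
	 */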
3835 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3836 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3837 val64 |= SCHED_INT_CTRL_TIMER_EN;
3838 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3839 writeq(val64, &bar0->scheduled_int_ctrl);
3840
3841 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3842
3843 if (!sp->msi_detected) {
3844 /* MSI(X) test failed, go back to INTx mode */
3845 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3846 "using MSI(X) during test\n",
3847 sp->dev->name, pci_name(pdev));
3848
3849 err = -EOPNOTSUPP;
3850 }
3851
3852 free_irq(sp->entries[1].vector, sp);
3853
3854 writeq(saved64, &bar0->scheduled_int_ctrl);
3855
3856 return err;
3857 }
3858
3859 static void remove_msix_isr(struct s2io_nic *sp)
3860 {
3861 int i;
3862 u16 msi_control;
3863
3864 for (i = 0; i < sp->num_entries; i++) {
3865 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3866 int vector = sp->entries[i].vector;
3867 void *arg = sp->s2io_entries[i].arg;
3868 free_irq(vector, arg);
3869 }
3870 }
3871
3872 kfree(sp->entries);
3873 kfree(sp->s2io_entries);
3874 sp->entries = NULL;
3875 sp->s2io_entries = NULL;
3876
3877 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3878 msi_control &= 0xFFFE; /* Disable MSI */
3879 pci_write_config_word(sp->pdev, 0x42, msi_control);
3880
3881 pci_disable_msix(sp->pdev);
3882 }
3883
3884 static void remove_inta_isr(struct s2io_nic *sp)
3885 {
3886 free_irq(sp->pdev->irq, sp->dev);
3887 }
3888
3889 /* ********************************************************* *
3890 * Functions defined below concern the OS part of the driver *
3891 * ********************************************************* */
3892
3893 /**
3894 * s2io_open - open entry point of the driver
3895 * @dev : pointer to the device structure.
3896 * Description:
3897 * This function is the open entry point of the driver. It mainly calls a
3898 * function to allocate Rx buffers and inserts them into the buffer
3899 * descriptors and then enables the Rx part of the NIC.
3900 * Return value:
3901 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3902 * file on failure.
3903 */
3904
3905 static int s2io_open(struct net_device *dev)
3906 {
3907 struct s2io_nic *sp = netdev_priv(dev);
3908 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3909 int err = 0;
3910
3911 /*
3912 * Make sure you have link off by default every time
3913 * Nic is initialized
3914 */
3915 netif_carrier_off(dev);
3916 sp->last_link_state = 0;
3917
3918 /* Initialize H/W and enable interrupts */
3919 err = s2io_card_up(sp);
3920 if (err) {
3921 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3922 dev->name);
3923 goto hw_init_failed;
3924 }
3925
3926 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3927 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3928 s2io_card_down(sp);
3929 err = -ENODEV;
3930 goto hw_init_failed;
3931 }
3932 s2io_start_all_tx_queue(sp);
3933 return 0;
3934
3935 hw_init_failed:
3936 if (sp->config.intr_type == MSI_X) {
3937 if (sp->entries) {
3938 kfree(sp->entries);
3939 swstats->mem_freed += sp->num_entries *
3940 sizeof(struct msix_entry);
3941 }
3942 if (sp->s2io_entries) {
3943 kfree(sp->s2io_entries);
3944 swstats->mem_freed += sp->num_entries *
3945 sizeof(struct s2io_msix_entry);
3946 }
3947 }
3948 return err;
3949 }
3950
3951 /**
3952 * s2io_close -close entry point of the driver
3953 * @dev : device pointer.
3954 * Description:
3955 * This is the stop entry point of the driver. It needs to undo exactly
3956  * whatever was done by the open entry point, thus it's usually referred to
3957  * as the close function. Among other things this function mainly stops the
3958 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3959 * Return value:
3960 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3961 * file on failure.
3962 */
3963
3964 static int s2io_close(struct net_device *dev)
3965 {
3966 struct s2io_nic *sp = netdev_priv(dev);
3967 struct config_param *config = &sp->config;
3968 u64 tmp64;
3969 int offset;
3970
3971 /* Return if the device is already closed *
3972 * Can happen when s2io_card_up failed in change_mtu *
3973 */
3974 if (!is_s2io_card_up(sp))
3975 return 0;
3976
3977 s2io_stop_all_tx_queue(sp);
3978 /* delete all populated mac entries */
3979 for (offset = 1; offset < config->max_mc_addr; offset++) {
3980 tmp64 = do_s2io_read_unicast_mc(sp, offset);
3981 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3982 do_s2io_delete_unicast_mc(sp, tmp64);
3983 }
3984
3985 s2io_card_down(sp);
3986
3987 return 0;
3988 }
3989
3990 /**
3991  * s2io_xmit - Tx entry point of the driver
3992 * @skb : the socket buffer containing the Tx data.
3993 * @dev : device pointer.
3994 * Description :
3995 * This function is the Tx entry point of the driver. S2IO NIC supports
3996 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3997  * NOTE: when the device can't queue the packet, only the trans_start variable
3998  * will not be updated.
3999 * Return value:
4000  * NETDEV_TX_OK on success and NETDEV_TX_BUSY when the queue is stopped.
4001 */
4002
4003 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4004 {
4005 struct s2io_nic *sp = netdev_priv(dev);
4006 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4007 register u64 val64;
4008 struct TxD *txdp;
4009 struct TxFIFO_element __iomem *tx_fifo;
4010 unsigned long flags = 0;
4011 u16 vlan_tag = 0;
4012 struct fifo_info *fifo = NULL;
4013 int offload_type;
4014 int enable_per_list_interrupt = 0;
4015 struct config_param *config = &sp->config;
4016 struct mac_info *mac_control = &sp->mac_control;
4017 struct stat_block *stats = mac_control->stats_info;
4018 struct swStat *swstats = &stats->sw_stat;
4019
4020 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4021
4022 if (unlikely(skb->len <= 0)) {
4023 DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4024 dev_kfree_skb_any(skb);
4025 return NETDEV_TX_OK;
4026 }
4027
4028 if (!is_s2io_card_up(sp)) {
4029 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4030 dev->name);
4031 dev_kfree_skb_any(skb);
4032 return NETDEV_TX_OK;
4033 }
4034
4035 queue = 0;
4036 if (skb_vlan_tag_present(skb))
4037 vlan_tag = skb_vlan_tag_get(skb);
4038 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4039 if (skb->protocol == htons(ETH_P_IP)) {
4040 struct iphdr *ip;
4041 struct tcphdr *th;
4042 ip = ip_hdr(skb);
4043
4044 if (!ip_is_fragment(ip)) {
4045 th = (struct tcphdr *)(((unsigned char *)ip) +
4046 ip->ihl*4);
4047
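				/*
				 * Hash the L4 source/destination port pair and
				 * mask with the fifo selector to spread flows
				 * across the TCP or UDP FIFO range.
				 */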
4048 if (ip->protocol == IPPROTO_TCP) {
4049 queue_len = sp->total_tcp_fifos;
4050 queue = (ntohs(th->source) +
4051 ntohs(th->dest)) &
4052 sp->fifo_selector[queue_len - 1];
4053 if (queue >= queue_len)
4054 queue = queue_len - 1;
4055 } else if (ip->protocol == IPPROTO_UDP) {
4056 queue_len = sp->total_udp_fifos;
4057 queue = (ntohs(th->source) +
4058 ntohs(th->dest)) &
4059 sp->fifo_selector[queue_len - 1];
4060 if (queue >= queue_len)
4061 queue = queue_len - 1;
4062 queue += sp->udp_fifo_idx;
4063 if (skb->len > 1024)
4064 enable_per_list_interrupt = 1;
4065 }
4066 }
4067 }
4068 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4069 /* get fifo number based on skb->priority value */
4070 queue = config->fifo_mapping
4071 [skb->priority & (MAX_TX_FIFOS - 1)];
4072 fifo = &mac_control->fifos[queue];
4073
4074 spin_lock_irqsave(&fifo->tx_lock, flags);
4075
4076 if (sp->config.multiq) {
4077 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4078 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4079 return NETDEV_TX_BUSY;
4080 }
4081 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4082 if (netif_queue_stopped(dev)) {
4083 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4084 return NETDEV_TX_BUSY;
4085 }
4086 }
4087
4088 put_off = (u16)fifo->tx_curr_put_info.offset;
4089 get_off = (u16)fifo->tx_curr_get_info.offset;
4090 txdp = fifo->list_info[put_off].list_virt_addr;
4091
4092 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4093 /* Avoid "put" pointer going beyond "get" pointer */
4094 if (txdp->Host_Control ||
4095 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4096 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4097 s2io_stop_tx_queue(sp, fifo->fifo_no);
4098 dev_kfree_skb_any(skb);
4099 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4100 return NETDEV_TX_OK;
4101 }
4102
4103 offload_type = s2io_offload_type(skb);
4104 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4105 txdp->Control_1 |= TXD_TCP_LSO_EN;
4106 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4107 }
4108 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4109 txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4110 TXD_TX_CKO_TCP_EN |
4111 TXD_TX_CKO_UDP_EN);
4112 }
4113 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4114 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4115 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4116 if (enable_per_list_interrupt)
4117 if (put_off & (queue_len >> 5))
4118 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4119 if (vlan_tag) {
4120 txdp->Control_2 |= TXD_VLAN_ENABLE;
4121 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4122 }
4123
4124 frg_len = skb_headlen(skb);
4125 txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
4126 frg_len, DMA_TO_DEVICE);
4127 if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
4128 goto pci_map_failed;
4129
4130 txdp->Host_Control = (unsigned long)skb;
4131 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4132
4133 frg_cnt = skb_shinfo(skb)->nr_frags;
4134 /* For fragmented SKB. */
4135 for (i = 0; i < frg_cnt; i++) {
4136 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4137 /* A '0' length fragment will be ignored */
4138 if (!skb_frag_size(frag))
4139 continue;
4140 txdp++;
4141 txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4142 frag, 0,
4143 skb_frag_size(frag),
4144 DMA_TO_DEVICE);
4145 txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4146 }
4147 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4148
4149 tx_fifo = mac_control->tx_FIFO_start[queue];
4150 val64 = fifo->list_info[put_off].list_phy_addr;
4151 writeq(val64, &tx_fifo->TxDL_Pointer);
4152
4153 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4154 TX_FIFO_LAST_LIST);
4155 if (offload_type)
4156 val64 |= TX_FIFO_SPECIAL_FUNC;
4157
4158 writeq(val64, &tx_fifo->List_Control);
4159
4160 put_off++;
4161 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4162 put_off = 0;
4163 fifo->tx_curr_put_info.offset = put_off;
4164
4165 /* Avoid "put" pointer going beyond "get" pointer */
4166 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4167 swstats->fifo_full_cnt++;
4168 DBG_PRINT(TX_DBG,
4169 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4170 put_off, get_off);
4171 s2io_stop_tx_queue(sp, fifo->fifo_no);
4172 }
4173 swstats->mem_allocated += skb->truesize;
4174 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4175
4176 if (sp->config.intr_type == MSI_X)
4177 tx_intr_handler(fifo);
4178
4179 return NETDEV_TX_OK;
4180
4181 pci_map_failed:
4182 swstats->pci_map_fail_cnt++;
4183 s2io_stop_tx_queue(sp, fifo->fifo_no);
4184 swstats->mem_freed += skb->truesize;
4185 dev_kfree_skb_any(skb);
4186 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4187 return NETDEV_TX_OK;
4188 }
4189
4190 static void
4191 s2io_alarm_handle(struct timer_list *t)
4192 {
4193 struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4194 struct net_device *dev = sp->dev;
4195
4196 s2io_handle_errors(dev);
4197 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4198 }
4199
4200 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4201 {
4202 struct ring_info *ring = (struct ring_info *)dev_id;
4203 struct s2io_nic *sp = ring->nic;
4204 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4205
4206 if (unlikely(!is_s2io_card_up(sp)))
4207 return IRQ_HANDLED;
4208
4209 if (sp->config.napi) {
4210 u8 __iomem *addr = NULL;
4211 u8 val8 = 0;
4212
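		/*
		 * Mask further interrupts for this ring via its byte in
		 * xmsi_mask_reg before handing work to NAPI; the readback
		 * flushes the posted write.
		 */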
4213 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4214 addr += (7 - ring->ring_no);
4215 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4216 writeb(val8, addr);
4217 val8 = readb(addr);
4218 napi_schedule(&ring->napi);
4219 } else {
4220 rx_intr_handler(ring, 0);
4221 s2io_chk_rx_buffers(sp, ring);
4222 }
4223
4224 return IRQ_HANDLED;
4225 }
4226
4227 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4228 {
4229 int i;
4230 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4231 struct s2io_nic *sp = fifos->nic;
4232 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4233 struct config_param *config = &sp->config;
4234 u64 reason;
4235
4236 if (unlikely(!is_s2io_card_up(sp)))
4237 return IRQ_NONE;
4238
4239 reason = readq(&bar0->general_int_status);
4240 if (unlikely(reason == S2IO_MINUS_ONE))
4241 /* Nothing much can be done. Get out */
4242 return IRQ_HANDLED;
4243
4244 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4245 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4246
4247 if (reason & GEN_INTR_TXPIC)
4248 s2io_txpic_intr_handle(sp);
4249
4250 if (reason & GEN_INTR_TXTRAFFIC)
4251 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4252
4253 for (i = 0; i < config->tx_fifo_num; i++)
4254 tx_intr_handler(&fifos[i]);
4255
4256 writeq(sp->general_int_mask, &bar0->general_int_mask);
4257 readl(&bar0->general_int_status);
4258 return IRQ_HANDLED;
4259 }
4260 /* The interrupt was not raised by us */
4261 return IRQ_NONE;
4262 }
4263
4264 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4265 {
4266 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4267 u64 val64;
4268
4269 val64 = readq(&bar0->pic_int_status);
4270 if (val64 & PIC_INT_GPIO) {
4271 val64 = readq(&bar0->gpio_int_reg);
4272 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4273 (val64 & GPIO_INT_REG_LINK_UP)) {
4274 /*
4275 			 * This is an unstable state, so clear both up/down
4276 			 * interrupts and let the adapter re-evaluate the link state.
4277 */
4278 val64 |= GPIO_INT_REG_LINK_DOWN;
4279 val64 |= GPIO_INT_REG_LINK_UP;
4280 writeq(val64, &bar0->gpio_int_reg);
4281 val64 = readq(&bar0->gpio_int_mask);
4282 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4283 GPIO_INT_MASK_LINK_DOWN);
4284 writeq(val64, &bar0->gpio_int_mask);
4285 } else if (val64 & GPIO_INT_REG_LINK_UP) {
4286 val64 = readq(&bar0->adapter_status);
4287 /* Enable Adapter */
4288 val64 = readq(&bar0->adapter_control);
4289 val64 |= ADAPTER_CNTL_EN;
4290 writeq(val64, &bar0->adapter_control);
4291 val64 |= ADAPTER_LED_ON;
4292 writeq(val64, &bar0->adapter_control);
4293 if (!sp->device_enabled_once)
4294 sp->device_enabled_once = 1;
4295
4296 s2io_link(sp, LINK_UP);
4297 /*
4298 * unmask link down interrupt and mask link-up
4299 * intr
4300 */
4301 val64 = readq(&bar0->gpio_int_mask);
4302 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4303 val64 |= GPIO_INT_MASK_LINK_UP;
4304 writeq(val64, &bar0->gpio_int_mask);
4305
4306 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4307 val64 = readq(&bar0->adapter_status);
4308 s2io_link(sp, LINK_DOWN);
4309 			/* Link is down so unmask link up interrupt */
4310 val64 = readq(&bar0->gpio_int_mask);
4311 val64 &= ~GPIO_INT_MASK_LINK_UP;
4312 val64 |= GPIO_INT_MASK_LINK_DOWN;
4313 writeq(val64, &bar0->gpio_int_mask);
4314
4315 /* turn off LED */
4316 val64 = readq(&bar0->adapter_control);
4317 val64 = val64 & (~ADAPTER_LED_ON);
4318 writeq(val64, &bar0->adapter_control);
4319 }
4320 }
4321 val64 = readq(&bar0->gpio_int_mask);
4322 }
4323
4324 /**
4325  * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4326 * @value: alarm bits
4327 * @addr: address value
4328 * @cnt: counter variable
4329 * Description: Check for alarm and increment the counter
4330 * Return Value:
4331 * 1 - if alarm bit set
4332 * 0 - if alarm bit is not set
4333 */
4334 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4335 unsigned long long *cnt)
4336 {
4337 u64 val64;
4338 val64 = readq(addr);
4339 if (val64 & value) {
4340 writeq(val64, addr);
4341 (*cnt)++;
4342 return 1;
4343 }
4344 return 0;
4345
4346 }
4347
4348 /**
4349 * s2io_handle_errors - Xframe error indication handler
4350 * @dev_id: opaque handle to dev
4351 * Description: Handle alarms such as loss of link, single or
4352 * double ECC errors, critical and serious errors.
4353 * Return Value:
4354 * NONE
4355 */
4356 static void s2io_handle_errors(void *dev_id)
4357 {
4358 struct net_device *dev = (struct net_device *)dev_id;
4359 struct s2io_nic *sp = netdev_priv(dev);
4360 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4361 u64 temp64 = 0, val64 = 0;
4362 int i = 0;
4363
4364 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4365 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4366
4367 if (!is_s2io_card_up(sp))
4368 return;
4369
4370 if (pci_channel_offline(sp->pdev))
4371 return;
4372
4373 memset(&sw_stat->ring_full_cnt, 0,
4374 sizeof(sw_stat->ring_full_cnt));
4375
4376 /* Handling the XPAK counters update */
4377 if (stats->xpak_timer_count < 72000) {
4378 /* waiting for an hour */
4379 stats->xpak_timer_count++;
4380 } else {
4381 s2io_updt_xpak_counter(dev);
4382 /* reset the count to zero */
4383 stats->xpak_timer_count = 0;
4384 }
4385
4386 /* Handling link status change error Intr */
4387 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4388 val64 = readq(&bar0->mac_rmac_err_reg);
4389 writeq(val64, &bar0->mac_rmac_err_reg);
4390 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4391 schedule_work(&sp->set_link_task);
4392 }
4393
4394 /* In case of a serious error, the device will be Reset. */
4395 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4396 &sw_stat->serious_err_cnt))
4397 goto reset;
4398
4399 /* Check for data parity error */
4400 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4401 &sw_stat->parity_err_cnt))
4402 goto reset;
4403
4404 /* Check for ring full counter */
4405 if (sp->device_type == XFRAME_II_DEVICE) {
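		/*
		 * Each bump counter packs four 16-bit per-ring counts into one
		 * 64-bit register; peel them off and accumulate per ring.
		 */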
4406 val64 = readq(&bar0->ring_bump_counter1);
4407 for (i = 0; i < 4; i++) {
4408 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4409 temp64 >>= 64 - ((i+1)*16);
4410 sw_stat->ring_full_cnt[i] += temp64;
4411 }
4412
4413 val64 = readq(&bar0->ring_bump_counter2);
4414 for (i = 0; i < 4; i++) {
4415 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4416 temp64 >>= 64 - ((i+1)*16);
4417 sw_stat->ring_full_cnt[i+4] += temp64;
4418 }
4419 }
4420
4421 val64 = readq(&bar0->txdma_int_status);
4422 /*check for pfc_err*/
4423 if (val64 & TXDMA_PFC_INT) {
4424 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4425 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4426 PFC_PCIX_ERR,
4427 &bar0->pfc_err_reg,
4428 &sw_stat->pfc_err_cnt))
4429 goto reset;
4430 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4431 &bar0->pfc_err_reg,
4432 &sw_stat->pfc_err_cnt);
4433 }
4434
4435 /*check for tda_err*/
4436 if (val64 & TXDMA_TDA_INT) {
4437 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4438 TDA_SM0_ERR_ALARM |
4439 TDA_SM1_ERR_ALARM,
4440 &bar0->tda_err_reg,
4441 &sw_stat->tda_err_cnt))
4442 goto reset;
4443 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4444 &bar0->tda_err_reg,
4445 &sw_stat->tda_err_cnt);
4446 }
4447 /*check for pcc_err*/
4448 if (val64 & TXDMA_PCC_INT) {
4449 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4450 PCC_N_SERR | PCC_6_COF_OV_ERR |
4451 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4452 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4453 PCC_TXB_ECC_DB_ERR,
4454 &bar0->pcc_err_reg,
4455 &sw_stat->pcc_err_cnt))
4456 goto reset;
4457 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4458 &bar0->pcc_err_reg,
4459 &sw_stat->pcc_err_cnt);
4460 }
4461
4462 /*check for tti_err*/
4463 if (val64 & TXDMA_TTI_INT) {
4464 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4465 &bar0->tti_err_reg,
4466 &sw_stat->tti_err_cnt))
4467 goto reset;
4468 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4469 &bar0->tti_err_reg,
4470 &sw_stat->tti_err_cnt);
4471 }
4472
4473 /*check for lso_err*/
4474 if (val64 & TXDMA_LSO_INT) {
4475 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4476 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4477 &bar0->lso_err_reg,
4478 &sw_stat->lso_err_cnt))
4479 goto reset;
4480 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4481 &bar0->lso_err_reg,
4482 &sw_stat->lso_err_cnt);
4483 }
4484
4485 /*check for tpa_err*/
4486 if (val64 & TXDMA_TPA_INT) {
4487 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4488 &bar0->tpa_err_reg,
4489 &sw_stat->tpa_err_cnt))
4490 goto reset;
4491 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4492 &bar0->tpa_err_reg,
4493 &sw_stat->tpa_err_cnt);
4494 }
4495
4496 /*check for sm_err*/
4497 if (val64 & TXDMA_SM_INT) {
4498 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4499 &bar0->sm_err_reg,
4500 &sw_stat->sm_err_cnt))
4501 goto reset;
4502 }
4503
4504 val64 = readq(&bar0->mac_int_status);
4505 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4506 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4507 &bar0->mac_tmac_err_reg,
4508 &sw_stat->mac_tmac_err_cnt))
4509 goto reset;
4510 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4511 TMAC_DESC_ECC_SG_ERR |
4512 TMAC_DESC_ECC_DB_ERR,
4513 &bar0->mac_tmac_err_reg,
4514 &sw_stat->mac_tmac_err_cnt);
4515 }
4516
4517 val64 = readq(&bar0->xgxs_int_status);
4518 if (val64 & XGXS_INT_STATUS_TXGXS) {
4519 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4520 &bar0->xgxs_txgxs_err_reg,
4521 &sw_stat->xgxs_txgxs_err_cnt))
4522 goto reset;
4523 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4524 &bar0->xgxs_txgxs_err_reg,
4525 &sw_stat->xgxs_txgxs_err_cnt);
4526 }
4527
4528 val64 = readq(&bar0->rxdma_int_status);
4529 if (val64 & RXDMA_INT_RC_INT_M) {
4530 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4531 RC_FTC_ECC_DB_ERR |
4532 RC_PRCn_SM_ERR_ALARM |
4533 RC_FTC_SM_ERR_ALARM,
4534 &bar0->rc_err_reg,
4535 &sw_stat->rc_err_cnt))
4536 goto reset;
4537 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4538 RC_FTC_ECC_SG_ERR |
4539 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4540 &sw_stat->rc_err_cnt);
4541 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4542 PRC_PCI_AB_WR_Rn |
4543 PRC_PCI_AB_F_WR_Rn,
4544 &bar0->prc_pcix_err_reg,
4545 &sw_stat->prc_pcix_err_cnt))
4546 goto reset;
4547 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4548 PRC_PCI_DP_WR_Rn |
4549 PRC_PCI_DP_F_WR_Rn,
4550 &bar0->prc_pcix_err_reg,
4551 &sw_stat->prc_pcix_err_cnt);
4552 }
4553
4554 if (val64 & RXDMA_INT_RPA_INT_M) {
4555 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4556 &bar0->rpa_err_reg,
4557 &sw_stat->rpa_err_cnt))
4558 goto reset;
4559 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4560 &bar0->rpa_err_reg,
4561 &sw_stat->rpa_err_cnt);
4562 }
4563
4564 if (val64 & RXDMA_INT_RDA_INT_M) {
4565 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4566 RDA_FRM_ECC_DB_N_AERR |
4567 RDA_SM1_ERR_ALARM |
4568 RDA_SM0_ERR_ALARM |
4569 RDA_RXD_ECC_DB_SERR,
4570 &bar0->rda_err_reg,
4571 &sw_stat->rda_err_cnt))
4572 goto reset;
4573 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4574 RDA_FRM_ECC_SG_ERR |
4575 RDA_MISC_ERR |
4576 RDA_PCIX_ERR,
4577 &bar0->rda_err_reg,
4578 &sw_stat->rda_err_cnt);
4579 }
4580
4581 if (val64 & RXDMA_INT_RTI_INT_M) {
4582 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4583 &bar0->rti_err_reg,
4584 &sw_stat->rti_err_cnt))
4585 goto reset;
4586 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4587 &bar0->rti_err_reg,
4588 &sw_stat->rti_err_cnt);
4589 }
4590
4591 val64 = readq(&bar0->mac_int_status);
4592 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4593 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4594 &bar0->mac_rmac_err_reg,
4595 &sw_stat->mac_rmac_err_cnt))
4596 goto reset;
4597 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4598 RMAC_SINGLE_ECC_ERR |
4599 RMAC_DOUBLE_ECC_ERR,
4600 &bar0->mac_rmac_err_reg,
4601 &sw_stat->mac_rmac_err_cnt);
4602 }
4603
4604 val64 = readq(&bar0->xgxs_int_status);
4605 if (val64 & XGXS_INT_STATUS_RXGXS) {
4606 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4607 &bar0->xgxs_rxgxs_err_reg,
4608 &sw_stat->xgxs_rxgxs_err_cnt))
4609 goto reset;
4610 }
4611
4612 val64 = readq(&bar0->mc_int_status);
4613 if (val64 & MC_INT_STATUS_MC_INT) {
4614 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4615 &bar0->mc_err_reg,
4616 &sw_stat->mc_err_cnt))
4617 goto reset;
4618
4619 /* Handling Ecc errors */
4620 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4621 writeq(val64, &bar0->mc_err_reg);
4622 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4623 sw_stat->double_ecc_errs++;
4624 if (sp->device_type != XFRAME_II_DEVICE) {
4625 /*
4626 * Reset XframeI only if critical error
4627 */
4628 if (val64 &
4629 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4630 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4631 goto reset;
4632 }
4633 } else
4634 sw_stat->single_ecc_errs++;
4635 }
4636 }
4637 return;
4638
4639 reset:
4640 s2io_stop_all_tx_queue(sp);
4641 schedule_work(&sp->rst_timer_task);
4642 sw_stat->soft_reset_cnt++;
4643 }
4644
4645 /**
4646 * s2io_isr - ISR handler of the device .
4647 * @irq: the irq of the device.
4648 * @dev_id: a void pointer to the dev structure of the NIC.
4649 * Description: This function is the ISR handler of the device. It
4650 * identifies the reason for the interrupt and calls the relevant
4651  * service routines. As a contingency measure, this ISR allocates the
4652 * recv buffers, if their numbers are below the panic value which is
4653 * presently set to 25% of the original number of rcv buffers allocated.
4654 * Return value:
4655 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4656 * IRQ_NONE: will be returned if interrupt is not from our device
4657 */
4658 static irqreturn_t s2io_isr(int irq, void *dev_id)
4659 {
4660 struct net_device *dev = (struct net_device *)dev_id;
4661 struct s2io_nic *sp = netdev_priv(dev);
4662 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4663 int i;
4664 u64 reason = 0;
4665 struct mac_info *mac_control;
4666 struct config_param *config;
4667
4668 /* Pretend we handled any irq's from a disconnected card */
4669 if (pci_channel_offline(sp->pdev))
4670 return IRQ_NONE;
4671
4672 if (!is_s2io_card_up(sp))
4673 return IRQ_NONE;
4674
4675 config = &sp->config;
4676 mac_control = &sp->mac_control;
4677
4678 /*
4679 * Identify the cause for interrupt and call the appropriate
4680 * interrupt handler. Causes for the interrupt could be;
4681 * 1. Rx of packet.
4682 * 2. Tx complete.
4683 * 3. Link down.
4684 */
4685 reason = readq(&bar0->general_int_status);
4686
4687 if (unlikely(reason == S2IO_MINUS_ONE))
4688 return IRQ_HANDLED; /* Nothing much can be done. Get out */
4689
4690 if (reason &
4691 (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4692 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4693
4694 if (config->napi) {
4695 if (reason & GEN_INTR_RXTRAFFIC) {
4696 napi_schedule(&sp->napi);
4697 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4698 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4699 readl(&bar0->rx_traffic_int);
4700 }
4701 } else {
4702 /*
4703 * rx_traffic_int reg is an R1 register, writing all 1's
4704 * will ensure that the actual interrupt causing bit
4705 			 * gets cleared and hence a read can be avoided.
4706 */
4707 if (reason & GEN_INTR_RXTRAFFIC)
4708 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4709
4710 for (i = 0; i < config->rx_ring_num; i++) {
4711 struct ring_info *ring = &mac_control->rings[i];
4712
4713 rx_intr_handler(ring, 0);
4714 }
4715 }
4716
4717 /*
4718 * tx_traffic_int reg is an R1 register, writing all 1's
4719 		 * will ensure that the actual interrupt causing bit gets
4720 * cleared and hence a read can be avoided.
4721 */
4722 if (reason & GEN_INTR_TXTRAFFIC)
4723 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4724
4725 for (i = 0; i < config->tx_fifo_num; i++)
4726 tx_intr_handler(&mac_control->fifos[i]);
4727
4728 if (reason & GEN_INTR_TXPIC)
4729 s2io_txpic_intr_handle(sp);
4730
4731 /*
4732 * Reallocate the buffers from the interrupt handler itself.
4733 */
4734 if (!config->napi) {
4735 for (i = 0; i < config->rx_ring_num; i++) {
4736 struct ring_info *ring = &mac_control->rings[i];
4737
4738 s2io_chk_rx_buffers(sp, ring);
4739 }
4740 }
4741 writeq(sp->general_int_mask, &bar0->general_int_mask);
4742 readl(&bar0->general_int_status);
4743
4744 return IRQ_HANDLED;
4745
4746 } else if (!reason) {
4747 /* The interrupt was not raised by us */
4748 return IRQ_NONE;
4749 }
4750
4751 return IRQ_HANDLED;
4752 }
4753
4754 /*
4755 * s2io_updt_stats -
4756 */
4757 static void s2io_updt_stats(struct s2io_nic *sp)
4758 {
4759 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4760 u64 val64;
4761 int cnt = 0;
4762
4763 if (is_s2io_card_up(sp)) {
4764 /* Apprx 30us on a 133 MHz bus */
4765 val64 = SET_UPDT_CLICKS(10) |
4766 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4767 writeq(val64, &bar0->stat_cfg);
4768 do {
4769 udelay(100);
4770 val64 = readq(&bar0->stat_cfg);
4771 if (!(val64 & s2BIT(0)))
4772 break;
4773 cnt++;
4774 if (cnt == 5)
4775 break; /* Updt failed */
4776 } while (1);
4777 }
4778 }
4779
4780 /**
4781 * s2io_get_stats - Updates the device statistics structure.
4782 * @dev : pointer to the device structure.
4783 * Description:
4784 * This function updates the device statistics structure in the s2io_nic
4785 * structure and returns a pointer to the same.
4786 * Return value:
4787 * pointer to the updated net_device_stats structure.
4788 */
4789 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4790 {
4791 struct s2io_nic *sp = netdev_priv(dev);
4792 struct mac_info *mac_control = &sp->mac_control;
4793 struct stat_block *stats = mac_control->stats_info;
4794 u64 delta;
4795
4796 /* Configure Stats for immediate updt */
4797 s2io_updt_stats(sp);
4798
4799 /* A device reset will cause the on-adapter statistics to be zero'ed.
4800 * This can be done while running by changing the MTU. To prevent the
4801 * system from having the stats zero'ed, the driver keeps a copy of the
4802 * last update to the system (which is also zero'ed on reset). This
4803 * enables the driver to accurately know the delta between the last
4804 * update and the current update.
4805 */
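	/*
	 * Most hardware counters are 32 bits wide with a separate overflow
	 * word; reconstruct the full value as (oflow << 32) | low before
	 * computing the delta.
	 */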
4806 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4807 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4808 sp->stats.rx_packets += delta;
4809 dev->stats.rx_packets += delta;
4810
4811 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4812 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4813 sp->stats.tx_packets += delta;
4814 dev->stats.tx_packets += delta;
4815
4816 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4817 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4818 sp->stats.rx_bytes += delta;
4819 dev->stats.rx_bytes += delta;
4820
4821 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4822 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4823 sp->stats.tx_bytes += delta;
4824 dev->stats.tx_bytes += delta;
4825
4826 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4827 sp->stats.rx_errors += delta;
4828 dev->stats.rx_errors += delta;
4829
4830 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4831 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4832 sp->stats.tx_errors += delta;
4833 dev->stats.tx_errors += delta;
4834
4835 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4836 sp->stats.rx_dropped += delta;
4837 dev->stats.rx_dropped += delta;
4838
4839 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4840 sp->stats.tx_dropped += delta;
4841 dev->stats.tx_dropped += delta;
4842
4843 /* The adapter MAC interprets pause frames as multicast packets, but
4844 * does not pass them up. This erroneously increases the multicast
4845 * packet count and needs to be deducted when the multicast frame count
4846 * is queried.
4847 */
4848 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4849 le32_to_cpu(stats->rmac_vld_mcst_frms);
4850 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4851 delta -= sp->stats.multicast;
4852 sp->stats.multicast += delta;
4853 dev->stats.multicast += delta;
4854
4855 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4856 le32_to_cpu(stats->rmac_usized_frms)) +
4857 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4858 sp->stats.rx_length_errors += delta;
4859 dev->stats.rx_length_errors += delta;
4860
4861 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4862 sp->stats.rx_crc_errors += delta;
4863 dev->stats.rx_crc_errors += delta;
4864
4865 return &dev->stats;
4866 }
4867
4868 /**
4869 * s2io_set_multicast - entry point for multicast address enable/disable.
4870 * @dev : pointer to the device structure
4871 * Description:
4872 * This function is a driver entry point which gets called by the kernel
4873 * whenever multicast addresses must be enabled/disabled. This also gets
4874  * called to set/reset promiscuous mode. Depending on the device flags, we
4875  * determine whether multicast addresses must be enabled or promiscuous mode
4876  * is to be disabled, etc.
4877 * Return value:
4878 * void.
4879 */
4880
4881 static void s2io_set_multicast(struct net_device *dev)
4882 {
4883 int i, j, prev_cnt;
4884 struct netdev_hw_addr *ha;
4885 struct s2io_nic *sp = netdev_priv(dev);
4886 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4887 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4888 0xfeffffffffffULL;
4889 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4890 void __iomem *add;
4891 struct config_param *config = &sp->config;
4892
4893 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4894 /* Enable all Multicast addresses */
4895 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4896 &bar0->rmac_addr_data0_mem);
4897 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4898 &bar0->rmac_addr_data1_mem);
4899 val64 = RMAC_ADDR_CMD_MEM_WE |
4900 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4901 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4902 writeq(val64, &bar0->rmac_addr_cmd_mem);
4903 /* Wait till command completes */
4904 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4905 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4906 S2IO_BIT_RESET);
4907
4908 sp->m_cast_flg = 1;
4909 sp->all_multi_pos = config->max_mc_addr - 1;
4910 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4911 /* Disable all Multicast addresses */
4912 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4913 &bar0->rmac_addr_data0_mem);
4914 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4915 &bar0->rmac_addr_data1_mem);
4916 val64 = RMAC_ADDR_CMD_MEM_WE |
4917 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4918 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4919 writeq(val64, &bar0->rmac_addr_cmd_mem);
4920 /* Wait till command completes */
4921 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4922 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4923 S2IO_BIT_RESET);
4924
4925 sp->m_cast_flg = 0;
4926 sp->all_multi_pos = 0;
4927 }
4928
4929 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4930 /* Put the NIC into promiscuous mode */
4931 add = &bar0->mac_cfg;
4932 val64 = readq(&bar0->mac_cfg);
4933 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4934
4935 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4936 writel((u32)val64, add);
4937 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4938 writel((u32) (val64 >> 32), (add + 4));
4939
4940 if (vlan_tag_strip != 1) {
4941 val64 = readq(&bar0->rx_pa_cfg);
4942 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4943 writeq(val64, &bar0->rx_pa_cfg);
4944 sp->vlan_strip_flag = 0;
4945 }
4946
4947 val64 = readq(&bar0->mac_cfg);
4948 sp->promisc_flg = 1;
4949 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4950 dev->name);
4951 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4952 /* Remove the NIC from promiscuous mode */
4953 add = &bar0->mac_cfg;
4954 val64 = readq(&bar0->mac_cfg);
4955 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4956
4957 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4958 writel((u32)val64, add);
4959 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4960 writel((u32) (val64 >> 32), (add + 4));
4961
4962 if (vlan_tag_strip != 0) {
4963 val64 = readq(&bar0->rx_pa_cfg);
4964 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4965 writeq(val64, &bar0->rx_pa_cfg);
4966 sp->vlan_strip_flag = 1;
4967 }
4968
4969 val64 = readq(&bar0->mac_cfg);
4970 sp->promisc_flg = 0;
4971 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4972 }
4973
4974 /* Update individual M_CAST address list */
4975 if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4976 if (netdev_mc_count(dev) >
4977 (config->max_mc_addr - config->max_mac_addr)) {
4978 DBG_PRINT(ERR_DBG,
4979 "%s: No more Rx filters can be added - "
4980 "please enable ALL_MULTI instead\n",
4981 dev->name);
4982 return;
4983 }
4984
4985 prev_cnt = sp->mc_addr_count;
4986 sp->mc_addr_count = netdev_mc_count(dev);
4987
4988 /* Clear out the previous list of Mc in the H/W. */
4989 for (i = 0; i < prev_cnt; i++) {
4990 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4991 &bar0->rmac_addr_data0_mem);
4992 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4993 &bar0->rmac_addr_data1_mem);
4994 val64 = RMAC_ADDR_CMD_MEM_WE |
4995 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4996 RMAC_ADDR_CMD_MEM_OFFSET
4997 (config->mc_start_offset + i);
4998 writeq(val64, &bar0->rmac_addr_cmd_mem);
4999
5000 			/* Wait till command completes */
5001 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5002 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5003 S2IO_BIT_RESET)) {
5004 DBG_PRINT(ERR_DBG,
5005 "%s: Adding Multicasts failed\n",
5006 dev->name);
5007 return;
5008 }
5009 }
5010
5011 /* Create the new Rx filter list and update the same in H/W. */
5012 i = 0;
5013 netdev_for_each_mc_addr(ha, dev) {
5014 mac_addr = 0;
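			/*
			 * Pack the 6-byte address MSB-first into the low 48
			 * bits; the final right shift undoes the extra <<= 8
			 * from the last loop iteration.
			 */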
5015 for (j = 0; j < ETH_ALEN; j++) {
5016 mac_addr |= ha->addr[j];
5017 mac_addr <<= 8;
5018 }
5019 mac_addr >>= 8;
5020 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5021 &bar0->rmac_addr_data0_mem);
5022 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5023 &bar0->rmac_addr_data1_mem);
5024 val64 = RMAC_ADDR_CMD_MEM_WE |
5025 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5026 RMAC_ADDR_CMD_MEM_OFFSET
5027 (i + config->mc_start_offset);
5028 writeq(val64, &bar0->rmac_addr_cmd_mem);
5029
5030 			/* Wait till command completes */
5031 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5032 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5033 S2IO_BIT_RESET)) {
5034 DBG_PRINT(ERR_DBG,
5035 "%s: Adding Multicasts failed\n",
5036 dev->name);
5037 return;
5038 }
5039 i++;
5040 }
5041 }
5042 }
5043
5044 /* read from CAM unicast & multicast addresses and store it in
5045 * def_mac_addr structure
5046 */
5047 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5048 {
5049 int offset;
5050 u64 mac_addr = 0x0;
5051 struct config_param *config = &sp->config;
5052
5053 /* store unicast & multicast mac addresses */
5054 for (offset = 0; offset < config->max_mc_addr; offset++) {
5055 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5056 /* if read fails disable the entry */
5057 if (mac_addr == FAILURE)
5058 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5059 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5060 }
5061 }
5062
5063 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5064 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5065 {
5066 int offset;
5067 struct config_param *config = &sp->config;
5068 /* restore unicast mac address */
5069 for (offset = 0; offset < config->max_mac_addr; offset++)
5070 do_s2io_prog_unicast(sp->dev,
5071 sp->def_mac_addr[offset].mac_addr);
5072
5073 /* restore multicast mac address */
5074 for (offset = config->mc_start_offset;
5075 offset < config->max_mc_addr; offset++)
5076 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5077 }
5078
5079 /* add a multicast MAC address to CAM */
5080 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5081 {
5082 int i;
5083 u64 mac_addr = 0;
5084 struct config_param *config = &sp->config;
5085
5086 for (i = 0; i < ETH_ALEN; i++) {
5087 mac_addr <<= 8;
5088 mac_addr |= addr[i];
5089 }
5090 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5091 return SUCCESS;
5092
5093 	/* check if the multicast mac is already present in CAM */
5094 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5095 u64 tmp64;
5096 tmp64 = do_s2io_read_unicast_mc(sp, i);
5097 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5098 break;
5099
5100 if (tmp64 == mac_addr)
5101 return SUCCESS;
5102 }
5103 if (i == config->max_mc_addr) {
5104 DBG_PRINT(ERR_DBG,
5105 "CAM full no space left for multicast MAC\n");
5106 return FAILURE;
5107 }
5108 /* Update the internal structure with this new mac address */
5109 do_s2io_copy_mac_addr(sp, i, mac_addr);
5110
5111 return do_s2io_add_mac(sp, mac_addr, i);
5112 }
5113
5114 /* add MAC address to CAM */
5115 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5116 {
5117 u64 val64;
5118 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5119
5120 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5121 &bar0->rmac_addr_data0_mem);
5122
5123 val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5124 RMAC_ADDR_CMD_MEM_OFFSET(off);
5125 writeq(val64, &bar0->rmac_addr_cmd_mem);
5126
5127 /* Wait till command completes */
5128 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5129 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5130 S2IO_BIT_RESET)) {
5131 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5132 return FAILURE;
5133 }
5134 return SUCCESS;
5135 }
5136 /* deletes a specified unicast/multicast mac entry from CAM */
5137 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5138 {
5139 int offset;
5140 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5141 struct config_param *config = &sp->config;
5142
5143 for (offset = 1;
5144 offset < config->max_mc_addr; offset++) {
5145 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5146 if (tmp64 == addr) {
5147 /* disable the entry by writing 0xffffffffffffULL */
5148 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5149 return FAILURE;
5150 /* store the new mac list from CAM */
5151 do_s2io_store_unicast_mc(sp);
5152 return SUCCESS;
5153 }
5154 }
5155 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5156 (unsigned long long)addr);
5157 return FAILURE;
5158 }
5159
5160 /* read mac entries from CAM */
5161 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5162 {
5163 u64 tmp64, val64;
5164 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5165
5166 /* read mac addr */
5167 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5168 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5169 writeq(val64, &bar0->rmac_addr_cmd_mem);
5170
5171 /* Wait till command completes */
5172 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5173 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5174 S2IO_BIT_RESET)) {
5175 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5176 return FAILURE;
5177 }
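	/* The address occupies the upper 48 bits of data0; shift it down. */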
5178 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5179
5180 return tmp64 >> 16;
5181 }
5182
5183 /*
5184 * s2io_set_mac_addr - driver entry point
5185 */
5186
5187 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5188 {
5189 struct sockaddr *addr = p;
5190
5191 if (!is_valid_ether_addr(addr->sa_data))
5192 return -EADDRNOTAVAIL;
5193
5194 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5195
5196 /* store the MAC address in CAM */
5197 return do_s2io_prog_unicast(dev, dev->dev_addr);
5198 }
5199 /**
5200 * do_s2io_prog_unicast - Programs the Xframe mac address
5201 * @dev : pointer to the device structure.
5202 * @addr: a uchar pointer to the new mac address which is to be set.
5203 * Description : This procedure will program the Xframe to receive
5204  * frames with the new MAC address.
5205 * Return value: SUCCESS on success and an appropriate (-)ve integer
5206 * as defined in errno.h file on failure.
5207 */
5208
5209 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5210 {
5211 struct s2io_nic *sp = netdev_priv(dev);
5212 register u64 mac_addr = 0, perm_addr = 0;
5213 int i;
5214 u64 tmp64;
5215 struct config_param *config = &sp->config;
5216
5217 /*
5218 * Set the new MAC address as the new unicast filter and reflect this
5219 * change on the device address registered with the OS. It will be
5220 * at offset 0.
5221 */
5222 for (i = 0; i < ETH_ALEN; i++) {
5223 mac_addr <<= 8;
5224 mac_addr |= addr[i];
5225 perm_addr <<= 8;
5226 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5227 }
5228
5229 /* check if the dev_addr is different than perm_addr */
5230 if (mac_addr == perm_addr)
5231 return SUCCESS;
5232
5233 	/* check if the mac is already present in CAM */
5234 for (i = 1; i < config->max_mac_addr; i++) {
5235 tmp64 = do_s2io_read_unicast_mc(sp, i);
5236 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5237 break;
5238
5239 if (tmp64 == mac_addr) {
5240 DBG_PRINT(INFO_DBG,
5241 "MAC addr:0x%llx already present in CAM\n",
5242 (unsigned long long)mac_addr);
5243 return SUCCESS;
5244 }
5245 }
5246 if (i == config->max_mac_addr) {
5247 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5248 return FAILURE;
5249 }
5250 /* Update the internal structure with this new mac address */
5251 do_s2io_copy_mac_addr(sp, i, mac_addr);
5252
5253 return do_s2io_add_mac(sp, mac_addr, i);
5254 }
5255
5256 /**
5257 * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5258 * @dev : pointer to netdev
5259 * @cmd: pointer to the structure with parameters given by ethtool to set
5260 * link information.
5261 * Description:
5262 * The function sets different link parameters provided by the user onto
5263 * the NIC.
5264 * Return value:
5265 * 0 on success.
5266 */
5267
5268 static int
5269 s2io_ethtool_set_link_ksettings(struct net_device *dev,
5270 const struct ethtool_link_ksettings *cmd)
5271 {
5272 struct s2io_nic *sp = netdev_priv(dev);
5273 if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5274 (cmd->base.speed != SPEED_10000) ||
5275 (cmd->base.duplex != DUPLEX_FULL))
5276 return -EINVAL;
5277 else {
5278 s2io_close(sp->dev);
5279 s2io_open(sp->dev);
5280 }
5281
5282 return 0;
5283 }
5284
5285 /**
5286  * s2io_ethtool_get_link_ksettings - Return link specific information.
5287 * @dev: pointer to netdev
5288 * @cmd : pointer to the structure with parameters given by ethtool
5289 * to return link information.
5290 * Description:
5291 * Returns link specific information like speed, duplex etc.. to ethtool.
5292 * Return value :
5293 * return 0 on success.
5294 */
5295
5296 static int
5297 s2io_ethtool_get_link_ksettings(struct net_device *dev,
5298 struct ethtool_link_ksettings *cmd)
5299 {
5300 struct s2io_nic *sp = netdev_priv(dev);
5301
5302 ethtool_link_ksettings_zero_link_mode(cmd, supported);
5303 ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5304 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5305
5306 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5307 ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5308 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5309
5310 cmd->base.port = PORT_FIBRE;
5311
5312 if (netif_carrier_ok(sp->dev)) {
5313 cmd->base.speed = SPEED_10000;
5314 cmd->base.duplex = DUPLEX_FULL;
5315 } else {
5316 cmd->base.speed = SPEED_UNKNOWN;
5317 cmd->base.duplex = DUPLEX_UNKNOWN;
5318 }
5319
5320 cmd->base.autoneg = AUTONEG_DISABLE;
5321 return 0;
5322 }
5323
5324 /**
5325 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5326 * @dev: pointer to netdev
5327 * @info : pointer to the structure with parameters given by ethtool to
5328 * return driver information.
5329 * Description:
5330  * Returns driver specific information like name, version, etc. to ethtool.
5331 * Return value:
5332 * void
5333 */
5334
5335 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5336 struct ethtool_drvinfo *info)
5337 {
5338 struct s2io_nic *sp = netdev_priv(dev);
5339
5340 strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5341 strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5342 strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5343 }
5344
5345 /**
5346 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
5347 * @dev: pointer to netdev
5348 * @regs : pointer to the structure with parameters given by ethtool for
5349 * dumping the registers.
5350 * @space: The buffer into which all the registers are dumped.
5351 * Description:
5352 * Dumps the entire register space of xFrame NIC into the user given
5353 * buffer area.
5354 * Return value :
5355 * void .
5356 */
5357
5358 static void s2io_ethtool_gregs(struct net_device *dev,
5359 struct ethtool_regs *regs, void *space)
5360 {
5361 int i;
5362 u64 reg;
5363 u8 *reg_space = (u8 *)space;
5364 struct s2io_nic *sp = netdev_priv(dev);
5365
5366 regs->len = XENA_REG_SPACE;
5367 regs->version = sp->pdev->subsystem_device;
5368
5369 for (i = 0; i < regs->len; i += 8) {
5370 reg = readq(sp->bar0 + i);
5371 memcpy((reg_space + i), &reg, 8);
5372 }
5373 }
5374
5375 /*
5376 * s2io_set_led - control NIC led
5377 */
5378 static void s2io_set_led(struct s2io_nic *sp, bool on)
5379 {
5380 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5381 u16 subid = sp->pdev->subsystem_device;
5382 u64 val64;
5383
5384 if ((sp->device_type == XFRAME_II_DEVICE) ||
5385 ((subid & 0xFF) >= 0x07)) {
5386 val64 = readq(&bar0->gpio_control);
5387 if (on)
5388 val64 |= GPIO_CTRL_GPIO_0;
5389 else
5390 val64 &= ~GPIO_CTRL_GPIO_0;
5391
5392 writeq(val64, &bar0->gpio_control);
5393 } else {
5394 val64 = readq(&bar0->adapter_control);
5395 if (on)
5396 val64 |= ADAPTER_LED_ON;
5397 else
5398 val64 &= ~ADAPTER_LED_ON;
5399
5400 writeq(val64, &bar0->adapter_control);
5401 }
5402
5403 }
5404
5405 /**
5406 * s2io_ethtool_set_led - To physically identify the nic on the system.
5407 * @dev : network device
5408 * @state: led setting
5409 *
5410 * Description: Used to physically identify the NIC on the system.
5411 * The Link LED will blink for a time specified by the user for
5412 * identification.
5413 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5414 * identification is possible only if its link is up.
5415 */
5416
5417 static int s2io_ethtool_set_led(struct net_device *dev,
5418 enum ethtool_phys_id_state state)
5419 {
5420 struct s2io_nic *sp = netdev_priv(dev);
5421 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5422 u16 subid = sp->pdev->subsystem_device;
5423
5424 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5425 u64 val64 = readq(&bar0->adapter_control);
5426 if (!(val64 & ADAPTER_CNTL_EN)) {
5427 pr_err("Adapter Link down, cannot blink LED\n");
5428 return -EAGAIN;
5429 }
5430 }
5431
5432 switch (state) {
5433 case ETHTOOL_ID_ACTIVE:
5434 sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5435 return 1; /* cycle on/off once per second */
5436
5437 case ETHTOOL_ID_ON:
5438 s2io_set_led(sp, true);
5439 break;
5440
5441 case ETHTOOL_ID_OFF:
5442 s2io_set_led(sp, false);
5443 break;
5444
5445 case ETHTOOL_ID_INACTIVE:
5446 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5447 writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5448 }
5449
5450 return 0;
5451 }
5452
5453 static void s2io_ethtool_gringparam(struct net_device *dev,
5454 struct ethtool_ringparam *ering)
5455 {
5456 struct s2io_nic *sp = netdev_priv(dev);
5457 int i, tx_desc_count = 0, rx_desc_count = 0;
5458
5459 if (sp->rxd_mode == RXD_MODE_1) {
5460 ering->rx_max_pending = MAX_RX_DESC_1;
5461 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5462 } else {
5463 ering->rx_max_pending = MAX_RX_DESC_2;
5464 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5465 }
5466
5467 ering->tx_max_pending = MAX_TX_DESC;
5468
5469 for (i = 0; i < sp->config.rx_ring_num; i++)
5470 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5471 ering->rx_pending = rx_desc_count;
5472 ering->rx_jumbo_pending = rx_desc_count;
5473
5474 for (i = 0; i < sp->config.tx_fifo_num; i++)
5475 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5476 ering->tx_pending = tx_desc_count;
5477 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5478 }
5479
5480 /**
5481 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5482 * @dev: pointer to netdev
5483 * @ep : pointer to the structure with pause parameters given by ethtool.
5484 * Description:
5485 * Returns the Pause frame generation and reception capability of the NIC.
5486 * Return value:
5487 * void
5488 */
5489 static void s2io_ethtool_getpause_data(struct net_device *dev,
5490 struct ethtool_pauseparam *ep)
5491 {
5492 u64 val64;
5493 struct s2io_nic *sp = netdev_priv(dev);
5494 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5495
5496 val64 = readq(&bar0->rmac_pause_cfg);
5497 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5498 ep->tx_pause = true;
5499 if (val64 & RMAC_PAUSE_RX_ENABLE)
5500 ep->rx_pause = true;
5501 ep->autoneg = false;
5502 }
5503
5504 /**
5505 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5506 * @dev: pointer to netdev
5507 * @ep : pointer to the structure with pause parameters given by ethtool.
5508 * Description:
5509 * It can be used to set or reset Pause frame generation or reception
5510 * support of the NIC.
5511 * Return value:
5512 * int, returns 0 on Success
5513 */
5514
5515 static int s2io_ethtool_setpause_data(struct net_device *dev,
5516 struct ethtool_pauseparam *ep)
5517 {
5518 u64 val64;
5519 struct s2io_nic *sp = netdev_priv(dev);
5520 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5521
5522 val64 = readq(&bar0->rmac_pause_cfg);
5523 if (ep->tx_pause)
5524 val64 |= RMAC_PAUSE_GEN_ENABLE;
5525 else
5526 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5527 if (ep->rx_pause)
5528 val64 |= RMAC_PAUSE_RX_ENABLE;
5529 else
5530 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5531 writeq(val64, &bar0->rmac_pause_cfg);
5532 return 0;
5533 }
5534
5535 #define S2IO_DEV_ID 5
5536 /**
5537 * read_eeprom - reads 4 bytes of data from user given offset.
5538 * @sp : private member of the device structure, which is a pointer to the
5539 * s2io_nic structure.
5540 * @off : offset from which the data is to be read
5541 * @data : It's an output parameter where the data read at the given
5542 * offset is stored.
5543 * Description:
5544 * Will read 4 bytes of data from the user given offset and return the
5545 * read data.
5546 * NOTE: Only the part of the EEPROM visible through the I2C bus
5547 * can be read.
5548 * Return value:
5549 * -1 on failure and 0 on success.
5550 */
5551 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5552 {
5553 int ret = -1;
5554 u32 exit_cnt = 0;
5555 u64 val64;
5556 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5557
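/*
 * Xframe I exposes the EEPROM through the I2C controller and Xframe II
 * through the SPI controller; both paths poll their control register
 * for completion (up to 5 polls, 50 ms apart).
 */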
5558 if (sp->device_type == XFRAME_I_DEVICE) {
5559 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5560 I2C_CONTROL_ADDR(off) |
5561 I2C_CONTROL_BYTE_CNT(0x3) |
5562 I2C_CONTROL_READ |
5563 I2C_CONTROL_CNTL_START;
5564 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5565
5566 while (exit_cnt < 5) {
5567 val64 = readq(&bar0->i2c_control);
5568 if (I2C_CONTROL_CNTL_END(val64)) {
5569 *data = I2C_CONTROL_GET_DATA(val64);
5570 ret = 0;
5571 break;
5572 }
5573 msleep(50);
5574 exit_cnt++;
5575 }
5576 }
5577
5578 if (sp->device_type == XFRAME_II_DEVICE) {
5579 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5580 SPI_CONTROL_BYTECNT(0x3) |
5581 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5582 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5583 val64 |= SPI_CONTROL_REQ;
5584 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5585 while (exit_cnt < 5) {
5586 val64 = readq(&bar0->spi_control);
5587 if (val64 & SPI_CONTROL_NACK) {
5588 ret = 1;
5589 break;
5590 } else if (val64 & SPI_CONTROL_DONE) {
5591 *data = readq(&bar0->spi_data);
5592 *data &= 0xffffff;
5593 ret = 0;
5594 break;
5595 }
5596 msleep(50);
5597 exit_cnt++;
5598 }
5599 }
5600 return ret;
5601 }
5602
5603 /**
5604 * write_eeprom - actually writes the relevant part of the data value.
5605 * @sp : private member of the device structure, which is a pointer to the
5606 * s2io_nic structure.
5607 * @off : offset at which the data must be written
5608 * @data : The data that is to be written
5609 * @cnt : Number of bytes of the data that are actually to be written into
5610 * the Eeprom. (max of 3)
5611 * Description:
5612 * Actually writes the relevant part of the data value into the Eeprom
5613 * through the I2C bus.
5614 * Return value:
5615 * 0 on success, -1 on failure.
5616 */
5617
5618 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5619 {
5620 int exit_cnt = 0, ret = -1;
5621 u64 val64;
5622 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5623
5624 if (sp->device_type == XFRAME_I_DEVICE) {
5625 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5626 I2C_CONTROL_ADDR(off) |
5627 I2C_CONTROL_BYTE_CNT(cnt) |
5628 I2C_CONTROL_SET_DATA((u32)data) |
5629 I2C_CONTROL_CNTL_START;
5630 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5631
5632 while (exit_cnt < 5) {
5633 val64 = readq(&bar0->i2c_control);
5634 if (I2C_CONTROL_CNTL_END(val64)) {
5635 if (!(val64 & I2C_CONTROL_NACK))
5636 ret = 0;
5637 break;
5638 }
5639 msleep(50);
5640 exit_cnt++;
5641 }
5642 }
5643
5644 if (sp->device_type == XFRAME_II_DEVICE) {
5645 int write_cnt = (cnt == 8) ? 0 : cnt;
5646 writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5647
5648 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5649 SPI_CONTROL_BYTECNT(write_cnt) |
5650 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5651 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5652 val64 |= SPI_CONTROL_REQ;
5653 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5654 while (exit_cnt < 5) {
5655 val64 = readq(&bar0->spi_control);
5656 if (val64 & SPI_CONTROL_NACK) {
5657 ret = 1;
5658 break;
5659 } else if (val64 & SPI_CONTROL_DONE) {
5660 ret = 0;
5661 break;
5662 }
5663 msleep(50);
5664 exit_cnt++;
5665 }
5666 }
5667 return ret;
5668 }
5669 static void s2io_vpd_read(struct s2io_nic *nic)
5670 {
5671 u8 *vpd_data;
5672 u8 data;
5673 int i = 0, cnt, len, fail = 0;
5674 int vpd_addr = 0x80;
5675 struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5676
5677 if (nic->device_type == XFRAME_II_DEVICE) {
5678 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5679 vpd_addr = 0x80;
5680 } else {
5681 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5682 vpd_addr = 0x50;
5683 }
5684 strcpy(nic->serial_num, "NOT AVAILABLE");
5685
5686 vpd_data = kmalloc(256, GFP_KERNEL);
5687 if (!vpd_data) {
5688 swstats->mem_alloc_fail_cnt++;
5689 return;
5690 }
5691 swstats->mem_allocated += 256;
5692
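/*
 * Fetch the VPD area 4 bytes at a time: program the VPD address,
 * clear the flag byte, poll it until the device reports 0x80
 * (data ready), then read the 32-bit data word.
 */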
5693 for (i = 0; i < 256; i += 4) {
5694 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5695 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5696 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5697 for (cnt = 0; cnt < 5; cnt++) {
5698 msleep(2);
5699 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5700 if (data == 0x80)
5701 break;
5702 }
5703 if (cnt >= 5) {
5704 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5705 fail = 1;
5706 break;
5707 }
5708 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5709 (u32 *)&vpd_data[i]);
5710 }
5711
5712 if (!fail) {
5713 /* read serial number of adapter */
5714 for (cnt = 0; cnt < 252; cnt++) {
5715 if ((vpd_data[cnt] == 'S') &&
5716 (vpd_data[cnt+1] == 'N')) {
5717 len = vpd_data[cnt+2];
5718 if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5719 memcpy(nic->serial_num,
5720 &vpd_data[cnt + 3],
5721 len);
5722 memset(nic->serial_num+len,
5723 0,
5724 VPD_STRING_LEN-len);
5725 break;
5726 }
5727 }
5728 }
5729 }
5730
5731 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5732 len = vpd_data[1];
5733 memcpy(nic->product_name, &vpd_data[3], len);
5734 nic->product_name[len] = 0;
5735 }
5736 kfree(vpd_data);
5737 swstats->mem_freed += 256;
5738 }
5739
5740 /**
5741 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5742 * @dev: pointer to netdev
5743 * @eeprom : pointer to the user level structure provided by ethtool,
5744 * containing all relevant information.
5745 * @data_buf : user defined value to be written into Eeprom.
5746 * Description: Reads the values stored in the Eeprom at given offset
5747 * for a given length. Stores these values in the input argument data
5748 * buffer 'data_buf' and returns these to the caller (ethtool.)
5749 * Return value:
5750 * int 0 on success
5751 */
5752
5753 static int s2io_ethtool_geeprom(struct net_device *dev,
5754 struct ethtool_eeprom *eeprom, u8 * data_buf)
5755 {
5756 u32 i, valid;
5757 u64 data;
5758 struct s2io_nic *sp = netdev_priv(dev);
5759
5760 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5761
5762 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5763 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5764
5765 for (i = 0; i < eeprom->len; i += 4) {
5766 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5767 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5768 return -EFAULT;
5769 }
5770 valid = INV(data);
5771 memcpy((data_buf + i), &valid, 4);
5772 }
5773 return 0;
5774 }
5775
5776 /**
5777 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5778 * @dev: pointer to netdev
5779 * @eeprom : pointer to the user level structure provided by ethtool,
5780 * containing all relevant information.
5781 * @data_buf : user defined value to be written into Eeprom.
5782 * Description:
5783 * Tries to write the user provided value in the Eeprom, at the offset
5784 * given by the user.
5785 * Return value:
5786 * 0 on success, -EFAULT on failure.
5787 */
5788
5789 static int s2io_ethtool_seeprom(struct net_device *dev,
5790 struct ethtool_eeprom *eeprom,
5791 u8 *data_buf)
5792 {
5793 int len = eeprom->len, cnt = 0;
5794 u64 valid = 0, data;
5795 struct s2io_nic *sp = netdev_priv(dev);
5796
5797 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5798 DBG_PRINT(ERR_DBG,
5799 "ETHTOOL_WRITE_EEPROM Err: "
5800 "Magic value is wrong, it is 0x%x should be 0x%x\n",
5801 (sp->pdev->vendor | (sp->pdev->device << 16)),
5802 eeprom->magic);
5803 return -EFAULT;
5804 }
5805
5806 while (len) {
5807 data = (u32)data_buf[cnt] & 0x000000FF;
5808 if (data)
5809 valid = (u32)(data << 24);
5810 else
5811 valid = data;
5812
5813 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5814 DBG_PRINT(ERR_DBG,
5815 "ETHTOOL_WRITE_EEPROM Err: "
5816 "Cannot write into the specified offset\n");
5817 return -EFAULT;
5818 }
5819 cnt++;
5820 len--;
5821 }
5822
5823 return 0;
5824 }
5825
5826 /**
5827 * s2io_register_test - reads and writes into all clock domains.
5828 * @sp : private member of the device structure, which is a pointer to the
5829 * s2io_nic structure.
5830 * @data : variable that returns the result of each of the tests conducted
5831 * by the driver.
5832 * Description:
5833 * Read and write into all clock domains. The NIC has 3 clock domains,
5834 * and verifies that registers in all the three regions are accessible.
5835 * Return value:
5836 * 0 on success.
5837 */
5838
5839 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5840 {
5841 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5842 u64 val64 = 0, exp_val;
5843 int fail = 0;
5844
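/*
 * Read checks: registers from each of the three clock domains are
 * compared against their expected reset values. The write checks
 * below use alternating 0x5A/0xA5 patterns on the xmsi_data register.
 */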
5845 val64 = readq(&bar0->pif_rd_swapper_fb);
5846 if (val64 != 0x123456789abcdefULL) {
5847 fail = 1;
5848 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5849 }
5850
5851 val64 = readq(&bar0->rmac_pause_cfg);
5852 if (val64 != 0xc000ffff00000000ULL) {
5853 fail = 1;
5854 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5855 }
5856
5857 val64 = readq(&bar0->rx_queue_cfg);
5858 if (sp->device_type == XFRAME_II_DEVICE)
5859 exp_val = 0x0404040404040404ULL;
5860 else
5861 exp_val = 0x0808080808080808ULL;
5862 if (val64 != exp_val) {
5863 fail = 1;
5864 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5865 }
5866
5867 val64 = readq(&bar0->xgxs_efifo_cfg);
5868 if (val64 != 0x000000001923141EULL) {
5869 fail = 1;
5870 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5871 }
5872
5873 val64 = 0x5A5A5A5A5A5A5A5AULL;
5874 writeq(val64, &bar0->xmsi_data);
5875 val64 = readq(&bar0->xmsi_data);
5876 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5877 fail = 1;
5878 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5879 }
5880
5881 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5882 writeq(val64, &bar0->xmsi_data);
5883 val64 = readq(&bar0->xmsi_data);
5884 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5885 fail = 1;
5886 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5887 }
5888
5889 *data = fail;
5890 return fail;
5891 }
5892
5893 /**
5894 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5895 * @sp : private member of the device structure, which is a pointer to the
5896 * s2io_nic structure.
5897 * @data: variable that returns the result of each of the tests conducted by
5898 * the driver.
5899 * Description:
5900 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5901 * register.
5902 * Return value:
5903 * 0 on success.
5904 */
5905
5906 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5907 {
5908 int fail = 0;
5909 u64 ret_data, org_4F0, org_7F0;
5910 u8 saved_4F0 = 0, saved_7F0 = 0;
5911 struct net_device *dev = sp->dev;
5912
5913 /* Test Write Error at offset 0 */
5914 /* Note that SPI interface allows write access to all areas
5915 * of EEPROM. Hence doing all negative testing only for Xframe I.
5916 */
5917 if (sp->device_type == XFRAME_I_DEVICE)
5918 if (!write_eeprom(sp, 0, 0, 3))
5919 fail = 1;
5920
5921 /* Save current values at offsets 0x4F0 and 0x7F0 */
5922 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5923 saved_4F0 = 1;
5924 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5925 saved_7F0 = 1;
5926
5927 /* Test Write at offset 4f0 */
5928 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5929 fail = 1;
5930 if (read_eeprom(sp, 0x4F0, &ret_data))
5931 fail = 1;
5932
5933 if (ret_data != 0x012345) {
5934 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5935 "Data written %llx Data read %llx\n",
5936 dev->name, (unsigned long long)0x12345,
5937 (unsigned long long)ret_data);
5938 fail = 1;
5939 }
5940
5941 /* Reset the EEPROM data to FFFF */
5942 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5943
5944 /* Test Write Request Error at offset 0x7c */
5945 if (sp->device_type == XFRAME_I_DEVICE)
5946 if (!write_eeprom(sp, 0x07C, 0, 3))
5947 fail = 1;
5948
5949 /* Test Write Request at offset 0x7f0 */
5950 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5951 fail = 1;
5952 if (read_eeprom(sp, 0x7F0, &ret_data))
5953 fail = 1;
5954
5955 if (ret_data != 0x012345) {
5956 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5957 "Data written %llx Data read %llx\n",
5958 dev->name, (unsigned long long)0x12345,
5959 (unsigned long long)ret_data);
5960 fail = 1;
5961 }
5962
5963 /* Reset the EEPROM data to FFFF */
5964 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5965
5966 if (sp->device_type == XFRAME_I_DEVICE) {
5967 /* Test Write Error at offset 0x80 */
5968 if (!write_eeprom(sp, 0x080, 0, 3))
5969 fail = 1;
5970
5971 /* Test Write Error at offset 0xfc */
5972 if (!write_eeprom(sp, 0x0FC, 0, 3))
5973 fail = 1;
5974
5975 /* Test Write Error at offset 0x100 */
5976 if (!write_eeprom(sp, 0x100, 0, 3))
5977 fail = 1;
5978
5979 /* Test Write Error at offset 4ec */
5980 if (!write_eeprom(sp, 0x4EC, 0, 3))
5981 fail = 1;
5982 }
5983
5984 /* Restore values at offsets 0x4F0 and 0x7F0 */
5985 if (saved_4F0)
5986 write_eeprom(sp, 0x4F0, org_4F0, 3);
5987 if (saved_7F0)
5988 write_eeprom(sp, 0x7F0, org_7F0, 3);
5989
5990 *data = fail;
5991 return fail;
5992 }
5993
5994 /**
5995 * s2io_bist_test - invokes the MemBist test of the card .
5996 * @sp : private member of the device structure, which is a pointer to the
5997 * s2io_nic structure.
5998 * @data: variable that returns the result of each of the tests conducted by
5999 * the driver.
6000 * Description:
6001 * This invokes the MemBist test of the card. We give around
6002 * 2 secs time for the Test to complete. If it's still not complete
6003 * within this period, we consider that the test failed.
6004 * Return value:
6005 * 0 on success and -1 on failure.
6006 */
6007
6008 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6009 {
6010 u8 bist = 0;
6011 int cnt = 0, ret = -1;
6012
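/*
 * Start BIST through the PCI config space BIST register and poll for
 * completion for up to 2 seconds (20 polls, 100 ms apart); the
 * completion code is returned in *data.
 */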
6013 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6014 bist |= PCI_BIST_START;
6015 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
6016
6017 while (cnt < 20) {
6018 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6019 if (!(bist & PCI_BIST_START)) {
6020 *data = (bist & PCI_BIST_CODE_MASK);
6021 ret = 0;
6022 break;
6023 }
6024 msleep(100);
6025 cnt++;
6026 }
6027
6028 return ret;
6029 }
6030
6031 /**
6032 * s2io_link_test - verifies the link state of the nic
6033 * @sp: private member of the device structure, which is a pointer to the
6034 * s2io_nic structure.
6035 * @data: variable that returns the result of each of the tests conducted by
6036 * the driver.
6037 * Description:
6038 * The function verifies the link state of the NIC and updates the input
6039 * argument 'data' appropriately.
6040 * Return value:
6041 * 0 on success.
6042 */
6043
6044 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6045 {
6046 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6047 u64 val64;
6048
6049 val64 = readq(&bar0->adapter_status);
6050 if (!(LINK_IS_UP(val64)))
6051 *data = 1;
6052 else
6053 *data = 0;
6054
6055 return *data;
6056 }
6057
6058 /**
6059 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6060 * @sp: private member of the device structure, which is a pointer to the
6061 * s2io_nic structure.
6062 * @data: variable that returns the result of each of the tests
6063 * conducted by the driver.
6064 * Description:
6065 * This is one of the offline tests that verifies the read and write
6066 * access to the RldRam chip on the NIC.
6067 * Return value:
6068 * 0 on success.
6069 */
6070
6071 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6072 {
6073 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6074 u64 val64;
6075 int cnt, iteration = 0, test_fail = 0;
6076
6077 val64 = readq(&bar0->adapter_control);
6078 val64 &= ~ADAPTER_ECC_EN;
6079 writeq(val64, &bar0->adapter_control);
6080
6081 val64 = readq(&bar0->mc_rldram_test_ctrl);
6082 val64 |= MC_RLDRAM_TEST_MODE;
6083 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6084
6085 val64 = readq(&bar0->mc_rldram_mrs);
6086 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6087 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6088
6089 val64 |= MC_RLDRAM_MRS_ENABLE;
6090 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6091
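/*
 * Two passes: write three 64-bit test patterns to the RLDRAM test
 * address, trigger a read-back and check the PASS bit; the second
 * pass inverts the upper 48 bits of each pattern.
 */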
6092 while (iteration < 2) {
6093 val64 = 0x55555555aaaa0000ULL;
6094 if (iteration == 1)
6095 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6096 writeq(val64, &bar0->mc_rldram_test_d0);
6097
6098 val64 = 0xaaaa5a5555550000ULL;
6099 if (iteration == 1)
6100 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6101 writeq(val64, &bar0->mc_rldram_test_d1);
6102
6103 val64 = 0x55aaaaaaaa5a0000ULL;
6104 if (iteration == 1)
6105 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6106 writeq(val64, &bar0->mc_rldram_test_d2);
6107
6108 val64 = (u64) (0x0000003ffffe0100ULL);
6109 writeq(val64, &bar0->mc_rldram_test_add);
6110
6111 val64 = MC_RLDRAM_TEST_MODE |
6112 MC_RLDRAM_TEST_WRITE |
6113 MC_RLDRAM_TEST_GO;
6114 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6115
6116 for (cnt = 0; cnt < 5; cnt++) {
6117 val64 = readq(&bar0->mc_rldram_test_ctrl);
6118 if (val64 & MC_RLDRAM_TEST_DONE)
6119 break;
6120 msleep(200);
6121 }
6122
6123 if (cnt == 5)
6124 break;
6125
6126 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6127 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6128
6129 for (cnt = 0; cnt < 5; cnt++) {
6130 val64 = readq(&bar0->mc_rldram_test_ctrl);
6131 if (val64 & MC_RLDRAM_TEST_DONE)
6132 break;
6133 msleep(500);
6134 }
6135
6136 if (cnt == 5)
6137 break;
6138
6139 val64 = readq(&bar0->mc_rldram_test_ctrl);
6140 if (!(val64 & MC_RLDRAM_TEST_PASS))
6141 test_fail = 1;
6142
6143 iteration++;
6144 }
6145
6146 *data = test_fail;
6147
6148 /* Bring the adapter out of test mode */
6149 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6150
6151 return test_fail;
6152 }
6153
6154 /**
6155 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
6156 * @dev: pointer to netdev
6157 * @ethtest : pointer to an ethtool command specific structure that will be
6158 * returned to the user.
6159 * @data : variable that returns the result of each of the tests
6160 * conducted by the driver.
6161 * Description:
6162 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6163 * the health of the card.
6164 * Return value:
6165 * void
6166 */
6167
6168 static void s2io_ethtool_test(struct net_device *dev,
6169 struct ethtool_test *ethtest,
6170 uint64_t *data)
6171 {
6172 struct s2io_nic *sp = netdev_priv(dev);
6173 int orig_state = netif_running(sp->dev);
6174
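/*
 * data[] layout: 0 = register test, 1 = EEPROM test, 2 = link test,
 * 3 = RLDRAM test, 4 = BIST. The offline path runs everything except
 * the link test; the online path runs only the link test.
 */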
6175 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6176 /* Offline Tests. */
6177 if (orig_state)
6178 s2io_close(sp->dev);
6179
6180 if (s2io_register_test(sp, &data[0]))
6181 ethtest->flags |= ETH_TEST_FL_FAILED;
6182
6183 s2io_reset(sp);
6184
6185 if (s2io_rldram_test(sp, &data[3]))
6186 ethtest->flags |= ETH_TEST_FL_FAILED;
6187
6188 s2io_reset(sp);
6189
6190 if (s2io_eeprom_test(sp, &data[1]))
6191 ethtest->flags |= ETH_TEST_FL_FAILED;
6192
6193 if (s2io_bist_test(sp, &data[4]))
6194 ethtest->flags |= ETH_TEST_FL_FAILED;
6195
6196 if (orig_state)
6197 s2io_open(sp->dev);
6198
6199 data[2] = 0;
6200 } else {
6201 /* Online Tests. */
6202 if (!orig_state) {
6203 DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6204 dev->name);
6205 data[0] = -1;
6206 data[1] = -1;
6207 data[2] = -1;
6208 data[3] = -1;
6209 data[4] = -1;
6210 }
6211
6212 if (s2io_link_test(sp, &data[2]))
6213 ethtest->flags |= ETH_TEST_FL_FAILED;
6214
6215 data[0] = 0;
6216 data[1] = 0;
6217 data[3] = 0;
6218 data[4] = 0;
6219 }
6220 }
6221
6222 static void s2io_get_ethtool_stats(struct net_device *dev,
6223 struct ethtool_stats *estats,
6224 u64 *tmp_stats)
6225 {
6226 int i = 0, k;
6227 struct s2io_nic *sp = netdev_priv(dev);
6228 struct stat_block *stats = sp->mac_control.stats_info;
6229 struct swStat *swstats = &stats->sw_stat;
6230 struct xpakStat *xstats = &stats->xpak_stat;
6231
6232 s2io_updt_stats(sp);
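/*
 * Most MAC counters are kept by the hardware as a 32-bit value plus a
 * 32-bit overflow counter; the two halves are combined here into a
 * single 64-bit statistic for ethtool.
 */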
6233 tmp_stats[i++] =
6234 (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
6235 le32_to_cpu(stats->tmac_frms);
6236 tmp_stats[i++] =
6237 (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6238 le32_to_cpu(stats->tmac_data_octets);
6239 tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6240 tmp_stats[i++] =
6241 (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6242 le32_to_cpu(stats->tmac_mcst_frms);
6243 tmp_stats[i++] =
6244 (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6245 le32_to_cpu(stats->tmac_bcst_frms);
6246 tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6247 tmp_stats[i++] =
6248 (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6249 le32_to_cpu(stats->tmac_ttl_octets);
6250 tmp_stats[i++] =
6251 (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6252 le32_to_cpu(stats->tmac_ucst_frms);
6253 tmp_stats[i++] =
6254 (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6255 le32_to_cpu(stats->tmac_nucst_frms);
6256 tmp_stats[i++] =
6257 (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6258 le32_to_cpu(stats->tmac_any_err_frms);
6259 tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6260 tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6261 tmp_stats[i++] =
6262 (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6263 le32_to_cpu(stats->tmac_vld_ip);
6264 tmp_stats[i++] =
6265 (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6266 le32_to_cpu(stats->tmac_drop_ip);
6267 tmp_stats[i++] =
6268 (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6269 le32_to_cpu(stats->tmac_icmp);
6270 tmp_stats[i++] =
6271 (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6272 le32_to_cpu(stats->tmac_rst_tcp);
6273 tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6274 tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6275 le32_to_cpu(stats->tmac_udp);
6276 tmp_stats[i++] =
6277 (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6278 le32_to_cpu(stats->rmac_vld_frms);
6279 tmp_stats[i++] =
6280 (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6281 le32_to_cpu(stats->rmac_data_octets);
6282 tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6283 tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6284 tmp_stats[i++] =
6285 (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6286 le32_to_cpu(stats->rmac_vld_mcst_frms);
6287 tmp_stats[i++] =
6288 (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6289 le32_to_cpu(stats->rmac_vld_bcst_frms);
6290 tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6291 tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6292 tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6293 tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6294 tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6295 tmp_stats[i++] =
6296 (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6297 le32_to_cpu(stats->rmac_ttl_octets);
6298 tmp_stats[i++] =
6299 (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6300 | le32_to_cpu(stats->rmac_accepted_ucst_frms);
6301 tmp_stats[i++] =
6302 (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6303 << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6304 tmp_stats[i++] =
6305 (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6306 le32_to_cpu(stats->rmac_discarded_frms);
6307 tmp_stats[i++] =
6308 (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6309 << 32 | le32_to_cpu(stats->rmac_drop_events);
6310 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6311 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6312 tmp_stats[i++] =
6313 (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6314 le32_to_cpu(stats->rmac_usized_frms);
6315 tmp_stats[i++] =
6316 (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6317 le32_to_cpu(stats->rmac_osized_frms);
6318 tmp_stats[i++] =
6319 (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6320 le32_to_cpu(stats->rmac_frag_frms);
6321 tmp_stats[i++] =
6322 (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6323 le32_to_cpu(stats->rmac_jabber_frms);
6324 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6325 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6326 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6327 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6328 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6329 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6330 tmp_stats[i++] =
6331 (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6332 le32_to_cpu(stats->rmac_ip);
6333 tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6334 tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6335 tmp_stats[i++] =
6336 (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6337 le32_to_cpu(stats->rmac_drop_ip);
6338 tmp_stats[i++] =
6339 (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6340 le32_to_cpu(stats->rmac_icmp);
6341 tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6342 tmp_stats[i++] =
6343 (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6344 le32_to_cpu(stats->rmac_udp);
6345 tmp_stats[i++] =
6346 (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6347 le32_to_cpu(stats->rmac_err_drp_udp);
6348 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6349 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6350 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6351 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6352 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6353 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6354 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6355 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6356 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6357 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6358 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6359 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6360 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6361 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6362 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6363 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6364 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6365 tmp_stats[i++] =
6366 (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6367 le32_to_cpu(stats->rmac_pause_cnt);
6368 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6369 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6370 tmp_stats[i++] =
6371 (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6372 le32_to_cpu(stats->rmac_accepted_ip);
6373 tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6374 tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6375 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6376 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6377 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6378 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6379 tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6380 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6381 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6382 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6383 tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6384 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6385 tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6386 tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6387 tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6388 tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6389 tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6390 tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6391 tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6392
6393 /* Enhanced statistics exist only for Hercules */
6394 if (sp->device_type == XFRAME_II_DEVICE) {
6395 tmp_stats[i++] =
6396 le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6397 tmp_stats[i++] =
6398 le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6399 tmp_stats[i++] =
6400 le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6401 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6402 tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6403 tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6404 tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6405 tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6406 tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6407 tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6408 tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6409 tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6410 tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6411 tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6412 tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6413 tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6414 }
6415
6416 tmp_stats[i++] = 0;
6417 tmp_stats[i++] = swstats->single_ecc_errs;
6418 tmp_stats[i++] = swstats->double_ecc_errs;
6419 tmp_stats[i++] = swstats->parity_err_cnt;
6420 tmp_stats[i++] = swstats->serious_err_cnt;
6421 tmp_stats[i++] = swstats->soft_reset_cnt;
6422 tmp_stats[i++] = swstats->fifo_full_cnt;
6423 for (k = 0; k < MAX_RX_RINGS; k++)
6424 tmp_stats[i++] = swstats->ring_full_cnt[k];
6425 tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6426 tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6427 tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6428 tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6429 tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6430 tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6431 tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6432 tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6433 tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6434 tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6435 tmp_stats[i++] = xstats->warn_laser_output_power_high;
6436 tmp_stats[i++] = xstats->warn_laser_output_power_low;
6437 tmp_stats[i++] = swstats->clubbed_frms_cnt;
6438 tmp_stats[i++] = swstats->sending_both;
6439 tmp_stats[i++] = swstats->outof_sequence_pkts;
6440 tmp_stats[i++] = swstats->flush_max_pkts;
6441 if (swstats->num_aggregations) {
6442 u64 tmp = swstats->sum_avg_pkts_aggregated;
6443 int count = 0;
6444 /*
6445 * Since 64-bit divide does not work on all platforms,
6446 * do repeated subtraction.
6447 */
6448 while (tmp >= swstats->num_aggregations) {
6449 tmp -= swstats->num_aggregations;
6450 count++;
6451 }
6452 tmp_stats[i++] = count;
6453 } else
6454 tmp_stats[i++] = 0;
6455 tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6456 tmp_stats[i++] = swstats->pci_map_fail_cnt;
6457 tmp_stats[i++] = swstats->watchdog_timer_cnt;
6458 tmp_stats[i++] = swstats->mem_allocated;
6459 tmp_stats[i++] = swstats->mem_freed;
6460 tmp_stats[i++] = swstats->link_up_cnt;
6461 tmp_stats[i++] = swstats->link_down_cnt;
6462 tmp_stats[i++] = swstats->link_up_time;
6463 tmp_stats[i++] = swstats->link_down_time;
6464
6465 tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6466 tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6467 tmp_stats[i++] = swstats->tx_parity_err_cnt;
6468 tmp_stats[i++] = swstats->tx_link_loss_cnt;
6469 tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6470
6471 tmp_stats[i++] = swstats->rx_parity_err_cnt;
6472 tmp_stats[i++] = swstats->rx_abort_cnt;
6473 tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6474 tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6475 tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6476 tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6477 tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6478 tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6479 tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6480 tmp_stats[i++] = swstats->tda_err_cnt;
6481 tmp_stats[i++] = swstats->pfc_err_cnt;
6482 tmp_stats[i++] = swstats->pcc_err_cnt;
6483 tmp_stats[i++] = swstats->tti_err_cnt;
6484 tmp_stats[i++] = swstats->tpa_err_cnt;
6485 tmp_stats[i++] = swstats->sm_err_cnt;
6486 tmp_stats[i++] = swstats->lso_err_cnt;
6487 tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6488 tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6489 tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6490 tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6491 tmp_stats[i++] = swstats->rc_err_cnt;
6492 tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6493 tmp_stats[i++] = swstats->rpa_err_cnt;
6494 tmp_stats[i++] = swstats->rda_err_cnt;
6495 tmp_stats[i++] = swstats->rti_err_cnt;
6496 tmp_stats[i++] = swstats->mc_err_cnt;
6497 }
6498
6499 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6500 {
6501 return XENA_REG_SPACE;
6502 }
6503
6504
6505 static int s2io_get_eeprom_len(struct net_device *dev)
6506 {
6507 return XENA_EEPROM_SPACE;
6508 }
6509
6510 static int s2io_get_sset_count(struct net_device *dev, int sset)
6511 {
6512 struct s2io_nic *sp = netdev_priv(dev);
6513
6514 switch (sset) {
6515 case ETH_SS_TEST:
6516 return S2IO_TEST_LEN;
6517 case ETH_SS_STATS:
6518 switch (sp->device_type) {
6519 case XFRAME_I_DEVICE:
6520 return XFRAME_I_STAT_LEN;
6521 case XFRAME_II_DEVICE:
6522 return XFRAME_II_STAT_LEN;
6523 default:
6524 return 0;
6525 }
6526 default:
6527 return -EOPNOTSUPP;
6528 }
6529 }
6530
6531 static void s2io_ethtool_get_strings(struct net_device *dev,
6532 u32 stringset, u8 *data)
6533 {
6534 int stat_size = 0;
6535 struct s2io_nic *sp = netdev_priv(dev);
6536
6537 switch (stringset) {
6538 case ETH_SS_TEST:
6539 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6540 break;
6541 case ETH_SS_STATS:
6542 stat_size = sizeof(ethtool_xena_stats_keys);
6543 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6544 if (sp->device_type == XFRAME_II_DEVICE) {
6545 memcpy(data + stat_size,
6546 &ethtool_enhanced_stats_keys,
6547 sizeof(ethtool_enhanced_stats_keys));
6548 stat_size += sizeof(ethtool_enhanced_stats_keys);
6549 }
6550
6551 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6552 sizeof(ethtool_driver_stats_keys));
6553 }
6554 }
6555
6556 static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6557 {
6558 struct s2io_nic *sp = netdev_priv(dev);
6559 netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6560
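/* Toggling LRO on a running interface requires a full card down/up cycle. */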
6561 if (changed && netif_running(dev)) {
6562 int rc;
6563
6564 s2io_stop_all_tx_queue(sp);
6565 s2io_card_down(sp);
6566 dev->features = features;
6567 rc = s2io_card_up(sp);
6568 if (rc)
6569 s2io_reset(sp);
6570 else
6571 s2io_start_all_tx_queue(sp);
6572
6573 return rc ? rc : 1;
6574 }
6575
6576 return 0;
6577 }
6578
6579 static const struct ethtool_ops netdev_ethtool_ops = {
6580 .get_drvinfo = s2io_ethtool_gdrvinfo,
6581 .get_regs_len = s2io_ethtool_get_regs_len,
6582 .get_regs = s2io_ethtool_gregs,
6583 .get_link = ethtool_op_get_link,
6584 .get_eeprom_len = s2io_get_eeprom_len,
6585 .get_eeprom = s2io_ethtool_geeprom,
6586 .set_eeprom = s2io_ethtool_seeprom,
6587 .get_ringparam = s2io_ethtool_gringparam,
6588 .get_pauseparam = s2io_ethtool_getpause_data,
6589 .set_pauseparam = s2io_ethtool_setpause_data,
6590 .self_test = s2io_ethtool_test,
6591 .get_strings = s2io_ethtool_get_strings,
6592 .set_phys_id = s2io_ethtool_set_led,
6593 .get_ethtool_stats = s2io_get_ethtool_stats,
6594 .get_sset_count = s2io_get_sset_count,
6595 .get_link_ksettings = s2io_ethtool_get_link_ksettings,
6596 .set_link_ksettings = s2io_ethtool_set_link_ksettings,
6597 };
6598
6599 /**
6600 * s2io_ioctl - Entry point for the Ioctl
6601 * @dev : Device pointer.
6602 * @rq : An IOCTL specific structure that can contain a pointer to
6603 * a proprietary structure used to pass information to the driver.
6604 * @cmd : This is used to distinguish between the different commands that
6605 * can be passed to the IOCTL functions.
6606 * Description:
6607 * Currently no special functionality is supported in IOCTL, hence the
6608 * function always returns -EOPNOTSUPP
6609 */
6610
6611 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6612 {
6613 return -EOPNOTSUPP;
6614 }
6615
6616 /**
6617 * s2io_change_mtu - entry point to change MTU size for the device.
6618 * @dev : device pointer.
6619 * @new_mtu : the new MTU size for the device.
6620 * Description: A driver entry point to change MTU size for the device.
6621 * Before changing the MTU the device must be stopped.
6622 * Return value:
6623 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6624 * file on failure.
6625 */
6626
6627 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6628 {
6629 struct s2io_nic *sp = netdev_priv(dev);
6630 int ret = 0;
6631
6632 dev->mtu = new_mtu;
6633 if (netif_running(dev)) {
6634 s2io_stop_all_tx_queue(sp);
6635 s2io_card_down(sp);
6636 ret = s2io_card_up(sp);
6637 if (ret) {
6638 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6639 __func__);
6640 return ret;
6641 }
6642 s2io_wake_all_tx_queue(sp);
6643 } else { /* Device is down */
6644 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6645 u64 val64 = new_mtu;
6646
6647 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6648 }
6649
6650 return ret;
6651 }
6652
6653 /**
6654 * s2io_set_link - Set the Link status
6655 * @work: work struct containing a pointer to the device private structure
6656 * Description: Sets the link status for the adapter
6657 */
6658
6659 static void s2io_set_link(struct work_struct *work)
6660 {
6661 struct s2io_nic *nic = container_of(work, struct s2io_nic,
6662 set_link_task);
6663 struct net_device *dev = nic->dev;
6664 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6665 register u64 val64;
6666 u16 subid;
6667
6668 rtnl_lock();
6669
6670 if (!netif_running(dev))
6671 goto out_unlock;
6672
6673 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6674 /* The card is being reset, no point doing anything */
6675 goto out_unlock;
6676 }
6677
6678 subid = nic->pdev->subsystem_device;
6679 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6680 /*
6681 * Allow a small delay for the NIC's self-initiated
6682 * cleanup to complete.
6683 */
6684 msleep(100);
6685 }
6686
6687 val64 = readq(&bar0->adapter_status);
6688 if (LINK_IS_UP(val64)) {
6689 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6690 if (verify_xena_quiescence(nic)) {
6691 val64 = readq(&bar0->adapter_control);
6692 val64 |= ADAPTER_CNTL_EN;
6693 writeq(val64, &bar0->adapter_control);
6694 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6695 nic->device_type, subid)) {
6696 val64 = readq(&bar0->gpio_control);
6697 val64 |= GPIO_CTRL_GPIO_0;
6698 writeq(val64, &bar0->gpio_control);
6699 val64 = readq(&bar0->gpio_control);
6700 } else {
6701 val64 |= ADAPTER_LED_ON;
6702 writeq(val64, &bar0->adapter_control);
6703 }
6704 nic->device_enabled_once = true;
6705 } else {
6706 DBG_PRINT(ERR_DBG,
6707 "%s: Error: device is not Quiescent\n",
6708 dev->name);
6709 s2io_stop_all_tx_queue(nic);
6710 }
6711 }
6712 val64 = readq(&bar0->adapter_control);
6713 val64 |= ADAPTER_LED_ON;
6714 writeq(val64, &bar0->adapter_control);
6715 s2io_link(nic, LINK_UP);
6716 } else {
6717 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6718 subid)) {
6719 val64 = readq(&bar0->gpio_control);
6720 val64 &= ~GPIO_CTRL_GPIO_0;
6721 writeq(val64, &bar0->gpio_control);
6722 val64 = readq(&bar0->gpio_control);
6723 }
6724 /* turn off LED */
6725 val64 = readq(&bar0->adapter_control);
6726 val64 = val64 & (~ADAPTER_LED_ON);
6727 writeq(val64, &bar0->adapter_control);
6728 s2io_link(nic, LINK_DOWN);
6729 }
6730 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6731
6732 out_unlock:
6733 rtnl_unlock();
6734 }
6735
6736 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6737 struct buffAdd *ba,
6738 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6739 u64 *temp2, int size)
6740 {
6741 struct net_device *dev = sp->dev;
6742 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6743
6744 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6745 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6746 /* allocate skb */
6747 if (*skb) {
6748 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6749 /*
6750 * As Rx frames are not going to be processed,
6751 * use the same mapped address for the Rxd
6752 * buffer pointer
6753 */
6754 rxdp1->Buffer0_ptr = *temp0;
6755 } else {
6756 *skb = netdev_alloc_skb(dev, size);
6757 if (!(*skb)) {
6758 DBG_PRINT(INFO_DBG,
6759 "%s: Out of memory to allocate %s\n",
6760 dev->name, "1 buf mode SKBs");
6761 stats->mem_alloc_fail_cnt++;
6762 return -ENOMEM ;
6763 }
6764 stats->mem_allocated += (*skb)->truesize;
6765 /* storing the mapped addr in a temp variable
6766 * such that it will be used for the next rxd whose
6767 * Host Control is NULL
6768 */
6769 rxdp1->Buffer0_ptr = *temp0 =
6770 dma_map_single(&sp->pdev->dev, (*skb)->data,
6771 size - NET_IP_ALIGN,
6772 DMA_FROM_DEVICE);
6773 if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
6774 goto memalloc_failed;
6775 rxdp->Host_Control = (unsigned long) (*skb);
6776 }
6777 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6778 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6779 /* Two buffer Mode */
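/*
 * Buffer0 holds the header area (BUF0_LEN), Buffer1 is a dummy
 * buffer and Buffer2 receives the payload (dev->mtu + 4 bytes).
 */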
6780 if (*skb) {
6781 rxdp3->Buffer2_ptr = *temp2;
6782 rxdp3->Buffer0_ptr = *temp0;
6783 rxdp3->Buffer1_ptr = *temp1;
6784 } else {
6785 *skb = netdev_alloc_skb(dev, size);
6786 if (!(*skb)) {
6787 DBG_PRINT(INFO_DBG,
6788 "%s: Out of memory to allocate %s\n",
6789 dev->name,
6790 "2 buf mode SKBs");
6791 stats->mem_alloc_fail_cnt++;
6792 return -ENOMEM;
6793 }
6794 stats->mem_allocated += (*skb)->truesize;
6795 rxdp3->Buffer2_ptr = *temp2 =
6796 dma_map_single(&sp->pdev->dev, (*skb)->data,
6797 dev->mtu + 4, DMA_FROM_DEVICE);
6798 if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
6799 goto memalloc_failed;
6800 rxdp3->Buffer0_ptr = *temp0 =
6801 dma_map_single(&sp->pdev->dev, ba->ba_0,
6802 BUF0_LEN, DMA_FROM_DEVICE);
6803 if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
6804 dma_unmap_single(&sp->pdev->dev,
6805 (dma_addr_t)rxdp3->Buffer2_ptr,
6806 dev->mtu + 4,
6807 DMA_FROM_DEVICE);
6808 goto memalloc_failed;
6809 }
6810 rxdp->Host_Control = (unsigned long) (*skb);
6811
6812 /* Buffer-1 will be a dummy buffer, not used */
6813 rxdp3->Buffer1_ptr = *temp1 =
6814 dma_map_single(&sp->pdev->dev, ba->ba_1,
6815 BUF1_LEN, DMA_FROM_DEVICE);
6816 if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
6817 dma_unmap_single(&sp->pdev->dev,
6818 (dma_addr_t)rxdp3->Buffer0_ptr,
6819 BUF0_LEN, DMA_FROM_DEVICE);
6820 dma_unmap_single(&sp->pdev->dev,
6821 (dma_addr_t)rxdp3->Buffer2_ptr,
6822 dev->mtu + 4,
6823 DMA_FROM_DEVICE);
6824 goto memalloc_failed;
6825 }
6826 }
6827 }
6828 return 0;
6829
6830 memalloc_failed:
6831 stats->pci_map_fail_cnt++;
6832 stats->mem_freed += (*skb)->truesize;
6833 dev_kfree_skb(*skb);
6834 return -ENOMEM;
6835 }
6836
6837 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6838 int size)
6839 {
6840 struct net_device *dev = sp->dev;
6841 if (sp->rxd_mode == RXD_MODE_1) {
6842 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6843 } else if (sp->rxd_mode == RXD_MODE_3B) {
6844 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6845 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6846 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6847 }
6848 }
6849
6850 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6851 {
6852 int i, j, k, blk_cnt = 0, size;
6853 struct config_param *config = &sp->config;
6854 struct mac_info *mac_control = &sp->mac_control;
6855 struct net_device *dev = sp->dev;
6856 struct RxD_t *rxdp = NULL;
6857 struct sk_buff *skb = NULL;
6858 struct buffAdd *ba = NULL;
6859 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6860
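/*
 * Walk every RxD in every ring, (re)attach receive buffers where
 * needed and hand ownership back to the hardware; used while bringing
 * the card down so the rings stay replenished.
 */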
6861 /* Calculate the size based on ring mode */
6862 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6863 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6864 if (sp->rxd_mode == RXD_MODE_1)
6865 size += NET_IP_ALIGN;
6866 else if (sp->rxd_mode == RXD_MODE_3B)
6867 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6868
6869 for (i = 0; i < config->rx_ring_num; i++) {
6870 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6871 struct ring_info *ring = &mac_control->rings[i];
6872
6873 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6874
6875 for (j = 0; j < blk_cnt; j++) {
6876 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6877 rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6878 if (sp->rxd_mode == RXD_MODE_3B)
6879 ba = &ring->ba[j][k];
6880 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6881 &temp0_64,
6882 &temp1_64,
6883 &temp2_64,
6884 size) == -ENOMEM) {
6885 return 0;
6886 }
6887
6888 set_rxd_buffer_size(sp, rxdp, size);
6889 dma_wmb();
6890 /* flip the Ownership bit to Hardware */
6891 rxdp->Control_1 |= RXD_OWN_XENA;
6892 }
6893 }
6894 }
6895 return 0;
6896
6897 }
6898
6899 static int s2io_add_isr(struct s2io_nic *sp)
6900 {
6901 int ret = 0;
6902 struct net_device *dev = sp->dev;
6903 int err = 0;
6904
6905 if (sp->config.intr_type == MSI_X)
6906 ret = s2io_enable_msi_x(sp);
6907 if (ret) {
6908 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6909 sp->config.intr_type = INTA;
6910 }
6911
6912 /*
6913 * Store the values of the MSIX table in
6914 * the struct s2io_nic structure
6915 */
6916 store_xmsi_data(sp);
6917
6918 /* After proper initialization of H/W, register ISR */
6919 if (sp->config.intr_type == MSI_X) {
6920 int i, msix_rx_cnt = 0;
6921
6922 for (i = 0; i < sp->num_entries; i++) {
6923 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6924 if (sp->s2io_entries[i].type ==
6925 MSIX_RING_TYPE) {
6926 snprintf(sp->desc[i],
6927 sizeof(sp->desc[i]),
6928 "%s:MSI-X-%d-RX",
6929 dev->name, i);
6930 err = request_irq(sp->entries[i].vector,
6931 s2io_msix_ring_handle,
6932 0,
6933 sp->desc[i],
6934 sp->s2io_entries[i].arg);
6935 } else if (sp->s2io_entries[i].type ==
6936 MSIX_ALARM_TYPE) {
6937 snprintf(sp->desc[i],
6938 sizeof(sp->desc[i]),
6939 "%s:MSI-X-%d-TX",
6940 dev->name, i);
6941 err = request_irq(sp->entries[i].vector,
6942 s2io_msix_fifo_handle,
6943 0,
6944 sp->desc[i],
6945 sp->s2io_entries[i].arg);
6946
6947 }
6948 /* if either data or addr is zero print it. */
6949 if (!(sp->msix_info[i].addr &&
6950 sp->msix_info[i].data)) {
6951 DBG_PRINT(ERR_DBG,
6952 "%s @Addr:0x%llx Data:0x%llx\n",
6953 sp->desc[i],
6954 (unsigned long long)
6955 sp->msix_info[i].addr,
6956 (unsigned long long)
6957 ntohl(sp->msix_info[i].data));
6958 } else
6959 msix_rx_cnt++;
6960 if (err) {
6961 remove_msix_isr(sp);
6962
6963 DBG_PRINT(ERR_DBG,
6964 "%s:MSI-X-%d registration "
6965 "failed\n", dev->name, i);
6966
6967 DBG_PRINT(ERR_DBG,
6968 "%s: Defaulting to INTA\n",
6969 dev->name);
6970 sp->config.intr_type = INTA;
6971 break;
6972 }
6973 sp->s2io_entries[i].in_use =
6974 MSIX_REGISTERED_SUCCESS;
6975 }
6976 }
6977 if (!err) {
6978 pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
6979 DBG_PRINT(INFO_DBG,
6980 "MSI-X-TX entries enabled through alarm vector\n");
6981 }
6982 }
6983 if (sp->config.intr_type == INTA) {
6984 err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
6985 sp->name, dev);
6986 if (err) {
6987 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6988 dev->name);
6989 return -1;
6990 }
6991 }
6992 return 0;
6993 }
6994
6995 static void s2io_rem_isr(struct s2io_nic *sp)
6996 {
6997 if (sp->config.intr_type == MSI_X)
6998 remove_msix_isr(sp);
6999 else
7000 remove_inta_isr(sp);
7001 }
7002
7003 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7004 {
7005 int cnt = 0;
7006 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7007 register u64 val64 = 0;
7008 struct config_param *config;
7009 config = &sp->config;
7010
7011 if (!is_s2io_card_up(sp))
7012 return;
7013
7014 del_timer_sync(&sp->alarm_timer);
7015 /* If s2io_set_link task is executing, wait till it completes. */
7016 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7017 msleep(50);
7018 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7019
7020 /* Disable napi */
7021 if (sp->config.napi) {
7022 int off = 0;
7023 if (config->intr_type == MSI_X) {
7024 for (; off < sp->config.rx_ring_num; off++)
7025 napi_disable(&sp->mac_control.rings[off].napi);
7026 }
7027 else
7028 napi_disable(&sp->napi);
7029 }
7030
7031 /* disable Tx and Rx traffic on the NIC */
7032 if (do_io)
7033 stop_nic(sp);
7034
7035 s2io_rem_isr(sp);
7036
7037 /* stop the tx queue, indicate link down */
7038 s2io_link(sp, LINK_DOWN);
7039
7040 /* Check if the device is Quiescent and then Reset the NIC */
7041 while (do_io) {
7042 		/* As per the HW requirement we need to replenish the
7043 		 * receive buffers to avoid a ring bump. Since there is
7044 		 * no intention of processing the Rx frame at this point, we
7045 		 * just set the ownership bit of the RxDs in each Rx
7046 		 * ring back to the HW and set the appropriate buffer size
7047 		 * based on the ring mode.
7048 		 */
7049 rxd_owner_bit_reset(sp);
7050
7051 val64 = readq(&bar0->adapter_status);
7052 if (verify_xena_quiescence(sp)) {
7053 if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7054 break;
7055 }
7056
7057 msleep(50);
7058 cnt++;
7059 if (cnt == 10) {
7060 DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7061 "adapter status reads 0x%llx\n",
7062 (unsigned long long)val64);
7063 break;
7064 }
7065 }
7066 if (do_io)
7067 s2io_reset(sp);
7068
7069 /* Free all Tx buffers */
7070 free_tx_buffers(sp);
7071
7072 /* Free all Rx buffers */
7073 free_rx_buffers(sp);
7074
7075 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7076 }
7077
7078 static void s2io_card_down(struct s2io_nic *sp)
7079 {
7080 do_s2io_card_down(sp, 1);
7081 }
7082
7083 static int s2io_card_up(struct s2io_nic *sp)
7084 {
7085 int i, ret = 0;
7086 struct config_param *config;
7087 struct mac_info *mac_control;
7088 struct net_device *dev = sp->dev;
7089 u16 interruptible;
7090
7091 /* Initialize the H/W I/O registers */
7092 ret = init_nic(sp);
7093 if (ret != 0) {
7094 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7095 dev->name);
7096 if (ret != -EIO)
7097 s2io_reset(sp);
7098 return ret;
7099 }
7100
7101 	/*
7102 	 * Initialize the Rx buffers for each configured Rx ring with the
7103 	 * current MTU and LRO settings.
7104 	 */
7105 config = &sp->config;
7106 mac_control = &sp->mac_control;
7107
7108 for (i = 0; i < config->rx_ring_num; i++) {
7109 struct ring_info *ring = &mac_control->rings[i];
7110
7111 ring->mtu = dev->mtu;
7112 ring->lro = !!(dev->features & NETIF_F_LRO);
7113 ret = fill_rx_buffers(sp, ring, 1);
7114 if (ret) {
7115 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7116 dev->name);
7117 ret = -ENOMEM;
7118 goto err_fill_buff;
7119 }
7120 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7121 ring->rx_bufs_left);
7122 }
7123
7124 /* Initialise napi */
7125 if (config->napi) {
7126 if (config->intr_type == MSI_X) {
7127 for (i = 0; i < sp->config.rx_ring_num; i++)
7128 napi_enable(&sp->mac_control.rings[i].napi);
7129 } else {
7130 napi_enable(&sp->napi);
7131 }
7132 }
7133
7134 /* Maintain the state prior to the open */
7135 if (sp->promisc_flg)
7136 sp->promisc_flg = 0;
7137 if (sp->m_cast_flg) {
7138 sp->m_cast_flg = 0;
7139 sp->all_multi_pos = 0;
7140 }
7141
7142 /* Setting its receive mode */
7143 s2io_set_multicast(dev);
7144
7145 if (dev->features & NETIF_F_LRO) {
7146 /* Initialize max aggregatable pkts per session based on MTU */
7147 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7148 /* Check if we can use (if specified) user provided value */
7149 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7150 sp->lro_max_aggr_per_sess = lro_max_pkts;
7151 }
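	/*
	 * Worked example (illustrative): with a 1500-byte MTU the limit
	 * computed above is 65535 / 1500 = 43 frames per LRO session, unless
	 * the lro_max_pkts module parameter requests something smaller.
	 */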
7152
7153 /* Enable Rx Traffic and interrupts on the NIC */
7154 if (start_nic(sp)) {
7155 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7156 ret = -ENODEV;
7157 goto err_out;
7158 }
7159
7160 /* Add interrupt service routine */
7161 if (s2io_add_isr(sp) != 0) {
7162 if (sp->config.intr_type == MSI_X)
7163 s2io_rem_isr(sp);
7164 ret = -ENODEV;
7165 goto err_out;
7166 }
7167
7168 timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
7169 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
7170
7171 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7172
7173 /* Enable select interrupts */
7174 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7175 if (sp->config.intr_type != INTA) {
7176 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7177 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7178 } else {
7179 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7180 interruptible |= TX_PIC_INTR;
7181 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7182 }
7183
7184 return 0;
7185
7186 err_out:
7187 if (config->napi) {
7188 if (config->intr_type == MSI_X) {
7189 for (i = 0; i < sp->config.rx_ring_num; i++)
7190 napi_disable(&sp->mac_control.rings[i].napi);
7191 } else {
7192 napi_disable(&sp->napi);
7193 }
7194 }
7195 err_fill_buff:
7196 s2io_reset(sp);
7197 free_rx_buffers(sp);
7198 return ret;
7199 }
7200
7201 /**
7202 * s2io_restart_nic - Resets the NIC.
7203 * @work : work struct containing a pointer to the device private structure
7204 * Description:
7205 * This function is scheduled to be run by the s2io_tx_watchdog
7206 * function after 0.5 secs to reset the NIC. The idea is to reduce
7207 * the run time of the watch dog routine which is run holding a
7208 * spin lock.
7209 */
7210
7211 static void s2io_restart_nic(struct work_struct *work)
7212 {
7213 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7214 struct net_device *dev = sp->dev;
7215
7216 rtnl_lock();
7217
7218 if (!netif_running(dev))
7219 goto out_unlock;
7220
7221 s2io_card_down(sp);
7222 if (s2io_card_up(sp)) {
7223 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7224 }
7225 s2io_wake_all_tx_queue(sp);
7226 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7227 out_unlock:
7228 rtnl_unlock();
7229 }
7230
7231 /**
7232 * s2io_tx_watchdog - Watchdog for transmit side.
7233 * @dev : Pointer to net device structure
7234 * @txqueue: index of the hanging queue
7235 * Description:
7236 * This function is triggered if the Tx Queue is stopped
7237 * for a pre-defined amount of time when the Interface is still up.
7238 * If the Interface is jammed in such a situation, the hardware is
7239 * reset (by s2io_close) and restarted again (by s2io_open) to
7240 * overcome any problem that might have been caused in the hardware.
7241 * Return value:
7242 * void
7243 */
7244
7245 static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
7246 {
7247 struct s2io_nic *sp = netdev_priv(dev);
7248 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7249
7250 if (netif_carrier_ok(dev)) {
7251 swstats->watchdog_timer_cnt++;
7252 schedule_work(&sp->rst_timer_task);
7253 swstats->soft_reset_cnt++;
7254 }
7255 }
7256
7257 /**
7258 * rx_osm_handler - To perform some OS related operations on SKB.
7259 * @ring_data : the ring from which this RxD was extracted.
7260 * @rxdp: descriptor
7261 * Description:
7262  * This function is called by the Rx interrupt service routine to perform
7263 * some OS related operations on the SKB before passing it to the upper
7264 * layers. It mainly checks if the checksum is OK, if so adds it to the
7265  * SKB's cksum variable, increments the Rx packet count and passes the SKB
7266 * to the upper layer. If the checksum is wrong, it increments the Rx
7267 * packet error count, frees the SKB and returns error.
7268 * Return value:
7269 * SUCCESS on success and -1 on failure.
7270 */
7271 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t *rxdp)
7272 {
7273 struct s2io_nic *sp = ring_data->nic;
7274 struct net_device *dev = ring_data->dev;
7275 struct sk_buff *skb = (struct sk_buff *)
7276 ((unsigned long)rxdp->Host_Control);
7277 int ring_no = ring_data->ring_no;
7278 u16 l3_csum, l4_csum;
7279 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7280 struct lro *lro;
7281 u8 err_mask;
7282 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7283
7284 skb->dev = dev;
7285
7286 if (err) {
7287 /* Check for parity error */
7288 if (err & 0x1)
7289 swstats->parity_err_cnt++;
7290
7291 err_mask = err >> 48;
7292 switch (err_mask) {
7293 case 1:
7294 swstats->rx_parity_err_cnt++;
7295 break;
7296
7297 case 2:
7298 swstats->rx_abort_cnt++;
7299 break;
7300
7301 case 3:
7302 swstats->rx_parity_abort_cnt++;
7303 break;
7304
7305 case 4:
7306 swstats->rx_rda_fail_cnt++;
7307 break;
7308
7309 case 5:
7310 swstats->rx_unkn_prot_cnt++;
7311 break;
7312
7313 case 6:
7314 swstats->rx_fcs_err_cnt++;
7315 break;
7316
7317 case 7:
7318 swstats->rx_buf_size_err_cnt++;
7319 break;
7320
7321 case 8:
7322 swstats->rx_rxd_corrupt_cnt++;
7323 break;
7324
7325 case 15:
7326 swstats->rx_unkn_err_cnt++;
7327 break;
7328 }
7329 /*
7330 * Drop the packet if bad transfer code. Exception being
7331 * 0x5, which could be due to unsupported IPv6 extension header.
7332 * In this case, we let stack handle the packet.
7333 * Note that in this case, since checksum will be incorrect,
7334 * stack will validate the same.
7335 */
7336 if (err_mask != 0x5) {
7337 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7338 dev->name, err_mask);
7339 dev->stats.rx_crc_errors++;
7340 swstats->mem_freed
7341 += skb->truesize;
7342 dev_kfree_skb(skb);
7343 ring_data->rx_bufs_left -= 1;
7344 rxdp->Host_Control = 0;
7345 return 0;
7346 }
7347 }
7348
7349 rxdp->Host_Control = 0;
7350 if (sp->rxd_mode == RXD_MODE_1) {
7351 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7352
7353 skb_put(skb, len);
7354 } else if (sp->rxd_mode == RXD_MODE_3B) {
7355 int get_block = ring_data->rx_curr_get_info.block_index;
7356 int get_off = ring_data->rx_curr_get_info.offset;
7357 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7358 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7359 unsigned char *buff = skb_push(skb, buf0_len);
7360
7361 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7362 memcpy(buff, ba->ba_0, buf0_len);
7363 skb_put(skb, buf2_len);
7364 }
7365
7366 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7367 ((!ring_data->lro) ||
7368 (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7369 (dev->features & NETIF_F_RXCSUM)) {
7370 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7371 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7372 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7373 /*
7374 * NIC verifies if the Checksum of the received
7375 * frame is Ok or not and accordingly returns
7376 * a flag in the RxD.
7377 */
7378 skb->ip_summed = CHECKSUM_UNNECESSARY;
7379 if (ring_data->lro) {
7380 u32 tcp_len = 0;
7381 u8 *tcp;
7382 int ret = 0;
7383
7384 ret = s2io_club_tcp_session(ring_data,
7385 skb->data, &tcp,
7386 &tcp_len, &lro,
7387 rxdp, sp);
7388 switch (ret) {
7389 case 3: /* Begin anew */
7390 lro->parent = skb;
7391 goto aggregate;
7392 case 1: /* Aggregate */
7393 lro_append_pkt(sp, lro, skb, tcp_len);
7394 goto aggregate;
7395 case 4: /* Flush session */
7396 lro_append_pkt(sp, lro, skb, tcp_len);
7397 queue_rx_frame(lro->parent,
7398 lro->vlan_tag);
7399 clear_lro_session(lro);
7400 swstats->flush_max_pkts++;
7401 goto aggregate;
7402 case 2: /* Flush both */
7403 lro->parent->data_len = lro->frags_len;
7404 swstats->sending_both++;
7405 queue_rx_frame(lro->parent,
7406 lro->vlan_tag);
7407 clear_lro_session(lro);
7408 goto send_up;
7409 case 0: /* sessions exceeded */
7410 case -1: /* non-TCP or not L2 aggregatable */
7411 case 5: /*
7412 * First pkt in session not
7413 * L3/L4 aggregatable
7414 */
7415 break;
7416 default:
7417 DBG_PRINT(ERR_DBG,
7418 "%s: Samadhana!!\n",
7419 __func__);
7420 BUG();
7421 }
7422 }
7423 } else {
7424 /*
7425 * Packet with erroneous checksum, let the
7426 * upper layers deal with it.
7427 */
7428 skb_checksum_none_assert(skb);
7429 }
7430 } else
7431 skb_checksum_none_assert(skb);
7432
7433 swstats->mem_freed += skb->truesize;
7434 send_up:
7435 skb_record_rx_queue(skb, ring_no);
7436 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7437 aggregate:
7438 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7439 return SUCCESS;
7440 }
7441
7442 /**
7443 * s2io_link - stops/starts the Tx queue.
7444 * @sp : private member of the device structure, which is a pointer to the
7445 * s2io_nic structure.
7446  * @link : indicates whether link is UP/DOWN.
7447 * Description:
7448 * This function stops/starts the Tx queue depending on whether the link
7449  * status of the NIC is down or up. This is called by the Alarm
7450 * interrupt handler whenever a link change interrupt comes up.
7451 * Return value:
7452 * void.
7453 */
7454
7455 static void s2io_link(struct s2io_nic *sp, int link)
7456 {
7457 struct net_device *dev = sp->dev;
7458 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7459
7460 if (link != sp->last_link_state) {
7461 init_tti(sp, link);
7462 if (link == LINK_DOWN) {
7463 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7464 s2io_stop_all_tx_queue(sp);
7465 netif_carrier_off(dev);
7466 if (swstats->link_up_cnt)
7467 swstats->link_up_time =
7468 jiffies - sp->start_time;
7469 swstats->link_down_cnt++;
7470 } else {
7471 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7472 if (swstats->link_down_cnt)
7473 swstats->link_down_time =
7474 jiffies - sp->start_time;
7475 swstats->link_up_cnt++;
7476 netif_carrier_on(dev);
7477 s2io_wake_all_tx_queue(sp);
7478 }
7479 }
7480 sp->last_link_state = link;
7481 sp->start_time = jiffies;
7482 }
7483
7484 /**
7485  * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7486 * @sp : private member of the device structure, which is a pointer to the
7487 * s2io_nic structure.
7488 * Description:
7489 * This function initializes a few of the PCI and PCI-X configuration registers
7490 * with recommended values.
7491 * Return value:
7492 * void
7493 */
7494
7495 static void s2io_init_pci(struct s2io_nic *sp)
7496 {
7497 u16 pci_cmd = 0, pcix_cmd = 0;
7498
7499 /* Enable Data Parity Error Recovery in PCI-X command register. */
7500 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7501 &(pcix_cmd));
7502 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7503 (pcix_cmd | 1));
7504 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7505 &(pcix_cmd));
7506
7507 /* Set the PErr Response bit in PCI command register. */
7508 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7509 pci_write_config_word(sp->pdev, PCI_COMMAND,
7510 (pci_cmd | PCI_COMMAND_PARITY));
7511 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7512 }
7513
7514 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7515 u8 *dev_multiq)
7516 {
7517 int i;
7518
7519 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7520 DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7521 "(%d) not supported\n", tx_fifo_num);
7522
7523 if (tx_fifo_num < 1)
7524 tx_fifo_num = 1;
7525 else
7526 tx_fifo_num = MAX_TX_FIFOS;
7527
7528 DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7529 }
7530
7531 if (multiq)
7532 *dev_multiq = multiq;
7533
7534 if (tx_steering_type && (1 == tx_fifo_num)) {
7535 if (tx_steering_type != TX_DEFAULT_STEERING)
7536 DBG_PRINT(ERR_DBG,
7537 "Tx steering is not supported with "
7538 "one fifo. Disabling Tx steering.\n");
7539 tx_steering_type = NO_STEERING;
7540 }
7541
7542 if ((tx_steering_type < NO_STEERING) ||
7543 (tx_steering_type > TX_DEFAULT_STEERING)) {
7544 DBG_PRINT(ERR_DBG,
7545 "Requested transmit steering not supported\n");
7546 DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7547 tx_steering_type = NO_STEERING;
7548 }
7549
7550 if (rx_ring_num > MAX_RX_RINGS) {
7551 DBG_PRINT(ERR_DBG,
7552 "Requested number of rx rings not supported\n");
7553 DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7554 MAX_RX_RINGS);
7555 rx_ring_num = MAX_RX_RINGS;
7556 }
7557
7558 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7559 DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7560 "Defaulting to INTA\n");
7561 *dev_intr_type = INTA;
7562 }
7563
7564 if ((*dev_intr_type == MSI_X) &&
7565 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7566 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7567 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7568 "Defaulting to INTA\n");
7569 *dev_intr_type = INTA;
7570 }
7571
7572 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7573 DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7574 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7575 rx_ring_mode = 1;
7576 }
7577
7578 for (i = 0; i < MAX_RX_RINGS; i++)
7579 if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7580 DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7581 "supported\nDefaulting to %d\n",
7582 MAX_RX_BLOCKS_PER_RING);
7583 rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7584 }
7585
7586 return SUCCESS;
7587 }
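/*
 * Illustrative (hypothetical) module load exercising the parameters
 * validated above:
 *
 *	modprobe s2io tx_fifo_num=8 rx_ring_num=4 rx_ring_mode=2 napi=1
 *
 * A value outside the supported range, e.g. rx_ring_mode=3, is not an
 * error; s2io_verify_parm() logs a message and falls back to the
 * 1-buffer default.
 */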
7588
7589 /**
7590 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or Traffic class respectively.
7591 * @nic: device private variable
7592 * @ds_codepoint: data
7593 * @ring: ring index
7594 * Description: The function configures the receive steering to
7595 * desired receive ring.
7596 * Return Value: SUCCESS on success and
7597  * '-1' on failure.
7598 */
7599 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7600 {
7601 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7602 register u64 val64 = 0;
7603
7604 if (ds_codepoint > 63)
7605 return FAILURE;
7606
7607 val64 = RTS_DS_MEM_DATA(ring);
7608 writeq(val64, &bar0->rts_ds_mem_data);
7609
7610 val64 = RTS_DS_MEM_CTRL_WE |
7611 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7612 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7613
7614 writeq(val64, &bar0->rts_ds_mem_ctrl);
7615
7616 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7617 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7618 S2IO_BIT_RESET);
7619 }
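/*
 * Illustrative use: steering all frames carrying DSCP 46 (Expedited
 * Forwarding) to receive ring 3 could be requested as
 *
 *	rts_ds_steer(nic, 46, 3);
 *
 * Codepoints above 63 are rejected because the DS field is only six
 * bits wide.
 */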
7620
7621 static const struct net_device_ops s2io_netdev_ops = {
7622 .ndo_open = s2io_open,
7623 .ndo_stop = s2io_close,
7624 .ndo_get_stats = s2io_get_stats,
7625 .ndo_start_xmit = s2io_xmit,
7626 .ndo_validate_addr = eth_validate_addr,
7627 .ndo_set_rx_mode = s2io_set_multicast,
7628 .ndo_do_ioctl = s2io_ioctl,
7629 .ndo_set_mac_address = s2io_set_mac_addr,
7630 .ndo_change_mtu = s2io_change_mtu,
7631 .ndo_set_features = s2io_set_features,
7632 .ndo_tx_timeout = s2io_tx_watchdog,
7633 #ifdef CONFIG_NET_POLL_CONTROLLER
7634 .ndo_poll_controller = s2io_netpoll,
7635 #endif
7636 };
7637
7638 /**
7639  * s2io_init_nic - Initialization of the adapter.
7640 * @pdev : structure containing the PCI related information of the device.
7641 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7642 * Description:
7643  * The function initializes an adapter identified by the pci_dev structure.
7644  * All OS related initialization including memory and device structure and
7645  * initialization of the device private variable is done. Also the swapper
7646 * control register is initialized to enable read and write into the I/O
7647 * registers of the device.
7648 * Return value:
7649 * returns 0 on success and negative on failure.
7650 */
7651
7652 static int
7653 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7654 {
7655 struct s2io_nic *sp;
7656 struct net_device *dev;
7657 int i, j, ret;
7658 int dma_flag = false;
7659 u32 mac_up, mac_down;
7660 u64 val64 = 0, tmp64 = 0;
7661 struct XENA_dev_config __iomem *bar0 = NULL;
7662 u16 subid;
7663 struct config_param *config;
7664 struct mac_info *mac_control;
7665 int mode;
7666 u8 dev_intr_type = intr_type;
7667 u8 dev_multiq = 0;
7668
7669 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7670 if (ret)
7671 return ret;
7672
7673 ret = pci_enable_device(pdev);
7674 if (ret) {
7675 DBG_PRINT(ERR_DBG,
7676 "%s: pci_enable_device failed\n", __func__);
7677 return ret;
7678 }
7679
7680 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7681 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7682 dma_flag = true;
7683 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7684 DBG_PRINT(ERR_DBG,
7685 "Unable to obtain 64bit DMA for coherent allocations\n");
7686 pci_disable_device(pdev);
7687 return -ENOMEM;
7688 }
7689 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
7690 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7691 } else {
7692 pci_disable_device(pdev);
7693 return -ENOMEM;
7694 }
7695 ret = pci_request_regions(pdev, s2io_driver_name);
7696 if (ret) {
7697 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7698 __func__, ret);
7699 pci_disable_device(pdev);
7700 return -ENODEV;
7701 }
7702 if (dev_multiq)
7703 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7704 else
7705 dev = alloc_etherdev(sizeof(struct s2io_nic));
7706 if (dev == NULL) {
7707 pci_disable_device(pdev);
7708 pci_release_regions(pdev);
7709 return -ENODEV;
7710 }
7711
7712 pci_set_master(pdev);
7713 pci_set_drvdata(pdev, dev);
7714 SET_NETDEV_DEV(dev, &pdev->dev);
7715
7716 /* Private member variable initialized to s2io NIC structure */
7717 sp = netdev_priv(dev);
7718 sp->dev = dev;
7719 sp->pdev = pdev;
7720 sp->high_dma_flag = dma_flag;
7721 sp->device_enabled_once = false;
7722 if (rx_ring_mode == 1)
7723 sp->rxd_mode = RXD_MODE_1;
7724 if (rx_ring_mode == 2)
7725 sp->rxd_mode = RXD_MODE_3B;
7726
7727 sp->config.intr_type = dev_intr_type;
7728
7729 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7730 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7731 sp->device_type = XFRAME_II_DEVICE;
7732 else
7733 sp->device_type = XFRAME_I_DEVICE;
7734
7735
7736 /* Initialize some PCI/PCI-X fields of the NIC. */
7737 s2io_init_pci(sp);
7738
7739 /*
7740 * Setting the device configuration parameters.
7741 * Most of these parameters can be specified by the user during
7742 * module insertion as they are module loadable parameters. If
7743  * these parameters are not specified during load time, they
7744 * are initialized with default values.
7745 */
7746 config = &sp->config;
7747 mac_control = &sp->mac_control;
7748
7749 config->napi = napi;
7750 config->tx_steering_type = tx_steering_type;
7751
7752 /* Tx side parameters. */
7753 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7754 config->tx_fifo_num = MAX_TX_FIFOS;
7755 else
7756 config->tx_fifo_num = tx_fifo_num;
7757
7758 /* Initialize the fifos used for tx steering */
7759 if (config->tx_fifo_num < 5) {
7760 if (config->tx_fifo_num == 1)
7761 sp->total_tcp_fifos = 1;
7762 else
7763 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7764 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7765 sp->total_udp_fifos = 1;
7766 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7767 } else {
7768 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7769 FIFO_OTHER_MAX_NUM);
7770 sp->udp_fifo_idx = sp->total_tcp_fifos;
7771 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7772 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7773 }
7774
7775 config->multiq = dev_multiq;
7776 for (i = 0; i < config->tx_fifo_num; i++) {
7777 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7778
7779 tx_cfg->fifo_len = tx_fifo_len[i];
7780 tx_cfg->fifo_priority = i;
7781 }
7782
7783 /* mapping the QoS priority to the configured fifos */
7784 for (i = 0; i < MAX_TX_FIFOS; i++)
7785 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7786
7787 /* map the hashing selector table to the configured fifos */
7788 for (i = 0; i < config->tx_fifo_num; i++)
7789 sp->fifo_selector[i] = fifo_selector[i];
7790
7791
7792 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7793 for (i = 0; i < config->tx_fifo_num; i++) {
7794 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7795
7796 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7797 if (tx_cfg->fifo_len < 65) {
7798 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7799 break;
7800 }
7801 }
7802 /* + 2 because one Txd for skb->data and one Txd for UFO */
7803 config->max_txds = MAX_SKB_FRAGS + 2;
7804
7805 /* Rx side parameters. */
7806 config->rx_ring_num = rx_ring_num;
7807 for (i = 0; i < config->rx_ring_num; i++) {
7808 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7809 struct ring_info *ring = &mac_control->rings[i];
7810
7811 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7812 rx_cfg->ring_priority = i;
7813 ring->rx_bufs_left = 0;
7814 ring->rxd_mode = sp->rxd_mode;
7815 ring->rxd_count = rxd_count[sp->rxd_mode];
7816 ring->pdev = sp->pdev;
7817 ring->dev = sp->dev;
7818 }
7819
7820 for (i = 0; i < rx_ring_num; i++) {
7821 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7822
7823 rx_cfg->ring_org = RING_ORG_BUFF1;
7824 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7825 }
7826
7827 /* Setting Mac Control parameters */
7828 mac_control->rmac_pause_time = rmac_pause_time;
7829 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7830 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7831
7832
7833 /* initialize the shared memory used by the NIC and the host */
7834 if (init_shared_mem(sp)) {
7835 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7836 ret = -ENOMEM;
7837 goto mem_alloc_failed;
7838 }
7839
7840 sp->bar0 = pci_ioremap_bar(pdev, 0);
7841 if (!sp->bar0) {
7842 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7843 dev->name);
7844 ret = -ENOMEM;
7845 goto bar0_remap_failed;
7846 }
7847
7848 sp->bar1 = pci_ioremap_bar(pdev, 2);
7849 if (!sp->bar1) {
7850 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7851 dev->name);
7852 ret = -ENOMEM;
7853 goto bar1_remap_failed;
7854 }
7855
7856 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7857 for (j = 0; j < MAX_TX_FIFOS; j++) {
7858 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7859 }
7860
7861 /* Driver entry points */
7862 dev->netdev_ops = &s2io_netdev_ops;
7863 dev->ethtool_ops = &netdev_ethtool_ops;
7864 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7865 NETIF_F_TSO | NETIF_F_TSO6 |
7866 NETIF_F_RXCSUM | NETIF_F_LRO;
7867 dev->features |= dev->hw_features |
7868 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7869 if (sp->high_dma_flag == true)
7870 dev->features |= NETIF_F_HIGHDMA;
7871 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7872 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7873 INIT_WORK(&sp->set_link_task, s2io_set_link);
7874
7875 pci_save_state(sp->pdev);
7876
7877 /* Setting swapper control on the NIC, for proper reset operation */
7878 if (s2io_set_swapper(sp)) {
7879 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7880 dev->name);
7881 ret = -EAGAIN;
7882 goto set_swap_failed;
7883 }
7884
7885 	/* Verify if the Herc works on the slot it's placed into */
7886 if (sp->device_type & XFRAME_II_DEVICE) {
7887 mode = s2io_verify_pci_mode(sp);
7888 if (mode < 0) {
7889 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7890 __func__);
7891 ret = -EBADSLT;
7892 goto set_swap_failed;
7893 }
7894 }
7895
7896 if (sp->config.intr_type == MSI_X) {
7897 sp->num_entries = config->rx_ring_num + 1;
7898 ret = s2io_enable_msi_x(sp);
7899
7900 if (!ret) {
7901 ret = s2io_test_msi(sp);
7902 /* rollback MSI-X, will re-enable during add_isr() */
7903 remove_msix_isr(sp);
7904 }
7905 if (ret) {
7906
7907 DBG_PRINT(ERR_DBG,
7908 "MSI-X requested but failed to enable\n");
7909 sp->config.intr_type = INTA;
7910 }
7911 }
7912
7913 if (config->intr_type == MSI_X) {
7914 for (i = 0; i < config->rx_ring_num ; i++) {
7915 struct ring_info *ring = &mac_control->rings[i];
7916
7917 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7918 }
7919 } else {
7920 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7921 }
7922
7923 /* Not needed for Herc */
7924 if (sp->device_type & XFRAME_I_DEVICE) {
7925 /*
7926 * Fix for all "FFs" MAC address problems observed on
7927 * Alpha platforms
7928 */
7929 fix_mac_address(sp);
7930 s2io_reset(sp);
7931 }
7932
7933 /*
7934 * MAC address initialization.
7935 * For now only one mac address will be read and used.
7936 */
7937 bar0 = sp->bar0;
7938 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7939 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7940 writeq(val64, &bar0->rmac_addr_cmd_mem);
7941 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7942 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7943 S2IO_BIT_RESET);
7944 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7945 mac_down = (u32)tmp64;
7946 mac_up = (u32) (tmp64 >> 32);
7947
7948 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7949 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7950 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7951 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7952 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7953 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
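	/*
	 * Worked example (illustrative): if rmac_addr_data0_mem read back as
	 * 0x001122334455xxxx, then mac_up = 0x00112233, mac_down = 0x4455xxxx
	 * and the extraction above yields the station address
	 * 00:11:22:33:44:55, i.e. the MAC sits in the six most significant
	 * bytes of the 64-bit register.
	 */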
7954
7955 /* Set the factory defined MAC address initially */
7956 dev->addr_len = ETH_ALEN;
7957 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7958
7959 /* initialize number of multicast & unicast MAC entries variables */
7960 if (sp->device_type == XFRAME_I_DEVICE) {
7961 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7962 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7963 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7964 } else if (sp->device_type == XFRAME_II_DEVICE) {
7965 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7966 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7967 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7968 }
7969
7970 /* MTU range: 46 - 9600 */
7971 dev->min_mtu = MIN_MTU;
7972 dev->max_mtu = S2IO_JUMBO_SIZE;
7973
7974 /* store mac addresses from CAM to s2io_nic structure */
7975 do_s2io_store_unicast_mc(sp);
7976
7977 /* Configure MSIX vector for number of rings configured plus one */
7978 if ((sp->device_type == XFRAME_II_DEVICE) &&
7979 (config->intr_type == MSI_X))
7980 sp->num_entries = config->rx_ring_num + 1;
7981
7982 /* Store the values of the MSIX table in the s2io_nic structure */
7983 store_xmsi_data(sp);
7984 /* reset Nic and bring it to known state */
7985 s2io_reset(sp);
7986
7987 /*
7988 * Initialize link state flags
7989 * and the card state parameter
7990 */
7991 sp->state = 0;
7992
7993 /* Initialize spinlocks */
7994 for (i = 0; i < sp->config.tx_fifo_num; i++) {
7995 struct fifo_info *fifo = &mac_control->fifos[i];
7996
7997 spin_lock_init(&fifo->tx_lock);
7998 }
7999
8000 /*
8001 * SXE-002: Configure link and activity LED to init state
8002 * on driver load.
8003 */
8004 subid = sp->pdev->subsystem_device;
8005 if ((subid & 0xFF) >= 0x07) {
8006 val64 = readq(&bar0->gpio_control);
8007 val64 |= 0x0000800000000000ULL;
8008 writeq(val64, &bar0->gpio_control);
8009 val64 = 0x0411040400000000ULL;
8010 writeq(val64, (void __iomem *)bar0 + 0x2700);
8011 val64 = readq(&bar0->gpio_control);
8012 }
8013
8014 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8015
8016 if (register_netdev(dev)) {
8017 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8018 ret = -ENODEV;
8019 goto register_failed;
8020 }
8021 s2io_vpd_read(sp);
8022 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8023 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8024 sp->product_name, pdev->revision);
8025 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8026 s2io_driver_version);
8027 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8028 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8029 if (sp->device_type & XFRAME_II_DEVICE) {
8030 mode = s2io_print_pci_mode(sp);
8031 if (mode < 0) {
8032 ret = -EBADSLT;
8033 unregister_netdev(dev);
8034 goto set_swap_failed;
8035 }
8036 }
8037 switch (sp->rxd_mode) {
8038 case RXD_MODE_1:
8039 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8040 dev->name);
8041 break;
8042 case RXD_MODE_3B:
8043 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8044 dev->name);
8045 break;
8046 }
8047
8048 switch (sp->config.napi) {
8049 case 0:
8050 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8051 break;
8052 case 1:
8053 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8054 break;
8055 }
8056
8057 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8058 sp->config.tx_fifo_num);
8059
8060 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8061 sp->config.rx_ring_num);
8062
8063 switch (sp->config.intr_type) {
8064 case INTA:
8065 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8066 break;
8067 case MSI_X:
8068 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8069 break;
8070 }
8071 if (sp->config.multiq) {
8072 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8073 struct fifo_info *fifo = &mac_control->fifos[i];
8074
8075 fifo->multiq = config->multiq;
8076 }
8077 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8078 dev->name);
8079 } else
8080 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8081 dev->name);
8082
8083 switch (sp->config.tx_steering_type) {
8084 case NO_STEERING:
8085 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8086 dev->name);
8087 break;
8088 case TX_PRIORITY_STEERING:
8089 DBG_PRINT(ERR_DBG,
8090 "%s: Priority steering enabled for transmit\n",
8091 dev->name);
8092 break;
8093 case TX_DEFAULT_STEERING:
8094 DBG_PRINT(ERR_DBG,
8095 "%s: Default steering enabled for transmit\n",
8096 dev->name);
8097 }
8098
8099 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8100 dev->name);
8101 /* Initialize device name */
8102 snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8103 sp->product_name);
8104
8105 if (vlan_tag_strip)
8106 sp->vlan_strip_flag = 1;
8107 else
8108 sp->vlan_strip_flag = 0;
8109
8110 /*
8111 * Make Link state as off at this point, when the Link change
8112 * interrupt comes the state will be automatically changed to
8113 * the right state.
8114 */
8115 netif_carrier_off(dev);
8116
8117 return 0;
8118
8119 register_failed:
8120 set_swap_failed:
8121 iounmap(sp->bar1);
8122 bar1_remap_failed:
8123 iounmap(sp->bar0);
8124 bar0_remap_failed:
8125 mem_alloc_failed:
8126 free_shared_mem(sp);
8127 pci_disable_device(pdev);
8128 pci_release_regions(pdev);
8129 free_netdev(dev);
8130
8131 return ret;
8132 }
8133
8134 /**
8135 * s2io_rem_nic - Free the PCI device
8136 * @pdev: structure containing the PCI related information of the device.
8137  * Description: This function is called by the PCI subsystem to release a
8138  * PCI device and free up all resources held by the device. This could
8139  * be in response to a Hot plug event or when the driver is to be removed
8140 * from memory.
8141 */
8142
8143 static void s2io_rem_nic(struct pci_dev *pdev)
8144 {
8145 struct net_device *dev = pci_get_drvdata(pdev);
8146 struct s2io_nic *sp;
8147
8148 if (dev == NULL) {
8149 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8150 return;
8151 }
8152
8153 sp = netdev_priv(dev);
8154
8155 cancel_work_sync(&sp->rst_timer_task);
8156 cancel_work_sync(&sp->set_link_task);
8157
8158 unregister_netdev(dev);
8159
8160 free_shared_mem(sp);
8161 iounmap(sp->bar0);
8162 iounmap(sp->bar1);
8163 pci_release_regions(pdev);
8164 free_netdev(dev);
8165 pci_disable_device(pdev);
8166 }
8167
8168 module_pci_driver(s2io_driver);
8169
8170 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8171 struct tcphdr **tcp, struct RxD_t *rxdp,
8172 struct s2io_nic *sp)
8173 {
8174 int ip_off;
8175 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8176
8177 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8178 DBG_PRINT(INIT_DBG,
8179 "%s: Non-TCP frames not supported for LRO\n",
8180 __func__);
8181 return -1;
8182 }
8183
8184 /* Checking for DIX type or DIX type with VLAN */
8185 if ((l2_type == 0) || (l2_type == 4)) {
8186 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8187 /*
8188 * If vlan stripping is disabled and the frame is VLAN tagged,
8189 * shift the offset by the VLAN header size bytes.
8190 */
8191 if ((!sp->vlan_strip_flag) &&
8192 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8193 ip_off += HEADER_VLAN_SIZE;
8194 } else {
8195 /* LLC, SNAP etc are considered non-mergeable */
8196 return -1;
8197 }
8198
8199 *ip = (struct iphdr *)(buffer + ip_off);
8200 ip_len = (u8)((*ip)->ihl);
8201 ip_len <<= 2;
8202 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8203
8204 return 0;
8205 }
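/*
 * Example (illustrative): for an untagged DIX frame the IP header starts
 * 14 bytes into the buffer; with ihl = 5 the header length computed above
 * is 5 << 2 = 20 bytes, so *tcp ends up pointing 34 bytes into the frame.
 */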
8206
8207 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8208 struct tcphdr *tcp)
8209 {
8210 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8211 if ((lro->iph->saddr != ip->saddr) ||
8212 (lro->iph->daddr != ip->daddr) ||
8213 (lro->tcph->source != tcp->source) ||
8214 (lro->tcph->dest != tcp->dest))
8215 return -1;
8216 return 0;
8217 }
8218
8219 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8220 {
8221 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8222 }
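/*
 * Worked example (illustrative): a 1500-byte IP datagram with no IP
 * options (ihl = 5, i.e. a 20-byte IP header) and a TCP header carrying
 * the timestamp option (doff = 8, i.e. a 32-byte TCP header) has a
 * payload of 1500 - 20 - 32 = 1448 bytes.
 */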
8223
8224 static void initiate_new_session(struct lro *lro, u8 *l2h,
8225 struct iphdr *ip, struct tcphdr *tcp,
8226 u32 tcp_pyld_len, u16 vlan_tag)
8227 {
8228 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8229 lro->l2h = l2h;
8230 lro->iph = ip;
8231 lro->tcph = tcp;
8232 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8233 lro->tcp_ack = tcp->ack_seq;
8234 lro->sg_num = 1;
8235 lro->total_len = ntohs(ip->tot_len);
8236 lro->frags_len = 0;
8237 lro->vlan_tag = vlan_tag;
8238 /*
8239 * Check if we saw TCP timestamp.
8240 * Other consistency checks have already been done.
8241 */
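	/*
	 * A doff of 8 means a 32-byte TCP header: 20 bytes of base header
	 * plus 12 bytes of options. Assuming the common NOP, NOP, 10-byte
	 * timestamp layout (which the dereferences below rely on), ptr+1 and
	 * ptr+2 point at the TSval and TSecr words respectively.
	 */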
8242 if (tcp->doff == 8) {
8243 __be32 *ptr;
8244 ptr = (__be32 *)(tcp+1);
8245 lro->saw_ts = 1;
8246 lro->cur_tsval = ntohl(*(ptr+1));
8247 lro->cur_tsecr = *(ptr+2);
8248 }
8249 lro->in_use = 1;
8250 }
8251
8252 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8253 {
8254 struct iphdr *ip = lro->iph;
8255 struct tcphdr *tcp = lro->tcph;
8256 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8257
8258 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8259
8260 /* Update L3 header */
8261 csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8262 ip->tot_len = htons(lro->total_len);
8263
8264 /* Update L4 header */
8265 tcp->ack_seq = lro->tcp_ack;
8266 tcp->window = lro->window;
8267
8268 /* Update tsecr field if this session has timestamps enabled */
8269 if (lro->saw_ts) {
8270 __be32 *ptr = (__be32 *)(tcp + 1);
8271 *(ptr+2) = lro->cur_tsecr;
8272 }
8273
8274 /* Update counters required for calculation of
8275 * average no. of packets aggregated.
8276 */
8277 swstats->sum_avg_pkts_aggregated += lro->sg_num;
8278 swstats->num_aggregations++;
8279 }
8280
8281 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8282 struct tcphdr *tcp, u32 l4_pyld)
8283 {
8284 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8285 lro->total_len += l4_pyld;
8286 lro->frags_len += l4_pyld;
8287 lro->tcp_next_seq += l4_pyld;
8288 lro->sg_num++;
8289
8290 	/* Update ack seq no. and window advertisement (from this pkt) in LRO object */
8291 lro->tcp_ack = tcp->ack_seq;
8292 lro->window = tcp->window;
8293
8294 if (lro->saw_ts) {
8295 __be32 *ptr;
8296 /* Update tsecr and tsval from this packet */
8297 ptr = (__be32 *)(tcp+1);
8298 lro->cur_tsval = ntohl(*(ptr+1));
8299 lro->cur_tsecr = *(ptr + 2);
8300 }
8301 }
8302
8303 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8304 struct tcphdr *tcp, u32 tcp_pyld_len)
8305 {
8306 u8 *ptr;
8307
8308 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8309
8310 if (!tcp_pyld_len) {
8311 /* Runt frame or a pure ack */
8312 return -1;
8313 }
8314
8315 if (ip->ihl != 5) /* IP has options */
8316 return -1;
8317
8318 /* If we see CE codepoint in IP header, packet is not mergeable */
8319 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8320 return -1;
8321
8322 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8323 if (tcp->urg || tcp->psh || tcp->rst ||
8324 tcp->syn || tcp->fin ||
8325 tcp->ece || tcp->cwr || !tcp->ack) {
8326 /*
8327 * Currently recognize only the ack control word and
8328 * any other control field being set would result in
8329 * flushing the LRO session
8330 */
8331 return -1;
8332 }
8333
8334 /*
8335 * Allow only one TCP timestamp option. Don't aggregate if
8336 * any other options are detected.
8337 */
8338 if (tcp->doff != 5 && tcp->doff != 8)
8339 return -1;
8340
8341 if (tcp->doff == 8) {
8342 ptr = (u8 *)(tcp + 1);
8343 while (*ptr == TCPOPT_NOP)
8344 ptr++;
8345 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8346 return -1;
8347
8348 /* Ensure timestamp value increases monotonically */
8349 if (l_lro)
8350 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8351 return -1;
8352
8353 /* timestamp echo reply should be non-zero */
8354 if (*((__be32 *)(ptr+6)) == 0)
8355 return -1;
8356 }
8357
8358 return 0;
8359 }
8360
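/*
 * Return codes produced by s2io_club_tcp_session() and interpreted by the
 * switch in rx_osm_handler() (summarised from the code below):
 *  -1  non-TCP frame or not L2 aggregatable
 *   0  all LRO sessions already in use
 *   1  aggregate the packet into the matched session
 *   2  flush both the session and this packet
 *   3  begin a new session with this packet
 *   4  aggregate, then flush the session (max aggregation reached)
 *   5  first packet of a would-be session is not L3/L4 aggregatable
 */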
8361 static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8362 u8 **tcp, u32 *tcp_len, struct lro **lro,
8363 struct RxD_t *rxdp, struct s2io_nic *sp)
8364 {
8365 struct iphdr *ip;
8366 struct tcphdr *tcph;
8367 int ret = 0, i;
8368 u16 vlan_tag = 0;
8369 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8370
8371 ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8372 rxdp, sp);
8373 if (ret)
8374 return ret;
8375
8376 DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8377
8378 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8379 tcph = (struct tcphdr *)*tcp;
8380 *tcp_len = get_l4_pyld_length(ip, tcph);
8381 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8382 struct lro *l_lro = &ring_data->lro0_n[i];
8383 if (l_lro->in_use) {
8384 if (check_for_socket_match(l_lro, ip, tcph))
8385 continue;
8386 /* Sock pair matched */
8387 *lro = l_lro;
8388
8389 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8390 DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8391 "expected 0x%x, actual 0x%x\n",
8392 __func__,
8393 (*lro)->tcp_next_seq,
8394 ntohl(tcph->seq));
8395
8396 swstats->outof_sequence_pkts++;
8397 ret = 2;
8398 break;
8399 }
8400
8401 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8402 *tcp_len))
8403 ret = 1; /* Aggregate */
8404 else
8405 ret = 2; /* Flush both */
8406 break;
8407 }
8408 }
8409
8410 if (ret == 0) {
8411 /* Before searching for available LRO objects,
8412 * check if the pkt is L3/L4 aggregatable. If not
8413 * don't create new LRO session. Just send this
8414 * packet up.
8415 */
8416 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8417 return 5;
8418
8419 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8420 struct lro *l_lro = &ring_data->lro0_n[i];
8421 if (!(l_lro->in_use)) {
8422 *lro = l_lro;
8423 ret = 3; /* Begin anew */
8424 break;
8425 }
8426 }
8427 }
8428
8429 if (ret == 0) { /* sessions exceeded */
8430 DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8431 __func__);
8432 *lro = NULL;
8433 return ret;
8434 }
8435
8436 switch (ret) {
8437 case 3:
8438 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8439 vlan_tag);
8440 break;
8441 case 2:
8442 update_L3L4_header(sp, *lro);
8443 break;
8444 case 1:
8445 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8446 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8447 update_L3L4_header(sp, *lro);
8448 ret = 4; /* Flush the LRO */
8449 }
8450 break;
8451 default:
8452 DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8453 break;
8454 }
8455
8456 return ret;
8457 }
8458
8459 static void clear_lro_session(struct lro *lro)
8460 {
8461 	memset(lro, 0, sizeof(struct lro));
8464 }
8465
8466 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8467 {
8468 struct net_device *dev = skb->dev;
8469 struct s2io_nic *sp = netdev_priv(dev);
8470
8471 skb->protocol = eth_type_trans(skb, dev);
8472 if (vlan_tag && sp->vlan_strip_flag)
8473 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8474 if (sp->config.napi)
8475 netif_receive_skb(skb);
8476 else
8477 netif_rx(skb);
8478 }
8479
8480 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8481 struct sk_buff *skb, u32 tcp_len)
8482 {
8483 struct sk_buff *first = lro->parent;
8484 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8485
8486 first->len += tcp_len;
8487 first->data_len = lro->frags_len;
8488 skb_pull(skb, (skb->len - tcp_len));
8489 if (skb_shinfo(first)->frag_list)
8490 lro->last_frag->next = skb;
8491 else
8492 skb_shinfo(first)->frag_list = skb;
8493 first->truesize += skb->truesize;
8494 lro->last_frag = skb;
8495 swstats->clubbed_frms_cnt++;
8496 }
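/*
 * Note: aggregation chains the new skb onto the parent's frag_list and
 * only adjusts len/data_len/truesize here; the IP/TCP headers of the
 * parent are fixed up by update_L3L4_header() on the flush paths.
 */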
8497
8498 /**
8499 * s2io_io_error_detected - called when PCI error is detected
8500 * @pdev: Pointer to PCI device
8501 * @state: The current pci connection state
8502 *
8503 * This function is called after a PCI bus error affecting
8504 * this device has been detected.
8505 */
8506 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8507 pci_channel_state_t state)
8508 {
8509 struct net_device *netdev = pci_get_drvdata(pdev);
8510 struct s2io_nic *sp = netdev_priv(netdev);
8511
8512 netif_device_detach(netdev);
8513
8514 if (state == pci_channel_io_perm_failure)
8515 return PCI_ERS_RESULT_DISCONNECT;
8516
8517 if (netif_running(netdev)) {
8518 /* Bring down the card, while avoiding PCI I/O */
8519 do_s2io_card_down(sp, 0);
8520 }
8521 pci_disable_device(pdev);
8522
8523 return PCI_ERS_RESULT_NEED_RESET;
8524 }
8525
8526 /**
8527 * s2io_io_slot_reset - called after the pci bus has been reset.
8528 * @pdev: Pointer to PCI device
8529 *
8530 * Restart the card from scratch, as if from a cold-boot.
8531  * At this point, the card has experienced a hard reset,
8532 * followed by fixups by BIOS, and has its config space
8533 * set up identically to what it was at cold boot.
8534 */
8535 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8536 {
8537 struct net_device *netdev = pci_get_drvdata(pdev);
8538 struct s2io_nic *sp = netdev_priv(netdev);
8539
8540 if (pci_enable_device(pdev)) {
8541 pr_err("Cannot re-enable PCI device after reset.\n");
8542 return PCI_ERS_RESULT_DISCONNECT;
8543 }
8544
8545 pci_set_master(pdev);
8546 s2io_reset(sp);
8547
8548 return PCI_ERS_RESULT_RECOVERED;
8549 }
8550
8551 /**
8552 * s2io_io_resume - called when traffic can start flowing again.
8553 * @pdev: Pointer to PCI device
8554 *
8555 * This callback is called when the error recovery driver tells
8556  * us that it's OK to resume normal operation.
8557 */
8558 static void s2io_io_resume(struct pci_dev *pdev)
8559 {
8560 struct net_device *netdev = pci_get_drvdata(pdev);
8561 struct s2io_nic *sp = netdev_priv(netdev);
8562
8563 if (netif_running(netdev)) {
8564 if (s2io_card_up(sp)) {
8565 pr_err("Can't bring device back up after reset.\n");
8566 return;
8567 }
8568
8569 if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
8570 s2io_card_down(sp);
8571 pr_err("Can't restore mac addr after reset.\n");
8572 return;
8573 }
8574 }
8575
8576 netif_device_attach(netdev);
8577 netif_tx_wake_all_queues(netdev);
8578 }
8579