• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*****************************************************************************
2  *                                                                           *
3  * File: cxgb2.c                                                             *
4  * $Revision: 1.25 $                                                         *
5  * $Date: 2005/06/22 00:43:25 $                                              *
6  * Description:                                                              *
7  *  Chelsio 10Gb Ethernet Driver.                                            *
8  *                                                                           *
9  * This program is free software; you can redistribute it and/or modify      *
10  * it under the terms of the GNU General Public License, version 2, as       *
11  * published by the Free Software Foundation.                                *
12  *                                                                           *
13  * You should have received a copy of the GNU General Public License along   *
14  * with this program; if not, write to the Free Software Foundation, Inc.,   *
15  * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
16  *                                                                           *
17  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
18  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
20  *                                                                           *
21  * http://www.chelsio.com                                                    *
22  *                                                                           *
23  * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
24  * All rights reserved.                                                      *
25  *                                                                           *
26  * Maintainers: maintainers@chelsio.com                                      *
27  *                                                                           *
28  * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
29  *          Tina Yang               <tainay@chelsio.com>                     *
30  *          Felix Marti             <felix@chelsio.com>                      *
31  *          Scott Bardone           <sbardone@chelsio.com>                   *
32  *          Kurt Ottaway            <kottaway@chelsio.com>                   *
33  *          Frank DiMambro          <frank@chelsio.com>                      *
34  *                                                                           *
35  * History:                                                                  *
36  *                                                                           *
37  ****************************************************************************/
38 
39 #include "common.h"
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/pci.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_vlan.h>
46 #include <linux/mii.h>
47 #include <linux/sockios.h>
48 #include <linux/dma-mapping.h>
49 #include <asm/uaccess.h>
50 
51 #include "cpl5_cmd.h"
52 #include "regs.h"
53 #include "gmac.h"
54 #include "cphy.h"
55 #include "sge.h"
56 #include "tp.h"
57 #include "espi.h"
58 #include "elmer0.h"
59 
60 #include <linux/workqueue.h>
61 
/*
 * Arm the delayed work that accumulates MAC statistics, to run again
 * in "secs" seconds.
 */
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}
66 
/*
 * Cancel a pending MAC statistics update, if one is scheduled.
 */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}
71 
72 #define MAX_CMDQ_ENTRIES	16384
73 #define MAX_CMDQ1_ENTRIES	1024
74 #define MAX_RX_BUFFERS		16384
75 #define MAX_RX_JUMBO_BUFFERS	16384
76 #define MAX_TX_BUFFERS_HIGH	16384U
77 #define MAX_TX_BUFFERS_LOW	1536U
78 #define MAX_TX_BUFFERS		1460U
79 #define MIN_FL_ENTRIES		32
80 
81 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
82 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
83 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
84 
85 /*
86  * The EEPROM is actually bigger but only the first few bytes are used so we
87  * only report those.
88  */
89 #define EEPROM_SIZE 32
90 
91 MODULE_DESCRIPTION(DRV_DESCRIPTION);
92 MODULE_AUTHOR("Chelsio Communications");
93 MODULE_LICENSE("GPL");
94 
95 static int dflt_msg_enable = DFLT_MSG_ENABLE;
96 
97 module_param(dflt_msg_enable, int, 0);
98 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
99 
100 #define HCLOCK 0x0
101 #define LCLOCK 0x1
102 
103 /* T1 cards powersave mode */
104 static int t1_clock(struct adapter *adapter, int mode);
105 static int t1powersave = 1;	/* HW default is powersave mode. */
106 
107 module_param(t1powersave, int, 0);
108 MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
109 
110 static int disable_msi = 0;
111 module_param(disable_msi, int, 0);
112 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
113 
114 static const char pci_speed[][4] = {
115 	"33", "66", "100", "133"
116 };
117 
118 /*
119  * Setup MAC to receive the types of packets we want.
120  */
static void t1_set_rxmode(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct t1_rx_mode rm;

	/* Package up the device's multicast list and hand it to the MAC
	 * layer, which programs its Rx filters from it. */
	rm.dev = dev;
	rm.idx = 0;		/* iteration cursor for the MAC's list walk */
	rm.list = dev->mc_list;
	mac->ops->set_rx_mode(mac, &rm);
}
132 
link_report(struct port_info * p)133 static void link_report(struct port_info *p)
134 {
135 	if (!netif_carrier_ok(p->dev))
136 		printk(KERN_INFO "%s: link down\n", p->dev->name);
137 	else {
138 		const char *s = "10Mbps";
139 
140 		switch (p->link_config.speed) {
141 			case SPEED_10000: s = "10Gbps"; break;
142 			case SPEED_1000:  s = "1000Mbps"; break;
143 			case SPEED_100:   s = "100Mbps"; break;
144 		}
145 
146 		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
147 		       p->dev->name, s,
148 		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
149 	}
150 }
151 
/*
 * Handle a link state change: sync the net_device carrier state with the
 * reported link status, log the transition, and on multi-port adapters
 * pass the negotiated speed to the SGE Tx scheduler.
 */
void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct port_info *p = &adapter->port[port_id];

	/* Only act when the reported state differs from what we have. */
	if (link_stat != netif_carrier_ok(p->dev)) {
		if (link_stat)
			netif_carrier_on(p->dev);
		else
			netif_carrier_off(p->dev);
		link_report(p);

		/* multi-ports: inform toe */
		if ((speed > 0) && (adapter->params.nports > 1)) {
			/* Scheduler rate in Mbps; 10 covers SPEED_10 and
			 * any unrecognized speed value. */
			unsigned int sched_speed = 10;
			switch (speed) {
			case SPEED_1000:
				sched_speed = 1000;
				break;
			case SPEED_100:
				sched_speed = 100;
				break;
			case SPEED_10:
				sched_speed = 10;
				break;
			}
			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
		}
	}
}
182 
/*
 * Bring a port's link up: reset the MAC, reprogram its address and Rx
 * mode, kick off PHY autonegotiation/link setup, then enable the MAC in
 * both directions.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
194 
/*
 * Turn on the checksum offload engines the hardware supports.  TCP
 * checksum offload is always enabled; IP and UDP offload only when the
 * adapter advertises the corresponding capability flags.
 */
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->flags & TSO_CAPABLE)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	if (adapter->flags & UDP_CSUM_CAPABLE)
		t1_tp_set_udp_checksum_offload(adapter->tp, 1);
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
203 
204 /*
205  * Things to do upon first use of a card.
206  * This must run with the rtnl lock held.
207  */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	/* One-time hardware initialization on first open of any port. */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	/* Try MSI unless disabled by module parameter; fall back to a
	 * shared legacy interrupt line if MSI can't be enabled. */
	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	/* err is 0 on the success path through here. */
	return err;
}
239 
240 /*
241  * Release resources when all the ports have been stopped.
242  */
static void cxgb_down(struct adapter *adapter)
{
	/* Quiesce the SGE and interrupts before releasing the IRQ. */
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}
251 
/*
 * ndo_open handler.  Brings up the shared adapter resources on the first
 * port open, then starts this port's link and Tx queue.  Periodic MAC
 * statistics collection is started when the first port comes up and a
 * stats update period is configured.
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->ml_priv;
	int other_ports = adapter->open_device_map & PORT_MASK;

	napi_enable(&adapter->napi);
	/* First port to open brings up the adapter-wide resources. */
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		napi_disable(&adapter->napi);
		return err;
	}

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	return 0;
}
272 
/*
 * ndo_stop handler.  Stops this port's queue and MAC; when the last open
 * port closes, cancels statistics collection and tears down the shared
 * adapter resources via cxgb_down().
 */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_clear_bit();
		/* The empty lock/unlock pair waits out any mac_stats_task
		 * currently holding work_lock, so its rescheduling decision
		 * is complete before we cancel the delayed work. */
		spin_lock(&adapter->work_lock);   /* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
298 
/*
 * ndo_get_stats handler.  Pulls a full statistics update from the MAC and
 * maps the hardware counters onto the generic net_device_stats fields
 * kept in the port_info.
 */
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &p->netstats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast  = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = pstats->RxFCSErrors;
	ns->rx_frame_errors  = pstats->RxAlignErrors;
	ns->rx_fifo_errors   = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = pstats->TxLateCollisions;
	return ns;
}
346 
/* ethtool get_msglevel: return the driver's message-enable bitmap. */
static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return adapter->msg_enable;
}
353 
/* ethtool set_msglevel: replace the driver's message-enable bitmap. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->msg_enable = val;
}
360 
/*
 * Statistic names reported via ethtool -S.  The order of entries here
 * must match the order in which get_stats() writes the values: MAC Tx,
 * MAC Rx, per-port SGE stats, SGE interrupt stats, then ESPI stats.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* Port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};
427 
428 #define T2_REGMAP_SIZE (3 * 1024)
429 
/* ethtool get_regs_len: size of the register snapshot get_regs() fills. */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
434 
get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)435 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
436 {
437 	struct adapter *adapter = dev->ml_priv;
438 
439 	strcpy(info->driver, DRV_NAME);
440 	strcpy(info->version, DRV_VERSION);
441 	strcpy(info->fw_version, "N/A");
442 	strcpy(info->bus_info, pci_name(adapter->pdev));
443 }
444 
get_sset_count(struct net_device * dev,int sset)445 static int get_sset_count(struct net_device *dev, int sset)
446 {
447 	switch (sset) {
448 	case ETH_SS_STATS:
449 		return ARRAY_SIZE(stats_strings);
450 	default:
451 		return -EOPNOTSUPP;
452 	}
453 }
454 
/* ethtool get_strings: export the statistic names for ETH_SS_STATS. */
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}
460 
/*
 * ethtool get_ethtool_stats: gather MAC, per-port SGE, SGE interrupt and
 * (when present) ESPI counters.  Values are written in exactly the order
 * declared in stats_strings[].
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	/* MAC Tx counters */
	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	/* MAC Rx counters */
	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	/* Per-port SGE stats */
	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	/* SGE interrupt stats */
	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	/* ESPI stats, only on adapters that have an ESPI block */
	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}
542 
/*
 * Copy the chip registers in the byte-offset range [start, end] into the
 * snapshot buffer at the same offsets.
 */
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;	/* void* arithmetic: GCC extension, standard in kernel code */

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}
551 
/*
 * ethtool get_regs: snapshot the interesting register ranges of each chip
 * module into buf (T2_REGMAP_SIZE bytes, zeroed first so gaps read as 0).
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
574 
/*
 * ethtool get_settings: report the port's link capabilities and current
 * negotiated state.  Speed/duplex are reported as -1 ("unknown") while
 * the link is down.
 */
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy->addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
599 
speed_duplex_to_caps(int speed,int duplex)600 static int speed_duplex_to_caps(int speed, int duplex)
601 {
602 	int cap = 0;
603 
604 	switch (speed) {
605 	case SPEED_10:
606 		if (duplex == DUPLEX_FULL)
607 			cap = SUPPORTED_10baseT_Full;
608 		else
609 			cap = SUPPORTED_10baseT_Half;
610 		break;
611 	case SPEED_100:
612 		if (duplex == DUPLEX_FULL)
613 			cap = SUPPORTED_100baseT_Full;
614 		else
615 			cap = SUPPORTED_100baseT_Half;
616 		break;
617 	case SPEED_1000:
618 		if (duplex == DUPLEX_FULL)
619 			cap = SUPPORTED_1000baseT_Full;
620 		else
621 			cap = SUPPORTED_1000baseT_Half;
622 		break;
623 	case SPEED_10000:
624 		if (duplex == DUPLEX_FULL)
625 			cap = SUPPORTED_10000baseT_Full;
626 	}
627 	return cap;
628 }
629 
630 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
631 		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
632 		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
633 		      ADVERTISED_10000baseT_Full)
634 
/*
 * ethtool set_settings: change the port's speed/duplex or autonegotiation
 * advertisement.  Ports without autoneg support can't be reconfigured at
 * all; with autoneg off a single supported speed/duplex must be given
 * (forcing 1000 Mbps is disallowed); with autoneg on the advertisement
 * mask is sanitized against the supported set.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;             /* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		/* Forced mode: the requested speed/duplex must be supported,
		 * and forcing gigabit is not allowed. */
		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		/* (x & (x - 1)) != 0 means more than one bit is set: in that
		 * case start from everything we support. */
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	/* Apply immediately if the interface is up. */
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
668 
/*
 * ethtool get_pauseparam: report flow-control autoneg setting and the
 * currently active Rx/Tx pause state.
 */
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}
679 
/*
 * ethtool set_pauseparam: set the requested flow-control configuration.
 * With autoneg enabled the new pause advertisement is renegotiated via
 * t1_link_start(); with autoneg disabled the MAC is reprogrammed directly
 * (speed/duplex passed as -1 mean "leave unchanged").
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;	/* pause autoneg requested but unsupported */

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}
709 
/* ethtool get_rx_csum: 1 if Rx checksum offload is enabled, else 0. */
static u32 get_rx_csum(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return (adapter->flags & RX_CSUM_ENABLED) != 0;
}
716 
/*
 * ethtool set_rx_csum: toggle the Rx checksum offload flag in the
 * adapter's flag word.
 */
static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->flags = data ? (adapter->flags | RX_CSUM_ENABLED)
			      : (adapter->flags & ~RX_CSUM_ENABLED);
	return 0;
}
727 
/*
 * ethtool set_tso: enable/disable TSO.  Enabling is refused on hardware
 * without TSO capability; disabling on such hardware is a no-op success.
 */
static int set_tso(struct net_device *dev, u32 value)
{
	struct adapter *adapter = dev->ml_priv;

	if (!(adapter->flags & TSO_CAPABLE))
		return value ? -EOPNOTSUPP : 0;
	return ethtool_op_set_tso(dev, value);
}
736 
/*
 * ethtool get_ringparam: report SGE queue size limits and current sizes.
 * On T1B the jumbo free list is index 1 and the normal list index 0;
 * jumbo_fl selects between them.
 */
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_CMDQ_ENTRIES;

	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
	e->rx_mini_pending = 0;
	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
	e->tx_pending = adapter->params.sge.cmdQ_size[0];
}
752 
/*
 * ethtool set_ringparam: resize the SGE queues.  Only allowed before the
 * hardware has been fully initialized (i.e. before the first open); the
 * requested sizes are validated against the driver's limits first.
 */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	/* Tx must be large enough that every port can always queue a
	 * maximally-fragmented packet. */
	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;	/* queues already allocated */

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	/* cmdQ 1 mirrors cmdQ 0 but is capped at its own maximum. */
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}
776 
/*
 * ethtool set_coalesce: store the new interrupt coalescing parameters and
 * push them to the SGE.
 */
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}
787 
/* ethtool get_coalesce: report the current interrupt coalescing settings. */
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
	return 0;
}
797 
/*
 * ethtool get_eeprom_len: size of the readable EEPROM region; 0 on
 * non-ASIC (FPGA) boards, which have no serial EEPROM to report.
 */
static int get_eeprom_len(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}
804 
805 #define EEPROM_MAGIC(ap) \
806 	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
807 
/*
 * ethtool get_eeprom: read e->len bytes starting at e->offset.  The
 * EEPROM is read in aligned 32-bit words into a local bounce buffer, and
 * the requested byte range is copied out.
 * NOTE(review): no bounds check on offset+len against EEPROM_SIZE here;
 * presumably the ethtool core validates the range against
 * get_eeprom_len() before calling us -- confirm against the caller.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	/* Round the start down to a word boundary; read whole words. */
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}
821 
/* ethtool method table wired up to the handlers above. */
static const struct ethtool_ops t1_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_rx_csum       = get_rx_csum,
	.set_rx_csum       = set_rx_csum,
	.set_tx_csum       = ethtool_op_set_tx_csum,
	.set_sg            = ethtool_op_set_sg,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_sset_count	   = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.set_tso           = set_tso,
};
848 
/*
 * ndo_do_ioctl handler implementing the MII ioctls: report the PHY
 * address (SIOCGMIIPHY), read a PHY register (SIOCGMIIREG), and write a
 * PHY register (SIOCSMIIREG, privileged).
 */
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct mii_ioctl_data *data = if_mii(req);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->port[dev->if_port].phy->addr;
		/* FALLTHRU */
	case SIOCGMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;
		u32 val;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		/* Register numbers are 5 bits wide in MII. */
		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
			       &val);
		data->val_out = val;
		break;
	}
	case SIOCSMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;

		if (!capable(CAP_NET_ADMIN))
		    return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
			        data->val_in);
		break;
	}

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
886 
/*
 * ndo_change_mtu handler: program the new MTU into the MAC before
 * recording it on the net_device, so a MAC rejection leaves the old MTU
 * in effect.
 */
static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;

	if (!mac->ops->set_mtu)
		return -EOPNOTSUPP;
	if (new_mtu < 68)	/* 68 = minimum IPv4 datagram size (RFC 791) */
		return -EINVAL;
	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	return 0;
}
902 
/*
 * ndo_set_mac_address handler: copy the new address into the net_device
 * and program it into the MAC.
 * NOTE(review): the address is not validated (e.g. multicast/zero
 * addresses are accepted) -- consider checking validity before applying.
 */
static int t1_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct sockaddr *addr = p;

	if (!mac->ops->macaddress_set)
		return -EOPNOTSUPP;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mac->ops->macaddress_set(mac, dev->dev_addr);
	return 0;
}
916 
917 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/*
 * VLAN registration callback: record the new vlan_group and toggle
 * hardware VLAN acceleration to match.  async_lock serializes against
 * the interrupt-driven consumers of vlan_grp.
 */
static void t1_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *grp)
{
	struct adapter *adapter = dev->ml_priv;

	spin_lock_irq(&adapter->async_lock);
	adapter->vlan_grp = grp;
	t1_set_vlan_accel(adapter, grp != NULL);
	spin_unlock_irq(&adapter->async_lock);
}
928 #endif
929 
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll hook: drive the interrupt handler by hand, with local
 * interrupts masked, so netconsole and friends can make progress even
 * when normal interrupt delivery is unavailable.
 */
static void t1_netpoll(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	unsigned long flags;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif
941 
942 /*
943  * Periodic accumulation of MAC statistics.  This is used only if the MAC
944  * does not have any other way to prevent stats counter overflow.
945  */
mac_stats_task(struct work_struct * work)946 static void mac_stats_task(struct work_struct *work)
947 {
948 	int i;
949 	struct adapter *adapter =
950 		container_of(work, struct adapter, stats_update_task.work);
951 
952 	for_each_port(adapter, i) {
953 		struct port_info *p = &adapter->port[i];
954 
955 		if (netif_running(p->dev))
956 			p->mac->ops->statistics_update(p->mac,
957 						       MAC_STATS_UPDATE_FAST);
958 	}
959 
960 	/* Schedule the next statistics update if any port is active. */
961 	spin_lock(&adapter->work_lock);
962 	if (adapter->open_device_map & PORT_MASK)
963 		schedule_mac_stats_update(adapter,
964 					  adapter->params.stats_update_period);
965 	spin_unlock(&adapter->work_lock);
966 }
967 
/*
 * Processes elmer0 external interrupts in process context.
 *
 * Scheduled by t1_elmer0_ext_intr(), which masked F_PL_INTR_EXT before
 * queueing us; our job is to service the sources and then unmask again.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	/* Service the external (elmer0) interrupt sources. */
	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	/* Clear the latched EXT cause bit before unmasking it. */
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}
986 
/*
 * Interrupt-context handler for elmer0 external interrupts.
 *
 * NOTE(review): no lock is taken here — presumably callers invoke this
 * from the (serialized) slow interrupt path; confirm against t1_interrupt.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}
1002 
t1_fatal_err(struct adapter * adapter)1003 void t1_fatal_err(struct adapter *adapter)
1004 {
1005 	if (adapter->flags & FULL_INIT_DONE) {
1006 		t1_sge_stop(adapter->sge);
1007 		t1_interrupts_disable(adapter);
1008 	}
1009 	CH_ALERT("%s: encountered fatal error, operation suspended\n",
1010 		 adapter->name);
1011 }
1012 
/*
 * net_device operations shared by all ports of an adapter.  VLAN and
 * netpoll hooks are present only when the corresponding kernel options
 * are configured.
 */
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t1_start_xmit,
	.ndo_get_stats		= t1_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= t1_set_rxmode,
	.ndo_do_ioctl		= t1_ioctl,
	.ndo_change_mtu		= t1_change_mtu,
	.ndo_set_mac_address	= t1_set_mac_addr,
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	.ndo_vlan_rx_register	= t1_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= t1_netpoll,
#endif
};
1030 
/*
 * PCI probe routine: bring up one adapter and register a net_device per
 * port.  The adapter structure is carved out of the first port's
 * netdev_priv area; pci drvdata points at that first net_device.
 * Returns 0 on success or a negative errno, unwinding all acquired
 * resources on failure via the goto-cleanup chain at the bottom.
 */
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	/* Print the driver banner once, on first probe only. */
	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
		       DRV_VERSION);
		++version_printed;
	}

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* BAR 0 must be a memory BAR; it holds the register file. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		CH_ERR("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unavailable. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			CH_ERR("%s: unable to obtain 64-bit DMA for "
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	/* Allocate and set up one net_device per physical port. */
	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		/*
		 * Only the first netdev carries the adapter struct in its
		 * private area; the rest get zero-sized priv.
		 */
		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		/* One-time, adapter-wide initialization on the first pass. */
		if (!adapter) {
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				CH_ERR("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	  /* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		/* Per-port net_device setup. */
		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		netdev->features |= NETIF_F_LLTX;

		adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
			adapter->flags |= VLAN_ACCEL_CAPABLE;
			netdev->features |=
				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				adapter->flags |= TSO_CAPABLE;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		/* Reserve headroom for the CPL header prepended on transmit. */
		netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			CH_WARN("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	/* err holds the last register_netdev failure here, so it is nonzero. */
	if (!adapter->registered_device_map) {
		CH_ERR("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
	       bi->desc, adapter->params.chip_revision,
	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
	       adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		/* Free every netdev allocated so far, last-to-first. */
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1235 
bit_bang(struct adapter * adapter,int bitdata,int nbits)1236 static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1237 {
1238 	int data;
1239 	int i;
1240 	u32 val;
1241 
1242 	enum {
1243 		S_CLOCK = 1 << 3,
1244 		S_DATA = 1 << 4
1245 	};
1246 
1247 	for (i = (nbits - 1); i > -1; i--) {
1248 
1249 		udelay(50);
1250 
1251 		data = ((bitdata >> i) & 0x1);
1252 		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1253 
1254 		if (data)
1255 			val |= S_DATA;
1256 		else
1257 			val &= ~S_DATA;
1258 
1259 		udelay(50);
1260 
1261 		/* Set SCLOCK low */
1262 		val &= ~S_CLOCK;
1263 		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1264 
1265 		udelay(50);
1266 
1267 		/* Write SCLOCK high */
1268 		val |= S_CLOCK;
1269 		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1270 
1271 	}
1272 }
1273 
/*
 * Reprogram the T1B ASIC core and memory clock synthesizers.
 *
 * @mode bit 0 selects the target clock (HCLOCK = full speed, LCLOCK =
 * powersave); bit 1 set means "query only, change nothing".
 *
 * Returns 0 on success, -ENODEV for non-T1B chips (which cannot be
 * re-clocked), or -EALREADY if the ASIC already runs in the requested
 * mode.  The M/N/T values are shifted serially into the synthesizers
 * via bit_bang() while holding tpi_lock.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	/* Select the synthesizer divider values for the requested speed. */
	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	/*
	 * NOTE(review): unlike the core init above, the delay here comes
	 * before the write rather than after it — confirm this asymmetry
	 * is intentional and not a transposed udelay.
	 */
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
1383 
/*
 * Software-reset the ASIC by bouncing it through PCI power state D3hot
 * (value 3) and back to D0 (value 0) via the PM control/status register.
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
1389 
/*
 * PCI remove routine: tear down everything init_one() built, in reverse
 * order — unregister netdevs, release software modules and the register
 * mapping, free the netdevs, release PCI resources, then soft-reset the
 * chip.  Note the adapter struct lives inside port[0]'s netdev, so it
 * must not be touched after the free_netdev loop.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	/* i was left at the port count by the loop above; free them all. */
	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	t1_sw_reset(pdev);
}
1414 
/* PCI driver registration: device table and probe/remove entry points. */
static struct pci_driver driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};
1421 
/* Module entry point: register the PCI driver with the core. */
static int __init t1_init_module(void)
{
	return pci_register_driver(&driver);
}

/* Module exit point: unregister the PCI driver (remove_one runs per device). */
static void __exit t1_cleanup_module(void)
{
	pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);
1434