1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36 
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <asm/uaccess.h>
64 
65 #include "cxgb4.h"
66 #include "t4_regs.h"
67 #include "t4_msg.h"
68 #include "t4fw_api.h"
69 #include "l2t.h"
70 
71 #define DRV_VERSION "2.0.0-ko"
72 #define DRV_DESC "Chelsio T4/T5 Network Driver"
73 
74 /*
75  * Max interrupt hold-off timer value in us.  Queues fall back to this value
76  * under extreme memory pressure so it's largish to give the system time to
77  * recover.
78  */
79 #define MAX_SGE_TIMERVAL 200U
80 
81 enum {
82 	/*
83 	 * Physical Function provisioning constants.
84 	 */
85 	PFRES_NVI = 4,			/* # of Virtual Interfaces */
86 	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
87 	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr
88 					 */
89 	PFRES_NEQ = 256,		/* # of egress queues */
90 	PFRES_NIQ = 0,			/* # of ingress queues */
91 	PFRES_TC = 0,			/* PCI-E traffic class */
92 	PFRES_NEXACTF = 128,		/* # of exact MPS filters */
93 
94 	PFRES_R_CAPS = FW_CMD_CAP_PF,
95 	PFRES_WX_CAPS = FW_CMD_CAP_PF,
96 
97 #ifdef CONFIG_PCI_IOV
98 	/*
99 	 * Virtual Function provisioning constants.  We need two extra Ingress
100 	 * Queues with Interrupt capability to serve as the VF's Firmware
101 	 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
102 	 * neither will have Free Lists associated with them.  For each
103 	 * Ethernet/Control Egress Queue and for each Free List, we need an
104 	 * Egress Context.
105 	 */
106 	VFRES_NPORTS = 1,		/* # of "ports" per VF */
107 	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */
108 
109 	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
110 	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
111 	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
112 	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
113 	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
114 	VFRES_TC = 0,			/* PCI-E traffic class */
115 	VFRES_NEXACTF = 16,		/* # of exact MPS filters */
116 
117 	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
118 	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
119 #endif
120 };
121 
122 /*
123  * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
124  * static and likely not to be useful in the long run.  We really need to
125  * implement some form of persistent configuration which the firmware
126  * controls.
127  */
128 static unsigned int pfvfres_pmask(struct adapter *adapter,
129 				  unsigned int pf, unsigned int vf)
130 {
131 	unsigned int portn, portvec;
132 
133 	/*
134 	 * Give PFs access to all of the ports.
135 	 */
136 	if (vf == 0)
137 		return FW_PFVF_CMD_PMASK_MASK;
138 
139 	/*
140 	 * For VFs, we'll assign them access to the ports based purely on the
141 	 * PF.  We assign active ports in order, wrapping around if there are
142 	 * fewer active ports than PFs: e.g. active port[pf % nports].
143 	 * Unfortunately the adapter's port_info structs haven't been
144 	 * initialized yet so we have to compute this.
145 	 */
146 	if (adapter->params.nports == 0)
147 		return 0;
148 
149 	portn = pf % adapter->params.nports;
150 	portvec = adapter->params.portvec;
151 	for (;;) {
152 		/*
153 		 * Isolate the lowest set bit in the port vector.  If we're at
154 		 * the port number that we want, return that as the pmask.
155 		 * Otherwise mask that bit out of the port vector and
156 		 * decrement our port number ...
157 		 */
158 		unsigned int pmask = portvec ^ (portvec & (portvec-1));
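		/* e.g. portvec = 0b0101: portvec & (portvec-1) = 0b0100, so pmask = 0b0001 (lowest set bit) */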
159 		if (portn == 0)
160 			return pmask;
161 		portn--;
162 		portvec &= ~pmask;
163 	}
164 	/*NOTREACHED*/
165 }
166 
167 enum {
168 	MAX_TXQ_ENTRIES      = 16384,
169 	MAX_CTRL_TXQ_ENTRIES = 1024,
170 	MAX_RSPQ_ENTRIES     = 16384,
171 	MAX_RX_BUFFERS       = 16384,
172 	MIN_TXQ_ENTRIES      = 32,
173 	MIN_CTRL_TXQ_ENTRIES = 32,
174 	MIN_RSPQ_ENTRIES     = 128,
175 	MIN_FL_ENTRIES       = 16
176 };
177 
178 /* Host shadow copy of ingress filter entry.  This is in host native format
179  * and doesn't match the ordering or bit order, etc. of the hardware or the
180  * firmware command.  The use of bit-field structure elements is purely to
181  * remind ourselves of the field size limitations and save memory in the case
182  * where the filter table is large.
183  */
184 struct filter_entry {
185 	/* Administrative fields for filter.
186 	 */
187 	u32 valid:1;            /* filter allocated and valid */
188 	u32 locked:1;           /* filter is administratively locked */
189 
190 	u32 pending:1;          /* filter action is pending firmware reply */
191 	u32 smtidx:8;           /* Source MAC Table index for smac */
192 	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */
193 
194 	/* The filter itself.  Most of this is a straight copy of information
195 	 * provided by the extended ioctl().  Some fields are translated to
196 	 * internal forms -- for instance the Ingress Queue ID passed in from
197 	 * the ioctl() is translated into the Absolute Ingress Queue ID.
198 	 */
199 	struct ch_filter_specification fs;
200 };
201 
202 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
203 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
204 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
205 
206 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
207 
208 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
209 	CH_DEVICE(0xa000, 0),  /* PE10K */
210 	CH_DEVICE(0x4001, -1),
211 	CH_DEVICE(0x4002, -1),
212 	CH_DEVICE(0x4003, -1),
213 	CH_DEVICE(0x4004, -1),
214 	CH_DEVICE(0x4005, -1),
215 	CH_DEVICE(0x4006, -1),
216 	CH_DEVICE(0x4007, -1),
217 	CH_DEVICE(0x4008, -1),
218 	CH_DEVICE(0x4009, -1),
219 	CH_DEVICE(0x400a, -1),
220 	CH_DEVICE(0x4401, 4),
221 	CH_DEVICE(0x4402, 4),
222 	CH_DEVICE(0x4403, 4),
223 	CH_DEVICE(0x4404, 4),
224 	CH_DEVICE(0x4405, 4),
225 	CH_DEVICE(0x4406, 4),
226 	CH_DEVICE(0x4407, 4),
227 	CH_DEVICE(0x4408, 4),
228 	CH_DEVICE(0x4409, 4),
229 	CH_DEVICE(0x440a, 4),
230 	CH_DEVICE(0x440d, 4),
231 	CH_DEVICE(0x440e, 4),
232 	CH_DEVICE(0x5001, 4),
233 	CH_DEVICE(0x5002, 4),
234 	CH_DEVICE(0x5003, 4),
235 	CH_DEVICE(0x5004, 4),
236 	CH_DEVICE(0x5005, 4),
237 	CH_DEVICE(0x5006, 4),
238 	CH_DEVICE(0x5007, 4),
239 	CH_DEVICE(0x5008, 4),
240 	CH_DEVICE(0x5009, 4),
241 	CH_DEVICE(0x500A, 4),
242 	CH_DEVICE(0x500B, 4),
243 	CH_DEVICE(0x500C, 4),
244 	CH_DEVICE(0x500D, 4),
245 	CH_DEVICE(0x500E, 4),
246 	CH_DEVICE(0x500F, 4),
247 	CH_DEVICE(0x5010, 4),
248 	CH_DEVICE(0x5011, 4),
249 	CH_DEVICE(0x5012, 4),
250 	CH_DEVICE(0x5013, 4),
251 	CH_DEVICE(0x5401, 4),
252 	CH_DEVICE(0x5402, 4),
253 	CH_DEVICE(0x5403, 4),
254 	CH_DEVICE(0x5404, 4),
255 	CH_DEVICE(0x5405, 4),
256 	CH_DEVICE(0x5406, 4),
257 	CH_DEVICE(0x5407, 4),
258 	CH_DEVICE(0x5408, 4),
259 	CH_DEVICE(0x5409, 4),
260 	CH_DEVICE(0x540A, 4),
261 	CH_DEVICE(0x540B, 4),
262 	CH_DEVICE(0x540C, 4),
263 	CH_DEVICE(0x540D, 4),
264 	CH_DEVICE(0x540E, 4),
265 	CH_DEVICE(0x540F, 4),
266 	CH_DEVICE(0x5410, 4),
267 	CH_DEVICE(0x5411, 4),
268 	CH_DEVICE(0x5412, 4),
269 	CH_DEVICE(0x5413, 4),
270 	{ 0, }
271 };
272 
273 #define FW_FNAME "cxgb4/t4fw.bin"
274 #define FW5_FNAME "cxgb4/t5fw.bin"
275 #define FW_CFNAME "cxgb4/t4-config.txt"
276 #define FW5_CFNAME "cxgb4/t5-config.txt"
277 
278 MODULE_DESCRIPTION(DRV_DESC);
279 MODULE_AUTHOR("Chelsio Communications");
280 MODULE_LICENSE("Dual BSD/GPL");
281 MODULE_VERSION(DRV_VERSION);
282 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
283 MODULE_FIRMWARE(FW_FNAME);
284 MODULE_FIRMWARE(FW5_FNAME);
285 
286 /*
287  * Normally we're willing to become the firmware's Master PF but will be happy
288  * if another PF has already become the Master and initialized the adapter.
289  * Setting "force_init" will cause this driver to forcibly establish itself as
290  * the Master PF and initialize the adapter.
291  */
292 static uint force_init;
293 
294 module_param(force_init, uint, 0644);
295 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
296 
297 /*
298  * Normally if the firmware we connect to has Configuration File support, we
299  * use that and only fall back to the old Driver-based initialization if the
300  * Configuration File fails for some reason.  If force_old_init is set, then
301  * we'll always use the old Driver-based initialization sequence.
302  */
303 static uint force_old_init;
304 
305 module_param(force_old_init, uint, 0644);
306 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
307 
308 static int dflt_msg_enable = DFLT_MSG_ENABLE;
309 
310 module_param(dflt_msg_enable, int, 0644);
311 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
312 
313 /*
314  * The driver uses the best interrupt scheme available on a platform in the
315  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
316  * of these schemes the driver may consider as follows:
317  *
318  * msi = 2: choose from among all three options
319  * msi = 1: only consider MSI and INTx interrupts
320  * msi = 0: force INTx interrupts
321  */
322 static int msi = 2;
323 
324 module_param(msi, int, 0644);
325 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
326 
327 /*
328  * Queue interrupt hold-off timer values.  Queues default to the first of these
329  * upon creation.
330  */
331 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
332 
333 module_param_array(intr_holdoff, uint, NULL, 0644);
334 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
335 		 "0..4 in microseconds");
336 
337 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
338 
339 module_param_array(intr_cnt, uint, NULL, 0644);
340 MODULE_PARM_DESC(intr_cnt,
341 		 "thresholds 1..3 for queue interrupt packet counters");
342 
343 /*
344  * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
345  * offset by 2 bytes in order to have the IP headers line up on 4-byte
346  * boundaries.  This is a requirement for many architectures which will throw
347  * a machine check fault if an attempt is made to access one of the 4-byte IP
348  * header fields on a non-4-byte boundary.  And it's a major performance issue
349  * even on some architectures which allow it like some implementations of the
350  * x86 ISA.  However, some architectures don't mind this and for some very
351  * edge-case performance sensitive applications (like forwarding large volumes
352  * of small packets), setting this DMA offset to 0 will decrease the number of
353  * PCI-E Bus transfers enough to measurably affect performance.
354  */
355 static int rx_dma_offset = 2;
356 
357 static bool vf_acls;
358 
359 #ifdef CONFIG_PCI_IOV
360 module_param(vf_acls, bool, 0644);
361 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
362 
363 /* Configure the number of PCI-E Virtual Functions which are to be instantiated
364  * on SR-IOV Capable Physical Functions.
365  */
366 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
367 
368 module_param_array(num_vf, uint, NULL, 0644);
369 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
370 #endif
371 
372 /*
373  * The filter TCAM has a fixed portion and a variable portion.  The fixed
374  * portion can match on source/destination IPv4/IPv6 addresses and TCP/UDP
375  * ports.  The variable portion is 36 bits which can include things like Exact
376  * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
377  * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
378  * far exceed the 36-bit budget for this "compressed" header portion of the
379  * filter.  Thus, we have a scarce resource which must be carefully managed.
380  *
381  * By default we set this up to mostly match the set of filter matching
382  * capabilities of T3 but with accommodations for some of T4's more
383  * interesting features:
384  *
385  *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
386  *     [Inner] VLAN (17), Port (3), FCoE (1) }
387  */
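/*
 * Sanity check on the default above: 1 + 3 + 8 + 17 + 3 + 1 = 33 bits,
 * which fits within the 36-bit compressed filter tuple budget.
 */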
388 enum {
389 	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
390 	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
391 	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
392 };
393 
394 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
395 
396 module_param(tp_vlan_pri_map, uint, 0644);
397 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
398 
399 static struct dentry *cxgb4_debugfs_root;
400 
401 static LIST_HEAD(adapter_list);
402 static DEFINE_MUTEX(uld_mutex);
403 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
404 static const char *uld_str[] = { "RDMA", "iSCSI" };
405 
406 static void link_report(struct net_device *dev)
407 {
408 	if (!netif_carrier_ok(dev))
409 		netdev_info(dev, "link down\n");
410 	else {
411 		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
412 
413 		const char *s = "10Mbps";
414 		const struct port_info *p = netdev_priv(dev);
415 
416 		switch (p->link_cfg.speed) {
417 		case SPEED_10000:
418 			s = "10Gbps";
419 			break;
420 		case SPEED_1000:
421 			s = "1000Mbps";
422 			break;
423 		case SPEED_100:
424 			s = "100Mbps";
425 			break;
426 		}
427 
428 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
429 			    fc[p->link_cfg.fc]);
430 	}
431 }
432 
433 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
434 {
435 	struct net_device *dev = adapter->port[port_id];
436 
437 	/* Skip changes from disabled ports. */
438 	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
439 		if (link_stat)
440 			netif_carrier_on(dev);
441 		else
442 			netif_carrier_off(dev);
443 
444 		link_report(dev);
445 	}
446 }
447 
448 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
449 {
450 	static const char *mod_str[] = {
451 		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
452 	};
453 
454 	const struct net_device *dev = adap->port[port_id];
455 	const struct port_info *pi = netdev_priv(dev);
456 
457 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
458 		netdev_info(dev, "port module unplugged\n");
459 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
460 		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
461 }
462 
463 /*
464  * Configure the exact and hash address filters to handle a port's multicast
465  * and secondary unicast MAC addresses.
466  */
467 static int set_addr_filters(const struct net_device *dev, bool sleep)
468 {
469 	u64 mhash = 0;
470 	u64 uhash = 0;
471 	bool free = true;
472 	u16 filt_idx[7];
473 	const u8 *addr[7];
474 	int ret, naddr = 0;
475 	const struct netdev_hw_addr *ha;
476 	int uc_cnt = netdev_uc_count(dev);
477 	int mc_cnt = netdev_mc_count(dev);
478 	const struct port_info *pi = netdev_priv(dev);
479 	unsigned int mb = pi->adapter->fn;
480 
481 	/* first do the secondary unicast addresses */
482 	netdev_for_each_uc_addr(ha, dev) {
483 		addr[naddr++] = ha->addr;
484 		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
485 			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
486 					naddr, addr, filt_idx, &uhash, sleep);
487 			if (ret < 0)
488 				return ret;
489 
490 			free = false;
491 			naddr = 0;
492 		}
493 	}
494 
495 	/* next set up the multicast addresses */
496 	netdev_for_each_mc_addr(ha, dev) {
497 		addr[naddr++] = ha->addr;
498 		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
499 			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
500 					naddr, addr, filt_idx, &mhash, sleep);
501 			if (ret < 0)
502 				return ret;
503 
504 			free = false;
505 			naddr = 0;
506 		}
507 	}
508 
509 	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
510 				uhash | mhash, sleep);
511 }
512 
513 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
514 module_param(dbfifo_int_thresh, int, 0644);
515 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
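/* Per the note above, each threshold unit appears to cover 64 doorbell FIFO
 * entries (10 * 64 == 640); this is an inference from the inline comment.
 */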
516 
517 /*
518  * usecs to sleep while draining the dbfifo
519  */
520 static int dbfifo_drain_delay = 1000;
521 module_param(dbfifo_drain_delay, int, 0644);
522 MODULE_PARM_DESC(dbfifo_drain_delay,
523 		 "usecs to sleep while draining the dbfifo");
524 
525 /*
526  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
527  * If @mtu is -1 it is left unchanged.
528  */
529 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
530 {
531 	int ret;
532 	struct port_info *pi = netdev_priv(dev);
533 
534 	ret = set_addr_filters(dev, sleep_ok);
535 	if (ret == 0)
536 		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
537 				    (dev->flags & IFF_PROMISC) ? 1 : 0,
538 				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
539 				    sleep_ok);
540 	return ret;
541 }
542 
543 static struct workqueue_struct *workq;
544 
545 /**
546  *	link_start - enable a port
547  *	@dev: the port to enable
548  *
549  *	Performs the MAC and PHY actions needed to enable a port.
550  */
551 static int link_start(struct net_device *dev)
552 {
553 	int ret;
554 	struct port_info *pi = netdev_priv(dev);
555 	unsigned int mb = pi->adapter->fn;
556 
557 	/*
558 	 * We do not set address filters and promiscuity here, the stack does
559 	 * that step explicitly.
560 	 */
561 	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
562 			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
563 	if (ret == 0) {
564 		ret = t4_change_mac(pi->adapter, mb, pi->viid,
565 				    pi->xact_addr_filt, dev->dev_addr, true,
566 				    true);
567 		if (ret >= 0) {
568 			pi->xact_addr_filt = ret;
569 			ret = 0;
570 		}
571 	}
572 	if (ret == 0)
573 		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
574 				    &pi->link_cfg);
575 	if (ret == 0)
576 		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
577 	return ret;
578 }
579 
580 /* Clear a filter and release any of its resources that we own.  This also
581  * clears the filter's "pending" status.
582  */
583 static void clear_filter(struct adapter *adap, struct filter_entry *f)
584 {
585 	/* If the new or old filter has loopback rewriting rules then we'll
586 	 * need to free any existing Layer Two Table (L2T) entries of the old
587 	 * filter rule.  The firmware will handle freeing up any Source MAC
588 	 * Table (SMT) entries used for rewriting Source MAC Addresses in
589 	 * loopback rules.
590 	 */
591 	if (f->l2t)
592 		cxgb4_l2t_release(f->l2t);
593 
594 	/* The zeroing of the filter rule below clears the filter valid,
595 	 * pending, locked flags, l2t pointer, etc. so it's all we need for
596 	 * this operation.
597 	 */
598 	memset(f, 0, sizeof(*f));
599 }
600 
601 /* Handle a filter write/deletion reply.
602  */
603 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
604 {
605 	unsigned int idx = GET_TID(rpl);
606 	unsigned int nidx = idx - adap->tids.ftid_base;
607 	unsigned int ret;
608 	struct filter_entry *f;
609 
610 	if (idx >= adap->tids.ftid_base && nidx <
611 	   (adap->tids.nftids + adap->tids.nsftids)) {
612 		idx = nidx;
613 		ret = GET_TCB_COOKIE(rpl->cookie);
614 		f = &adap->tids.ftid_tab[idx];
615 
616 		if (ret == FW_FILTER_WR_FLT_DELETED) {
617 			/* Clear the filter when we get confirmation from the
618 			 * hardware that the filter has been deleted.
619 			 */
620 			clear_filter(adap, f);
621 		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
622 			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
623 				idx);
624 			clear_filter(adap, f);
625 		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
626 			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
627 			f->pending = 0;  /* asynchronous setup completed */
628 			f->valid = 1;
629 		} else {
630 			/* Something went wrong.  Issue a warning about the
631 			 * problem and clear everything out.
632 			 */
633 			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
634 				idx, ret);
635 			clear_filter(adap, f);
636 		}
637 	}
638 }
639 
640 /* Response queue handler for the FW event queue.
641  */
642 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
643 			  const struct pkt_gl *gl)
644 {
645 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
646 
647 	rsp++;                                          /* skip RSS header */
648 
649 	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
650 	 */
651 	if (unlikely(opcode == CPL_FW4_MSG &&
652 	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
653 		rsp++;
654 		opcode = ((const struct rss_header *)rsp)->opcode;
655 		rsp++;
656 		if (opcode != CPL_SGE_EGR_UPDATE) {
657 			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
658 				, opcode);
659 			goto out;
660 		}
661 	}
662 
663 	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
664 		const struct cpl_sge_egr_update *p = (void *)rsp;
665 		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
666 		struct sge_txq *txq;
667 
668 		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
669 		txq->restarts++;
670 		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
671 			struct sge_eth_txq *eq;
672 
673 			eq = container_of(txq, struct sge_eth_txq, q);
674 			netif_tx_wake_queue(eq->txq);
675 		} else {
676 			struct sge_ofld_txq *oq;
677 
678 			oq = container_of(txq, struct sge_ofld_txq, q);
679 			tasklet_schedule(&oq->qresume_tsk);
680 		}
681 	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
682 		const struct cpl_fw6_msg *p = (void *)rsp;
683 
684 		if (p->type == 0)
685 			t4_handle_fw_rpl(q->adap, p->data);
686 	} else if (opcode == CPL_L2T_WRITE_RPL) {
687 		const struct cpl_l2t_write_rpl *p = (void *)rsp;
688 
689 		do_l2t_write_rpl(q->adap, p);
690 	} else if (opcode == CPL_SET_TCB_RPL) {
691 		const struct cpl_set_tcb_rpl *p = (void *)rsp;
692 
693 		filter_rpl(q->adap, p);
694 	} else
695 		dev_err(q->adap->pdev_dev,
696 			"unexpected CPL %#x on FW event queue\n", opcode);
697 out:
698 	return 0;
699 }
700 
701 /**
702  *	uldrx_handler - response queue handler for ULD queues
703  *	@q: the response queue that received the packet
704  *	@rsp: the response queue descriptor holding the offload message
705  *	@gl: the gather list of packet fragments
706  *
707  *	Deliver an ingress offload packet to a ULD.  All processing is done by
708  *	the ULD, we just maintain statistics.
709  */
710 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
711 			 const struct pkt_gl *gl)
712 {
713 	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
714 
715 	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
716 	 */
717 	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
718 	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
719 		rsp += 2;
720 
721 	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
722 		rxq->stats.nomem++;
723 		return -1;
724 	}
725 	if (gl == NULL)
726 		rxq->stats.imm++;
727 	else if (gl == CXGB4_MSG_AN)
728 		rxq->stats.an++;
729 	else
730 		rxq->stats.pkts++;
731 	return 0;
732 }
733 
734 static void disable_msi(struct adapter *adapter)
735 {
736 	if (adapter->flags & USING_MSIX) {
737 		pci_disable_msix(adapter->pdev);
738 		adapter->flags &= ~USING_MSIX;
739 	} else if (adapter->flags & USING_MSI) {
740 		pci_disable_msi(adapter->pdev);
741 		adapter->flags &= ~USING_MSI;
742 	}
743 }
744 
745 /*
746  * Interrupt handler for non-data events used with MSI-X.
747  */
748 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
749 {
750 	struct adapter *adap = cookie;
751 
752 	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
753 	if (v & PFSW) {
754 		adap->swintr = 1;
755 		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
756 	}
757 	t4_slow_intr_handler(adap);
758 	return IRQ_HANDLED;
759 }
760 
761 /*
762  * Name the MSI-X interrupts.
763  */
764 static void name_msix_vecs(struct adapter *adap)
765 {
766 	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
767 
768 	/* non-data interrupts */
769 	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
770 
771 	/* FW events */
772 	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
773 		 adap->port[0]->name);
774 
775 	/* Ethernet queues */
776 	for_each_port(adap, j) {
777 		struct net_device *d = adap->port[j];
778 		const struct port_info *pi = netdev_priv(d);
779 
780 		for (i = 0; i < pi->nqsets; i++, msi_idx++)
781 			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
782 				 d->name, i);
783 	}
784 
785 	/* offload queues */
786 	for_each_ofldrxq(&adap->sge, i)
787 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
788 			 adap->port[0]->name, i);
789 
790 	for_each_rdmarxq(&adap->sge, i)
791 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
792 			 adap->port[0]->name, i);
793 }
794 
795 static int request_msix_queue_irqs(struct adapter *adap)
796 {
797 	struct sge *s = &adap->sge;
798 	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
799 
800 	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
801 			  adap->msix_info[1].desc, &s->fw_evtq);
802 	if (err)
803 		return err;
804 
805 	for_each_ethrxq(s, ethqidx) {
806 		err = request_irq(adap->msix_info[msi_index].vec,
807 				  t4_sge_intr_msix, 0,
808 				  adap->msix_info[msi_index].desc,
809 				  &s->ethrxq[ethqidx].rspq);
810 		if (err)
811 			goto unwind;
812 		msi_index++;
813 	}
814 	for_each_ofldrxq(s, ofldqidx) {
815 		err = request_irq(adap->msix_info[msi_index].vec,
816 				  t4_sge_intr_msix, 0,
817 				  adap->msix_info[msi_index].desc,
818 				  &s->ofldrxq[ofldqidx].rspq);
819 		if (err)
820 			goto unwind;
821 		msi_index++;
822 	}
823 	for_each_rdmarxq(s, rdmaqidx) {
824 		err = request_irq(adap->msix_info[msi_index].vec,
825 				  t4_sge_intr_msix, 0,
826 				  adap->msix_info[msi_index].desc,
827 				  &s->rdmarxq[rdmaqidx].rspq);
828 		if (err)
829 			goto unwind;
830 		msi_index++;
831 	}
832 	return 0;
833 
834 unwind:
835 	while (--rdmaqidx >= 0)
836 		free_irq(adap->msix_info[--msi_index].vec,
837 			 &s->rdmarxq[rdmaqidx].rspq);
838 	while (--ofldqidx >= 0)
839 		free_irq(adap->msix_info[--msi_index].vec,
840 			 &s->ofldrxq[ofldqidx].rspq);
841 	while (--ethqidx >= 0)
842 		free_irq(adap->msix_info[--msi_index].vec,
843 			 &s->ethrxq[ethqidx].rspq);
844 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
845 	return err;
846 }
847 
848 static void free_msix_queue_irqs(struct adapter *adap)
849 {
850 	int i, msi_index = 2;
851 	struct sge *s = &adap->sge;
852 
853 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
854 	for_each_ethrxq(s, i)
855 		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
856 	for_each_ofldrxq(s, i)
857 		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
858 	for_each_rdmarxq(s, i)
859 		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
860 }
861 
862 /**
863  *	write_rss - write the RSS table for a given port
864  *	@pi: the port
865  *	@queues: array of queue indices for RSS
866  *
867  *	Sets up the portion of the HW RSS table for the port's VI to distribute
868  *	packets to the Rx queues in @queues.
869  */
870 static int write_rss(const struct port_info *pi, const u16 *queues)
871 {
872 	u16 *rss;
873 	int i, err;
874 	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
875 
876 	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
877 	if (!rss)
878 		return -ENOMEM;
879 
880 	/* map the queue indices to queue ids */
881 	for (i = 0; i < pi->rss_size; i++, queues++)
882 		rss[i] = q[*queues].rspq.abs_id;
883 
884 	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
885 				  pi->rss_size, rss, pi->rss_size);
886 	kfree(rss);
887 	return err;
888 }
889 
890 /**
891  *	setup_rss - configure RSS
892  *	@adap: the adapter
893  *
894  *	Sets up RSS for each port.
895  */
896 static int setup_rss(struct adapter *adap)
897 {
898 	int i, err;
899 
900 	for_each_port(adap, i) {
901 		const struct port_info *pi = adap2pinfo(adap, i);
902 
903 		err = write_rss(pi, pi->rss);
904 		if (err)
905 			return err;
906 	}
907 	return 0;
908 }
909 
910 /*
911  * Return the channel of the ingress queue with the given qid.
912  */
913 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
914 {
915 	qid -= p->ingr_start;
916 	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
917 }
918 
919 /*
920  * Wait until all NAPI handlers are descheduled.
921  */
922 static void quiesce_rx(struct adapter *adap)
923 {
924 	int i;
925 
926 	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
927 		struct sge_rspq *q = adap->sge.ingr_map[i];
928 
929 		if (q && q->handler)
930 			napi_disable(&q->napi);
931 	}
932 }
933 
934 /*
935  * Enable NAPI scheduling and interrupt generation for all Rx queues.
936  */
937 static void enable_rx(struct adapter *adap)
938 {
939 	int i;
940 
941 	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
942 		struct sge_rspq *q = adap->sge.ingr_map[i];
943 
944 		if (!q)
945 			continue;
946 		if (q->handler)
947 			napi_enable(&q->napi);
948 		/* 0-increment GTS to start the timer and enable interrupts */
949 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
950 			     SEINTARM(q->intr_params) |
951 			     INGRESSQID(q->cntxt_id));
952 	}
953 }
954 
955 /**
956  *	setup_sge_queues - configure SGE Tx/Rx/response queues
957  *	@adap: the adapter
958  *
959  *	Determines how many sets of SGE queues to use and initializes them.
960  *	We support multiple queue sets per port if we have MSI-X, otherwise
961  *	just one queue set per port.
962  */
963 static int setup_sge_queues(struct adapter *adap)
964 {
965 	int err, msi_idx, i, j;
966 	struct sge *s = &adap->sge;
967 
968 	bitmap_zero(s->starving_fl, MAX_EGRQ);
969 	bitmap_zero(s->txq_maperr, MAX_EGRQ);
970 
971 	if (adap->flags & USING_MSIX)
972 		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
973 	else {
974 		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
975 				       NULL, NULL);
976 		if (err)
977 			return err;
978 		msi_idx = -((int)s->intrq.abs_id + 1);
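		/* A non-positive msi_idx signals the queue allocations below (see
		 * the "msi_idx > 0" checks) that no dedicated MSI-X vector is used.
		 */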
979 	}
980 
981 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
982 			       msi_idx, NULL, fwevtq_handler);
983 	if (err) {
984 freeout:	t4_free_sge_resources(adap);
985 		return err;
986 	}
987 
988 	for_each_port(adap, i) {
989 		struct net_device *dev = adap->port[i];
990 		struct port_info *pi = netdev_priv(dev);
991 		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
992 		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
993 
994 		for (j = 0; j < pi->nqsets; j++, q++) {
995 			if (msi_idx > 0)
996 				msi_idx++;
997 			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
998 					       msi_idx, &q->fl,
999 					       t4_ethrx_handler);
1000 			if (err)
1001 				goto freeout;
1002 			q->rspq.idx = j;
1003 			memset(&q->stats, 0, sizeof(q->stats));
1004 		}
1005 		for (j = 0; j < pi->nqsets; j++, t++) {
1006 			err = t4_sge_alloc_eth_txq(adap, t, dev,
1007 					netdev_get_tx_queue(dev, j),
1008 					s->fw_evtq.cntxt_id);
1009 			if (err)
1010 				goto freeout;
1011 		}
1012 	}
1013 
1014 	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1015 	for_each_ofldrxq(s, i) {
1016 		struct sge_ofld_rxq *q = &s->ofldrxq[i];
1017 		struct net_device *dev = adap->port[i / j];
1018 
1019 		if (msi_idx > 0)
1020 			msi_idx++;
1021 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1022 				       &q->fl, uldrx_handler);
1023 		if (err)
1024 			goto freeout;
1025 		memset(&q->stats, 0, sizeof(q->stats));
1026 		s->ofld_rxq[i] = q->rspq.abs_id;
1027 		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1028 					    s->fw_evtq.cntxt_id);
1029 		if (err)
1030 			goto freeout;
1031 	}
1032 
1033 	for_each_rdmarxq(s, i) {
1034 		struct sge_ofld_rxq *q = &s->rdmarxq[i];
1035 
1036 		if (msi_idx > 0)
1037 			msi_idx++;
1038 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1039 				       msi_idx, &q->fl, uldrx_handler);
1040 		if (err)
1041 			goto freeout;
1042 		memset(&q->stats, 0, sizeof(q->stats));
1043 		s->rdma_rxq[i] = q->rspq.abs_id;
1044 	}
1045 
1046 	for_each_port(adap, i) {
1047 		/*
1048 		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1049 		 * have RDMA queues, and that's the right value.
1050 		 */
1051 		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1052 					    s->fw_evtq.cntxt_id,
1053 					    s->rdmarxq[i].rspq.cntxt_id);
1054 		if (err)
1055 			goto freeout;
1056 	}
1057 
1058 	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
1059 		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1060 		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1061 	return 0;
1062 }
1063 
1064 /*
1065  * Returns 0 if new FW was successfully loaded, a positive errno if a load was
1066  * started but failed, and a negative errno if flash load couldn't start.
1067  */
1068 static int upgrade_fw(struct adapter *adap)
1069 {
1070 	int ret;
1071 	u32 vers, exp_major;
1072 	const struct fw_hdr *hdr;
1073 	const struct firmware *fw;
1074 	struct device *dev = adap->pdev_dev;
1075 	char *fw_file_name;
1076 
1077 	switch (CHELSIO_CHIP_VERSION(adap->chip)) {
1078 	case CHELSIO_T4:
1079 		fw_file_name = FW_FNAME;
1080 		exp_major = FW_VERSION_MAJOR;
1081 		break;
1082 	case CHELSIO_T5:
1083 		fw_file_name = FW5_FNAME;
1084 		exp_major = FW_VERSION_MAJOR_T5;
1085 		break;
1086 	default:
1087 		dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
1088 		return -EINVAL;
1089 	}
1090 
1091 	ret = request_firmware(&fw, fw_file_name, dev);
1092 	if (ret < 0) {
1093 		dev_err(dev, "unable to load firmware image %s, error %d\n",
1094 			fw_file_name, ret);
1095 		return ret;
1096 	}
1097 
1098 	hdr = (const struct fw_hdr *)fw->data;
1099 	vers = ntohl(hdr->fw_ver);
1100 	if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
1101 		ret = -EINVAL;              /* wrong major version, won't do */
1102 		goto out;
1103 	}
1104 
1105 	/*
1106 	 * If the flash FW is unusable or we found something newer, load it.
1107 	 */
1108 	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
1109 	    vers > adap->params.fw_vers) {
1110 		dev_info(dev, "upgrading firmware ...\n");
1111 		ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
1112 				    /*force=*/false);
1113 		if (!ret)
1114 			dev_info(dev,
1115 				 "firmware upgraded to version %pI4 from %s\n",
1116 				 &hdr->fw_ver, fw_file_name);
1117 		else
1118 			dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
1119 	} else {
1120 		/*
1121 		 * Tell our caller that we didn't upgrade the firmware.
1122 		 */
1123 		ret = -EINVAL;
1124 	}
1125 
1126 out:	release_firmware(fw);
1127 	return ret;
1128 }
1129 
1130 /*
1131  * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1132  * The allocated memory is cleared.
1133  */
1134 void *t4_alloc_mem(size_t size)
1135 {
1136 	void *p = kzalloc(size, GFP_KERNEL);
1137 
1138 	if (!p)
1139 		p = vzalloc(size);
1140 	return p;
1141 }
1142 
1143 /*
1144  * Free memory allocated through t4_alloc_mem().
1145  */
1146 static void t4_free_mem(void *addr)
1147 {
1148 	if (is_vmalloc_addr(addr))
1149 		vfree(addr);
1150 	else
1151 		kfree(addr);
1152 }
1153 
1154 /* Send a Work Request to write the filter at a specified index.  We construct
1155  * a Firmware Filter Work Request to have the work done and put the indicated
1156  * filter into "pending" mode which will prevent any further actions against
1157  * it till we get a reply from the firmware on the completion status of the
1158  * request.
1159  */
1160 static int set_filter_wr(struct adapter *adapter, int fidx)
1161 {
1162 	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1163 	struct sk_buff *skb;
1164 	struct fw_filter_wr *fwr;
1165 	unsigned int ftid;
1166 
1167 	/* If the new filter requires loopback Destination MAC and/or VLAN
1168 	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1169 	 * the filter.
1170 	 */
1171 	if (f->fs.newdmac || f->fs.newvlan) {
1172 		/* allocate L2T entry for new filter */
1173 		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1174 		if (f->l2t == NULL)
1175 			return -EAGAIN;
1176 		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1177 					f->fs.eport, f->fs.dmac)) {
1178 			cxgb4_l2t_release(f->l2t);
1179 			f->l2t = NULL;
1180 			return -ENOMEM;
1181 		}
1182 	}
1183 
1184 	ftid = adapter->tids.ftid_base + fidx;
1185 
1186 	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1187 	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1188 	memset(fwr, 0, sizeof(*fwr));
1189 
1190 	/* It would be nice to put most of the following in t4_hw.c but most
1191 	 * of the work is translating the cxgbtool ch_filter_specification
1192 	 * into the Work Request and the definition of that structure is
1193 	 * currently in cxgbtool.h which isn't appropriate to pull into the
1194 	 * common code.  We may eventually try to come up with a more neutral
1195 	 * filter specification structure but for now it's easiest to simply
1196 	 * put this fairly direct code in line ...
1197 	 */
1198 	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1199 	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1200 	fwr->tid_to_iq =
1201 		htonl(V_FW_FILTER_WR_TID(ftid) |
1202 		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1203 		      V_FW_FILTER_WR_NOREPLY(0) |
1204 		      V_FW_FILTER_WR_IQ(f->fs.iq));
1205 	fwr->del_filter_to_l2tix =
1206 		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1207 		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1208 		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1209 		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1210 		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1211 		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1212 		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1213 		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1214 		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1215 					     f->fs.newvlan == VLAN_REWRITE) |
1216 		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1217 					    f->fs.newvlan == VLAN_REWRITE) |
1218 		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1219 		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1220 		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
1221 		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1222 	fwr->ethtype = htons(f->fs.val.ethtype);
1223 	fwr->ethtypem = htons(f->fs.mask.ethtype);
1224 	fwr->frag_to_ovlan_vldm =
1225 		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1226 		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1227 		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1228 		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1229 		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1230 		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1231 	fwr->smac_sel = 0;
1232 	fwr->rx_chan_rx_rpl_iq =
1233 		htons(V_FW_FILTER_WR_RX_CHAN(0) |
1234 		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1235 	fwr->maci_to_matchtypem =
1236 		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1237 		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1238 		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1239 		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1240 		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1241 		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1242 		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1243 		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1244 	fwr->ptcl = f->fs.val.proto;
1245 	fwr->ptclm = f->fs.mask.proto;
1246 	fwr->ttyp = f->fs.val.tos;
1247 	fwr->ttypm = f->fs.mask.tos;
1248 	fwr->ivlan = htons(f->fs.val.ivlan);
1249 	fwr->ivlanm = htons(f->fs.mask.ivlan);
1250 	fwr->ovlan = htons(f->fs.val.ovlan);
1251 	fwr->ovlanm = htons(f->fs.mask.ovlan);
1252 	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1253 	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1254 	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1255 	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1256 	fwr->lp = htons(f->fs.val.lport);
1257 	fwr->lpm = htons(f->fs.mask.lport);
1258 	fwr->fp = htons(f->fs.val.fport);
1259 	fwr->fpm = htons(f->fs.mask.fport);
1260 	if (f->fs.newsmac)
1261 		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1262 
1263 	/* Mark the filter as "pending" and ship off the Filter Work Request.
1264 	 * When we get the Work Request Reply we'll clear the pending status.
1265 	 */
1266 	f->pending = 1;
1267 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1268 	t4_ofld_send(adapter, skb);
1269 	return 0;
1270 }
1271 
1272 /* Delete the filter at a specified index.
1273  */
1274 static int del_filter_wr(struct adapter *adapter, int fidx)
1275 {
1276 	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1277 	struct sk_buff *skb;
1278 	struct fw_filter_wr *fwr;
1279 	unsigned int len, ftid;
1280 
1281 	len = sizeof(*fwr);
1282 	ftid = adapter->tids.ftid_base + fidx;
1283 
1284 	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1285 	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1286 	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1287 
1288 	/* Mark the filter as "pending" and ship off the Filter Work Request.
1289 	 * When we get the Work Request Reply we'll clear the pending status.
1290 	 */
1291 	f->pending = 1;
1292 	t4_mgmt_tx(adapter, skb);
1293 	return 0;
1294 }
1295 
1296 static inline int is_offload(const struct adapter *adap)
1297 {
1298 	return adap->params.offload;
1299 }
1300 
1301 /*
1302  * Implementation of ethtool operations.
1303  */
1304 
1305 static u32 get_msglevel(struct net_device *dev)
1306 {
1307 	return netdev2adap(dev)->msg_enable;
1308 }
1309 
1310 static void set_msglevel(struct net_device *dev, u32 val)
1311 {
1312 	netdev2adap(dev)->msg_enable = val;
1313 }
1314 
1315 static char stats_strings[][ETH_GSTRING_LEN] = {
1316 	"TxOctetsOK         ",
1317 	"TxFramesOK         ",
1318 	"TxBroadcastFrames  ",
1319 	"TxMulticastFrames  ",
1320 	"TxUnicastFrames    ",
1321 	"TxErrorFrames      ",
1322 
1323 	"TxFrames64         ",
1324 	"TxFrames65To127    ",
1325 	"TxFrames128To255   ",
1326 	"TxFrames256To511   ",
1327 	"TxFrames512To1023  ",
1328 	"TxFrames1024To1518 ",
1329 	"TxFrames1519ToMax  ",
1330 
1331 	"TxFramesDropped    ",
1332 	"TxPauseFrames      ",
1333 	"TxPPP0Frames       ",
1334 	"TxPPP1Frames       ",
1335 	"TxPPP2Frames       ",
1336 	"TxPPP3Frames       ",
1337 	"TxPPP4Frames       ",
1338 	"TxPPP5Frames       ",
1339 	"TxPPP6Frames       ",
1340 	"TxPPP7Frames       ",
1341 
1342 	"RxOctetsOK         ",
1343 	"RxFramesOK         ",
1344 	"RxBroadcastFrames  ",
1345 	"RxMulticastFrames  ",
1346 	"RxUnicastFrames    ",
1347 
1348 	"RxFramesTooLong    ",
1349 	"RxJabberErrors     ",
1350 	"RxFCSErrors        ",
1351 	"RxLengthErrors     ",
1352 	"RxSymbolErrors     ",
1353 	"RxRuntFrames       ",
1354 
1355 	"RxFrames64         ",
1356 	"RxFrames65To127    ",
1357 	"RxFrames128To255   ",
1358 	"RxFrames256To511   ",
1359 	"RxFrames512To1023  ",
1360 	"RxFrames1024To1518 ",
1361 	"RxFrames1519ToMax  ",
1362 
1363 	"RxPauseFrames      ",
1364 	"RxPPP0Frames       ",
1365 	"RxPPP1Frames       ",
1366 	"RxPPP2Frames       ",
1367 	"RxPPP3Frames       ",
1368 	"RxPPP4Frames       ",
1369 	"RxPPP5Frames       ",
1370 	"RxPPP6Frames       ",
1371 	"RxPPP7Frames       ",
1372 
1373 	"RxBG0FramesDropped ",
1374 	"RxBG1FramesDropped ",
1375 	"RxBG2FramesDropped ",
1376 	"RxBG3FramesDropped ",
1377 	"RxBG0FramesTrunc   ",
1378 	"RxBG1FramesTrunc   ",
1379 	"RxBG2FramesTrunc   ",
1380 	"RxBG3FramesTrunc   ",
1381 
1382 	"TSO                ",
1383 	"TxCsumOffload      ",
1384 	"RxCsumGood         ",
1385 	"VLANextractions    ",
1386 	"VLANinsertions     ",
1387 	"GROpackets         ",
1388 	"GROmerged          ",
1389 	"WriteCoalSuccess   ",
1390 	"WriteCoalFail      ",
1391 };
1392 
1393 static int get_sset_count(struct net_device *dev, int sset)
1394 {
1395 	switch (sset) {
1396 	case ETH_SS_STATS:
1397 		return ARRAY_SIZE(stats_strings);
1398 	default:
1399 		return -EOPNOTSUPP;
1400 	}
1401 }
1402 
1403 #define T4_REGMAP_SIZE (160 * 1024)
1404 #define T5_REGMAP_SIZE (332 * 1024)
1405 
1406 static int get_regs_len(struct net_device *dev)
1407 {
1408 	struct adapter *adap = netdev2adap(dev);
1409 	if (is_t4(adap->chip))
1410 		return T4_REGMAP_SIZE;
1411 	else
1412 		return T5_REGMAP_SIZE;
1413 }
1414 
1415 static int get_eeprom_len(struct net_device *dev)
1416 {
1417 	return EEPROMSIZE;
1418 }
1419 
1420 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1421 {
1422 	struct adapter *adapter = netdev2adap(dev);
1423 
1424 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1425 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1426 	strlcpy(info->bus_info, pci_name(adapter->pdev),
1427 		sizeof(info->bus_info));
1428 
1429 	if (adapter->params.fw_vers)
1430 		snprintf(info->fw_version, sizeof(info->fw_version),
1431 			"%u.%u.%u.%u, TP %u.%u.%u.%u",
1432 			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1433 			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1434 			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1435 			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1436 			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1437 			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1438 			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1439 			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1440 }
1441 
1442 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1443 {
1444 	if (stringset == ETH_SS_STATS)
1445 		memcpy(data, stats_strings, sizeof(stats_strings));
1446 }
1447 
1448 /*
1449  * port stats maintained per queue of the port.  They should be in the same
1450  * order as in stats_strings above.
1451  */
1452 struct queue_port_stats {
1453 	u64 tso;
1454 	u64 tx_csum;
1455 	u64 rx_csum;
1456 	u64 vlan_ex;
1457 	u64 vlan_ins;
1458 	u64 gro_pkts;
1459 	u64 gro_merged;
1460 };
1461 
1462 static void collect_sge_port_stats(const struct adapter *adap,
1463 		const struct port_info *p, struct queue_port_stats *s)
1464 {
1465 	int i;
1466 	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1467 	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1468 
1469 	memset(s, 0, sizeof(*s));
1470 	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1471 		s->tso += tx->tso;
1472 		s->tx_csum += tx->tx_cso;
1473 		s->rx_csum += rx->stats.rx_cso;
1474 		s->vlan_ex += rx->stats.vlan_ex;
1475 		s->vlan_ins += tx->vlan_ins;
1476 		s->gro_pkts += rx->stats.lro_pkts;
1477 		s->gro_merged += rx->stats.lro_merged;
1478 	}
1479 }
1480 
1481 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1482 		      u64 *data)
1483 {
1484 	struct port_info *pi = netdev_priv(dev);
1485 	struct adapter *adapter = pi->adapter;
1486 	u32 val1, val2;
1487 
1488 	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1489 
1490 	data += sizeof(struct port_stats) / sizeof(u64);
1491 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1492 	data += sizeof(struct queue_port_stats) / sizeof(u64);
1493 	if (!is_t4(adapter->chip)) {
1494 		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1495 		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1496 		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1497 		*data = val1 - val2;
1498 		data++;
1499 		*data = val2;
1500 		data++;
1501 	} else {
1502 		memset(data, 0, 2 * sizeof(u64));
1503 		*data += 2;
1504 	}
1505 }
1506 
1507 /*
1508  * Return a version number to identify the type of adapter.  The scheme is:
1509  * - bits 0..9: chip version
1510  * - bits 10..15: chip revision
1511  * - bits 16..23: register dump version
1512  */
1513 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1514 {
1515 	return CHELSIO_CHIP_VERSION(ap->chip) |
1516 		(CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
1517 }
1518 
1519 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1520 			   unsigned int end)
1521 {
1522 	u32 *p = buf + start;
1523 
1524 	for ( ; start <= end; start += sizeof(u32))
1525 		*p++ = t4_read_reg(ap, start);
1526 }
1527 
1528 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1529 		     void *buf)
1530 {
1531 	static const unsigned int t4_reg_ranges[] = {
1532 		0x1008, 0x1108,
1533 		0x1180, 0x11b4,
1534 		0x11fc, 0x123c,
1535 		0x1300, 0x173c,
1536 		0x1800, 0x18fc,
1537 		0x3000, 0x30d8,
1538 		0x30e0, 0x5924,
1539 		0x5960, 0x59d4,
1540 		0x5a00, 0x5af8,
1541 		0x6000, 0x6098,
1542 		0x6100, 0x6150,
1543 		0x6200, 0x6208,
1544 		0x6240, 0x6248,
1545 		0x6280, 0x6338,
1546 		0x6370, 0x638c,
1547 		0x6400, 0x643c,
1548 		0x6500, 0x6524,
1549 		0x6a00, 0x6a38,
1550 		0x6a60, 0x6a78,
1551 		0x6b00, 0x6b84,
1552 		0x6bf0, 0x6c84,
1553 		0x6cf0, 0x6d84,
1554 		0x6df0, 0x6e84,
1555 		0x6ef0, 0x6f84,
1556 		0x6ff0, 0x7084,
1557 		0x70f0, 0x7184,
1558 		0x71f0, 0x7284,
1559 		0x72f0, 0x7384,
1560 		0x73f0, 0x7450,
1561 		0x7500, 0x7530,
1562 		0x7600, 0x761c,
1563 		0x7680, 0x76cc,
1564 		0x7700, 0x7798,
1565 		0x77c0, 0x77fc,
1566 		0x7900, 0x79fc,
1567 		0x7b00, 0x7c38,
1568 		0x7d00, 0x7efc,
1569 		0x8dc0, 0x8e1c,
1570 		0x8e30, 0x8e78,
1571 		0x8ea0, 0x8f6c,
1572 		0x8fc0, 0x9074,
1573 		0x90fc, 0x90fc,
1574 		0x9400, 0x9458,
1575 		0x9600, 0x96bc,
1576 		0x9800, 0x9808,
1577 		0x9820, 0x983c,
1578 		0x9850, 0x9864,
1579 		0x9c00, 0x9c6c,
1580 		0x9c80, 0x9cec,
1581 		0x9d00, 0x9d6c,
1582 		0x9d80, 0x9dec,
1583 		0x9e00, 0x9e6c,
1584 		0x9e80, 0x9eec,
1585 		0x9f00, 0x9f6c,
1586 		0x9f80, 0x9fec,
1587 		0xd004, 0xd03c,
1588 		0xdfc0, 0xdfe0,
1589 		0xe000, 0xea7c,
1590 		0xf000, 0x11190,
1591 		0x19040, 0x1906c,
1592 		0x19078, 0x19080,
1593 		0x1908c, 0x19124,
1594 		0x19150, 0x191b0,
1595 		0x191d0, 0x191e8,
1596 		0x19238, 0x1924c,
1597 		0x193f8, 0x19474,
1598 		0x19490, 0x194f8,
1599 		0x19800, 0x19f30,
1600 		0x1a000, 0x1a06c,
1601 		0x1a0b0, 0x1a120,
1602 		0x1a128, 0x1a138,
1603 		0x1a190, 0x1a1c4,
1604 		0x1a1fc, 0x1a1fc,
1605 		0x1e040, 0x1e04c,
1606 		0x1e284, 0x1e28c,
1607 		0x1e2c0, 0x1e2c0,
1608 		0x1e2e0, 0x1e2e0,
1609 		0x1e300, 0x1e384,
1610 		0x1e3c0, 0x1e3c8,
1611 		0x1e440, 0x1e44c,
1612 		0x1e684, 0x1e68c,
1613 		0x1e6c0, 0x1e6c0,
1614 		0x1e6e0, 0x1e6e0,
1615 		0x1e700, 0x1e784,
1616 		0x1e7c0, 0x1e7c8,
1617 		0x1e840, 0x1e84c,
1618 		0x1ea84, 0x1ea8c,
1619 		0x1eac0, 0x1eac0,
1620 		0x1eae0, 0x1eae0,
1621 		0x1eb00, 0x1eb84,
1622 		0x1ebc0, 0x1ebc8,
1623 		0x1ec40, 0x1ec4c,
1624 		0x1ee84, 0x1ee8c,
1625 		0x1eec0, 0x1eec0,
1626 		0x1eee0, 0x1eee0,
1627 		0x1ef00, 0x1ef84,
1628 		0x1efc0, 0x1efc8,
1629 		0x1f040, 0x1f04c,
1630 		0x1f284, 0x1f28c,
1631 		0x1f2c0, 0x1f2c0,
1632 		0x1f2e0, 0x1f2e0,
1633 		0x1f300, 0x1f384,
1634 		0x1f3c0, 0x1f3c8,
1635 		0x1f440, 0x1f44c,
1636 		0x1f684, 0x1f68c,
1637 		0x1f6c0, 0x1f6c0,
1638 		0x1f6e0, 0x1f6e0,
1639 		0x1f700, 0x1f784,
1640 		0x1f7c0, 0x1f7c8,
1641 		0x1f840, 0x1f84c,
1642 		0x1fa84, 0x1fa8c,
1643 		0x1fac0, 0x1fac0,
1644 		0x1fae0, 0x1fae0,
1645 		0x1fb00, 0x1fb84,
1646 		0x1fbc0, 0x1fbc8,
1647 		0x1fc40, 0x1fc4c,
1648 		0x1fe84, 0x1fe8c,
1649 		0x1fec0, 0x1fec0,
1650 		0x1fee0, 0x1fee0,
1651 		0x1ff00, 0x1ff84,
1652 		0x1ffc0, 0x1ffc8,
1653 		0x20000, 0x2002c,
1654 		0x20100, 0x2013c,
1655 		0x20190, 0x201c8,
1656 		0x20200, 0x20318,
1657 		0x20400, 0x20528,
1658 		0x20540, 0x20614,
1659 		0x21000, 0x21040,
1660 		0x2104c, 0x21060,
1661 		0x210c0, 0x210ec,
1662 		0x21200, 0x21268,
1663 		0x21270, 0x21284,
1664 		0x212fc, 0x21388,
1665 		0x21400, 0x21404,
1666 		0x21500, 0x21518,
1667 		0x2152c, 0x2153c,
1668 		0x21550, 0x21554,
1669 		0x21600, 0x21600,
1670 		0x21608, 0x21628,
1671 		0x21630, 0x2163c,
1672 		0x21700, 0x2171c,
1673 		0x21780, 0x2178c,
1674 		0x21800, 0x21c38,
1675 		0x21c80, 0x21d7c,
1676 		0x21e00, 0x21e04,
1677 		0x22000, 0x2202c,
1678 		0x22100, 0x2213c,
1679 		0x22190, 0x221c8,
1680 		0x22200, 0x22318,
1681 		0x22400, 0x22528,
1682 		0x22540, 0x22614,
1683 		0x23000, 0x23040,
1684 		0x2304c, 0x23060,
1685 		0x230c0, 0x230ec,
1686 		0x23200, 0x23268,
1687 		0x23270, 0x23284,
1688 		0x232fc, 0x23388,
1689 		0x23400, 0x23404,
1690 		0x23500, 0x23518,
1691 		0x2352c, 0x2353c,
1692 		0x23550, 0x23554,
1693 		0x23600, 0x23600,
1694 		0x23608, 0x23628,
1695 		0x23630, 0x2363c,
1696 		0x23700, 0x2371c,
1697 		0x23780, 0x2378c,
1698 		0x23800, 0x23c38,
1699 		0x23c80, 0x23d7c,
1700 		0x23e00, 0x23e04,
1701 		0x24000, 0x2402c,
1702 		0x24100, 0x2413c,
1703 		0x24190, 0x241c8,
1704 		0x24200, 0x24318,
1705 		0x24400, 0x24528,
1706 		0x24540, 0x24614,
1707 		0x25000, 0x25040,
1708 		0x2504c, 0x25060,
1709 		0x250c0, 0x250ec,
1710 		0x25200, 0x25268,
1711 		0x25270, 0x25284,
1712 		0x252fc, 0x25388,
1713 		0x25400, 0x25404,
1714 		0x25500, 0x25518,
1715 		0x2552c, 0x2553c,
1716 		0x25550, 0x25554,
1717 		0x25600, 0x25600,
1718 		0x25608, 0x25628,
1719 		0x25630, 0x2563c,
1720 		0x25700, 0x2571c,
1721 		0x25780, 0x2578c,
1722 		0x25800, 0x25c38,
1723 		0x25c80, 0x25d7c,
1724 		0x25e00, 0x25e04,
1725 		0x26000, 0x2602c,
1726 		0x26100, 0x2613c,
1727 		0x26190, 0x261c8,
1728 		0x26200, 0x26318,
1729 		0x26400, 0x26528,
1730 		0x26540, 0x26614,
1731 		0x27000, 0x27040,
1732 		0x2704c, 0x27060,
1733 		0x270c0, 0x270ec,
1734 		0x27200, 0x27268,
1735 		0x27270, 0x27284,
1736 		0x272fc, 0x27388,
1737 		0x27400, 0x27404,
1738 		0x27500, 0x27518,
1739 		0x2752c, 0x2753c,
1740 		0x27550, 0x27554,
1741 		0x27600, 0x27600,
1742 		0x27608, 0x27628,
1743 		0x27630, 0x2763c,
1744 		0x27700, 0x2771c,
1745 		0x27780, 0x2778c,
1746 		0x27800, 0x27c38,
1747 		0x27c80, 0x27d7c,
1748 		0x27e00, 0x27e04
1749 	};
1750 
1751 	static const unsigned int t5_reg_ranges[] = {
1752 		0x1008, 0x1148,
1753 		0x1180, 0x11b4,
1754 		0x11fc, 0x123c,
1755 		0x1280, 0x173c,
1756 		0x1800, 0x18fc,
1757 		0x3000, 0x3028,
1758 		0x3060, 0x30d8,
1759 		0x30e0, 0x30fc,
1760 		0x3140, 0x357c,
1761 		0x35a8, 0x35cc,
1762 		0x35ec, 0x35ec,
1763 		0x3600, 0x5624,
1764 		0x56cc, 0x575c,
1765 		0x580c, 0x5814,
1766 		0x5890, 0x58bc,
1767 		0x5940, 0x59dc,
1768 		0x59fc, 0x5a18,
1769 		0x5a60, 0x5a9c,
1770 		0x5b9c, 0x5bfc,
1771 		0x6000, 0x6040,
1772 		0x6058, 0x614c,
1773 		0x7700, 0x7798,
1774 		0x77c0, 0x78fc,
1775 		0x7b00, 0x7c54,
1776 		0x7d00, 0x7efc,
1777 		0x8dc0, 0x8de0,
1778 		0x8df8, 0x8e84,
1779 		0x8ea0, 0x8f84,
1780 		0x8fc0, 0x90f8,
1781 		0x9400, 0x9470,
1782 		0x9600, 0x96f4,
1783 		0x9800, 0x9808,
1784 		0x9820, 0x983c,
1785 		0x9850, 0x9864,
1786 		0x9c00, 0x9c6c,
1787 		0x9c80, 0x9cec,
1788 		0x9d00, 0x9d6c,
1789 		0x9d80, 0x9dec,
1790 		0x9e00, 0x9e6c,
1791 		0x9e80, 0x9eec,
1792 		0x9f00, 0x9f6c,
1793 		0x9f80, 0xa020,
1794 		0xd004, 0xd03c,
1795 		0xdfc0, 0xdfe0,
1796 		0xe000, 0x11088,
1797 		0x1109c, 0x1117c,
1798 		0x11190, 0x11204,
1799 		0x19040, 0x1906c,
1800 		0x19078, 0x19080,
1801 		0x1908c, 0x19124,
1802 		0x19150, 0x191b0,
1803 		0x191d0, 0x191e8,
1804 		0x19238, 0x19290,
1805 		0x193f8, 0x19474,
1806 		0x19490, 0x194cc,
1807 		0x194f0, 0x194f8,
1808 		0x19c00, 0x19c60,
1809 		0x19c94, 0x19e10,
1810 		0x19e50, 0x19f34,
1811 		0x19f40, 0x19f50,
1812 		0x19f90, 0x19fe4,
1813 		0x1a000, 0x1a06c,
1814 		0x1a0b0, 0x1a120,
1815 		0x1a128, 0x1a138,
1816 		0x1a190, 0x1a1c4,
1817 		0x1a1fc, 0x1a1fc,
1818 		0x1e008, 0x1e00c,
1819 		0x1e040, 0x1e04c,
1820 		0x1e284, 0x1e290,
1821 		0x1e2c0, 0x1e2c0,
1822 		0x1e2e0, 0x1e2e0,
1823 		0x1e300, 0x1e384,
1824 		0x1e3c0, 0x1e3c8,
1825 		0x1e408, 0x1e40c,
1826 		0x1e440, 0x1e44c,
1827 		0x1e684, 0x1e690,
1828 		0x1e6c0, 0x1e6c0,
1829 		0x1e6e0, 0x1e6e0,
1830 		0x1e700, 0x1e784,
1831 		0x1e7c0, 0x1e7c8,
1832 		0x1e808, 0x1e80c,
1833 		0x1e840, 0x1e84c,
1834 		0x1ea84, 0x1ea90,
1835 		0x1eac0, 0x1eac0,
1836 		0x1eae0, 0x1eae0,
1837 		0x1eb00, 0x1eb84,
1838 		0x1ebc0, 0x1ebc8,
1839 		0x1ec08, 0x1ec0c,
1840 		0x1ec40, 0x1ec4c,
1841 		0x1ee84, 0x1ee90,
1842 		0x1eec0, 0x1eec0,
1843 		0x1eee0, 0x1eee0,
1844 		0x1ef00, 0x1ef84,
1845 		0x1efc0, 0x1efc8,
1846 		0x1f008, 0x1f00c,
1847 		0x1f040, 0x1f04c,
1848 		0x1f284, 0x1f290,
1849 		0x1f2c0, 0x1f2c0,
1850 		0x1f2e0, 0x1f2e0,
1851 		0x1f300, 0x1f384,
1852 		0x1f3c0, 0x1f3c8,
1853 		0x1f408, 0x1f40c,
1854 		0x1f440, 0x1f44c,
1855 		0x1f684, 0x1f690,
1856 		0x1f6c0, 0x1f6c0,
1857 		0x1f6e0, 0x1f6e0,
1858 		0x1f700, 0x1f784,
1859 		0x1f7c0, 0x1f7c8,
1860 		0x1f808, 0x1f80c,
1861 		0x1f840, 0x1f84c,
1862 		0x1fa84, 0x1fa90,
1863 		0x1fac0, 0x1fac0,
1864 		0x1fae0, 0x1fae0,
1865 		0x1fb00, 0x1fb84,
1866 		0x1fbc0, 0x1fbc8,
1867 		0x1fc08, 0x1fc0c,
1868 		0x1fc40, 0x1fc4c,
1869 		0x1fe84, 0x1fe90,
1870 		0x1fec0, 0x1fec0,
1871 		0x1fee0, 0x1fee0,
1872 		0x1ff00, 0x1ff84,
1873 		0x1ffc0, 0x1ffc8,
1874 		0x30000, 0x30030,
1875 		0x30100, 0x30144,
1876 		0x30190, 0x301d0,
1877 		0x30200, 0x30318,
1878 		0x30400, 0x3052c,
1879 		0x30540, 0x3061c,
1880 		0x30800, 0x30834,
1881 		0x308c0, 0x30908,
1882 		0x30910, 0x309ac,
1883 		0x30a00, 0x30a04,
1884 		0x30a0c, 0x30a2c,
1885 		0x30a44, 0x30a50,
1886 		0x30a74, 0x30c24,
1887 		0x30d08, 0x30d14,
1888 		0x30d1c, 0x30d20,
1889 		0x30d3c, 0x30d50,
1890 		0x31200, 0x3120c,
1891 		0x31220, 0x31220,
1892 		0x31240, 0x31240,
1893 		0x31600, 0x31600,
1894 		0x31608, 0x3160c,
1895 		0x31a00, 0x31a1c,
1896 		0x31e04, 0x31e20,
1897 		0x31e38, 0x31e3c,
1898 		0x31e80, 0x31e80,
1899 		0x31e88, 0x31ea8,
1900 		0x31eb0, 0x31eb4,
1901 		0x31ec8, 0x31ed4,
1902 		0x31fb8, 0x32004,
1903 		0x32208, 0x3223c,
1904 		0x32600, 0x32630,
1905 		0x32a00, 0x32abc,
1906 		0x32b00, 0x32b70,
1907 		0x33000, 0x33048,
1908 		0x33060, 0x3309c,
1909 		0x330f0, 0x33148,
1910 		0x33160, 0x3319c,
1911 		0x331f0, 0x332e4,
1912 		0x332f8, 0x333e4,
1913 		0x333f8, 0x33448,
1914 		0x33460, 0x3349c,
1915 		0x334f0, 0x33548,
1916 		0x33560, 0x3359c,
1917 		0x335f0, 0x336e4,
1918 		0x336f8, 0x337e4,
1919 		0x337f8, 0x337fc,
1920 		0x33814, 0x33814,
1921 		0x3382c, 0x3382c,
1922 		0x33880, 0x3388c,
1923 		0x338e8, 0x338ec,
1924 		0x33900, 0x33948,
1925 		0x33960, 0x3399c,
1926 		0x339f0, 0x33ae4,
1927 		0x33af8, 0x33b10,
1928 		0x33b28, 0x33b28,
1929 		0x33b3c, 0x33b50,
1930 		0x33bf0, 0x33c10,
1931 		0x33c28, 0x33c28,
1932 		0x33c3c, 0x33c50,
1933 		0x33cf0, 0x33cfc,
1934 		0x34000, 0x34030,
1935 		0x34100, 0x34144,
1936 		0x34190, 0x341d0,
1937 		0x34200, 0x34318,
1938 		0x34400, 0x3452c,
1939 		0x34540, 0x3461c,
1940 		0x34800, 0x34834,
1941 		0x348c0, 0x34908,
1942 		0x34910, 0x349ac,
1943 		0x34a00, 0x34a04,
1944 		0x34a0c, 0x34a2c,
1945 		0x34a44, 0x34a50,
1946 		0x34a74, 0x34c24,
1947 		0x34d08, 0x34d14,
1948 		0x34d1c, 0x34d20,
1949 		0x34d3c, 0x34d50,
1950 		0x35200, 0x3520c,
1951 		0x35220, 0x35220,
1952 		0x35240, 0x35240,
1953 		0x35600, 0x35600,
1954 		0x35608, 0x3560c,
1955 		0x35a00, 0x35a1c,
1956 		0x35e04, 0x35e20,
1957 		0x35e38, 0x35e3c,
1958 		0x35e80, 0x35e80,
1959 		0x35e88, 0x35ea8,
1960 		0x35eb0, 0x35eb4,
1961 		0x35ec8, 0x35ed4,
1962 		0x35fb8, 0x36004,
1963 		0x36208, 0x3623c,
1964 		0x36600, 0x36630,
1965 		0x36a00, 0x36abc,
1966 		0x36b00, 0x36b70,
1967 		0x37000, 0x37048,
1968 		0x37060, 0x3709c,
1969 		0x370f0, 0x37148,
1970 		0x37160, 0x3719c,
1971 		0x371f0, 0x372e4,
1972 		0x372f8, 0x373e4,
1973 		0x373f8, 0x37448,
1974 		0x37460, 0x3749c,
1975 		0x374f0, 0x37548,
1976 		0x37560, 0x3759c,
1977 		0x375f0, 0x376e4,
1978 		0x376f8, 0x377e4,
1979 		0x377f8, 0x377fc,
1980 		0x37814, 0x37814,
1981 		0x3782c, 0x3782c,
1982 		0x37880, 0x3788c,
1983 		0x378e8, 0x378ec,
1984 		0x37900, 0x37948,
1985 		0x37960, 0x3799c,
1986 		0x379f0, 0x37ae4,
1987 		0x37af8, 0x37b10,
1988 		0x37b28, 0x37b28,
1989 		0x37b3c, 0x37b50,
1990 		0x37bf0, 0x37c10,
1991 		0x37c28, 0x37c28,
1992 		0x37c3c, 0x37c50,
1993 		0x37cf0, 0x37cfc,
1994 		0x38000, 0x38030,
1995 		0x38100, 0x38144,
1996 		0x38190, 0x381d0,
1997 		0x38200, 0x38318,
1998 		0x38400, 0x3852c,
1999 		0x38540, 0x3861c,
2000 		0x38800, 0x38834,
2001 		0x388c0, 0x38908,
2002 		0x38910, 0x389ac,
2003 		0x38a00, 0x38a04,
2004 		0x38a0c, 0x38a2c,
2005 		0x38a44, 0x38a50,
2006 		0x38a74, 0x38c24,
2007 		0x38d08, 0x38d14,
2008 		0x38d1c, 0x38d20,
2009 		0x38d3c, 0x38d50,
2010 		0x39200, 0x3920c,
2011 		0x39220, 0x39220,
2012 		0x39240, 0x39240,
2013 		0x39600, 0x39600,
2014 		0x39608, 0x3960c,
2015 		0x39a00, 0x39a1c,
2016 		0x39e04, 0x39e20,
2017 		0x39e38, 0x39e3c,
2018 		0x39e80, 0x39e80,
2019 		0x39e88, 0x39ea8,
2020 		0x39eb0, 0x39eb4,
2021 		0x39ec8, 0x39ed4,
2022 		0x39fb8, 0x3a004,
2023 		0x3a208, 0x3a23c,
2024 		0x3a600, 0x3a630,
2025 		0x3aa00, 0x3aabc,
2026 		0x3ab00, 0x3ab70,
2027 		0x3b000, 0x3b048,
2028 		0x3b060, 0x3b09c,
2029 		0x3b0f0, 0x3b148,
2030 		0x3b160, 0x3b19c,
2031 		0x3b1f0, 0x3b2e4,
2032 		0x3b2f8, 0x3b3e4,
2033 		0x3b3f8, 0x3b448,
2034 		0x3b460, 0x3b49c,
2035 		0x3b4f0, 0x3b548,
2036 		0x3b560, 0x3b59c,
2037 		0x3b5f0, 0x3b6e4,
2038 		0x3b6f8, 0x3b7e4,
2039 		0x3b7f8, 0x3b7fc,
2040 		0x3b814, 0x3b814,
2041 		0x3b82c, 0x3b82c,
2042 		0x3b880, 0x3b88c,
2043 		0x3b8e8, 0x3b8ec,
2044 		0x3b900, 0x3b948,
2045 		0x3b960, 0x3b99c,
2046 		0x3b9f0, 0x3bae4,
2047 		0x3baf8, 0x3bb10,
2048 		0x3bb28, 0x3bb28,
2049 		0x3bb3c, 0x3bb50,
2050 		0x3bbf0, 0x3bc10,
2051 		0x3bc28, 0x3bc28,
2052 		0x3bc3c, 0x3bc50,
2053 		0x3bcf0, 0x3bcfc,
2054 		0x3c000, 0x3c030,
2055 		0x3c100, 0x3c144,
2056 		0x3c190, 0x3c1d0,
2057 		0x3c200, 0x3c318,
2058 		0x3c400, 0x3c52c,
2059 		0x3c540, 0x3c61c,
2060 		0x3c800, 0x3c834,
2061 		0x3c8c0, 0x3c908,
2062 		0x3c910, 0x3c9ac,
2063 		0x3ca00, 0x3ca04,
2064 		0x3ca0c, 0x3ca2c,
2065 		0x3ca44, 0x3ca50,
2066 		0x3ca74, 0x3cc24,
2067 		0x3cd08, 0x3cd14,
2068 		0x3cd1c, 0x3cd20,
2069 		0x3cd3c, 0x3cd50,
2070 		0x3d200, 0x3d20c,
2071 		0x3d220, 0x3d220,
2072 		0x3d240, 0x3d240,
2073 		0x3d600, 0x3d600,
2074 		0x3d608, 0x3d60c,
2075 		0x3da00, 0x3da1c,
2076 		0x3de04, 0x3de20,
2077 		0x3de38, 0x3de3c,
2078 		0x3de80, 0x3de80,
2079 		0x3de88, 0x3dea8,
2080 		0x3deb0, 0x3deb4,
2081 		0x3dec8, 0x3ded4,
2082 		0x3dfb8, 0x3e004,
2083 		0x3e208, 0x3e23c,
2084 		0x3e600, 0x3e630,
2085 		0x3ea00, 0x3eabc,
2086 		0x3eb00, 0x3eb70,
2087 		0x3f000, 0x3f048,
2088 		0x3f060, 0x3f09c,
2089 		0x3f0f0, 0x3f148,
2090 		0x3f160, 0x3f19c,
2091 		0x3f1f0, 0x3f2e4,
2092 		0x3f2f8, 0x3f3e4,
2093 		0x3f3f8, 0x3f448,
2094 		0x3f460, 0x3f49c,
2095 		0x3f4f0, 0x3f548,
2096 		0x3f560, 0x3f59c,
2097 		0x3f5f0, 0x3f6e4,
2098 		0x3f6f8, 0x3f7e4,
2099 		0x3f7f8, 0x3f7fc,
2100 		0x3f814, 0x3f814,
2101 		0x3f82c, 0x3f82c,
2102 		0x3f880, 0x3f88c,
2103 		0x3f8e8, 0x3f8ec,
2104 		0x3f900, 0x3f948,
2105 		0x3f960, 0x3f99c,
2106 		0x3f9f0, 0x3fae4,
2107 		0x3faf8, 0x3fb10,
2108 		0x3fb28, 0x3fb28,
2109 		0x3fb3c, 0x3fb50,
2110 		0x3fbf0, 0x3fc10,
2111 		0x3fc28, 0x3fc28,
2112 		0x3fc3c, 0x3fc50,
2113 		0x3fcf0, 0x3fcfc,
2114 		0x40000, 0x4000c,
2115 		0x40040, 0x40068,
2116 		0x40080, 0x40144,
2117 		0x40180, 0x4018c,
2118 		0x40200, 0x40298,
2119 		0x402ac, 0x4033c,
2120 		0x403f8, 0x403fc,
2121 		0x41300, 0x413c4,
2122 		0x41400, 0x4141c,
2123 		0x41480, 0x414d0,
2124 		0x44000, 0x44078,
2125 		0x440c0, 0x44278,
2126 		0x442c0, 0x44478,
2127 		0x444c0, 0x44678,
2128 		0x446c0, 0x44878,
2129 		0x448c0, 0x449fc,
2130 		0x45000, 0x45068,
2131 		0x45080, 0x45084,
2132 		0x450a0, 0x450b0,
2133 		0x45200, 0x45268,
2134 		0x45280, 0x45284,
2135 		0x452a0, 0x452b0,
2136 		0x460c0, 0x460e4,
2137 		0x47000, 0x4708c,
2138 		0x47200, 0x47250,
2139 		0x47400, 0x47420,
2140 		0x47600, 0x47618,
2141 		0x47800, 0x47814,
2142 		0x48000, 0x4800c,
2143 		0x48040, 0x48068,
2144 		0x48080, 0x48144,
2145 		0x48180, 0x4818c,
2146 		0x48200, 0x48298,
2147 		0x482ac, 0x4833c,
2148 		0x483f8, 0x483fc,
2149 		0x49300, 0x493c4,
2150 		0x49400, 0x4941c,
2151 		0x49480, 0x494d0,
2152 		0x4c000, 0x4c078,
2153 		0x4c0c0, 0x4c278,
2154 		0x4c2c0, 0x4c478,
2155 		0x4c4c0, 0x4c678,
2156 		0x4c6c0, 0x4c878,
2157 		0x4c8c0, 0x4c9fc,
2158 		0x4d000, 0x4d068,
2159 		0x4d080, 0x4d084,
2160 		0x4d0a0, 0x4d0b0,
2161 		0x4d200, 0x4d268,
2162 		0x4d280, 0x4d284,
2163 		0x4d2a0, 0x4d2b0,
2164 		0x4e0c0, 0x4e0e4,
2165 		0x4f000, 0x4f08c,
2166 		0x4f200, 0x4f250,
2167 		0x4f400, 0x4f420,
2168 		0x4f600, 0x4f618,
2169 		0x4f800, 0x4f814,
2170 		0x50000, 0x500cc,
2171 		0x50400, 0x50400,
2172 		0x50800, 0x508cc,
2173 		0x50c00, 0x50c00,
2174 		0x51000, 0x5101c,
2175 		0x51300, 0x51308,
2176 	};
2177 
2178 	int i;
2179 	struct adapter *ap = netdev2adap(dev);
2180 	static const unsigned int *reg_ranges;
2181 	int arr_size = 0, buf_size = 0;
2182 
2183 	if (is_t4(ap->chip)) {
2184 		reg_ranges = &t4_reg_ranges[0];
2185 		arr_size = ARRAY_SIZE(t4_reg_ranges);
2186 		buf_size = T4_REGMAP_SIZE;
2187 	} else {
2188 		reg_ranges = &t5_reg_ranges[0];
2189 		arr_size = ARRAY_SIZE(t5_reg_ranges);
2190 		buf_size = T5_REGMAP_SIZE;
2191 	}
2192 
2193 	regs->version = mk_adap_vers(ap);
2194 
2195 	memset(buf, 0, buf_size);
2196 	for (i = 0; i < arr_size; i += 2)
2197 		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2198 }
2199 
2200 static int restart_autoneg(struct net_device *dev)
2201 {
2202 	struct port_info *p = netdev_priv(dev);
2203 
2204 	if (!netif_running(dev))
2205 		return -EAGAIN;
2206 	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2207 		return -EINVAL;
2208 	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2209 	return 0;
2210 }
2211 
2212 static int identify_port(struct net_device *dev,
2213 			 enum ethtool_phys_id_state state)
2214 {
2215 	unsigned int val;
2216 	struct adapter *adap = netdev2adap(dev);
2217 
2218 	if (state == ETHTOOL_ID_ACTIVE)
2219 		val = 0xffff;
2220 	else if (state == ETHTOOL_ID_INACTIVE)
2221 		val = 0;
2222 	else
2223 		return -EINVAL;
2224 
2225 	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2226 }
2227 
2228 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2229 {
2230 	unsigned int v = 0;
2231 
2232 	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2233 	    type == FW_PORT_TYPE_BT_XAUI) {
2234 		v |= SUPPORTED_TP;
2235 		if (caps & FW_PORT_CAP_SPEED_100M)
2236 			v |= SUPPORTED_100baseT_Full;
2237 		if (caps & FW_PORT_CAP_SPEED_1G)
2238 			v |= SUPPORTED_1000baseT_Full;
2239 		if (caps & FW_PORT_CAP_SPEED_10G)
2240 			v |= SUPPORTED_10000baseT_Full;
2241 	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2242 		v |= SUPPORTED_Backplane;
2243 		if (caps & FW_PORT_CAP_SPEED_1G)
2244 			v |= SUPPORTED_1000baseKX_Full;
2245 		if (caps & FW_PORT_CAP_SPEED_10G)
2246 			v |= SUPPORTED_10000baseKX4_Full;
2247 	} else if (type == FW_PORT_TYPE_KR)
2248 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2249 	else if (type == FW_PORT_TYPE_BP_AP)
2250 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2251 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2252 	else if (type == FW_PORT_TYPE_BP4_AP)
2253 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2254 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2255 		     SUPPORTED_10000baseKX4_Full;
2256 	else if (type == FW_PORT_TYPE_FIBER_XFI ||
2257 		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2258 		v |= SUPPORTED_FIBRE;
2259 
2260 	if (caps & FW_PORT_CAP_ANEG)
2261 		v |= SUPPORTED_Autoneg;
2262 	return v;
2263 }
2264 
2265 static unsigned int to_fw_linkcaps(unsigned int caps)
2266 {
2267 	unsigned int v = 0;
2268 
2269 	if (caps & ADVERTISED_100baseT_Full)
2270 		v |= FW_PORT_CAP_SPEED_100M;
2271 	if (caps & ADVERTISED_1000baseT_Full)
2272 		v |= FW_PORT_CAP_SPEED_1G;
2273 	if (caps & ADVERTISED_10000baseT_Full)
2274 		v |= FW_PORT_CAP_SPEED_10G;
2275 	return v;
2276 }
2277 
2278 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2279 {
2280 	const struct port_info *p = netdev_priv(dev);
2281 
2282 	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2283 	    p->port_type == FW_PORT_TYPE_BT_XFI ||
2284 	    p->port_type == FW_PORT_TYPE_BT_XAUI)
2285 		cmd->port = PORT_TP;
2286 	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2287 		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2288 		cmd->port = PORT_FIBRE;
2289 	else if (p->port_type == FW_PORT_TYPE_SFP) {
2290 		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2291 		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2292 			cmd->port = PORT_DA;
2293 		else
2294 			cmd->port = PORT_FIBRE;
2295 	} else
2296 		cmd->port = PORT_OTHER;
2297 
2298 	if (p->mdio_addr >= 0) {
2299 		cmd->phy_address = p->mdio_addr;
2300 		cmd->transceiver = XCVR_EXTERNAL;
2301 		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2302 			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2303 	} else {
2304 		cmd->phy_address = 0;  /* not really, but no better option */
2305 		cmd->transceiver = XCVR_INTERNAL;
2306 		cmd->mdio_support = 0;
2307 	}
2308 
2309 	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2310 	cmd->advertising = from_fw_linkcaps(p->port_type,
2311 					    p->link_cfg.advertising);
2312 	ethtool_cmd_speed_set(cmd,
2313 			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2314 	cmd->duplex = DUPLEX_FULL;
2315 	cmd->autoneg = p->link_cfg.autoneg;
2316 	cmd->maxtxpkt = 0;
2317 	cmd->maxrxpkt = 0;
2318 	return 0;
2319 }
2320 
2321 static unsigned int speed_to_caps(int speed)
2322 {
2323 	if (speed == SPEED_100)
2324 		return FW_PORT_CAP_SPEED_100M;
2325 	if (speed == SPEED_1000)
2326 		return FW_PORT_CAP_SPEED_1G;
2327 	if (speed == SPEED_10000)
2328 		return FW_PORT_CAP_SPEED_10G;
2329 	return 0;
2330 }
2331 
2332 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2333 {
2334 	unsigned int cap;
2335 	struct port_info *p = netdev_priv(dev);
2336 	struct link_config *lc = &p->link_cfg;
2337 	u32 speed = ethtool_cmd_speed(cmd);
2338 
2339 	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
2340 		return -EINVAL;
2341 
2342 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2343 		/*
2344 		 * PHY offers a single speed.  See if that's what's
2345 		 * being requested.
2346 		 */
2347 		if (cmd->autoneg == AUTONEG_DISABLE &&
2348 		    (lc->supported & speed_to_caps(speed)))
2349 			return 0;
2350 		return -EINVAL;
2351 	}
2352 
2353 	if (cmd->autoneg == AUTONEG_DISABLE) {
2354 		cap = speed_to_caps(speed);
2355 
2356 		if (!(lc->supported & cap) || (speed == SPEED_1000) ||
2357 		    (speed == SPEED_10000))
2358 			return -EINVAL;
2359 		lc->requested_speed = cap;
2360 		lc->advertising = 0;
2361 	} else {
2362 		cap = to_fw_linkcaps(cmd->advertising);
2363 		if (!(lc->supported & cap))
2364 			return -EINVAL;
2365 		lc->requested_speed = 0;
2366 		lc->advertising = cap | FW_PORT_CAP_ANEG;
2367 	}
2368 	lc->autoneg = cmd->autoneg;
2369 
2370 	if (netif_running(dev))
2371 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2372 				     lc);
2373 	return 0;
2374 }
2375 
2376 static void get_pauseparam(struct net_device *dev,
2377 			   struct ethtool_pauseparam *epause)
2378 {
2379 	struct port_info *p = netdev_priv(dev);
2380 
2381 	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2382 	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2383 	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2384 }
2385 
2386 static int set_pauseparam(struct net_device *dev,
2387 			  struct ethtool_pauseparam *epause)
2388 {
2389 	struct port_info *p = netdev_priv(dev);
2390 	struct link_config *lc = &p->link_cfg;
2391 
2392 	if (epause->autoneg == AUTONEG_DISABLE)
2393 		lc->requested_fc = 0;
2394 	else if (lc->supported & FW_PORT_CAP_ANEG)
2395 		lc->requested_fc = PAUSE_AUTONEG;
2396 	else
2397 		return -EINVAL;
2398 
2399 	if (epause->rx_pause)
2400 		lc->requested_fc |= PAUSE_RX;
2401 	if (epause->tx_pause)
2402 		lc->requested_fc |= PAUSE_TX;
2403 	if (netif_running(dev))
2404 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2405 				     lc);
2406 	return 0;
2407 }
2408 
2409 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2410 {
2411 	const struct port_info *pi = netdev_priv(dev);
2412 	const struct sge *s = &pi->adapter->sge;
2413 
2414 	e->rx_max_pending = MAX_RX_BUFFERS;
2415 	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2416 	e->rx_jumbo_max_pending = 0;
2417 	e->tx_max_pending = MAX_TXQ_ENTRIES;
2418 
2419 	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2420 	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2421 	e->rx_jumbo_pending = 0;
2422 	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2423 }
2424 
2425 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2426 {
2427 	int i;
2428 	const struct port_info *pi = netdev_priv(dev);
2429 	struct adapter *adapter = pi->adapter;
2430 	struct sge *s = &adapter->sge;
2431 
2432 	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2433 	    e->tx_pending > MAX_TXQ_ENTRIES ||
2434 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2435 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2436 	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2437 		return -EINVAL;
2438 
2439 	if (adapter->flags & FULL_INIT_DONE)
2440 		return -EBUSY;
2441 
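	/*
	 * The free list size programmed into the hardware apparently includes
	 * 8 reserved descriptors, hence the "+ 8" here and the matching "- 8"
	 * in get_sge_param().
	 */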
2442 	for (i = 0; i < pi->nqsets; ++i) {
2443 		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2444 		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2445 		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2446 	}
2447 	return 0;
2448 }
2449 
2450 static int closest_timer(const struct sge *s, int time)
2451 {
2452 	int i, delta, match = 0, min_delta = INT_MAX;
2453 
2454 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2455 		delta = time - s->timer_val[i];
2456 		if (delta < 0)
2457 			delta = -delta;
2458 		if (delta < min_delta) {
2459 			min_delta = delta;
2460 			match = i;
2461 		}
2462 	}
2463 	return match;
2464 }
2465 
2466 static int closest_thres(const struct sge *s, int thres)
2467 {
2468 	int i, delta, match = 0, min_delta = INT_MAX;
2469 
2470 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2471 		delta = thres - s->counter_val[i];
2472 		if (delta < 0)
2473 			delta = -delta;
2474 		if (delta < min_delta) {
2475 			min_delta = delta;
2476 			match = i;
2477 		}
2478 	}
2479 	return match;
2480 }
2481 
2482 /*
2483  * Return a queue's interrupt hold-off time in us.  0 means no timer.
2484  */
2485 static unsigned int qtimer_val(const struct adapter *adap,
2486 			       const struct sge_rspq *q)
2487 {
2488 	unsigned int idx = q->intr_params >> 1;
2489 
2490 	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2491 }
2492 
2493 /**
2494  *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
2495  *	@adap: the adapter
2496  *	@q: the Rx queue
2497  *	@us: the hold-off time in us, or 0 to disable timer
2498  *	@cnt: the hold-off packet count, or 0 to disable counter
2499  *
2500  *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
2501  *	one of the two needs to be enabled for the queue to generate interrupts.
2502  */
2503 static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2504 			       unsigned int us, unsigned int cnt)
2505 {
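	/*
	 * If both the hold-off timer and the packet count are zero the queue
	 * would never generate an interrupt, so fall back to a packet-count
	 * threshold of one.
	 */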
2506 	if ((us | cnt) == 0)
2507 		cnt = 1;
2508 
2509 	if (cnt) {
2510 		int err;
2511 		u32 v, new_idx;
2512 
2513 		new_idx = closest_thres(&adap->sge, cnt);
2514 		if (q->desc && q->pktcnt_idx != new_idx) {
2515 			/* the queue has already been created, update it */
2516 			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2517 			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2518 			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
2519 			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2520 					    &new_idx);
2521 			if (err)
2522 				return err;
2523 		}
2524 		q->pktcnt_idx = new_idx;
2525 	}
2526 
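	/*
	 * A requested time of 0 maps to timer index 6, which appears to be
	 * the hardware's "restart counter" setting, i.e. hold-off driven
	 * purely by the packet counter rather than by a timer.
	 */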
2527 	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2528 	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2529 	return 0;
2530 }
2531 
2532 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2533 {
2534 	const struct port_info *pi = netdev_priv(dev);
2535 	struct adapter *adap = pi->adapter;
2536 	struct sge_rspq *q;
2537 	int i;
2538 	int r = 0;
2539 
2540 	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2541 		q = &adap->sge.ethrxq[i].rspq;
2542 		r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2543 			c->rx_max_coalesced_frames);
2544 		if (r) {
2545 			dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2546 			break;
2547 		}
2548 	}
2549 	return r;
2550 }
2551 
2552 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2553 {
2554 	const struct port_info *pi = netdev_priv(dev);
2555 	const struct adapter *adap = pi->adapter;
2556 	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2557 
2558 	c->rx_coalesce_usecs = qtimer_val(adap, rq);
2559 	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2560 		adap->sge.counter_val[rq->pktcnt_idx] : 0;
2561 	return 0;
2562 }
2563 
2564 /**
2565  *	eeprom_ptov - translate a physical EEPROM address to virtual
2566  *	@phys_addr: the physical EEPROM address
2567  *	@fn: the PCI function number
2568  *	@sz: size of function-specific area
2569  *
2570  *	Translate a physical EEPROM address to virtual.  The first 1K is
2571  *	accessed through virtual addresses starting at 31K, the rest is
2572  *	accessed through virtual addresses starting at 0.
2573  *
2574  *	The mapping is as follows:
2575  *	[0..1K) -> [31K..32K)
2576  *	[1K..1K+A) -> [31K-A..31K)
2577  *	[1K+A..ES) -> [0..ES-A-1K)
2578  *
2579  *	where A = @fn * @sz, and ES = EEPROM size.
2580  */
2581 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2582 {
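	/*
	 * Worked example of the mapping documented above: with A = fn * sz,
	 * physical address 0 maps to virtual 31K (31 << 10 = 31744), and
	 * physical address 1024 + A maps to virtual 0.
	 */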
2583 	fn *= sz;
2584 	if (phys_addr < 1024)
2585 		return phys_addr + (31 << 10);
2586 	if (phys_addr < 1024 + fn)
2587 		return 31744 - fn + phys_addr - 1024;
2588 	if (phys_addr < EEPROMSIZE)
2589 		return phys_addr - 1024 - fn;
2590 	return -EINVAL;
2591 }
2592 
2593 /*
2594  * The next two routines implement eeprom read/write from physical addresses.
2595  */
2596 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2597 {
2598 	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2599 
2600 	if (vaddr >= 0)
2601 		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2602 	return vaddr < 0 ? vaddr : 0;
2603 }
2604 
2605 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2606 {
2607 	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2608 
2609 	if (vaddr >= 0)
2610 		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2611 	return vaddr < 0 ? vaddr : 0;
2612 }
2613 
2614 #define EEPROM_MAGIC 0x38E2F10C
2615 
2616 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2617 		      u8 *data)
2618 {
2619 	int i, err = 0;
2620 	struct adapter *adapter = netdev2adap(dev);
2621 
2622 	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2623 	if (!buf)
2624 		return -ENOMEM;
2625 
2626 	e->magic = EEPROM_MAGIC;
2627 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2628 		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2629 
2630 	if (!err)
2631 		memcpy(data, buf + e->offset, e->len);
2632 	kfree(buf);
2633 	return err;
2634 }
2635 
2636 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2637 		      u8 *data)
2638 {
2639 	u8 *buf;
2640 	int err = 0;
2641 	u32 aligned_offset, aligned_len, *p;
2642 	struct adapter *adapter = netdev2adap(dev);
2643 
2644 	if (eeprom->magic != EEPROM_MAGIC)
2645 		return -EINVAL;
2646 
2647 	aligned_offset = eeprom->offset & ~3;
2648 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2649 
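	/*
	 * Physical functions other than PF0 may only write their own
	 * function-private region of the EEPROM.
	 */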
2650 	if (adapter->fn > 0) {
2651 		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2652 
2653 		if (aligned_offset < start ||
2654 		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
2655 			return -EPERM;
2656 	}
2657 
2658 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2659 		/*
2660 		 * RMW possibly needed for first or last words.
2661 		 */
2662 		buf = kmalloc(aligned_len, GFP_KERNEL);
2663 		if (!buf)
2664 			return -ENOMEM;
2665 		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2666 		if (!err && aligned_len > 4)
2667 			err = eeprom_rd_phys(adapter,
2668 					     aligned_offset + aligned_len - 4,
2669 					     (u32 *)&buf[aligned_len - 4]);
2670 		if (err)
2671 			goto out;
2672 		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2673 	} else
2674 		buf = data;
2675 
2676 	err = t4_seeprom_wp(adapter, false);
2677 	if (err)
2678 		goto out;
2679 
2680 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2681 		err = eeprom_wr_phys(adapter, aligned_offset, *p);
2682 		aligned_offset += 4;
2683 	}
2684 
2685 	if (!err)
2686 		err = t4_seeprom_wp(adapter, true);
2687 out:
2688 	if (buf != data)
2689 		kfree(buf);
2690 	return err;
2691 }
2692 
2693 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2694 {
2695 	int ret;
2696 	const struct firmware *fw;
2697 	struct adapter *adap = netdev2adap(netdev);
2698 
2699 	ef->data[sizeof(ef->data) - 1] = '\0';
2700 	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2701 	if (ret < 0)
2702 		return ret;
2703 
2704 	ret = t4_load_fw(adap, fw->data, fw->size);
2705 	release_firmware(fw);
2706 	if (!ret)
2707 		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2708 	return ret;
2709 }
2710 
2711 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2712 #define BCAST_CRC 0xa0ccc1a6
2713 
2714 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2715 {
2716 	wol->supported = WAKE_BCAST | WAKE_MAGIC;
2717 	wol->wolopts = netdev2adap(dev)->wol;
2718 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2719 }
2720 
2721 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2722 {
2723 	int err = 0;
2724 	struct port_info *pi = netdev_priv(dev);
2725 
2726 	if (wol->wolopts & ~WOL_SUPPORTED)
2727 		return -EINVAL;
2728 	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2729 			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2730 	if (wol->wolopts & WAKE_BCAST) {
2731 		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2732 					~0ULL, 0, false);
2733 		if (!err)
2734 			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2735 						~6ULL, ~0ULL, BCAST_CRC, true);
2736 	} else
2737 		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2738 	return err;
2739 }
2740 
2741 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2742 {
2743 	const struct port_info *pi = netdev_priv(dev);
2744 	netdev_features_t changed = dev->features ^ features;
2745 	int err;
2746 
2747 	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2748 		return 0;
2749 
2750 	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2751 			    -1, -1, -1,
2752 			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2753 	if (unlikely(err))
2754 		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2755 	return err;
2756 }
2757 
2758 static u32 get_rss_table_size(struct net_device *dev)
2759 {
2760 	const struct port_info *pi = netdev_priv(dev);
2761 
2762 	return pi->rss_size;
2763 }
2764 
2765 static int get_rss_table(struct net_device *dev, u32 *p)
2766 {
2767 	const struct port_info *pi = netdev_priv(dev);
2768 	unsigned int n = pi->rss_size;
2769 
2770 	while (n--)
2771 		p[n] = pi->rss[n];
2772 	return 0;
2773 }
2774 
2775 static int set_rss_table(struct net_device *dev, const u32 *p)
2776 {
2777 	unsigned int i;
2778 	struct port_info *pi = netdev_priv(dev);
2779 
2780 	for (i = 0; i < pi->rss_size; i++)
2781 		pi->rss[i] = p[i];
2782 	if (pi->adapter->flags & FULL_INIT_DONE)
2783 		return write_rss(pi, pi->rss);
2784 	return 0;
2785 }
2786 
2787 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2788 		     u32 *rules)
2789 {
2790 	const struct port_info *pi = netdev_priv(dev);
2791 
2792 	switch (info->cmd) {
2793 	case ETHTOOL_GRXFH: {
2794 		unsigned int v = pi->rss_mode;
2795 
2796 		info->data = 0;
2797 		switch (info->flow_type) {
2798 		case TCP_V4_FLOW:
2799 			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2800 				info->data = RXH_IP_SRC | RXH_IP_DST |
2801 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
2802 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2803 				info->data = RXH_IP_SRC | RXH_IP_DST;
2804 			break;
2805 		case UDP_V4_FLOW:
2806 			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2807 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2808 				info->data = RXH_IP_SRC | RXH_IP_DST |
2809 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
2810 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2811 				info->data = RXH_IP_SRC | RXH_IP_DST;
2812 			break;
2813 		case SCTP_V4_FLOW:
2814 		case AH_ESP_V4_FLOW:
2815 		case IPV4_FLOW:
2816 			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2817 				info->data = RXH_IP_SRC | RXH_IP_DST;
2818 			break;
2819 		case TCP_V6_FLOW:
2820 			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2821 				info->data = RXH_IP_SRC | RXH_IP_DST |
2822 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
2823 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2824 				info->data = RXH_IP_SRC | RXH_IP_DST;
2825 			break;
2826 		case UDP_V6_FLOW:
2827 			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2828 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2829 				info->data = RXH_IP_SRC | RXH_IP_DST |
2830 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
2831 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2832 				info->data = RXH_IP_SRC | RXH_IP_DST;
2833 			break;
2834 		case SCTP_V6_FLOW:
2835 		case AH_ESP_V6_FLOW:
2836 		case IPV6_FLOW:
2837 			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2838 				info->data = RXH_IP_SRC | RXH_IP_DST;
2839 			break;
2840 		}
2841 		return 0;
2842 	}
2843 	case ETHTOOL_GRXRINGS:
2844 		info->data = pi->nqsets;
2845 		return 0;
2846 	}
2847 	return -EOPNOTSUPP;
2848 }
2849 
2850 static const struct ethtool_ops cxgb_ethtool_ops = {
2851 	.get_settings      = get_settings,
2852 	.set_settings      = set_settings,
2853 	.get_drvinfo       = get_drvinfo,
2854 	.get_msglevel      = get_msglevel,
2855 	.set_msglevel      = set_msglevel,
2856 	.get_ringparam     = get_sge_param,
2857 	.set_ringparam     = set_sge_param,
2858 	.get_coalesce      = get_coalesce,
2859 	.set_coalesce      = set_coalesce,
2860 	.get_eeprom_len    = get_eeprom_len,
2861 	.get_eeprom        = get_eeprom,
2862 	.set_eeprom        = set_eeprom,
2863 	.get_pauseparam    = get_pauseparam,
2864 	.set_pauseparam    = set_pauseparam,
2865 	.get_link          = ethtool_op_get_link,
2866 	.get_strings       = get_strings,
2867 	.set_phys_id       = identify_port,
2868 	.nway_reset        = restart_autoneg,
2869 	.get_sset_count    = get_sset_count,
2870 	.get_ethtool_stats = get_stats,
2871 	.get_regs_len      = get_regs_len,
2872 	.get_regs          = get_regs,
2873 	.get_wol           = get_wol,
2874 	.set_wol           = set_wol,
2875 	.get_rxnfc         = get_rxnfc,
2876 	.get_rxfh_indir_size = get_rss_table_size,
2877 	.get_rxfh_indir    = get_rss_table,
2878 	.set_rxfh_indir    = set_rss_table,
2879 	.flash_device      = set_flash,
2880 };
2881 
2882 /*
2883  * debugfs support
2884  */
2885 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2886 			loff_t *ppos)
2887 {
2888 	loff_t pos = *ppos;
2889 	loff_t avail = file_inode(file)->i_size;
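	/*
	 * add_debugfs_mem() stores the adapter pointer with the memory index
	 * (EDC0/EDC1/MC...) encoded in the low two bits of private_data;
	 * mask the index off to recover the adapter pointer.
	 */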
2890 	unsigned int mem = (uintptr_t)file->private_data & 3;
2891 	struct adapter *adap = file->private_data - mem;
2892 
2893 	if (pos < 0)
2894 		return -EINVAL;
2895 	if (pos >= avail)
2896 		return 0;
2897 	if (count > avail - pos)
2898 		count = avail - pos;
2899 
2900 	while (count) {
2901 		size_t len;
2902 		int ret, ofst;
2903 		__be32 data[16];
2904 
2905 		if ((mem == MEM_MC) || (mem == MEM_MC1))
2906 			ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2907 		else
2908 			ret = t4_edc_read(adap, mem, pos, data, NULL);
2909 		if (ret)
2910 			return ret;
2911 
2912 		ofst = pos % sizeof(data);
2913 		len = min(count, sizeof(data) - ofst);
2914 		if (copy_to_user(buf, (u8 *)data + ofst, len))
2915 			return -EFAULT;
2916 
2917 		buf += len;
2918 		pos += len;
2919 		count -= len;
2920 	}
2921 	count = pos - *ppos;
2922 	*ppos = pos;
2923 	return count;
2924 }
2925 
2926 static const struct file_operations mem_debugfs_fops = {
2927 	.owner   = THIS_MODULE,
2928 	.open    = simple_open,
2929 	.read    = mem_read,
2930 	.llseek  = default_llseek,
2931 };
2932 
2933 static void add_debugfs_mem(struct adapter *adap, const char *name,
2934 			    unsigned int idx, unsigned int size_mb)
2935 {
2936 	struct dentry *de;
2937 
2938 	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2939 				 (void *)adap + idx, &mem_debugfs_fops);
2940 	if (de && de->d_inode)
2941 		de->d_inode->i_size = size_mb << 20;
2942 }
2943 
2944 static int setup_debugfs(struct adapter *adap)
2945 {
2946 	int i;
2947 	u32 size;
2948 
2949 	if (IS_ERR_OR_NULL(adap->debugfs_root))
2950 		return -1;
2951 
2952 	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2953 	if (i & EDRAM0_ENABLE) {
2954 		size = t4_read_reg(adap, MA_EDRAM0_BAR);
2955 		add_debugfs_mem(adap, "edc0", MEM_EDC0,	EDRAM_SIZE_GET(size));
2956 	}
2957 	if (i & EDRAM1_ENABLE) {
2958 		size = t4_read_reg(adap, MA_EDRAM1_BAR);
2959 		add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2960 	}
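	/* T4 has a single external memory (MC); T5 exposes MC0 and MC1. */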
2961 	if (is_t4(adap->chip)) {
2962 		size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2963 		if (i & EXT_MEM_ENABLE)
2964 			add_debugfs_mem(adap, "mc", MEM_MC,
2965 					EXT_MEM_SIZE_GET(size));
2966 	} else {
2967 		if (i & EXT_MEM_ENABLE) {
2968 			size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2969 			add_debugfs_mem(adap, "mc0", MEM_MC0,
2970 					EXT_MEM_SIZE_GET(size));
2971 		}
2972 		if (i & EXT_MEM1_ENABLE) {
2973 			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2974 			add_debugfs_mem(adap, "mc1", MEM_MC1,
2975 					EXT_MEM_SIZE_GET(size));
2976 		}
2977 	}
2978 	if (adap->l2t)
2979 		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2980 				    &t4_l2t_fops);
2981 	return 0;
2982 }
2983 
2984 /*
2985  * upper-layer driver support
2986  */
2987 
2988 /*
2989  * Allocate an active-open TID and set it to the supplied value.
2990  */
2991 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2992 {
2993 	int atid = -1;
2994 
2995 	spin_lock_bh(&t->atid_lock);
2996 	if (t->afree) {
2997 		union aopen_entry *p = t->afree;
2998 
2999 		atid = (p - t->atid_tab) + t->atid_base;
3000 		t->afree = p->next;
3001 		p->data = data;
3002 		t->atids_in_use++;
3003 	}
3004 	spin_unlock_bh(&t->atid_lock);
3005 	return atid;
3006 }
3007 EXPORT_SYMBOL(cxgb4_alloc_atid);
3008 
3009 /*
3010  * Release an active-open TID.
3011  */
3012 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3013 {
3014 	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3015 
3016 	spin_lock_bh(&t->atid_lock);
3017 	p->next = t->afree;
3018 	t->afree = p;
3019 	t->atids_in_use--;
3020 	spin_unlock_bh(&t->atid_lock);
3021 }
3022 EXPORT_SYMBOL(cxgb4_free_atid);
3023 
3024 /*
3025  * Allocate a server TID and set it to the supplied value.
3026  */
3027 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3028 {
3029 	int stid;
3030 
3031 	spin_lock_bh(&t->stid_lock);
3032 	if (family == PF_INET) {
3033 		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3034 		if (stid < t->nstids)
3035 			__set_bit(stid, t->stid_bmap);
3036 		else
3037 			stid = -1;
3038 	} else {
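		/*
		 * Non-IPv4 (IPv6) servers appear to require a block of four
		 * consecutive STIDs, hence the order-2 region allocation.
		 */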
3039 		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3040 		if (stid < 0)
3041 			stid = -1;
3042 	}
3043 	if (stid >= 0) {
3044 		t->stid_tab[stid].data = data;
3045 		stid += t->stid_base;
3046 		t->stids_in_use++;
3047 	}
3048 	spin_unlock_bh(&t->stid_lock);
3049 	return stid;
3050 }
3051 EXPORT_SYMBOL(cxgb4_alloc_stid);
3052 
3053 /* Allocate a server filter TID and set it to the supplied value.
3054  */
3055 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3056 {
3057 	int stid;
3058 
3059 	spin_lock_bh(&t->stid_lock);
3060 	if (family == PF_INET) {
3061 		stid = find_next_zero_bit(t->stid_bmap,
3062 				t->nstids + t->nsftids, t->nstids);
3063 		if (stid < (t->nstids + t->nsftids))
3064 			__set_bit(stid, t->stid_bmap);
3065 		else
3066 			stid = -1;
3067 	} else {
3068 		stid = -1;
3069 	}
3070 	if (stid >= 0) {
3071 		t->stid_tab[stid].data = data;
3072 		stid += t->stid_base;
3073 		t->stids_in_use++;
3074 	}
3075 	spin_unlock_bh(&t->stid_lock);
3076 	return stid;
3077 }
3078 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3079 
3080 /* Release a server TID.
3081  */
3082 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3083 {
3084 	stid -= t->stid_base;
3085 	spin_lock_bh(&t->stid_lock);
3086 	if (family == PF_INET)
3087 		__clear_bit(stid, t->stid_bmap);
3088 	else
3089 		bitmap_release_region(t->stid_bmap, stid, 2);
3090 	t->stid_tab[stid].data = NULL;
3091 	t->stids_in_use--;
3092 	spin_unlock_bh(&t->stid_lock);
3093 }
3094 EXPORT_SYMBOL(cxgb4_free_stid);
3095 
3096 /*
3097  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
3098  */
3099 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3100 			   unsigned int tid)
3101 {
3102 	struct cpl_tid_release *req;
3103 
3104 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3105 	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3106 	INIT_TP_WR(req, tid);
3107 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3108 }
3109 
3110 /*
3111  * Queue a TID release request and if necessary schedule a work queue to
3112  * process it.
3113  */
3114 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3115 				    unsigned int tid)
3116 {
3117 	void **p = &t->tid_tab[tid];
3118 	struct adapter *adap = container_of(t, struct adapter, tids);
3119 
3120 	spin_lock_bh(&adap->tid_release_lock);
3121 	*p = adap->tid_release_head;
3122 	/* Low 2 bits encode the Tx channel number */
3123 	adap->tid_release_head = (void **)((uintptr_t)p | chan);
3124 	if (!adap->tid_release_task_busy) {
3125 		adap->tid_release_task_busy = true;
3126 		queue_work(workq, &adap->tid_release_task);
3127 	}
3128 	spin_unlock_bh(&adap->tid_release_lock);
3129 }
3130 
3131 /*
3132  * Process the list of pending TID release requests.
3133  */
3134 static void process_tid_release_list(struct work_struct *work)
3135 {
3136 	struct sk_buff *skb;
3137 	struct adapter *adap;
3138 
3139 	adap = container_of(work, struct adapter, tid_release_task);
3140 
3141 	spin_lock_bh(&adap->tid_release_lock);
3142 	while (adap->tid_release_head) {
3143 		void **p = adap->tid_release_head;
3144 		unsigned int chan = (uintptr_t)p & 3;
3145 		p = (void *)p - chan;
3146 
3147 		adap->tid_release_head = *p;
3148 		*p = NULL;
3149 		spin_unlock_bh(&adap->tid_release_lock);
3150 
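		/*
		 * We are in process context here, so keep retrying the small
		 * allocation until it succeeds; a TID release must not be
		 * dropped.
		 */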
3151 		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3152 					 GFP_KERNEL)))
3153 			schedule_timeout_uninterruptible(1);
3154 
3155 		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3156 		t4_ofld_send(adap, skb);
3157 		spin_lock_bh(&adap->tid_release_lock);
3158 	}
3159 	adap->tid_release_task_busy = false;
3160 	spin_unlock_bh(&adap->tid_release_lock);
3161 }
3162 
3163 /*
3164  * Release a TID and inform HW.  If we are unable to allocate the release
3165  * message we defer to a work queue.
3166  */
3167 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3168 {
3169 	void *old;
3170 	struct sk_buff *skb;
3171 	struct adapter *adap = container_of(t, struct adapter, tids);
3172 
3173 	old = t->tid_tab[tid];
3174 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3175 	if (likely(skb)) {
3176 		t->tid_tab[tid] = NULL;
3177 		mk_tid_release(skb, chan, tid);
3178 		t4_ofld_send(adap, skb);
3179 	} else
3180 		cxgb4_queue_tid_release(t, chan, tid);
3181 	if (old)
3182 		atomic_dec(&t->tids_in_use);
3183 }
3184 EXPORT_SYMBOL(cxgb4_remove_tid);
3185 
3186 /*
3187  * Allocate and initialize the TID tables.  Returns 0 on success.
3188  */
3189 static int tid_init(struct tid_info *t)
3190 {
3191 	size_t size;
3192 	unsigned int stid_bmap_size;
3193 	unsigned int natids = t->natids;
3194 
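	/*
	 * All of the TID tables share a single contiguous allocation; the
	 * pointer assignments below simply carve it into the individual
	 * tables.
	 */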
3195 	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3196 	size = t->ntids * sizeof(*t->tid_tab) +
3197 	       natids * sizeof(*t->atid_tab) +
3198 	       t->nstids * sizeof(*t->stid_tab) +
3199 	       t->nsftids * sizeof(*t->stid_tab) +
3200 	       stid_bmap_size * sizeof(long) +
3201 	       t->nftids * sizeof(*t->ftid_tab) +
3202 	       t->nsftids * sizeof(*t->ftid_tab);
3203 
3204 	t->tid_tab = t4_alloc_mem(size);
3205 	if (!t->tid_tab)
3206 		return -ENOMEM;
3207 
3208 	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3209 	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3210 	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3211 	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3212 	spin_lock_init(&t->stid_lock);
3213 	spin_lock_init(&t->atid_lock);
3214 
3215 	t->stids_in_use = 0;
3216 	t->afree = NULL;
3217 	t->atids_in_use = 0;
3218 	atomic_set(&t->tids_in_use, 0);
3219 
3220 	/* Setup the free list for atid_tab and clear the stid bitmap. */
3221 	if (natids) {
3222 		while (--natids)
3223 			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3224 		t->afree = t->atid_tab;
3225 	}
3226 	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3227 	return 0;
3228 }
3229 
3230 /**
3231  *	cxgb4_create_server - create an IP server
3232  *	@dev: the device
3233  *	@stid: the server TID
3234  *	@sip: local IP address to bind server to
3235  *	@sport: the server's TCP port
3236  *	@queue: queue to direct messages from this server to
3237  *
3238  *	Create an IP server for the given port and address.
3239  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
3240  */
3241 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3242 			__be32 sip, __be16 sport, __be16 vlan,
3243 			unsigned int queue)
3244 {
3245 	unsigned int chan;
3246 	struct sk_buff *skb;
3247 	struct adapter *adap;
3248 	struct cpl_pass_open_req *req;
3249 
3250 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3251 	if (!skb)
3252 		return -ENOMEM;
3253 
3254 	adap = netdev2adap(dev);
3255 	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3256 	INIT_TP_WR(req, 0);
3257 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3258 	req->local_port = sport;
3259 	req->peer_port = htons(0);
3260 	req->local_ip = sip;
3261 	req->peer_ip = htonl(0);
3262 	chan = rxq_to_chan(&adap->sge, queue);
3263 	req->opt0 = cpu_to_be64(TX_CHAN(chan));
3264 	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3265 				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3266 	return t4_mgmt_tx(adap, skb);
3267 }
3268 EXPORT_SYMBOL(cxgb4_create_server);
3269 
3270 /**
3271  *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3272  *	@mtus: the HW MTU table
3273  *	@mtu: the target MTU
3274  *	@idx: index of selected entry in the MTU table
3275  *
3276  *	Returns the index and the value in the HW MTU table that is closest to
3277  *	but does not exceed @mtu, unless @mtu is smaller than any value in the
3278  *	table, in which case that smallest available value is selected.
3279  */
3280 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3281 			    unsigned int *idx)
3282 {
3283 	unsigned int i = 0;
3284 
3285 	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3286 		++i;
3287 	if (idx)
3288 		*idx = i;
3289 	return mtus[i];
3290 }
3291 EXPORT_SYMBOL(cxgb4_best_mtu);
3292 
3293 /**
3294  *	cxgb4_port_chan - get the HW channel of a port
3295  *	@dev: the net device for the port
3296  *
3297  *	Return the HW Tx channel of the given port.
3298  */
3299 unsigned int cxgb4_port_chan(const struct net_device *dev)
3300 {
3301 	return netdev2pinfo(dev)->tx_chan;
3302 }
3303 EXPORT_SYMBOL(cxgb4_port_chan);
3304 
3305 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3306 {
3307 	struct adapter *adap = netdev2adap(dev);
3308 	u32 v1, v2, lp_count, hp_count;
3309 
3310 	v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3311 	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3312 	if (is_t4(adap->chip)) {
3313 		lp_count = G_LP_COUNT(v1);
3314 		hp_count = G_HP_COUNT(v1);
3315 	} else {
3316 		lp_count = G_LP_COUNT_T5(v1);
3317 		hp_count = G_HP_COUNT_T5(v2);
3318 	}
3319 	return lpfifo ? lp_count : hp_count;
3320 }
3321 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3322 
3323 /**
3324  *	cxgb4_port_viid - get the VI id of a port
3325  *	@dev: the net device for the port
3326  *
3327  *	Return the VI id of the given port.
3328  */
3329 unsigned int cxgb4_port_viid(const struct net_device *dev)
3330 {
3331 	return netdev2pinfo(dev)->viid;
3332 }
3333 EXPORT_SYMBOL(cxgb4_port_viid);
3334 
3335 /**
3336  *	cxgb4_port_idx - get the index of a port
3337  *	@dev: the net device for the port
3338  *
3339  *	Return the index of the given port.
3340  */
3341 unsigned int cxgb4_port_idx(const struct net_device *dev)
3342 {
3343 	return netdev2pinfo(dev)->port_id;
3344 }
3345 EXPORT_SYMBOL(cxgb4_port_idx);
3346 
3347 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3348 			 struct tp_tcp_stats *v6)
3349 {
3350 	struct adapter *adap = pci_get_drvdata(pdev);
3351 
3352 	spin_lock(&adap->stats_lock);
3353 	t4_tp_get_tcp_stats(adap, v4, v6);
3354 	spin_unlock(&adap->stats_lock);
3355 }
3356 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3357 
3358 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3359 		      const unsigned int *pgsz_order)
3360 {
3361 	struct adapter *adap = netdev2adap(dev);
3362 
3363 	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3364 	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3365 		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3366 		     HPZ3(pgsz_order[3]));
3367 }
3368 EXPORT_SYMBOL(cxgb4_iscsi_init);
3369 
3370 int cxgb4_flush_eq_cache(struct net_device *dev)
3371 {
3372 	struct adapter *adap = netdev2adap(dev);
3373 	int ret;
3374 
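	/*
	 * Issue a write through the firmware address space to the SGE context
	 * command register; the value written presumably requests a flush of
	 * the egress queue context cache.
	 */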
3375 	ret = t4_fwaddrspace_write(adap, adap->mbox,
3376 				   0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3377 	return ret;
3378 }
3379 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3380 
3381 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3382 {
3383 	u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3384 	__be64 indices;
3385 	int ret;
3386 
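	/*
	 * Each egress queue context appears to occupy 24 bytes; the producer
	 * and consumer indices live in the 8-byte word at offset 8, at bit
	 * positions 9 and 25 respectively.
	 */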
3387 	ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3388 	if (!ret) {
3389 		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3390 		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3391 	}
3392 	return ret;
3393 }
3394 
3395 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3396 			u16 size)
3397 {
3398 	struct adapter *adap = netdev2adap(dev);
3399 	u16 hw_pidx, hw_cidx;
3400 	int ret;
3401 
3402 	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3403 	if (ret)
3404 		goto out;
3405 
3406 	if (pidx != hw_pidx) {
3407 		u16 delta;
3408 
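		/*
		 * The hardware has fallen behind the supplied producer index:
		 * compute the difference, allowing for ring wrap, and ring
		 * the doorbell once for the whole delta.
		 */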
3409 		if (pidx >= hw_pidx)
3410 			delta = pidx - hw_pidx;
3411 		else
3412 			delta = size - hw_pidx + pidx;
3413 		wmb();
3414 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3415 			     QID(qid) | PIDX(delta));
3416 	}
3417 out:
3418 	return ret;
3419 }
3420 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3421 
3422 void cxgb4_disable_db_coalescing(struct net_device *dev)
3423 {
3424 	struct adapter *adap;
3425 
3426 	adap = netdev2adap(dev);
3427 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3428 			 F_NOCOALESCE);
3429 }
3430 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3431 
3432 void cxgb4_enable_db_coalescing(struct net_device *dev)
3433 {
3434 	struct adapter *adap;
3435 
3436 	adap = netdev2adap(dev);
3437 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3438 }
3439 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3440 
3441 static struct pci_driver cxgb4_driver;
3442 
3443 static void check_neigh_update(struct neighbour *neigh)
3444 {
3445 	const struct device *parent;
3446 	const struct net_device *netdev = neigh->dev;
3447 
3448 	if (netdev->priv_flags & IFF_802_1Q_VLAN)
3449 		netdev = vlan_dev_real_dev(netdev);
3450 	parent = netdev->dev.parent;
3451 	if (parent && parent->driver == &cxgb4_driver.driver)
3452 		t4_l2t_update(dev_get_drvdata(parent), neigh);
3453 }
3454 
3455 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3456 		       void *data)
3457 {
3458 	switch (event) {
3459 	case NETEVENT_NEIGH_UPDATE:
3460 		check_neigh_update(data);
3461 		break;
3462 	case NETEVENT_REDIRECT:
3463 	default:
3464 		break;
3465 	}
3466 	return 0;
3467 }
3468 
3469 static bool netevent_registered;
3470 static struct notifier_block cxgb4_netevent_nb = {
3471 	.notifier_call = netevent_cb
3472 };
3473 
3474 static void drain_db_fifo(struct adapter *adap, int usecs)
3475 {
3476 	u32 v1, v2, lp_count, hp_count;
3477 
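	/*
	 * Poll the doorbell FIFO occupancy counts, sleeping for roughly
	 * @usecs between reads, until both the low- and high-priority FIFOs
	 * have drained.
	 */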
3478 	do {
3479 		v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3480 		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3481 		if (is_t4(adap->chip)) {
3482 			lp_count = G_LP_COUNT(v1);
3483 			hp_count = G_HP_COUNT(v1);
3484 		} else {
3485 			lp_count = G_LP_COUNT_T5(v1);
3486 			hp_count = G_HP_COUNT_T5(v2);
3487 		}
3488 
3489 		if (lp_count == 0 && hp_count == 0)
3490 			break;
3491 		set_current_state(TASK_UNINTERRUPTIBLE);
3492 		schedule_timeout(usecs_to_jiffies(usecs));
3493 	} while (1);
3494 }
3495 
3496 static void disable_txq_db(struct sge_txq *q)
3497 {
3498 	spin_lock_irq(&q->db_lock);
3499 	q->db_disabled = 1;
3500 	spin_unlock_irq(&q->db_lock);
3501 }
3502 
3503 static void enable_txq_db(struct sge_txq *q)
3504 {
3505 	spin_lock_irq(&q->db_lock);
3506 	q->db_disabled = 0;
3507 	spin_unlock_irq(&q->db_lock);
3508 }
3509 
3510 static void disable_dbs(struct adapter *adap)
3511 {
3512 	int i;
3513 
3514 	for_each_ethrxq(&adap->sge, i)
3515 		disable_txq_db(&adap->sge.ethtxq[i].q);
3516 	for_each_ofldrxq(&adap->sge, i)
3517 		disable_txq_db(&adap->sge.ofldtxq[i].q);
3518 	for_each_port(adap, i)
3519 		disable_txq_db(&adap->sge.ctrlq[i].q);
3520 }
3521 
3522 static void enable_dbs(struct adapter *adap)
3523 {
3524 	int i;
3525 
3526 	for_each_ethrxq(&adap->sge, i)
3527 		enable_txq_db(&adap->sge.ethtxq[i].q);
3528 	for_each_ofldrxq(&adap->sge, i)
3529 		enable_txq_db(&adap->sge.ofldtxq[i].q);
3530 	for_each_port(adap, i)
3531 		enable_txq_db(&adap->sge.ctrlq[i].q);
3532 }
3533 
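/*
 * Bring the hardware producer index of a Tx queue back in sync with the
 * driver's copy after doorbells were dropped.  The delta written to
 * SGE_PF_KDOORBELL accounts for ring wrap-around; e.g. with q->size = 1024,
 * hw_pidx = 1020 and db_pidx = 3, delta = 1024 - 1020 + 3 = 7.
 */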
3534 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3535 {
3536 	u16 hw_pidx, hw_cidx;
3537 	int ret;
3538 
3539 	spin_lock_bh(&q->db_lock);
3540 	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3541 	if (ret)
3542 		goto out;
3543 	if (q->db_pidx != hw_pidx) {
3544 		u16 delta;
3545 
3546 		if (q->db_pidx >= hw_pidx)
3547 			delta = q->db_pidx - hw_pidx;
3548 		else
3549 			delta = q->size - hw_pidx + q->db_pidx;
3550 		wmb();
3551 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3552 			     QID(q->cntxt_id) | PIDX(delta));
3553 	}
3554 out:
3555 	q->db_disabled = 0;
3556 	spin_unlock_bh(&q->db_lock);
3557 	if (ret)
3558 		CH_WARN(adap, "DB drop recovery failed.\n");
3559 }
3560 static void recover_all_queues(struct adapter *adap)
3561 {
3562 	int i;
3563 
3564 	for_each_ethrxq(&adap->sge, i)
3565 		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3566 	for_each_ofldrxq(&adap->sge, i)
3567 		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3568 	for_each_port(adap, i)
3569 		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3570 }
3571 
3572 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3573 {
3574 	mutex_lock(&uld_mutex);
3575 	if (adap->uld_handle[CXGB4_ULD_RDMA])
3576 		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3577 				cmd);
3578 	mutex_unlock(&uld_mutex);
3579 }
3580 
3581 static void process_db_full(struct work_struct *work)
3582 {
3583 	struct adapter *adap;
3584 
3585 	adap = container_of(work, struct adapter, db_full_task);
3586 
3587 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3588 	drain_db_fifo(adap, dbfifo_drain_delay);
3589 	t4_set_reg_field(adap, SGE_INT_ENABLE3,
3590 			 DBFIFO_HP_INT | DBFIFO_LP_INT,
3591 			 DBFIFO_HP_INT | DBFIFO_LP_INT);
3592 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3593 }
3594 
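/*
 * Doorbell-drop recovery.  On T4 we disable doorbells, drain the doorbell
 * FIFO, resync the producer index of every Tx queue and re-enable
 * doorbells.  On T5 the hardware tells us which queue lost a doorbell, so
 * we recompute that queue's BAR2 user doorbell address, replay the missed
 * PIDX increment directly and then re-enable write-combining on BAR2.
 */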
3595 static void process_db_drop(struct work_struct *work)
3596 {
3597 	struct adapter *adap;
3598 
3599 	adap = container_of(work, struct adapter, db_drop_task);
3600 
3601 	if (is_t4(adap->chip)) {
3602 		disable_dbs(adap);
3603 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3604 		drain_db_fifo(adap, 1);
3605 		recover_all_queues(adap);
3606 		enable_dbs(adap);
3607 	} else {
3608 		u32 dropped_db = t4_read_reg(adap, 0x010ac);
3609 		u16 qid = (dropped_db >> 15) & 0x1ffff;
3610 		u16 pidx_inc = dropped_db & 0x1fff;
3611 		unsigned int s_qpp;
3612 		unsigned short udb_density;
3613 		unsigned long qpshift;
3614 		int page;
3615 		u32 udb;
3616 
3617 		dev_warn(adap->pdev_dev,
3618 			 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3619 			 dropped_db, qid,
3620 			 (dropped_db >> 14) & 1,
3621 			 (dropped_db >> 13) & 1,
3622 			 pidx_inc);
3623 
3624 		drain_db_fifo(adap, 1);
3625 
3626 		s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3627 		udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3628 				SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3629 		qpshift = PAGE_SHIFT - ilog2(udb_density);
3630 		udb = qid << qpshift;
3631 		udb &= PAGE_MASK;
3632 		page = udb / PAGE_SIZE;
3633 		udb += (qid - (page * udb_density)) * 128;
3634 
3635 		writel(PIDX(pidx_inc),  adap->bar2 + udb + 8);
3636 
3637 		/* Re-enable BAR2 WC */
3638 		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3639 	}
3640 
3641 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
3642 }
3643 
3644 void t4_db_full(struct adapter *adap)
3645 {
3646 	if (is_t4(adap->chip)) {
3647 		t4_set_reg_field(adap, SGE_INT_ENABLE3,
3648 				 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3649 		queue_work(workq, &adap->db_full_task);
3650 	}
3651 }
3652 
3653 void t4_db_dropped(struct adapter *adap)
3654 {
3655 	if (is_t4(adap->chip))
3656 		queue_work(workq, &adap->db_drop_task);
3657 }
3658 
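/*
 * Populate a cxgb4_lld_info block describing this adapter's resources
 * (queues, L2T, TIDs, doorbell registers, ...) and hand it to the given
 * ULD's add() method.  On success the returned handle is stored and, if the
 * adapter is already up, the ULD is immediately told CXGB4_STATE_UP.
 */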
3659 static void uld_attach(struct adapter *adap, unsigned int uld)
3660 {
3661 	void *handle;
3662 	struct cxgb4_lld_info lli;
3663 	unsigned short i;
3664 
3665 	lli.pdev = adap->pdev;
3666 	lli.l2t = adap->l2t;
3667 	lli.tids = &adap->tids;
3668 	lli.ports = adap->port;
3669 	lli.vr = &adap->vres;
3670 	lli.mtus = adap->params.mtus;
3671 	if (uld == CXGB4_ULD_RDMA) {
3672 		lli.rxq_ids = adap->sge.rdma_rxq;
3673 		lli.nrxq = adap->sge.rdmaqs;
3674 	} else if (uld == CXGB4_ULD_ISCSI) {
3675 		lli.rxq_ids = adap->sge.ofld_rxq;
3676 		lli.nrxq = adap->sge.ofldqsets;
3677 	}
3678 	lli.ntxq = adap->sge.ofldqsets;
3679 	lli.nchan = adap->params.nports;
3680 	lli.nports = adap->params.nports;
3681 	lli.wr_cred = adap->params.ofldq_wr_cred;
3682 	lli.adapter_type = adap->params.rev;
3683 	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3684 	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3685 			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3686 			(adap->fn * 4));
3687 	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3688 			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3689 			(adap->fn * 4));
3690 	lli.filt_mode = adap->filter_mode;
3691 	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3692 	for (i = 0; i < NCHAN; i++)
3693 		lli.tx_modq[i] = i;
3694 	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3695 	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3696 	lli.fw_vers = adap->params.fw_vers;
3697 	lli.dbfifo_int_thresh = dbfifo_int_thresh;
3698 	lli.sge_pktshift = adap->sge.pktshift;
3699 	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3700 
3701 	handle = ulds[uld].add(&lli);
3702 	if (IS_ERR(handle)) {
3703 		dev_warn(adap->pdev_dev,
3704 			 "could not attach to the %s driver, error %ld\n",
3705 			 uld_str[uld], PTR_ERR(handle));
3706 		return;
3707 	}
3708 
3709 	adap->uld_handle[uld] = handle;
3710 
3711 	if (!netevent_registered) {
3712 		register_netevent_notifier(&cxgb4_netevent_nb);
3713 		netevent_registered = true;
3714 	}
3715 
3716 	if (adap->flags & FULL_INIT_DONE)
3717 		ulds[uld].state_change(handle, CXGB4_STATE_UP);
3718 }
3719 
3720 static void attach_ulds(struct adapter *adap)
3721 {
3722 	unsigned int i;
3723 
3724 	mutex_lock(&uld_mutex);
3725 	list_add_tail(&adap->list_node, &adapter_list);
3726 	for (i = 0; i < CXGB4_ULD_MAX; i++)
3727 		if (ulds[i].add)
3728 			uld_attach(adap, i);
3729 	mutex_unlock(&uld_mutex);
3730 }
3731 
3732 static void detach_ulds(struct adapter *adap)
3733 {
3734 	unsigned int i;
3735 
3736 	mutex_lock(&uld_mutex);
3737 	list_del(&adap->list_node);
3738 	for (i = 0; i < CXGB4_ULD_MAX; i++)
3739 		if (adap->uld_handle[i]) {
3740 			ulds[i].state_change(adap->uld_handle[i],
3741 					     CXGB4_STATE_DETACH);
3742 			adap->uld_handle[i] = NULL;
3743 		}
3744 	if (netevent_registered && list_empty(&adapter_list)) {
3745 		unregister_netevent_notifier(&cxgb4_netevent_nb);
3746 		netevent_registered = false;
3747 	}
3748 	mutex_unlock(&uld_mutex);
3749 }
3750 
3751 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3752 {
3753 	unsigned int i;
3754 
3755 	mutex_lock(&uld_mutex);
3756 	for (i = 0; i < CXGB4_ULD_MAX; i++)
3757 		if (adap->uld_handle[i])
3758 			ulds[i].state_change(adap->uld_handle[i], new_state);
3759 	mutex_unlock(&uld_mutex);
3760 }
3761 
3762 /**
3763  *	cxgb4_register_uld - register an upper-layer driver
3764  *	@type: the ULD type
3765  *	@p: the ULD methods
3766  *
3767  *	Registers an upper-layer driver with this driver and notifies the ULD
3768  *	about any presently available devices that support its type.  Returns
3769  *	%-EBUSY if a ULD of the same type is already registered.
3770  */
3771 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3772 {
3773 	int ret = 0;
3774 	struct adapter *adap;
3775 
3776 	if (type >= CXGB4_ULD_MAX)
3777 		return -EINVAL;
3778 	mutex_lock(&uld_mutex);
3779 	if (ulds[type].add) {
3780 		ret = -EBUSY;
3781 		goto out;
3782 	}
3783 	ulds[type] = *p;
3784 	list_for_each_entry(adap, &adapter_list, list_node)
3785 		uld_attach(adap, type);
3786 out:	mutex_unlock(&uld_mutex);
3787 	return ret;
3788 }
3789 EXPORT_SYMBOL(cxgb4_register_uld);
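
/*
 * A minimal (hypothetical) usage sketch for an upper-layer driver -- the
 * callback names below are illustrative only, but the .add, .state_change
 * and .control methods are the ones invoked from this file:
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.add          = my_uld_add,
 *		.state_change = my_uld_state_change,
 *		.control      = my_uld_control,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 *
 * my_uld_add() receives a struct cxgb4_lld_info and returns an opaque
 * handle (or an ERR_PTR() value on failure).
 */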
3790 
3791 /**
3792  *	cxgb4_unregister_uld - unregister an upper-layer driver
3793  *	@type: the ULD type
3794  *
3795  *	Unregisters an existing upper-layer driver.
3796  */
3797 int cxgb4_unregister_uld(enum cxgb4_uld type)
3798 {
3799 	struct adapter *adap;
3800 
3801 	if (type >= CXGB4_ULD_MAX)
3802 		return -EINVAL;
3803 	mutex_lock(&uld_mutex);
3804 	list_for_each_entry(adap, &adapter_list, list_node)
3805 		adap->uld_handle[type] = NULL;
3806 	ulds[type].add = NULL;
3807 	mutex_unlock(&uld_mutex);
3808 	return 0;
3809 }
3810 EXPORT_SYMBOL(cxgb4_unregister_uld);
3811 
3812 /**
3813  *	cxgb_up - enable the adapter
3814  *	@adap: adapter being enabled
3815  *
3816  *	Called when the first port is enabled, this function performs the
3817  *	actions necessary to make an adapter operational, such as completing
3818  *	the initialization of HW modules, and enabling interrupts.
3819  *
3820  *	Must be called with the rtnl lock held.
3821  */
3822 static int cxgb_up(struct adapter *adap)
3823 {
3824 	int err;
3825 
3826 	err = setup_sge_queues(adap);
3827 	if (err)
3828 		goto out;
3829 	err = setup_rss(adap);
3830 	if (err)
3831 		goto freeq;
3832 
3833 	if (adap->flags & USING_MSIX) {
3834 		name_msix_vecs(adap);
3835 		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
3836 				  adap->msix_info[0].desc, adap);
3837 		if (err)
3838 			goto irq_err;
3839 
3840 		err = request_msix_queue_irqs(adap);
3841 		if (err) {
3842 			free_irq(adap->msix_info[0].vec, adap);
3843 			goto irq_err;
3844 		}
3845 	} else {
3846 		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
3847 				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
3848 				  adap->port[0]->name, adap);
3849 		if (err)
3850 			goto irq_err;
3851 	}
3852 	enable_rx(adap);
3853 	t4_sge_start(adap);
3854 	t4_intr_enable(adap);
3855 	adap->flags |= FULL_INIT_DONE;
3856 	notify_ulds(adap, CXGB4_STATE_UP);
3857  out:
3858 	return err;
3859  irq_err:
3860 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
3861  freeq:
3862 	t4_free_sge_resources(adap);
3863 	goto out;
3864 }
3865 
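/*
 * Undo cxgb_up(): mask interrupts, cancel deferred work, release the IRQs
 * and SGE resources, and clear FULL_INIT_DONE.
 */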
3866 static void cxgb_down(struct adapter *adapter)
3867 {
3868 	t4_intr_disable(adapter);
3869 	cancel_work_sync(&adapter->tid_release_task);
3870 	cancel_work_sync(&adapter->db_full_task);
3871 	cancel_work_sync(&adapter->db_drop_task);
3872 	adapter->tid_release_task_busy = false;
3873 	adapter->tid_release_head = NULL;
3874 
3875 	if (adapter->flags & USING_MSIX) {
3876 		free_msix_queue_irqs(adapter);
3877 		free_irq(adapter->msix_info[0].vec, adapter);
3878 	} else
3879 		free_irq(adapter->pdev->irq, adapter);
3880 	quiesce_rx(adapter);
3881 	t4_sge_stop(adapter);
3882 	t4_free_sge_resources(adapter);
3883 	adapter->flags &= ~FULL_INIT_DONE;
3884 }
3885 
3886 /*
3887  * net_device operations
3888  */
3889 static int cxgb_open(struct net_device *dev)
3890 {
3891 	int err;
3892 	struct port_info *pi = netdev_priv(dev);
3893 	struct adapter *adapter = pi->adapter;
3894 
3895 	netif_carrier_off(dev);
3896 
3897 	if (!(adapter->flags & FULL_INIT_DONE)) {
3898 		err = cxgb_up(adapter);
3899 		if (err < 0)
3900 			return err;
3901 	}
3902 
3903 	err = link_start(dev);
3904 	if (!err)
3905 		netif_tx_start_all_queues(dev);
3906 	return err;
3907 }
3908 
3909 static int cxgb_close(struct net_device *dev)
3910 {
3911 	struct port_info *pi = netdev_priv(dev);
3912 	struct adapter *adapter = pi->adapter;
3913 
3914 	netif_tx_stop_all_queues(dev);
3915 	netif_carrier_off(dev);
3916 	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
3917 }
3918 
3919 /* Return an error number if the indicated filter isn't writable ...
3920  */
3921 static int writable_filter(struct filter_entry *f)
3922 {
3923 	if (f->locked)
3924 		return -EPERM;
3925 	if (f->pending)
3926 		return -EBUSY;
3927 
3928 	return 0;
3929 }
3930 
3931 /* Delete the filter at the specified index (if valid).  This checks for all
3932  * the common problems with doing this, such as the filter being locked or
3933  * currently pending in another operation.
3934  */
3935 static int delete_filter(struct adapter *adapter, unsigned int fidx)
3936 {
3937 	struct filter_entry *f;
3938 	int ret;
3939 
3940 	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
3941 		return -EINVAL;
3942 
3943 	f = &adapter->tids.ftid_tab[fidx];
3944 	ret = writable_filter(f);
3945 	if (ret)
3946 		return ret;
3947 	if (f->valid)
3948 		return del_filter_wr(adapter, fidx);
3949 
3950 	return 0;
3951 }
3952 
3953 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
3954 		__be32 sip, __be16 sport, __be16 vlan,
3955 		unsigned int queue, unsigned char port, unsigned char mask)
3956 {
3957 	int ret;
3958 	struct filter_entry *f;
3959 	struct adapter *adap;
3960 	int i;
3961 	u8 *val;
3962 
3963 	adap = netdev2adap(dev);
3964 
3965 	/* Adjust stid to correct filter index */
3966 	stid -= adap->tids.nstids;
3967 	stid += adap->tids.nftids;
3968 
3969 	/* Check to make sure the filter requested is writable ...
3970 	 */
3971 	f = &adap->tids.ftid_tab[stid];
3972 	ret = writable_filter(f);
3973 	if (ret)
3974 		return ret;
3975 
3976 	/* Clear out any old resources being used by the filter before
3977 	 * we start constructing the new filter.
3978 	 */
3979 	if (f->valid)
3980 		clear_filter(adap, f);
3981 
3982 	/* Clear out filter specifications */
3983 	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
3984 	f->fs.val.lport = cpu_to_be16(sport);
3985 	f->fs.mask.lport  = ~0;
3986 	val = (u8 *)&sip;
3987 	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
3988 		for (i = 0; i < 4; i++) {
3989 			f->fs.val.lip[i] = val[i];
3990 			f->fs.mask.lip[i] = ~0;
3991 		}
3992 		if (adap->filter_mode & F_PORT) {
3993 			f->fs.val.iport = port;
3994 			f->fs.mask.iport = mask;
3995 		}
3996 	}
3997 
3998 	f->fs.dirsteer = 1;
3999 	f->fs.iq = queue;
4000 	/* Mark filter as locked */
4001 	f->locked = 1;
4002 	f->fs.rpttid = 1;
4003 
4004 	ret = set_filter_wr(adap, stid);
4005 	if (ret) {
4006 		clear_filter(adap, f);
4007 		return ret;
4008 	}
4009 
4010 	return 0;
4011 }
4012 EXPORT_SYMBOL(cxgb4_create_server_filter);
4013 
4014 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4015 		unsigned int queue, bool ipv6)
4016 {
4017 	int ret;
4018 	struct filter_entry *f;
4019 	struct adapter *adap;
4020 
4021 	adap = netdev2adap(dev);
4022 
4023 	/* Adjust stid to correct filter index */
4024 	stid -= adap->tids.nstids;
4025 	stid += adap->tids.nftids;
4026 
4027 	f = &adap->tids.ftid_tab[stid];
4028 	/* Unlock the filter */
4029 	f->locked = 0;
4030 
4031 	ret = delete_filter(adap, stid);
4032 	if (ret)
4033 		return ret;
4034 
4035 	return 0;
4036 }
4037 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4038 
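/*
 * ndo_get_stats64 handler: translate the per-port MAC statistics gathered
 * by t4_get_port_stats() into the generic rtnl_link_stats64 counters.
 */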
4039 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4040 						struct rtnl_link_stats64 *ns)
4041 {
4042 	struct port_stats stats;
4043 	struct port_info *p = netdev_priv(dev);
4044 	struct adapter *adapter = p->adapter;
4045 
4046 	spin_lock(&adapter->stats_lock);
4047 	t4_get_port_stats(adapter, p->tx_chan, &stats);
4048 	spin_unlock(&adapter->stats_lock);
4049 
4050 	ns->tx_bytes   = stats.tx_octets;
4051 	ns->tx_packets = stats.tx_frames;
4052 	ns->rx_bytes   = stats.rx_octets;
4053 	ns->rx_packets = stats.rx_frames;
4054 	ns->multicast  = stats.rx_mcast_frames;
4055 
4056 	/* detailed rx_errors */
4057 	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4058 			       stats.rx_runt;
4059 	ns->rx_over_errors   = 0;
4060 	ns->rx_crc_errors    = stats.rx_fcs_err;
4061 	ns->rx_frame_errors  = stats.rx_symbol_err;
4062 	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
4063 			       stats.rx_ovflow2 + stats.rx_ovflow3 +
4064 			       stats.rx_trunc0 + stats.rx_trunc1 +
4065 			       stats.rx_trunc2 + stats.rx_trunc3;
4066 	ns->rx_missed_errors = 0;
4067 
4068 	/* detailed tx_errors */
4069 	ns->tx_aborted_errors   = 0;
4070 	ns->tx_carrier_errors   = 0;
4071 	ns->tx_fifo_errors      = 0;
4072 	ns->tx_heartbeat_errors = 0;
4073 	ns->tx_window_errors    = 0;
4074 
4075 	ns->tx_errors = stats.tx_error_frames;
4076 	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4077 		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4078 	return ns;
4079 }
4080 
4081 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4082 {
4083 	unsigned int mbox;
4084 	int ret = 0, prtad, devad;
4085 	struct port_info *pi = netdev_priv(dev);
4086 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4087 
4088 	switch (cmd) {
4089 	case SIOCGMIIPHY:
4090 		if (pi->mdio_addr < 0)
4091 			return -EOPNOTSUPP;
4092 		data->phy_id = pi->mdio_addr;
4093 		break;
4094 	case SIOCGMIIREG:
4095 	case SIOCSMIIREG:
4096 		if (mdio_phy_id_is_c45(data->phy_id)) {
4097 			prtad = mdio_phy_id_prtad(data->phy_id);
4098 			devad = mdio_phy_id_devad(data->phy_id);
4099 		} else if (data->phy_id < 32) {
4100 			prtad = data->phy_id;
4101 			devad = 0;
4102 			data->reg_num &= 0x1f;
4103 		} else
4104 			return -EINVAL;
4105 
4106 		mbox = pi->adapter->fn;
4107 		if (cmd == SIOCGMIIREG)
4108 			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4109 					 data->reg_num, &data->val_out);
4110 		else
4111 			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4112 					 data->reg_num, data->val_in);
4113 		break;
4114 	default:
4115 		return -EOPNOTSUPP;
4116 	}
4117 	return ret;
4118 }
4119 
4120 static void cxgb_set_rxmode(struct net_device *dev)
4121 {
4122 	/* unfortunately we can't return errors to the stack */
4123 	set_rxmode(dev, -1, false);
4124 }
4125 
4126 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4127 {
4128 	int ret;
4129 	struct port_info *pi = netdev_priv(dev);
4130 
4131 	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
4132 		return -EINVAL;
4133 	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4134 			    -1, -1, -1, true);
4135 	if (!ret)
4136 		dev->mtu = new_mtu;
4137 	return ret;
4138 }
4139 
4140 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4141 {
4142 	int ret;
4143 	struct sockaddr *addr = p;
4144 	struct port_info *pi = netdev_priv(dev);
4145 
4146 	if (!is_valid_ether_addr(addr->sa_data))
4147 		return -EADDRNOTAVAIL;
4148 
4149 	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4150 			    pi->xact_addr_filt, addr->sa_data, true, true);
4151 	if (ret < 0)
4152 		return ret;
4153 
4154 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4155 	pi->xact_addr_filt = ret;
4156 	return 0;
4157 }
4158 
4159 #ifdef CONFIG_NET_POLL_CONTROLLER
4160 static void cxgb_netpoll(struct net_device *dev)
4161 {
4162 	struct port_info *pi = netdev_priv(dev);
4163 	struct adapter *adap = pi->adapter;
4164 
4165 	if (adap->flags & USING_MSIX) {
4166 		int i;
4167 		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4168 
4169 		for (i = pi->nqsets; i; i--, rx++)
4170 			t4_sge_intr_msix(0, &rx->rspq);
4171 	} else
4172 		t4_intr_handler(adap)(0, adap);
4173 }
4174 #endif
4175 
4176 static const struct net_device_ops cxgb4_netdev_ops = {
4177 	.ndo_open             = cxgb_open,
4178 	.ndo_stop             = cxgb_close,
4179 	.ndo_start_xmit       = t4_eth_xmit,
4180 	.ndo_get_stats64      = cxgb_get_stats,
4181 	.ndo_set_rx_mode      = cxgb_set_rxmode,
4182 	.ndo_set_mac_address  = cxgb_set_mac_addr,
4183 	.ndo_set_features     = cxgb_set_features,
4184 	.ndo_validate_addr    = eth_validate_addr,
4185 	.ndo_do_ioctl         = cxgb_ioctl,
4186 	.ndo_change_mtu       = cxgb_change_mtu,
4187 #ifdef CONFIG_NET_POLL_CONTROLLER
4188 	.ndo_poll_controller  = cxgb_netpoll,
4189 #endif
4190 };
4191 
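/*
 * Called on a fatal hardware error: stop the SGE (clear GLOBALENABLE) and
 * mask all interrupts so the adapter stops generating traffic.
 */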
4192 void t4_fatal_err(struct adapter *adap)
4193 {
4194 	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4195 	t4_intr_disable(adap);
4196 	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4197 }
4198 
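/*
 * Program PCIe memory windows 0-2, which the driver uses to access adapter
 * memory.  On T4 the windows are programmed with absolute addresses based
 * on BAR0; on T5 only the offset within the BAR is written.  The WINDOW
 * field appears to encode the aperture size as log2(size in KB), hence the
 * "ilog2(...) - 10".
 */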
4199 static void setup_memwin(struct adapter *adap)
4200 {
4201 	u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4202 
4203 	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
4204 	if (is_t4(adap->chip)) {
4205 		mem_win0_base = bar0 + MEMWIN0_BASE;
4206 		mem_win1_base = bar0 + MEMWIN1_BASE;
4207 		mem_win2_base = bar0 + MEMWIN2_BASE;
4208 	} else {
4209 		/* For T5, only relative offset inside the PCIe BAR is passed */
4210 		mem_win0_base = MEMWIN0_BASE;
4211 		mem_win1_base = MEMWIN1_BASE_T5;
4212 		mem_win2_base = MEMWIN2_BASE_T5;
4213 	}
4214 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4215 		     mem_win0_base | BIR(0) |
4216 		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4217 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4218 		     mem_win1_base | BIR(0) |
4219 		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4220 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4221 		     mem_win2_base | BIR(0) |
4222 		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
4223 }
4224 
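/*
 * If on-chip queue (OCQ) memory was provisioned for RDMA, expose it through
 * PCIe memory window 3 so the RDMA ULD can reach it.
 */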
4225 static void setup_memwin_rdma(struct adapter *adap)
4226 {
4227 	if (adap->vres.ocq.size) {
4228 		unsigned int start, sz_kb;
4229 
4230 		start = pci_resource_start(adap->pdev, 2) +
4231 			OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4232 		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4233 		t4_write_reg(adap,
4234 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4235 			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
4236 		t4_write_reg(adap,
4237 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4238 			     adap->vres.ocq.start);
4239 		t4_read_reg(adap,
4240 			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4241 	}
4242 }
4243 
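/*
 * Basic re-initialization of an already-reset adapter: re-read and select
 * device capabilities, reprogram global RSS and PF resources, and apply a
 * handful of TP tweaks before t4_early_init().  Used on the EEH slot-reset
 * path below.
 */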
4244 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4245 {
4246 	u32 v;
4247 	int ret;
4248 
4249 	/* get device capabilities */
4250 	memset(c, 0, sizeof(*c));
4251 	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4252 			       FW_CMD_REQUEST | FW_CMD_READ);
4253 	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4254 	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4255 	if (ret < 0)
4256 		return ret;
4257 
4258 	/* select capabilities we'll be using */
4259 	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4260 		if (!vf_acls)
4261 			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4262 		else
4263 			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4264 	} else if (vf_acls) {
4265 		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4266 		return ret;
4267 	}
4268 	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4269 			       FW_CMD_REQUEST | FW_CMD_WRITE);
4270 	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4271 	if (ret < 0)
4272 		return ret;
4273 
4274 	ret = t4_config_glbl_rss(adap, adap->fn,
4275 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4276 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4277 				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4278 	if (ret < 0)
4279 		return ret;
4280 
4281 	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4282 			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4283 	if (ret < 0)
4284 		return ret;
4285 
4286 	t4_sge_init(adap);
4287 
4288 	/* tweak some settings */
4289 	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4290 	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4291 	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4292 	v = t4_read_reg(adap, TP_PIO_DATA);
4293 	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4294 
4295 	/* first 4 Tx modulation queues point to consecutive Tx channels */
4296 	adap->params.tp.tx_modq_map = 0xE4;
4297 	t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4298 		     V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4299 
4300 	/* associate each Tx modulation queue with consecutive Tx channels */
4301 	v = 0x84218421;
4302 	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4303 			  &v, 1, A_TP_TX_SCHED_HDR);
4304 	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4305 			  &v, 1, A_TP_TX_SCHED_FIFO);
4306 	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4307 			  &v, 1, A_TP_TX_SCHED_PCMD);
4308 
4309 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4310 	if (is_offload(adap)) {
4311 		t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4312 			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4313 			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4314 			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4315 			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4316 		t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4317 			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4318 			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4319 			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4320 			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4321 	}
4322 
4323 	/* get basic stuff going */
4324 	return t4_early_init(adap, adap->fn);
4325 }
4326 
4327 /*
4328  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
4329  */
4330 #define MAX_ATIDS 8192U
4331 
4332 /*
4333  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4334  *
4335  * If the firmware we're dealing with has Configuration File support, then
4336  * we use that to perform all configuration
4337  */
4338 
4339 /*
4340  * Tweak configuration based on module parameters, etc.  Most of these have
4341  * defaults assigned to them by Firmware Configuration Files (if we're using
4342  * them) but need to be explicitly set if we're using hard-coded
4343  * initialization.  But even in the case of using Firmware Configuration
4344  * Files, we'd like to expose the ability to change these via module
4345  * parameters so these are essentially common tweaks/settings for
4346  * Configuration Files and hard-coded initialization ...
4347  */
4348 static int adap_init0_tweaks(struct adapter *adapter)
4349 {
4350 	/*
4351 	 * Fix up various Host-Dependent Parameters like Page Size, Cache
4352 	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
4353 	 * 64B Cache Line Size ...
4354 	 */
4355 	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4356 
4357 	/*
4358 	 * Process module parameters which affect early initialization.
4359 	 */
4360 	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4361 		dev_err(&adapter->pdev->dev,
4362 			"Ignoring illegal rx_dma_offset=%d, using 2\n",
4363 			rx_dma_offset);
4364 		rx_dma_offset = 2;
4365 	}
4366 	t4_set_reg_field(adapter, SGE_CONTROL,
4367 			 PKTSHIFT_MASK,
4368 			 PKTSHIFT(rx_dma_offset));
4369 
4370 	/*
4371 	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4372 	 * adds the pseudo header itself.
4373 	 */
4374 	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4375 			       CSUM_HAS_PSEUDO_HDR, 0);
4376 
4377 	return 0;
4378 }
4379 
4380 /*
4381  * Attempt to initialize the adapter via a Firmware Configuration File.
4382  */
4383 static int adap_init0_config(struct adapter *adapter, int reset)
4384 {
4385 	struct fw_caps_config_cmd caps_cmd;
4386 	const struct firmware *cf;
4387 	unsigned long mtype = 0, maddr = 0;
4388 	u32 finiver, finicsum, cfcsum;
4389 	int ret, using_flash;
4390 	char *fw_config_file, fw_config_file_path[256];
4391 
4392 	/*
4393 	 * Reset device if necessary.
4394 	 */
4395 	if (reset) {
4396 		ret = t4_fw_reset(adapter, adapter->mbox,
4397 				  PIORSTMODE | PIORST);
4398 		if (ret < 0)
4399 			goto bye;
4400 	}
4401 
4402 	/*
4403 	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4404 	 * then use that.  Otherwise, use the configuration file stored
4405 	 * in the adapter flash ...
4406 	 */
4407 	switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
4408 	case CHELSIO_T4:
4409 		fw_config_file = FW_CFNAME;
4410 		break;
4411 	case CHELSIO_T5:
4412 		fw_config_file = FW5_CFNAME;
4413 		break;
4414 	default:
4415 		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4416 		       adapter->pdev->device);
4417 		ret = -EINVAL;
4418 		goto bye;
4419 	}
4420 
4421 	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4422 	if (ret < 0) {
4423 		using_flash = 1;
4424 		mtype = FW_MEMTYPE_CF_FLASH;
4425 		maddr = t4_flash_cfg_addr(adapter);
4426 	} else {
4427 		u32 params[7], val[7];
4428 
4429 		using_flash = 0;
4430 		if (cf->size >= FLASH_CFG_MAX_SIZE)
4431 			ret = -ENOMEM;
4432 		else {
4433 			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4434 			     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4435 			ret = t4_query_params(adapter, adapter->mbox,
4436 					      adapter->fn, 0, 1, params, val);
4437 			if (ret == 0) {
4438 				/*
4439 				 * For t4_memory_write() below addresses and
4440 				 * sizes have to be in terms of multiples of 4
4441 				 * bytes.  So, if the Configuration File isn't
4442 				 * a multiple of 4 bytes in length we'll have
4443 				 * to write that out separately since we can't
4444 				 * guarantee that the bytes following the
4445 				 * residual byte in the buffer returned by
4446 				 * request_firmware() are zeroed out ...
4447 				 */
4448 				size_t resid = cf->size & 0x3;
4449 				size_t size = cf->size & ~0x3;
4450 				__be32 *data = (__be32 *)cf->data;
4451 
4452 				mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4453 				maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4454 
4455 				ret = t4_memory_write(adapter, mtype, maddr,
4456 						      size, data);
4457 				if (ret == 0 && resid != 0) {
4458 					union {
4459 						__be32 word;
4460 						char buf[4];
4461 					} last;
4462 					int i;
4463 
4464 					last.word = data[size >> 2];
4465 					for (i = resid; i < 4; i++)
4466 						last.buf[i] = 0;
4467 					ret = t4_memory_write(adapter, mtype,
4468 							      maddr + size,
4469 							      4, &last.word);
4470 				}
4471 			}
4472 		}
4473 
4474 		release_firmware(cf);
4475 		if (ret)
4476 			goto bye;
4477 	}
4478 
4479 	/*
4480 	 * Issue a Capability Configuration command to the firmware to get it
4481 	 * to parse the Configuration File.  We don't use t4_fw_config_file()
4482 	 * because we want the ability to modify various features after we've
4483 	 * processed the configuration file ...
4484 	 */
4485 	memset(&caps_cmd, 0, sizeof(caps_cmd));
4486 	caps_cmd.op_to_write =
4487 		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4488 		      FW_CMD_REQUEST |
4489 		      FW_CMD_READ);
4490 	caps_cmd.cfvalid_to_len16 =
4491 		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4492 		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4493 		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4494 		      FW_LEN16(caps_cmd));
4495 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4496 			 &caps_cmd);
4497 	if (ret < 0)
4498 		goto bye;
4499 
4500 	finiver = ntohl(caps_cmd.finiver);
4501 	finicsum = ntohl(caps_cmd.finicsum);
4502 	cfcsum = ntohl(caps_cmd.cfcsum);
4503 	if (finicsum != cfcsum)
4504 		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4505 			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4506 			 finicsum, cfcsum);
4507 
4508 	/*
4509 	 * And now tell the firmware to use the configuration we just loaded.
4510 	 */
4511 	caps_cmd.op_to_write =
4512 		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4513 		      FW_CMD_REQUEST |
4514 		      FW_CMD_WRITE);
4515 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4516 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4517 			 NULL);
4518 	if (ret < 0)
4519 		goto bye;
4520 
4521 	/*
4522 	 * Tweak configuration based on system architecture, module
4523 	 * parameters, etc.
4524 	 */
4525 	ret = adap_init0_tweaks(adapter);
4526 	if (ret < 0)
4527 		goto bye;
4528 
4529 	/*
4530 	 * And finally tell the firmware to initialize itself using the
4531 	 * parameters from the Configuration File.
4532 	 */
4533 	ret = t4_fw_initialize(adapter, adapter->mbox);
4534 	if (ret < 0)
4535 		goto bye;
4536 
4537 	sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
4538 	/*
4539 	 * Return successfully and note that we're operating with parameters
4540 	 * supplied by the Configuration File rather than hard-wired
4541 	 * initialization constants buried in the driver.
4542 	 */
4543 	adapter->flags |= USING_SOFT_PARAMS;
4544 	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4545 		 "Configuration File %s, version %#x, computed checksum %#x\n",
4546 		 (using_flash
4547 		  ? "in device FLASH"
4548 		  : fw_config_file_path),
4549 		 finiver, cfcsum);
4550 	return 0;
4551 
4552 	/*
4553 	 * Something bad happened.  Return the error ...  (If the "error"
4554 	 * is that there's no Configuration File on the adapter we don't
4555 	 * want to issue a warning since this is fairly common.)
4556 	 */
4557 bye:
4558 	if (ret != -ENOENT)
4559 		dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
4560 			 -ret);
4561 	return ret;
4562 }
4563 
4564 /*
4565  * Attempt to initialize the adapter via hard-coded, driver supplied
4566  * parameters ...
4567  */
4568 static int adap_init0_no_config(struct adapter *adapter, int reset)
4569 {
4570 	struct sge *s = &adapter->sge;
4571 	struct fw_caps_config_cmd caps_cmd;
4572 	u32 v;
4573 	int i, ret;
4574 
4575 	/*
4576 	 * Reset device if necessary
4577 	 */
4578 	if (reset) {
4579 		ret = t4_fw_reset(adapter, adapter->mbox,
4580 				  PIORSTMODE | PIORST);
4581 		if (ret < 0)
4582 			goto bye;
4583 	}
4584 
4585 	/*
4586 	 * Get device capabilities and select which we'll be using.
4587 	 */
4588 	memset(&caps_cmd, 0, sizeof(caps_cmd));
4589 	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4590 				     FW_CMD_REQUEST | FW_CMD_READ);
4591 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4592 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4593 			 &caps_cmd);
4594 	if (ret < 0)
4595 		goto bye;
4596 
4597 	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4598 		if (!vf_acls)
4599 			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4600 		else
4601 			caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4602 	} else if (vf_acls) {
4603 		dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
4604 		goto bye;
4605 	}
4606 	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4607 			      FW_CMD_REQUEST | FW_CMD_WRITE);
4608 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4609 			 NULL);
4610 	if (ret < 0)
4611 		goto bye;
4612 
4613 	/*
4614 	 * Tweak configuration based on system architecture, module
4615 	 * parameters, etc.
4616 	 */
4617 	ret = adap_init0_tweaks(adapter);
4618 	if (ret < 0)
4619 		goto bye;
4620 
4621 	/*
4622 	 * Select RSS Global Mode we want to use.  We use "Basic Virtual"
4623 	 * mode which maps each Virtual Interface to its own section of
4624 	 * the RSS Table and we turn on all map and hash enables ...
4625 	 */
4626 	adapter->flags |= RSS_TNLALLLOOKUP;
4627 	ret = t4_config_glbl_rss(adapter, adapter->mbox,
4628 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4629 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4630 				 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4631 				 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4632 					FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4633 	if (ret < 0)
4634 		goto bye;
4635 
4636 	/*
4637 	 * Set up our own fundamental resource provisioning ...
4638 	 */
4639 	ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4640 			  PFRES_NEQ, PFRES_NETHCTRL,
4641 			  PFRES_NIQFLINT, PFRES_NIQ,
4642 			  PFRES_TC, PFRES_NVI,
4643 			  FW_PFVF_CMD_CMASK_MASK,
4644 			  pfvfres_pmask(adapter, adapter->fn, 0),
4645 			  PFRES_NEXACTF,
4646 			  PFRES_R_CAPS, PFRES_WX_CAPS);
4647 	if (ret < 0)
4648 		goto bye;
4649 
4650 	/*
4651 	 * Perform low level SGE initialization.  We need to do this before we
4652 	 * send the firmware the INITIALIZE command because that will cause
4653 	 * any other PF Drivers which are waiting for the Master
4654 	 * Initialization to proceed forward.
4655 	 */
4656 	for (i = 0; i < SGE_NTIMERS - 1; i++)
4657 		s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4658 	s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4659 	s->counter_val[0] = 1;
4660 	for (i = 1; i < SGE_NCOUNTERS; i++)
4661 		s->counter_val[i] = min(intr_cnt[i - 1],
4662 					THRESHOLD_0_GET(THRESHOLD_0_MASK));
4663 	t4_sge_init(adapter);
4664 
4665 #ifdef CONFIG_PCI_IOV
4666 	/*
4667 	 * Provision resource limits for Virtual Functions.  We currently
4668 	 * grant them all the same static resource limits except for the Port
4669 	 * Access Rights Mask which we're assigning based on the PF.  All of
4670 	 * the static provisioning stuff for both the PF and VF really needs
4671 	 * to be managed in a persistent manner for each device which the
4672 	 * firmware controls.
4673 	 */
4674 	{
4675 		int pf, vf;
4676 
4677 		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
4678 			if (num_vf[pf] <= 0)
4679 				continue;
4680 
4681 			/* VF numbering starts at 1! */
4682 			for (vf = 1; vf <= num_vf[pf]; vf++) {
4683 				ret = t4_cfg_pfvf(adapter, adapter->mbox,
4684 						  pf, vf,
4685 						  VFRES_NEQ, VFRES_NETHCTRL,
4686 						  VFRES_NIQFLINT, VFRES_NIQ,
4687 						  VFRES_TC, VFRES_NVI,
4688 						  FW_PFVF_CMD_CMASK_MASK,
4689 						  pfvfres_pmask(
4690 						  adapter, pf, vf),
4691 						  VFRES_NEXACTF,
4692 						  VFRES_R_CAPS, VFRES_WX_CAPS);
4693 				if (ret < 0)
4694 					dev_warn(adapter->pdev_dev,
4695 						 "failed to "\
4696 						 "provision pf/vf=%d/%d; "
4697 						 "err=%d\n", pf, vf, ret);
4698 			}
4699 		}
4700 	}
4701 #endif
4702 
4703 	/*
4704 	 * Set up the default filter mode.  Later we'll want to implement this
4705 	 * via a firmware command, etc. ...  This needs to be done before the
4706 	 * firmware initialization command ...  If the selected set of fields
4707 	 * isn't equal to the default value, we'll need to make sure that the
4708 	 * field selections will fit in the 36-bit budget.
4709 	 */
4710 	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
4711 		int j, bits = 0;
4712 
4713 		for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4714 			switch (tp_vlan_pri_map & (1 << j)) {
4715 			case 0:
4716 				/* compressed filter field not enabled */
4717 				break;
4718 			case FCOE_MASK:
4719 				bits +=  1;
4720 				break;
4721 			case PORT_MASK:
4722 				bits +=  3;
4723 				break;
4724 			case VNIC_ID_MASK:
4725 				bits += 17;
4726 				break;
4727 			case VLAN_MASK:
4728 				bits += 17;
4729 				break;
4730 			case TOS_MASK:
4731 				bits +=  8;
4732 				break;
4733 			case PROTOCOL_MASK:
4734 				bits +=  8;
4735 				break;
4736 			case ETHERTYPE_MASK:
4737 				bits += 16;
4738 				break;
4739 			case MACMATCH_MASK:
4740 				bits +=  9;
4741 				break;
4742 			case MPSHITTYPE_MASK:
4743 				bits +=  3;
4744 				break;
4745 			case FRAGMENTATION_MASK:
4746 				bits +=  1;
4747 				break;
4748 			}
4749 
4750 		if (bits > 36) {
4751 			dev_err(adapter->pdev_dev,
4752 				"tp_vlan_pri_map=%#x needs %d bits > 36;"\
4753 				" using %#x\n", tp_vlan_pri_map, bits,
4754 				TP_VLAN_PRI_MAP_DEFAULT);
4755 			tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
4756 		}
4757 	}
4758 	v = tp_vlan_pri_map;
4759 	t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
4760 			  &v, 1, TP_VLAN_PRI_MAP);
4761 
4762 	/*
4763 	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
4764 	 * to support any of the compressed filter fields above.  Newer
4765 	 * versions of the firmware do this automatically but it doesn't hurt
4766 	 * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
4767 	 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
4768 	 * since the firmware automatically turns this on and off when we have
4769 	 * a non-zero number of filters active (since it does have a
4770 	 * performance impact).
4771 	 */
4772 	if (tp_vlan_pri_map)
4773 		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
4774 				 FIVETUPLELOOKUP_MASK,
4775 				 FIVETUPLELOOKUP_MASK);
4776 
4777 	/*
4778 	 * Tweak some settings.
4779 	 */
4780 	t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
4781 		     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
4782 		     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
4783 		     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
4784 
4785 	/*
4786 	 * Get basic stuff going by issuing the Firmware Initialize command.
4787 	 * Note that this _must_ be after all PFVF commands ...
4788 	 */
4789 	ret = t4_fw_initialize(adapter, adapter->mbox);
4790 	if (ret < 0)
4791 		goto bye;
4792 
4793 	/*
4794 	 * Return successfully!
4795 	 */
4796 	dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
4797 		 "driver parameters\n");
4798 	return 0;
4799 
4800 	/*
4801 	 * Something bad happened.  Return the error ...
4802 	 */
4803 bye:
4804 	return ret;
4805 }
4806 
4807 /*
4808  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4809  */
4810 static int adap_init0(struct adapter *adap)
4811 {
4812 	int ret;
4813 	u32 v, port_vec;
4814 	enum dev_state state;
4815 	u32 params[7], val[7];
4816 	struct fw_caps_config_cmd caps_cmd;
4817 	int reset = 1, j;
4818 
4819 	/*
4820 	 * Contact FW, advertising Master capability (and potentially forcing
4821 	 * ourselves as the Master PF if our module parameter force_init is
4822 	 * set).
4823 	 */
4824 	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
4825 			  force_init ? MASTER_MUST : MASTER_MAY,
4826 			  &state);
4827 	if (ret < 0) {
4828 		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4829 			ret);
4830 		return ret;
4831 	}
4832 	if (ret == adap->mbox)
4833 		adap->flags |= MASTER_PF;
4834 	if (force_init && state == DEV_STATE_INIT)
4835 		state = DEV_STATE_UNINIT;
4836 
4837 	/*
4838 	 * If we're the Master PF Driver and the device is uninitialized,
4839 	 * then let's consider upgrading the firmware ...  (We always want
4840 	 * to check the firmware version number in order to A. get it for
4841 	 * later reporting and B. to warn if the currently loaded firmware
4842 	 * is excessively mismatched relative to the driver.)
4843 	 */
4844 	ret = t4_check_fw_version(adap);
4845 	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4846 		if (ret == -EINVAL || ret > 0) {
4847 			if (upgrade_fw(adap) >= 0) {
4848 				/*
4849 				 * Note that the chip was reset as part of the
4850 				 * firmware upgrade so we don't reset it again
4851 				 * below and grab the new firmware version.
4852 				 */
4853 				reset = 0;
4854 				ret = t4_check_fw_version(adap);
4855 			}
4856 		}
4857 		if (ret < 0)
4858 			return ret;
4859 	}
4860 
4861 	/*
4862 	 * Grab VPD parameters.  This should be done after we establish a
4863 	 * connection to the firmware since some of the VPD parameters
4864 	 * (notably the Core Clock frequency) are retrieved via requests to
4865 	 * the firmware.  On the other hand, we need these fairly early on
4866 	 * so we do this right after getting ahold of the firmware.
4867 	 */
4868 	ret = get_vpd_params(adap, &adap->params.vpd);
4869 	if (ret < 0)
4870 		goto bye;
4871 
4872 	/*
4873 	 * Find out what ports are available to us.  Note that we need to do
4874 	 * this before calling adap_init0_no_config() since it needs nports
4875 	 * and portvec ...
4876 	 */
4877 	v =
4878 	    FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4879 	    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
4880 	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
4881 	if (ret < 0)
4882 		goto bye;
4883 
4884 	adap->params.nports = hweight32(port_vec);
4885 	adap->params.portvec = port_vec;
4886 
4887 	/*
4888 	 * If the firmware is initialized already (and we're not forcing a
4889 	 * master initialization), note that we're living with existing
4890 	 * adapter parameters.  Otherwise, it's time to try initializing the
4891 	 * adapter ...
4892 	 */
4893 	if (state == DEV_STATE_INIT) {
4894 		dev_info(adap->pdev_dev, "Coming up as %s: "\
4895 			 "Adapter already initialized\n",
4896 			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4897 		adap->flags |= USING_SOFT_PARAMS;
4898 	} else {
4899 		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4900 			 "Initializing adapter\n");
4901 
4902 		/*
4903 		 * If the firmware doesn't support Configuration
4904 		 * Files, warn the user and exit.
4905 		 */
4906 		if (ret < 0)
4907 			dev_warn(adap->pdev_dev, "Firmware doesn't support "
4908 				 "configuration file.\n");
4909 		if (force_old_init)
4910 			ret = adap_init0_no_config(adap, reset);
4911 		else {
4912 			/*
4913 			 * Find out whether we're dealing with a version of
4914 			 * the firmware which has configuration file support.
4915 			 */
4916 			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4917 				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4918 			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
4919 					      params, val);
4920 
4921 			/*
4922 			 * If the firmware doesn't support Configuration
4923 			 * Files, use the old Driver-based, hard-wired
4924 			 * initialization.  Otherwise, try using the
4925 			 * Configuration File support and fall back to the
4926 			 * Driver-based initialization if there's no
4927 			 * Configuration File found.
4928 			 */
4929 			if (ret < 0)
4930 				ret = adap_init0_no_config(adap, reset);
4931 			else {
4932 				/*
4933 				 * The firmware provides us with a memory
4934 				 * buffer where we can load a Configuration
4935 				 * File from the host if we want to override
4936 				 * the Configuration File in flash.
4937 				 */
4938 
4939 				ret = adap_init0_config(adap, reset);
4940 				if (ret == -ENOENT) {
4941 					dev_info(adap->pdev_dev,
4942 					    "No Configuration File present "
4943 					    "on adapter.  Using hard-wired "
4944 					    "configuration parameters.\n");
4945 					ret = adap_init0_no_config(adap, reset);
4946 				}
4947 			}
4948 		}
4949 		if (ret < 0) {
4950 			dev_err(adap->pdev_dev,
4951 				"could not initialize adapter, error %d\n",
4952 				-ret);
4953 			goto bye;
4954 		}
4955 	}
4956 
4957 	/*
4958 	 * If we're living with non-hard-coded parameters (either from a
4959 	 * Firmware Configuration File or values programmed by a different PF
4960 	 * Driver), give the SGE code a chance to pull in anything that it
4961 	 * needs ...  Note that this must be called after we retrieve our VPD
4962 	 * parameters in order to know how to convert core ticks to seconds.
4963 	 */
4964 	if (adap->flags & USING_SOFT_PARAMS) {
4965 		ret = t4_sge_init(adap);
4966 		if (ret < 0)
4967 			goto bye;
4968 	}
4969 
4970 	if (is_bypass_device(adap->pdev->device))
4971 		adap->params.bypass = 1;
4972 
4973 	/*
4974 	 * Grab some of our basic fundamental operating parameters.
4975 	 */
4976 #define FW_PARAM_DEV(param) \
4977 	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4978 	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4979 
4980 #define FW_PARAM_PFVF(param) \
4981 	FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4982 	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
4983 	FW_PARAMS_PARAM_Y(0) | \
4984 	FW_PARAMS_PARAM_Z(0)
4985 
4986 	params[0] = FW_PARAM_PFVF(EQ_START);
4987 	params[1] = FW_PARAM_PFVF(L2T_START);
4988 	params[2] = FW_PARAM_PFVF(L2T_END);
4989 	params[3] = FW_PARAM_PFVF(FILTER_START);
4990 	params[4] = FW_PARAM_PFVF(FILTER_END);
4991 	params[5] = FW_PARAM_PFVF(IQFLINT_START);
4992 	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
4993 	if (ret < 0)
4994 		goto bye;
4995 	adap->sge.egr_start = val[0];
4996 	adap->l2t_start = val[1];
4997 	adap->l2t_end = val[2];
4998 	adap->tids.ftid_base = val[3];
4999 	adap->tids.nftids = val[4] - val[3] + 1;
5000 	adap->sge.ingr_start = val[5];
5001 
5002 	/* query params related to active filter region */
5003 	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5004 	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5005 	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5006 	/* If the Active filter region is non-empty, enable establishing
5007 	 * offload connections through firmware work requests.
5008 	 */
5009 	if ((val[0] != val[1]) && (ret >= 0)) {
5010 		adap->flags |= FW_OFLD_CONN;
5011 		adap->tids.aftid_base = val[0];
5012 		adap->tids.aftid_end = val[1];
5013 	}
5014 
5015 	/* If we're running on newer firmware, let it know that we're
5016 	 * prepared to deal with encapsulated CPL messages.  Older
5017 	 * firmware won't understand this and we'll just get
5018 	 * unencapsulated messages ...
5019 	 */
5020 	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5021 	val[0] = 1;
5022 	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5023 
5024 	/*
5025 	 * Get device capabilities so we can determine what resources we need
5026 	 * to manage.
5027 	 */
5028 	memset(&caps_cmd, 0, sizeof(caps_cmd));
5029 	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5030 				     FW_CMD_REQUEST | FW_CMD_READ);
5031 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5032 	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5033 			 &caps_cmd);
5034 	if (ret < 0)
5035 		goto bye;
5036 
5037 	if (caps_cmd.ofldcaps) {
5038 		/* query offload-related parameters */
5039 		params[0] = FW_PARAM_DEV(NTID);
5040 		params[1] = FW_PARAM_PFVF(SERVER_START);
5041 		params[2] = FW_PARAM_PFVF(SERVER_END);
5042 		params[3] = FW_PARAM_PFVF(TDDP_START);
5043 		params[4] = FW_PARAM_PFVF(TDDP_END);
5044 		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5045 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5046 				      params, val);
5047 		if (ret < 0)
5048 			goto bye;
5049 		adap->tids.ntids = val[0];
5050 		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5051 		adap->tids.stid_base = val[1];
5052 		adap->tids.nstids = val[2] - val[1] + 1;
5053 		/*
5054 		 * Set up the server filter region.  Divide the available filter
5055 		 * region into two parts.  Regular filters get 1/3rd and server
5056 		 * filters get the remaining 2/3rd.  This is only enabled if the
5057 		 * workaround path is enabled.
5058 		 * 1. For regular filters.
5059 		 * 2. Server filters: these are special filters which are used
5060 		 * to redirect SYN packets to the offload queue.
5061 		 */
5062 		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5063 			adap->tids.sftid_base = adap->tids.ftid_base +
5064 					DIV_ROUND_UP(adap->tids.nftids, 3);
5065 			adap->tids.nsftids = adap->tids.nftids -
5066 					 DIV_ROUND_UP(adap->tids.nftids, 3);
5067 			adap->tids.nftids = adap->tids.sftid_base -
5068 						adap->tids.ftid_base;
5069 		}
5070 		adap->vres.ddp.start = val[3];
5071 		adap->vres.ddp.size = val[4] - val[3] + 1;
5072 		adap->params.ofldq_wr_cred = val[5];
5073 
5074 		adap->params.offload = 1;
5075 	}
5076 	if (caps_cmd.rdmacaps) {
5077 		params[0] = FW_PARAM_PFVF(STAG_START);
5078 		params[1] = FW_PARAM_PFVF(STAG_END);
5079 		params[2] = FW_PARAM_PFVF(RQ_START);
5080 		params[3] = FW_PARAM_PFVF(RQ_END);
5081 		params[4] = FW_PARAM_PFVF(PBL_START);
5082 		params[5] = FW_PARAM_PFVF(PBL_END);
5083 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5084 				      params, val);
5085 		if (ret < 0)
5086 			goto bye;
5087 		adap->vres.stag.start = val[0];
5088 		adap->vres.stag.size = val[1] - val[0] + 1;
5089 		adap->vres.rq.start = val[2];
5090 		adap->vres.rq.size = val[3] - val[2] + 1;
5091 		adap->vres.pbl.start = val[4];
5092 		adap->vres.pbl.size = val[5] - val[4] + 1;
5093 
5094 		params[0] = FW_PARAM_PFVF(SQRQ_START);
5095 		params[1] = FW_PARAM_PFVF(SQRQ_END);
5096 		params[2] = FW_PARAM_PFVF(CQ_START);
5097 		params[3] = FW_PARAM_PFVF(CQ_END);
5098 		params[4] = FW_PARAM_PFVF(OCQ_START);
5099 		params[5] = FW_PARAM_PFVF(OCQ_END);
5100 		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
5101 		if (ret < 0)
5102 			goto bye;
5103 		adap->vres.qp.start = val[0];
5104 		adap->vres.qp.size = val[1] - val[0] + 1;
5105 		adap->vres.cq.start = val[2];
5106 		adap->vres.cq.size = val[3] - val[2] + 1;
5107 		adap->vres.ocq.start = val[4];
5108 		adap->vres.ocq.size = val[5] - val[4] + 1;
5109 	}
5110 	if (caps_cmd.iscsicaps) {
5111 		params[0] = FW_PARAM_PFVF(ISCSI_START);
5112 		params[1] = FW_PARAM_PFVF(ISCSI_END);
5113 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5114 				      params, val);
5115 		if (ret < 0)
5116 			goto bye;
5117 		adap->vres.iscsi.start = val[0];
5118 		adap->vres.iscsi.size = val[1] - val[0] + 1;
5119 	}
5120 #undef FW_PARAM_PFVF
5121 #undef FW_PARAM_DEV
5122 
5123 	/*
5124 	 * These are finalized by FW initialization; load their values now.
5125 	 */
5126 	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5127 	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
5128 	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
5129 	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5130 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5131 		     adap->params.b_wnd);
5132 
5133 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5134 	for (j = 0; j < NCHAN; j++)
5135 		adap->params.tp.tx_modq[j] = j;
5136 
5137 	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5138 			 &adap->filter_mode, 1,
5139 			 TP_VLAN_PRI_MAP);
5140 
5141 	adap->flags |= FW_OK;
5142 	return 0;
5143 
5144 	/*
5145 	 * Something bad happened.  If a command timed out or failed with EIO,
5146 	 * the firmware is not operating within its spec or something
5147 	 * catastrophic happened to HW/FW; stop issuing commands.
5148 	 */
5149 bye:
5150 	if (ret != -ETIMEDOUT && ret != -EIO)
5151 		t4_fw_bye(adap, adap->mbox);
5152 	return ret;
5153 }
5154 
5155 /* EEH callbacks */
5156 
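/*
 * A PCI channel error was detected: tell the ULDs recovery is starting,
 * detach all ports, shut the adapter down and report whether a slot reset
 * should be attempted.
 */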
5157 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5158 					 pci_channel_state_t state)
5159 {
5160 	int i;
5161 	struct adapter *adap = pci_get_drvdata(pdev);
5162 
5163 	if (!adap)
5164 		goto out;
5165 
5166 	rtnl_lock();
5167 	adap->flags &= ~FW_OK;
5168 	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5169 	for_each_port(adap, i) {
5170 		struct net_device *dev = adap->port[i];
5171 
5172 		netif_device_detach(dev);
5173 		netif_carrier_off(dev);
5174 	}
5175 	if (adap->flags & FULL_INIT_DONE)
5176 		cxgb_down(adap);
5177 	rtnl_unlock();
5178 	pci_disable_device(pdev);
5179 out:	return state == pci_channel_io_perm_failure ?
5180 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5181 }
5182 
5183 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5184 {
5185 	int i, ret;
5186 	struct fw_caps_config_cmd c;
5187 	struct adapter *adap = pci_get_drvdata(pdev);
5188 
5189 	if (!adap) {
5190 		pci_restore_state(pdev);
5191 		pci_save_state(pdev);
5192 		return PCI_ERS_RESULT_RECOVERED;
5193 	}
5194 
5195 	if (pci_enable_device(pdev)) {
5196 		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
5197 		return PCI_ERS_RESULT_DISCONNECT;
5198 	}
5199 
5200 	pci_set_master(pdev);
5201 	pci_restore_state(pdev);
5202 	pci_save_state(pdev);
5203 	pci_cleanup_aer_uncorrect_error_status(pdev);
5204 
5205 	if (t4_wait_dev_ready(adap) < 0)
5206 		return PCI_ERS_RESULT_DISCONNECT;
5207 	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
5208 		return PCI_ERS_RESULT_DISCONNECT;
5209 	adap->flags |= FW_OK;
5210 	if (adap_init1(adap, &c))
5211 		return PCI_ERS_RESULT_DISCONNECT;
5212 
5213 	for_each_port(adap, i) {
5214 		struct port_info *p = adap2pinfo(adap, i);
5215 
5216 		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
5217 				  NULL, NULL);
5218 		if (ret < 0)
5219 			return PCI_ERS_RESULT_DISCONNECT;
5220 		p->viid = ret;
5221 		p->xact_addr_filt = -1;
5222 	}
5223 
5224 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5225 		     adap->params.b_wnd);
5226 	setup_memwin(adap);
5227 	if (cxgb_up(adap))
5228 		return PCI_ERS_RESULT_DISCONNECT;
5229 	return PCI_ERS_RESULT_RECOVERED;
5230 }
5231 
5232 static void eeh_resume(struct pci_dev *pdev)
5233 {
5234 	int i;
5235 	struct adapter *adap = pci_get_drvdata(pdev);
5236 
5237 	if (!adap)
5238 		return;
5239 
5240 	rtnl_lock();
5241 	for_each_port(adap, i) {
5242 		struct net_device *dev = adap->port[i];
5243 
5244 		if (netif_running(dev)) {
5245 			link_start(dev);
5246 			cxgb_set_rxmode(dev);
5247 		}
5248 		netif_device_attach(dev);
5249 	}
5250 	rtnl_unlock();
5251 }
5252 
5253 static const struct pci_error_handlers cxgb4_eeh = {
5254 	.error_detected = eeh_err_detected,
5255 	.slot_reset     = eeh_slot_reset,
5256 	.resume         = eeh_resume,
5257 };
5258 
5259 static inline bool is_10g_port(const struct link_config *lc)
5260 {
5261 	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
5262 }
5263 
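/*
 * Fill in the basic parameters of a response queue: interrupt hold-off timer
 * index, an optional packet-count threshold (enabled only when pkt_cnt_idx is
 * a valid SGE counter index), entry size and number of entries.
 */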
5264 static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
5265 			     unsigned int size, unsigned int iqe_size)
5266 {
5267 	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
5268 			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
5269 	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
5270 	q->iqe_len = iqe_size;
5271 	q->size = size;
5272 }
5273 
5274 /*
5275  * Perform default configuration of DMA queues depending on the number and type
5276  * of ports we found and the number of available CPUs.  Most settings can be
5277  * modified by the admin prior to actual use.
5278  */
5279 static void cfg_queues(struct adapter *adap)
5280 {
5281 	struct sge *s = &adap->sge;
5282 	int i, q10g = 0, n10g = 0, qidx = 0;
5283 
5284 	for_each_port(adap, i)
5285 		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
5286 
5287 	/*
5288 	 * We default to 1 queue set per non-10G port and up to one queue set
5289 	 * per CPU core for each 10G port.
5290 	 */
5291 	if (n10g)
5292 		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5293 	if (q10g > netif_get_num_default_rss_queues())
5294 		q10g = netif_get_num_default_rss_queues();
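	/*
	 * Example (assuming MAX_ETH_QSETS is 32): with two 10G ports and no
	 * slower ports, q10g starts at 16 and is then capped by
	 * netif_get_num_default_rss_queues(), typically 8, so each 10G port
	 * gets 8 queue sets below.
	 */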
5295 
5296 	for_each_port(adap, i) {
5297 		struct port_info *pi = adap2pinfo(adap, i);
5298 
5299 		pi->first_qset = qidx;
5300 		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
5301 		qidx += pi->nqsets;
5302 	}
5303 
5304 	s->ethqsets = qidx;
5305 	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
5306 
5307 	if (is_offload(adap)) {
5308 		/*
5309 		 * For offload we use 1 queue per channel if all ports run at
5310 		 * up to 1G; otherwise we divide the available queues amongst
5311 		 * the channels, capped by the number of online CPUs.
5312 		 */
5313 		if (n10g) {
5314 			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
5315 				  num_online_cpus());
5316 			s->ofldqsets = roundup(i, adap->params.nports);
5317 		} else
5318 			s->ofldqsets = adap->params.nports;
5319 		/* For RDMA one Rx queue per channel suffices */
5320 		s->rdmaqs = adap->params.nports;
5321 	}
5322 
5323 	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5324 		struct sge_eth_rxq *r = &s->ethrxq[i];
5325 
5326 		init_rspq(&r->rspq, 0, 0, 1024, 64);
5327 		r->fl.size = 72;
5328 	}
5329 
5330 	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5331 		s->ethtxq[i].q.size = 1024;
5332 
5333 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5334 		s->ctrlq[i].q.size = 512;
5335 
5336 	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
5337 		s->ofldtxq[i].q.size = 1024;
5338 
5339 	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5340 		struct sge_ofld_rxq *r = &s->ofldrxq[i];
5341 
5342 		init_rspq(&r->rspq, 0, 0, 1024, 64);
5343 		r->rspq.uld = CXGB4_ULD_ISCSI;
5344 		r->fl.size = 72;
5345 	}
5346 
5347 	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5348 		struct sge_ofld_rxq *r = &s->rdmarxq[i];
5349 
5350 		init_rspq(&r->rspq, 0, 0, 511, 64);
5351 		r->rspq.uld = CXGB4_ULD_RDMA;
5352 		r->fl.size = 72;
5353 	}
5354 
5355 	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
5356 	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
5357 }
5358 
5359 /*
5360  * Reduce the number of Ethernet queue sets across all ports to at most n.
5361  * The caller guarantees n allows at least one queue set per port.
5362  */
5363 static void reduce_ethqs(struct adapter *adap, int n)
5364 {
5365 	int i;
5366 	struct port_info *pi;
5367 
5368 	while (n < adap->sge.ethqsets)
5369 		for_each_port(adap, i) {
5370 			pi = adap2pinfo(adap, i);
5371 			if (pi->nqsets > 1) {
5372 				pi->nqsets--;
5373 				adap->sge.ethqsets--;
5374 				if (adap->sge.ethqsets <= n)
5375 					break;
5376 			}
5377 		}
5378 
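	/*
	 * Re-number first_qset so each port's (possibly reduced) queue sets
	 * remain contiguous.
	 */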
5379 	n = 0;
5380 	for_each_port(adap, i) {
5381 		pi = adap2pinfo(adap, i);
5382 		pi->first_qset = n;
5383 		n += pi->nqsets;
5384 	}
5385 }
5386 
5387 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5388 #define EXTRA_VECS 2
5389 
5390 static int enable_msix(struct adapter *adap)
5391 {
5392 	int ofld_need = 0;
5393 	int i, err, want, need;
5394 	struct sge *s = &adap->sge;
5395 	unsigned int nchan = adap->params.nports;
5396 	struct msix_entry entries[MAX_INGQ + 1];
5397 
5398 	for (i = 0; i < ARRAY_SIZE(entries); ++i)
5399 		entries[i].entry = i;
5400 
5401 	want = s->max_ethqsets + EXTRA_VECS;
5402 	if (is_offload(adap)) {
5403 		want += s->rdmaqs + s->ofldqsets;
5404 		/* need nchan for each possible ULD */
5405 		ofld_need = 2 * nchan;
5406 	}
5407 	need = adap->params.nports + EXTRA_VECS + ofld_need;
5408 
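	/*
	 * pci_enable_msix() returns 0 on success, a negative errno on failure,
	 * or, when fewer than 'want' vectors are available, the number that
	 * are available; keep retrying with that smaller count until we either
	 * succeed or fall below our minimum 'need'.
	 */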
5409 	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
5410 		want = err;
5411 
5412 	if (!err) {
5413 		/*
5414 		 * Distribute available vectors to the various queue groups.
5415 		 * Every group gets its minimum requirement and the NIC queues
5416 		 * get top priority for any leftovers.
5417 		 */
5418 		i = want - EXTRA_VECS - ofld_need;
5419 		if (i < s->max_ethqsets) {
5420 			s->max_ethqsets = i;
5421 			if (i < s->ethqsets)
5422 				reduce_ethqs(adap, i);
5423 		}
5424 		if (is_offload(adap)) {
5425 			i = want - EXTRA_VECS - s->max_ethqsets;
5426 			i -= ofld_need - nchan;
5427 			s->ofldqsets = (i / nchan) * nchan;  /* round down */
5428 		}
5429 		for (i = 0; i < want; ++i)
5430 			adap->msix_info[i].vec = entries[i].vector;
5431 	} else if (err > 0)
5432 		dev_info(adap->pdev_dev,
5433 			 "only %d MSI-X vectors left, not using MSI-X\n", err);
5434 	return err;
5435 }
5436 
5437 #undef EXTRA_VECS
5438 
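/*
 * Allocate each port's RSS indirection table and fill it with the ethtool
 * default mapping, spreading entries across that port's Rx queue sets.
 */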
5439 static int init_rss(struct adapter *adap)
5440 {
5441 	unsigned int i, j;
5442 
5443 	for_each_port(adap, i) {
5444 		struct port_info *pi = adap2pinfo(adap, i);
5445 
5446 		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5447 		if (!pi->rss)
5448 			return -ENOMEM;
5449 		for (j = 0; j < pi->rss_size; j++)
5450 			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
5451 	}
5452 	return 0;
5453 }
5454 
5455 static void print_port_info(const struct net_device *dev)
5456 {
5457 	static const char *base[] = {
5458 		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
5459 		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
5460 	};
5461 
5462 	char buf[80];
5463 	char *bufp = buf;
5464 	const char *spd = "";
5465 	const struct port_info *pi = netdev_priv(dev);
5466 	const struct adapter *adap = pi->adapter;
5467 
5468 	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
5469 		spd = " 2.5 GT/s";
5470 	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5471 		spd = " 5 GT/s";
5472 
5473 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5474 		bufp += sprintf(bufp, "100/");
5475 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
5476 		bufp += sprintf(bufp, "1000/");
5477 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
5478 		bufp += sprintf(bufp, "10G/");
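	/*
	 * If any speed was printed, back up over the trailing '/' so the
	 * "BASE-" suffix overwrites it.
	 */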
5479 	if (bufp != buf)
5480 		--bufp;
5481 	sprintf(bufp, "BASE-%s", base[pi->port_type]);
5482 
5483 	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
5484 		    adap->params.vpd.id,
5485 		    CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
5486 		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
5487 		    (adap->flags & USING_MSIX) ? " MSI-X" :
5488 		    (adap->flags & USING_MSI) ? " MSI" : "");
5489 	netdev_info(dev, "S/N: %s, E/C: %s\n",
5490 		    adap->params.vpd.sn, adap->params.vpd.ec);
5491 }
5492 
5493 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
5494 {
5495 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
5496 }
5497 
5498 /*
5499  * Free the following resources:
5500  * - memory used for tables
5501  * - MSI/MSI-X
5502  * - net devices
5503  * - resources FW is holding for us
5504  */
5505 static void free_some_resources(struct adapter *adapter)
5506 {
5507 	unsigned int i;
5508 
5509 	t4_free_mem(adapter->l2t);
5510 	t4_free_mem(adapter->tids.tid_tab);
5511 	disable_msi(adapter);
5512 
5513 	for_each_port(adapter, i)
5514 		if (adapter->port[i]) {
5515 			kfree(adap2pinfo(adapter, i)->rss);
5516 			free_netdev(adapter->port[i]);
5517 		}
5518 	if (adapter->flags & FW_OK)
5519 		t4_fw_bye(adapter, adapter->fn);
5520 }
5521 
5522 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
5523 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
5524 		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
5525 #define SEGMENT_SIZE 128
5526 
5527 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5528 {
5529 	int func, i, err, s_qpp, qpp, num_seg;
5530 	struct port_info *pi;
5531 	bool highdma = false;
5532 	struct adapter *adapter = NULL;
5533 
5534 	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
5535 
5536 	err = pci_request_regions(pdev, KBUILD_MODNAME);
5537 	if (err) {
5538 		/* Just info, some other driver may have claimed the device. */
5539 		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
5540 		return err;
5541 	}
5542 
5543 	/* We control everything through one PF */
5544 	func = PCI_FUNC(pdev->devfn);
5545 	if (func != ent->driver_data) {
5546 		pci_save_state(pdev);        /* to restore SR-IOV later */
5547 		goto sriov;
5548 	}
5549 
5550 	err = pci_enable_device(pdev);
5551 	if (err) {
5552 		dev_err(&pdev->dev, "cannot enable PCI device\n");
5553 		goto out_release_regions;
5554 	}
5555 
5556 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
5557 		highdma = true;
5558 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5559 		if (err) {
5560 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
5561 				"coherent allocations\n");
5562 			goto out_disable_device;
5563 		}
5564 	} else {
5565 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5566 		if (err) {
5567 			dev_err(&pdev->dev, "no usable DMA configuration\n");
5568 			goto out_disable_device;
5569 		}
5570 	}
5571 
5572 	pci_enable_pcie_error_reporting(pdev);
5573 	enable_pcie_relaxed_ordering(pdev);
5574 	pci_set_master(pdev);
5575 	pci_save_state(pdev);
5576 
5577 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
5578 	if (!adapter) {
5579 		err = -ENOMEM;
5580 		goto out_disable_device;
5581 	}
5582 
5583 	adapter->regs = pci_ioremap_bar(pdev, 0);
5584 	if (!adapter->regs) {
5585 		dev_err(&pdev->dev, "cannot map device registers\n");
5586 		err = -ENOMEM;
5587 		goto out_free_adapter;
5588 	}
5589 
5590 	adapter->pdev = pdev;
5591 	adapter->pdev_dev = &pdev->dev;
5592 	adapter->mbox = func;
5593 	adapter->fn = func;
5594 	adapter->msg_enable = dflt_msg_enable;
5595 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
5596 
5597 	spin_lock_init(&adapter->stats_lock);
5598 	spin_lock_init(&adapter->tid_release_lock);
5599 
5600 	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
5601 	INIT_WORK(&adapter->db_full_task, process_db_full);
5602 	INIT_WORK(&adapter->db_drop_task, process_db_drop);
5603 
5604 	err = t4_prep_adapter(adapter);
5605 	if (err)
5606 		goto out_unmap_bar0;
5607 
5608 	if (!is_t4(adapter->chip)) {
5609 		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
5610 		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
5611 		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
5612 		num_seg = PAGE_SIZE / SEGMENT_SIZE;
5613 
5614 		/* Each write-coalescing segment is 128 bytes.  Write coalescing
5615 		 * is enabled only when the SGE_EGRESS_QUEUES_PER_PAGE_PF value
5616 		 * for this function does not exceed the number of segments that
5617 		 * fit in a page.
5618 		 */
5619 		if (qpp > num_seg) {
5620 			dev_err(&pdev->dev,
5621 				"Incorrect number of egress queues per page\n");
5622 			err = -EINVAL;
5623 			goto out_unmap_bar0;
5624 		}
5625 		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
5626 		pci_resource_len(pdev, 2));
5627 		if (!adapter->bar2) {
5628 			dev_err(&pdev->dev, "cannot map device bar2 region\n");
5629 			err = -ENOMEM;
5630 			goto out_unmap_bar0;
5631 		}
5632 	}
5633 
5634 	setup_memwin(adapter);
5635 	err = adap_init0(adapter);
5636 	setup_memwin_rdma(adapter);
5637 	if (err)
5638 		goto out_unmap_bar;
5639 
5640 	for_each_port(adapter, i) {
5641 		struct net_device *netdev;
5642 
5643 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
5644 					   MAX_ETH_QSETS);
5645 		if (!netdev) {
5646 			err = -ENOMEM;
5647 			goto out_free_dev;
5648 		}
5649 
5650 		SET_NETDEV_DEV(netdev, &pdev->dev);
5651 
5652 		adapter->port[i] = netdev;
5653 		pi = netdev_priv(netdev);
5654 		pi->adapter = adapter;
5655 		pi->xact_addr_filt = -1;
5656 		pi->port_id = i;
5657 		netdev->irq = pdev->irq;
5658 
5659 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
5660 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5661 			NETIF_F_RXCSUM | NETIF_F_RXHASH |
5662 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
5663 		if (highdma)
5664 			netdev->hw_features |= NETIF_F_HIGHDMA;
5665 		netdev->features |= netdev->hw_features;
5666 		netdev->vlan_features = netdev->features & VLAN_FEAT;
5667 
5668 		netdev->priv_flags |= IFF_UNICAST_FLT;
5669 
5670 		netdev->netdev_ops = &cxgb4_netdev_ops;
5671 		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
5672 	}
5673 
5674 	pci_set_drvdata(pdev, adapter);
5675 
5676 	if (adapter->flags & FW_OK) {
5677 		err = t4_port_init(adapter, func, func, 0);
5678 		if (err)
5679 			goto out_free_dev;
5680 	}
5681 
5682 	/*
5683 	 * Configure queues and allocate tables now; they can be needed as
5684 	 * soon as the first register_netdev completes.
5685 	 */
5686 	cfg_queues(adapter);
5687 
5688 	adapter->l2t = t4_init_l2t();
5689 	if (!adapter->l2t) {
5690 		/* We tolerate a lack of L2T, giving up some functionality */
5691 		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
5692 		adapter->params.offload = 0;
5693 	}
5694 
5695 	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
5696 		dev_warn(&pdev->dev, "could not allocate TID table, "
5697 			 "continuing\n");
5698 		adapter->params.offload = 0;
5699 	}
5700 
5701 	/* See what interrupts we'll be using */
5702 	if (msi > 1 && enable_msix(adapter) == 0)
5703 		adapter->flags |= USING_MSIX;
5704 	else if (msi > 0 && pci_enable_msi(pdev) == 0)
5705 		adapter->flags |= USING_MSI;
5706 
5707 	err = init_rss(adapter);
5708 	if (err)
5709 		goto out_free_dev;
5710 
5711 	/*
5712 	 * The card is now ready to go.  If any errors occur during device
5713 	 * registration, we do not fail the whole card but rather proceed only
5714 	 * with the ports we manage to register successfully.  However, we
5715 	 * must register at least one net device.
5716 	 */
5717 	for_each_port(adapter, i) {
5718 		pi = adap2pinfo(adapter, i);
5719 		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
5720 		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
5721 
5722 		err = register_netdev(adapter->port[i]);
5723 		if (err)
5724 			break;
5725 		adapter->chan_map[pi->tx_chan] = i;
5726 		print_port_info(adapter->port[i]);
5727 	}
5728 	if (i == 0) {
5729 		dev_err(&pdev->dev, "could not register any net devices\n");
5730 		goto out_free_dev;
5731 	}
5732 	if (err) {
5733 		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
5734 		err = 0;
5735 	}
5736 
5737 	if (cxgb4_debugfs_root) {
5738 		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
5739 							   cxgb4_debugfs_root);
5740 		setup_debugfs(adapter);
5741 	}
5742 
5743 	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
5744 	pdev->needs_freset = 1;
5745 
5746 	if (is_offload(adapter))
5747 		attach_ulds(adapter);
5748 
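	/*
	 * PCI functions other than the one we drive (func != ent->driver_data
	 * above) jump straight here: the only setup they get is SR-IOV, and
	 * only if the num_vf module parameter requests virtual functions.
	 */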
5749 sriov:
5750 #ifdef CONFIG_PCI_IOV
5751 	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
5752 		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
5753 			dev_info(&pdev->dev,
5754 				 "instantiated %u virtual functions\n",
5755 				 num_vf[func]);
5756 #endif
5757 	return 0;
5758 
5759  out_free_dev:
5760 	free_some_resources(adapter);
5761  out_unmap_bar:
5762 	if (!is_t4(adapter->chip))
5763 		iounmap(adapter->bar2);
5764  out_unmap_bar0:
5765 	iounmap(adapter->regs);
5766  out_free_adapter:
5767 	kfree(adapter);
5768  out_disable_device:
5769 	pci_disable_pcie_error_reporting(pdev);
5770 	pci_disable_device(pdev);
5771  out_release_regions:
5772 	pci_release_regions(pdev);
5773 	pci_set_drvdata(pdev, NULL);
5774 	return err;
5775 }
5776 
5777 static void remove_one(struct pci_dev *pdev)
5778 {
5779 	struct adapter *adapter = pci_get_drvdata(pdev);
5780 
5781 #ifdef CONFIG_PCI_IOV
5782 	pci_disable_sriov(pdev);
5783 
5784 #endif
5785 
5786 	if (adapter) {
5787 		int i;
5788 
5789 		if (is_offload(adapter))
5790 			detach_ulds(adapter);
5791 
5792 		for_each_port(adapter, i)
5793 			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5794 				unregister_netdev(adapter->port[i]);
5795 
5796 		if (adapter->debugfs_root)
5797 			debugfs_remove_recursive(adapter->debugfs_root);
5798 
5799 		/* If we allocated filters, free up state associated with any
5800 		 * valid filters ...
5801 		 */
5802 		if (adapter->tids.ftid_tab) {
5803 			struct filter_entry *f = &adapter->tids.ftid_tab[0];
5804 			for (i = 0; i < (adapter->tids.nftids +
5805 					adapter->tids.nsftids); i++, f++)
5806 				if (f->valid)
5807 					clear_filter(adapter, f);
5808 		}
5809 
5810 		if (adapter->flags & FULL_INIT_DONE)
5811 			cxgb_down(adapter);
5812 
5813 		free_some_resources(adapter);
5814 		iounmap(adapter->regs);
5815 		if (!is_t4(adapter->chip))
5816 			iounmap(adapter->bar2);
5817 		kfree(adapter);
5818 		pci_disable_pcie_error_reporting(pdev);
5819 		pci_disable_device(pdev);
5820 		pci_release_regions(pdev);
5821 		pci_set_drvdata(pdev, NULL);
5822 	} else
5823 		pci_release_regions(pdev);
5824 }
5825 
5826 static struct pci_driver cxgb4_driver = {
5827 	.name     = KBUILD_MODNAME,
5828 	.id_table = cxgb4_pci_tbl,
5829 	.probe    = init_one,
5830 	.remove   = remove_one,
5831 	.err_handler = &cxgb4_eeh,
5832 };
5833 
5834 static int __init cxgb4_init_module(void)
5835 {
5836 	int ret;
5837 
5838 	workq = create_singlethread_workqueue("cxgb4");
5839 	if (!workq)
5840 		return -ENOMEM;
5841 
5842 	/* Debugfs support is optional; just warn if this fails */
5843 	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
5844 	if (!cxgb4_debugfs_root)
5845 		pr_warn("could not create debugfs entry, continuing\n");
5846 
5847 	ret = pci_register_driver(&cxgb4_driver);
5848 	if (ret < 0)
5849 		debugfs_remove(cxgb4_debugfs_root);
5850 	return ret;
5851 }
5852 
5853 static void __exit cxgb4_cleanup_module(void)
5854 {
5855 	pci_unregister_driver(&cxgb4_driver);
5856 	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
5857 	flush_workqueue(workq);
5858 	destroy_workqueue(workq);
5859 }
5860 
5861 module_init(cxgb4_init_module);
5862 module_exit(cxgb4_cleanup_module);
5863