1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36 
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <net/addrconf.h>
64 #include <asm/uaccess.h>
65 
66 #include "cxgb4.h"
67 #include "t4_regs.h"
68 #include "t4_msg.h"
69 #include "t4fw_api.h"
70 #include "cxgb4_dcb.h"
71 #include "l2t.h"
72 
73 #include <../drivers/net/bonding/bonding.h>
74 
75 #ifdef DRV_VERSION
76 #undef DRV_VERSION
77 #endif
78 #define DRV_VERSION "2.0.0-ko"
79 #define DRV_DESC "Chelsio T4/T5 Network Driver"
80 
81 /*
82  * Max interrupt hold-off timer value in us.  Queues fall back to this value
83  * under extreme memory pressure so it's largish to give the system time to
84  * recover.
85  */
86 #define MAX_SGE_TIMERVAL 200U
87 
88 enum {
89 	/*
90 	 * Physical Function provisioning constants.
91 	 */
92 	PFRES_NVI = 4,			/* # of Virtual Interfaces */
93 	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
94 	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr
95 					 */
96 	PFRES_NEQ = 256,		/* # of egress queues */
97 	PFRES_NIQ = 0,			/* # of ingress queues */
98 	PFRES_TC = 0,			/* PCI-E traffic class */
99 	PFRES_NEXACTF = 128,		/* # of exact MPS filters */
100 
101 	PFRES_R_CAPS = FW_CMD_CAP_PF,
102 	PFRES_WX_CAPS = FW_CMD_CAP_PF,
103 
104 #ifdef CONFIG_PCI_IOV
105 	/*
106 	 * Virtual Function provisioning constants.  We need two extra Ingress
107 	 * Queues with Interrupt capability to serve as the VF's Firmware
108 	 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
110  * neither will have Free Lists associated with them.  For each
110 	 * Ethernet/Control Egress Queue and for each Free List, we need an
111 	 * Egress Context.
112 	 */
113 	VFRES_NPORTS = 1,		/* # of "ports" per VF */
114 	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */
115 
116 	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
117 	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
118 	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
119 	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
120 	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
121 	VFRES_TC = 0,			/* PCI-E traffic class */
122 	VFRES_NEXACTF = 16,		/* # of exact MPS filters */
123 
124 	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
125 	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
126 #endif
127 };
128 
129 /*
130  * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
131  * static and likely not to be useful in the long run.  We really need to
132  * implement some form of persistent configuration which the firmware
133  * controls.
134  */
135 static unsigned int pfvfres_pmask(struct adapter *adapter,
136 				  unsigned int pf, unsigned int vf)
137 {
138 	unsigned int portn, portvec;
139 
140 	/*
141 	 * Give PF's access to all of the ports.
142 	 */
143 	if (vf == 0)
144 		return FW_PFVF_CMD_PMASK_MASK;
145 
146 	/*
147 	 * For VFs, we'll assign them access to the ports based purely on the
148 	 * PF.  We assign active ports in order, wrapping around if there are
149 	 * fewer active ports than PFs: e.g. active port[pf % nports].
150 	 * Unfortunately the adapter's port_info structs haven't been
151 	 * initialized yet so we have to compute this.
152 	 */
153 	if (adapter->params.nports == 0)
154 		return 0;
155 
156 	portn = pf % adapter->params.nports;
157 	portvec = adapter->params.portvec;
158 	for (;;) {
159 		/*
160 		 * Isolate the lowest set bit in the port vector.  If we're at
161 		 * the port number that we want, return that as the pmask;
162 		 * otherwise mask that bit out of the port vector and
163 		 * decrement our port number ...
164 		 */
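		/*
		 * For example: if portvec = 0b1010, then portvec - 1 = 0b1001,
		 * portvec & (portvec - 1) = 0b1000, and the XOR leaves 0b0010,
		 * i.e. the lowest set bit (equivalent to portvec & -portvec).
		 */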
165 		unsigned int pmask = portvec ^ (portvec & (portvec-1));
166 		if (portn == 0)
167 			return pmask;
168 		portn--;
169 		portvec &= ~pmask;
170 	}
171 	/*NOTREACHED*/
172 }
173 
174 enum {
175 	MAX_TXQ_ENTRIES      = 16384,
176 	MAX_CTRL_TXQ_ENTRIES = 1024,
177 	MAX_RSPQ_ENTRIES     = 16384,
178 	MAX_RX_BUFFERS       = 16384,
179 	MIN_TXQ_ENTRIES      = 32,
180 	MIN_CTRL_TXQ_ENTRIES = 32,
181 	MIN_RSPQ_ENTRIES     = 128,
182 	MIN_FL_ENTRIES       = 16
183 };
184 
185 /* Host shadow copy of ingress filter entry.  This is in host native format
186  * and doesn't match the ordering or bit order, etc. of the hardware or the
187  * firmware command.  The use of bit-field structure elements is purely to
188  * remind ourselves of the field size limitations and save memory in the case
189  * where the filter table is large.
190  */
191 struct filter_entry {
192 	/* Administrative fields for filter.
193 	 */
194 	u32 valid:1;            /* filter allocated and valid */
195 	u32 locked:1;           /* filter is administratively locked */
196 
197 	u32 pending:1;          /* filter action is pending firmware reply */
198 	u32 smtidx:8;           /* Source MAC Table index for smac */
199 	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */
200 
201 	/* The filter itself.  Most of this is a straight copy of information
202 	 * provided by the extended ioctl().  Some fields are translated to
203 	 * internal forms -- for instance the Ingress Queue ID passed in from
204 	 * the ioctl() is translated into the Absolute Ingress Queue ID.
205 	 */
206 	struct ch_filter_specification fs;
207 };
208 
209 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
210 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
211 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
212 
213 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
214 
215 static const struct pci_device_id cxgb4_pci_tbl[] = {
216 	CH_DEVICE(0xa000, 0),  /* PE10K */
217 	CH_DEVICE(0x4001, -1),
218 	CH_DEVICE(0x4002, -1),
219 	CH_DEVICE(0x4003, -1),
220 	CH_DEVICE(0x4004, -1),
221 	CH_DEVICE(0x4005, -1),
222 	CH_DEVICE(0x4006, -1),
223 	CH_DEVICE(0x4007, -1),
224 	CH_DEVICE(0x4008, -1),
225 	CH_DEVICE(0x4009, -1),
226 	CH_DEVICE(0x400a, -1),
227 	CH_DEVICE(0x400d, -1),
228 	CH_DEVICE(0x400e, -1),
229 	CH_DEVICE(0x4080, -1),
230 	CH_DEVICE(0x4081, -1),
231 	CH_DEVICE(0x4082, -1),
232 	CH_DEVICE(0x4083, -1),
233 	CH_DEVICE(0x4084, -1),
234 	CH_DEVICE(0x4085, -1),
235 	CH_DEVICE(0x4086, -1),
236 	CH_DEVICE(0x4087, -1),
237 	CH_DEVICE(0x4088, -1),
238 	CH_DEVICE(0x4401, 4),
239 	CH_DEVICE(0x4402, 4),
240 	CH_DEVICE(0x4403, 4),
241 	CH_DEVICE(0x4404, 4),
242 	CH_DEVICE(0x4405, 4),
243 	CH_DEVICE(0x4406, 4),
244 	CH_DEVICE(0x4407, 4),
245 	CH_DEVICE(0x4408, 4),
246 	CH_DEVICE(0x4409, 4),
247 	CH_DEVICE(0x440a, 4),
248 	CH_DEVICE(0x440d, 4),
249 	CH_DEVICE(0x440e, 4),
250 	CH_DEVICE(0x4480, 4),
251 	CH_DEVICE(0x4481, 4),
252 	CH_DEVICE(0x4482, 4),
253 	CH_DEVICE(0x4483, 4),
254 	CH_DEVICE(0x4484, 4),
255 	CH_DEVICE(0x4485, 4),
256 	CH_DEVICE(0x4486, 4),
257 	CH_DEVICE(0x4487, 4),
258 	CH_DEVICE(0x4488, 4),
259 	CH_DEVICE(0x5001, 4),
260 	CH_DEVICE(0x5002, 4),
261 	CH_DEVICE(0x5003, 4),
262 	CH_DEVICE(0x5004, 4),
263 	CH_DEVICE(0x5005, 4),
264 	CH_DEVICE(0x5006, 4),
265 	CH_DEVICE(0x5007, 4),
266 	CH_DEVICE(0x5008, 4),
267 	CH_DEVICE(0x5009, 4),
268 	CH_DEVICE(0x500A, 4),
269 	CH_DEVICE(0x500B, 4),
270 	CH_DEVICE(0x500C, 4),
271 	CH_DEVICE(0x500D, 4),
272 	CH_DEVICE(0x500E, 4),
273 	CH_DEVICE(0x500F, 4),
274 	CH_DEVICE(0x5010, 4),
275 	CH_DEVICE(0x5011, 4),
276 	CH_DEVICE(0x5012, 4),
277 	CH_DEVICE(0x5013, 4),
278 	CH_DEVICE(0x5014, 4),
279 	CH_DEVICE(0x5015, 4),
280 	CH_DEVICE(0x5080, 4),
281 	CH_DEVICE(0x5081, 4),
282 	CH_DEVICE(0x5082, 4),
283 	CH_DEVICE(0x5083, 4),
284 	CH_DEVICE(0x5084, 4),
285 	CH_DEVICE(0x5085, 4),
286 	CH_DEVICE(0x5086, 4),
287 	CH_DEVICE(0x5087, 4),
288 	CH_DEVICE(0x5088, 4),
289 	CH_DEVICE(0x5401, 4),
290 	CH_DEVICE(0x5402, 4),
291 	CH_DEVICE(0x5403, 4),
292 	CH_DEVICE(0x5404, 4),
293 	CH_DEVICE(0x5405, 4),
294 	CH_DEVICE(0x5406, 4),
295 	CH_DEVICE(0x5407, 4),
296 	CH_DEVICE(0x5408, 4),
297 	CH_DEVICE(0x5409, 4),
298 	CH_DEVICE(0x540A, 4),
299 	CH_DEVICE(0x540B, 4),
300 	CH_DEVICE(0x540C, 4),
301 	CH_DEVICE(0x540D, 4),
302 	CH_DEVICE(0x540E, 4),
303 	CH_DEVICE(0x540F, 4),
304 	CH_DEVICE(0x5410, 4),
305 	CH_DEVICE(0x5411, 4),
306 	CH_DEVICE(0x5412, 4),
307 	CH_DEVICE(0x5413, 4),
308 	CH_DEVICE(0x5414, 4),
309 	CH_DEVICE(0x5415, 4),
310 	CH_DEVICE(0x5480, 4),
311 	CH_DEVICE(0x5481, 4),
312 	CH_DEVICE(0x5482, 4),
313 	CH_DEVICE(0x5483, 4),
314 	CH_DEVICE(0x5484, 4),
315 	CH_DEVICE(0x5485, 4),
316 	CH_DEVICE(0x5486, 4),
317 	CH_DEVICE(0x5487, 4),
318 	CH_DEVICE(0x5488, 4),
319 	{ 0, }
320 };
321 
322 #define FW4_FNAME "cxgb4/t4fw.bin"
323 #define FW5_FNAME "cxgb4/t5fw.bin"
324 #define FW4_CFNAME "cxgb4/t4-config.txt"
325 #define FW5_CFNAME "cxgb4/t5-config.txt"
326 
327 MODULE_DESCRIPTION(DRV_DESC);
328 MODULE_AUTHOR("Chelsio Communications");
329 MODULE_LICENSE("Dual BSD/GPL");
330 MODULE_VERSION(DRV_VERSION);
331 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
332 MODULE_FIRMWARE(FW4_FNAME);
333 MODULE_FIRMWARE(FW5_FNAME);
334 
335 /*
336  * Normally we're willing to become the firmware's Master PF but will be happy
337  * if another PF has already become the Master and initialized the adapter.
338  * Setting "force_init" will cause this driver to forcibly establish itself as
339  * the Master PF and initialize the adapter.
340  */
341 static uint force_init;
342 
343 module_param(force_init, uint, 0644);
344 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
345 
346 /*
347  * Normally if the firmware we connect to has Configuration File support, we
348  * use that and only fall back to the old Driver-based initialization if the
349  * Configuration File fails for some reason.  If force_old_init is set, then
350  * we'll always use the old Driver-based initialization sequence.
351  */
352 static uint force_old_init;
353 
354 module_param(force_old_init, uint, 0644);
355 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
356 
357 static int dflt_msg_enable = DFLT_MSG_ENABLE;
358 
359 module_param(dflt_msg_enable, int, 0644);
360 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
361 
362 /*
363  * The driver uses the best interrupt scheme available on a platform in the
364  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
365  * of these schemes the driver may consider as follows:
366  *
367  * msi = 2: choose from among all three options
368  * msi = 1: only consider MSI and INTx interrupts
369  * msi = 0: force INTx interrupts
370  */
371 static int msi = 2;
372 
373 module_param(msi, int, 0644);
374 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
375 
376 /*
377  * Queue interrupt hold-off timer values.  Queues default to the first of these
378  * upon creation.
379  */
380 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
381 
382 module_param_array(intr_holdoff, uint, NULL, 0644);
383 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
384 		 "0..4 in microseconds");
385 
386 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
387 
388 module_param_array(intr_cnt, uint, NULL, 0644);
389 MODULE_PARM_DESC(intr_cnt,
390 		 "thresholds 1..3 for queue interrupt packet counters");
391 
392 /*
393  * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
394  * offset by 2 bytes in order to have the IP headers line up on 4-byte
395  * boundaries.  This is a requirement for many architectures which will throw
396  * a machine check fault if an attempt is made to access one of the 4-byte IP
397  * header fields on a non-4-byte boundary.  And it's a major performance issue
398  * even on some architectures which allow it, such as some implementations of the
399  * x86 ISA.  However, some architectures don't mind this and for some very
400  * edge-case performance sensitive applications (like forwarding large volumes
401  * of small packets), setting this DMA offset to 0 will decrease the number of
402  * PCI-E Bus transfers enough to measurably affect performance.
403  */
404 static int rx_dma_offset = 2;
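/*
 * (Worked example of the alignment above: a standard Ethernet header is
 * 14 bytes, so with a 2-byte initial pad the IP header starts at byte
 * offset 16 of the DMA buffer, which is 4-byte aligned; with an offset of
 * 0 it would start at offset 14 and its fields would straddle 4-byte
 * boundaries.)
 */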
405 
406 static bool vf_acls;
407 
408 #ifdef CONFIG_PCI_IOV
409 module_param(vf_acls, bool, 0644);
410 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
411 
412 /* Configure the number of PCI-E Virtual Function which are to be instantiated
413  * on SR-IOV Capable Physical Functions.
414  */
415 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
416 
417 module_param_array(num_vf, uint, NULL, 0644);
418 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
419 #endif
420 
421 /* TX Queue select used to determine what algorithm to use for selecting TX
422  * queue. Select between the kernel provided function (select_queue=0) or user
423  * cxgb_select_queue function (select_queue=1)
424  *
425  * Default: select_queue=0
426  */
427 static int select_queue;
428 module_param(select_queue, int, 0644);
429 MODULE_PARM_DESC(select_queue,
430 		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
431 
432 /*
433  * The filter TCAM has a fixed portion and a variable portion.  The fixed
434  * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
435  * ports.  The variable portion is 36 bits which can include things like Exact
436  * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
437  * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
438  * far exceed the 36-bit budget for this "compressed" header portion of the
439  * filter.  Thus, we have a scarce resource which must be carefully managed.
440  *
441  * By default we set this up to mostly match the set of filter matching
442  * capabilities of T3 but with accommodations for some of T4's more
443  * interesting features:
444  *
445  *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
446  *     [Inner] VLAN (17), Port (3), FCoE (1) }
447  */
448 enum {
449 	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
450 	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
451 	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
452 };
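/*
 * (Quick tally of the default selection above: 1 + 3 + 8 + 17 + 3 + 1 = 33
 * bits, which fits within the 36-bit budget for the compressed portion of
 * the filter with 3 bits to spare.)
 */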
453 
454 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
455 
456 module_param(tp_vlan_pri_map, uint, 0644);
457 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
458 
459 static struct dentry *cxgb4_debugfs_root;
460 
461 static LIST_HEAD(adapter_list);
462 static DEFINE_MUTEX(uld_mutex);
463 /* Adapter list to be accessed from atomic context */
464 static LIST_HEAD(adap_rcu_list);
465 static DEFINE_SPINLOCK(adap_rcu_lock);
466 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
467 static const char *uld_str[] = { "RDMA", "iSCSI" };
468 
469 static void link_report(struct net_device *dev)
470 {
471 	if (!netif_carrier_ok(dev))
472 		netdev_info(dev, "link down\n");
473 	else {
474 		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
475 
476 		const char *s = "10Mbps";
477 		const struct port_info *p = netdev_priv(dev);
478 
479 		switch (p->link_cfg.speed) {
480 		case 10000:
481 			s = "10Gbps";
482 			break;
483 		case 1000:
484 			s = "1000Mbps";
485 			break;
486 		case 100:
487 			s = "100Mbps";
488 			break;
489 		case 40000:
490 			s = "40Gbps";
491 			break;
492 		}
493 
494 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
495 			    fc[p->link_cfg.fc]);
496 	}
497 }
498 
499 #ifdef CONFIG_CHELSIO_T4_DCB
500 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
501 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
502 {
503 	struct port_info *pi = netdev_priv(dev);
504 	struct adapter *adap = pi->adapter;
505 	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
506 	int i;
507 
508 	/* We use a simple mapping of Port TX Queue Index to DCB
509 	 * Priority when we're enabling DCB.
510 	 */
511 	for (i = 0; i < pi->nqsets; i++, txq++) {
512 		u32 name, value;
513 		int err;
514 
515 		name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
516 			FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
517 			FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
518 		value = enable ? i : 0xffffffff;
519 
520 		/* Since we can be called while atomic (from "interrupt
521 		 * level") we need to issue the Set Parameters Commannd
522 		 * without sleeping (timeout < 0).
523 		 */
524 		err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
525 					    &name, &value);
526 
527 		if (err)
528 			dev_err(adap->pdev_dev,
529 				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
530 				enable ? "set" : "unset", pi->port_id, i, -err);
531 		else
532 			txq->dcb_prio = value;
533 	}
534 }
535 #endif /* CONFIG_CHELSIO_T4_DCB */
536 
537 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
538 {
539 	struct net_device *dev = adapter->port[port_id];
540 
541 	/* Skip changes from disabled ports. */
542 	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
543 		if (link_stat)
544 			netif_carrier_on(dev);
545 		else {
546 #ifdef CONFIG_CHELSIO_T4_DCB
547 			cxgb4_dcb_state_init(dev);
548 			dcb_tx_queue_prio_enable(dev, false);
549 #endif /* CONFIG_CHELSIO_T4_DCB */
550 			netif_carrier_off(dev);
551 		}
552 
553 		link_report(dev);
554 	}
555 }
556 
557 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
558 {
559 	static const char *mod_str[] = {
560 		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
561 	};
562 
563 	const struct net_device *dev = adap->port[port_id];
564 	const struct port_info *pi = netdev_priv(dev);
565 
566 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
567 		netdev_info(dev, "port module unplugged\n");
568 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
569 		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
570 }
571 
572 /*
573  * Configure the exact and hash address filters to handle a port's multicast
574  * and secondary unicast MAC addresses.
575  */
576 static int set_addr_filters(const struct net_device *dev, bool sleep)
577 {
578 	u64 mhash = 0;
579 	u64 uhash = 0;
580 	bool free = true;
581 	u16 filt_idx[7];
582 	const u8 *addr[7];
583 	int ret, naddr = 0;
584 	const struct netdev_hw_addr *ha;
585 	int uc_cnt = netdev_uc_count(dev);
586 	int mc_cnt = netdev_mc_count(dev);
587 	const struct port_info *pi = netdev_priv(dev);
588 	unsigned int mb = pi->adapter->fn;
589 
590 	/* first do the secondary unicast addresses */
591 	netdev_for_each_uc_addr(ha, dev) {
592 		addr[naddr++] = ha->addr;
593 		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
594 			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
595 					naddr, addr, filt_idx, &uhash, sleep);
596 			if (ret < 0)
597 				return ret;
598 
599 			free = false;
600 			naddr = 0;
601 		}
602 	}
603 
604 	/* next set up the multicast addresses */
605 	netdev_for_each_mc_addr(ha, dev) {
606 		addr[naddr++] = ha->addr;
607 		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
608 			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
609 					naddr, addr, filt_idx, &mhash, sleep);
610 			if (ret < 0)
611 				return ret;
612 
613 			free = false;
614 			naddr = 0;
615 		}
616 	}
617 
618 	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
619 				uhash | mhash, sleep);
620 }
621 
622 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
623 module_param(dbfifo_int_thresh, int, 0644);
624 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
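/* (The "10 == 640 entry" note above suggests the threshold is counted in
 * units of 64 doorbell FIFO entries: 10 * 64 == 640.)
 */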
625 
626 /*
627  * usecs to sleep while draining the dbfifo
628  */
629 static int dbfifo_drain_delay = 1000;
630 module_param(dbfifo_drain_delay, int, 0644);
631 MODULE_PARM_DESC(dbfifo_drain_delay,
632 		 "usecs to sleep while draining the dbfifo");
633 
634 /*
635  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
636  * If @mtu is -1 it is left unchanged.
637  */
638 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
639 {
640 	int ret;
641 	struct port_info *pi = netdev_priv(dev);
642 
643 	ret = set_addr_filters(dev, sleep_ok);
644 	if (ret == 0)
645 		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
646 				    (dev->flags & IFF_PROMISC) ? 1 : 0,
647 				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
648 				    sleep_ok);
649 	return ret;
650 }
651 
652 /**
653  *	link_start - enable a port
654  *	@dev: the port to enable
655  *
656  *	Performs the MAC and PHY actions needed to enable a port.
657  */
658 static int link_start(struct net_device *dev)
659 {
660 	int ret;
661 	struct port_info *pi = netdev_priv(dev);
662 	unsigned int mb = pi->adapter->fn;
663 
664 	/*
665 	 * We do not set address filters and promiscuity here, the stack does
666 	 * that step explicitly.
667 	 */
668 	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
669 			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
670 	if (ret == 0) {
671 		ret = t4_change_mac(pi->adapter, mb, pi->viid,
672 				    pi->xact_addr_filt, dev->dev_addr, true,
673 				    true);
674 		if (ret >= 0) {
675 			pi->xact_addr_filt = ret;
676 			ret = 0;
677 		}
678 	}
679 	if (ret == 0)
680 		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
681 				    &pi->link_cfg);
682 	if (ret == 0) {
683 		local_bh_disable();
684 		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
685 					  true, CXGB4_DCB_ENABLED);
686 		local_bh_enable();
687 	}
688 
689 	return ret;
690 }
691 
692 int cxgb4_dcb_enabled(const struct net_device *dev)
693 {
694 #ifdef CONFIG_CHELSIO_T4_DCB
695 	struct port_info *pi = netdev_priv(dev);
696 
697 	if (!pi->dcb.enabled)
698 		return 0;
699 
700 	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
701 		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
702 #else
703 	return 0;
704 #endif
705 }
706 EXPORT_SYMBOL(cxgb4_dcb_enabled);
707 
708 #ifdef CONFIG_CHELSIO_T4_DCB
709 /* Handle a Data Center Bridging update message from the firmware. */
710 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
711 {
712 	int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
713 	struct net_device *dev = adap->port[port];
714 	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
715 	int new_dcb_enabled;
716 
717 	cxgb4_dcb_handle_fw_update(adap, pcmd);
718 	new_dcb_enabled = cxgb4_dcb_enabled(dev);
719 
720 	/* If the DCB has become enabled or disabled on the port then we're
721 	 * going to need to set up/tear down DCB Priority parameters for the
722 	 * TX Queues associated with the port.
723 	 */
724 	if (new_dcb_enabled != old_dcb_enabled)
725 		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
726 }
727 #endif /* CONFIG_CHELSIO_T4_DCB */
728 
729 /* Clear a filter and release any of its resources that we own.  This also
730  * clears the filter's "pending" status.
731  */
732 static void clear_filter(struct adapter *adap, struct filter_entry *f)
733 {
734 	/* If the new or old filter has loopback rewriting rules then we'll
735 	 * need to free any existing Layer Two Table (L2T) entries of the old
736 	 * filter rule.  The firmware will handle freeing up any Source MAC
737 	 * Table (SMT) entries used for rewriting Source MAC Addresses in
738 	 * loopback rules.
739 	 */
740 	if (f->l2t)
741 		cxgb4_l2t_release(f->l2t);
742 
743 	/* The zeroing of the filter rule below clears the filter valid,
744 	 * pending, locked flags, l2t pointer, etc. so it's all we need for
745 	 * this operation.
746 	 */
747 	memset(f, 0, sizeof(*f));
748 }
749 
750 /* Handle a filter write/deletion reply.
751  */
752 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
753 {
754 	unsigned int idx = GET_TID(rpl);
755 	unsigned int nidx = idx - adap->tids.ftid_base;
756 	unsigned int ret;
757 	struct filter_entry *f;
758 
759 	if (idx >= adap->tids.ftid_base && nidx <
760 	   (adap->tids.nftids + adap->tids.nsftids)) {
761 		idx = nidx;
762 		ret = GET_TCB_COOKIE(rpl->cookie);
763 		f = &adap->tids.ftid_tab[idx];
764 
765 		if (ret == FW_FILTER_WR_FLT_DELETED) {
766 			/* Clear the filter when we get confirmation from the
767 			 * hardware that the filter has been deleted.
768 			 */
769 			clear_filter(adap, f);
770 		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
771 			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
772 				idx);
773 			clear_filter(adap, f);
774 		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
775 			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
776 			f->pending = 0;  /* asynchronous setup completed */
777 			f->valid = 1;
778 		} else {
779 			/* Something went wrong.  Issue a warning about the
780 			 * problem and clear everything out.
781 			 */
782 			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
783 				idx, ret);
784 			clear_filter(adap, f);
785 		}
786 	}
787 }
788 
789 /* Response queue handler for the FW event queue.
790  */
791 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
792 			  const struct pkt_gl *gl)
793 {
794 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
795 
796 	rsp++;                                          /* skip RSS header */
797 
798 	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
799 	 */
800 	if (unlikely(opcode == CPL_FW4_MSG &&
801 	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
802 		rsp++;
803 		opcode = ((const struct rss_header *)rsp)->opcode;
804 		rsp++;
805 		if (opcode != CPL_SGE_EGR_UPDATE) {
806 			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
807 				, opcode);
808 			goto out;
809 		}
810 	}
811 
812 	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
813 		const struct cpl_sge_egr_update *p = (void *)rsp;
814 		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
815 		struct sge_txq *txq;
816 
817 		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
818 		txq->restarts++;
819 		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
820 			struct sge_eth_txq *eq;
821 
822 			eq = container_of(txq, struct sge_eth_txq, q);
823 			netif_tx_wake_queue(eq->txq);
824 		} else {
825 			struct sge_ofld_txq *oq;
826 
827 			oq = container_of(txq, struct sge_ofld_txq, q);
828 			tasklet_schedule(&oq->qresume_tsk);
829 		}
830 	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
831 		const struct cpl_fw6_msg *p = (void *)rsp;
832 
833 #ifdef CONFIG_CHELSIO_T4_DCB
834 		const struct fw_port_cmd *pcmd = (const void *)p->data;
835 		unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
836 		unsigned int action =
837 			FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));
838 
839 		if (cmd == FW_PORT_CMD &&
840 		    action == FW_PORT_ACTION_GET_PORT_INFO) {
841 			int port = FW_PORT_CMD_PORTID_GET(
842 					be32_to_cpu(pcmd->op_to_portid));
843 			struct net_device *dev = q->adap->port[port];
844 			int state_input = ((pcmd->u.info.dcbxdis_pkd &
845 					    FW_PORT_CMD_DCBXDIS)
846 					   ? CXGB4_DCB_INPUT_FW_DISABLED
847 					   : CXGB4_DCB_INPUT_FW_ENABLED);
848 
849 			cxgb4_dcb_state_fsm(dev, state_input);
850 		}
851 
852 		if (cmd == FW_PORT_CMD &&
853 		    action == FW_PORT_ACTION_L2_DCB_CFG)
854 			dcb_rpl(q->adap, pcmd);
855 		else
856 #endif
857 			if (p->type == 0)
858 				t4_handle_fw_rpl(q->adap, p->data);
859 	} else if (opcode == CPL_L2T_WRITE_RPL) {
860 		const struct cpl_l2t_write_rpl *p = (void *)rsp;
861 
862 		do_l2t_write_rpl(q->adap, p);
863 	} else if (opcode == CPL_SET_TCB_RPL) {
864 		const struct cpl_set_tcb_rpl *p = (void *)rsp;
865 
866 		filter_rpl(q->adap, p);
867 	} else
868 		dev_err(q->adap->pdev_dev,
869 			"unexpected CPL %#x on FW event queue\n", opcode);
870 out:
871 	return 0;
872 }
873 
874 /**
875  *	uldrx_handler - response queue handler for ULD queues
876  *	@q: the response queue that received the packet
877  *	@rsp: the response queue descriptor holding the offload message
878  *	@gl: the gather list of packet fragments
879  *
880  *	Deliver an ingress offload packet to a ULD.  All processing is done by
881  *	the ULD, we just maintain statistics.
882  */
883 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
884 			 const struct pkt_gl *gl)
885 {
886 	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
887 
888 	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
889 	 */
890 	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
891 	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
892 		rsp += 2;
893 
894 	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
895 		rxq->stats.nomem++;
896 		return -1;
897 	}
898 	if (gl == NULL)
899 		rxq->stats.imm++;
900 	else if (gl == CXGB4_MSG_AN)
901 		rxq->stats.an++;
902 	else
903 		rxq->stats.pkts++;
904 	return 0;
905 }
906 
907 static void disable_msi(struct adapter *adapter)
908 {
909 	if (adapter->flags & USING_MSIX) {
910 		pci_disable_msix(adapter->pdev);
911 		adapter->flags &= ~USING_MSIX;
912 	} else if (adapter->flags & USING_MSI) {
913 		pci_disable_msi(adapter->pdev);
914 		adapter->flags &= ~USING_MSI;
915 	}
916 }
917 
918 /*
919  * Interrupt handler for non-data events used with MSI-X.
920  */
921 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
922 {
923 	struct adapter *adap = cookie;
924 
925 	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
926 	if (v & PFSW) {
927 		adap->swintr = 1;
928 		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
929 	}
930 	t4_slow_intr_handler(adap);
931 	return IRQ_HANDLED;
932 }
933 
934 /*
935  * Name the MSI-X interrupts.
936  */
937 static void name_msix_vecs(struct adapter *adap)
938 {
939 	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
940 
941 	/* non-data interrupts */
942 	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
943 
944 	/* FW events */
945 	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
946 		 adap->port[0]->name);
947 
948 	/* Ethernet queues */
949 	for_each_port(adap, j) {
950 		struct net_device *d = adap->port[j];
951 		const struct port_info *pi = netdev_priv(d);
952 
953 		for (i = 0; i < pi->nqsets; i++, msi_idx++)
954 			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
955 				 d->name, i);
956 	}
957 
958 	/* offload queues */
959 	for_each_ofldrxq(&adap->sge, i)
960 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
961 			 adap->port[0]->name, i);
962 
963 	for_each_rdmarxq(&adap->sge, i)
964 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
965 			 adap->port[0]->name, i);
966 
967 	for_each_rdmaciq(&adap->sge, i)
968 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
969 			 adap->port[0]->name, i);
970 }
971 
972 static int request_msix_queue_irqs(struct adapter *adap)
973 {
974 	struct sge *s = &adap->sge;
975 	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
976 	int msi_index = 2;
977 
978 	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
979 			  adap->msix_info[1].desc, &s->fw_evtq);
980 	if (err)
981 		return err;
982 
983 	for_each_ethrxq(s, ethqidx) {
984 		err = request_irq(adap->msix_info[msi_index].vec,
985 				  t4_sge_intr_msix, 0,
986 				  adap->msix_info[msi_index].desc,
987 				  &s->ethrxq[ethqidx].rspq);
988 		if (err)
989 			goto unwind;
990 		msi_index++;
991 	}
992 	for_each_ofldrxq(s, ofldqidx) {
993 		err = request_irq(adap->msix_info[msi_index].vec,
994 				  t4_sge_intr_msix, 0,
995 				  adap->msix_info[msi_index].desc,
996 				  &s->ofldrxq[ofldqidx].rspq);
997 		if (err)
998 			goto unwind;
999 		msi_index++;
1000 	}
1001 	for_each_rdmarxq(s, rdmaqidx) {
1002 		err = request_irq(adap->msix_info[msi_index].vec,
1003 				  t4_sge_intr_msix, 0,
1004 				  adap->msix_info[msi_index].desc,
1005 				  &s->rdmarxq[rdmaqidx].rspq);
1006 		if (err)
1007 			goto unwind;
1008 		msi_index++;
1009 	}
1010 	for_each_rdmaciq(s, rdmaciqqidx) {
1011 		err = request_irq(adap->msix_info[msi_index].vec,
1012 				  t4_sge_intr_msix, 0,
1013 				  adap->msix_info[msi_index].desc,
1014 				  &s->rdmaciq[rdmaciqqidx].rspq);
1015 		if (err)
1016 			goto unwind;
1017 		msi_index++;
1018 	}
1019 	return 0;
1020 
1021 unwind:
1022 	while (--rdmaciqqidx >= 0)
1023 		free_irq(adap->msix_info[--msi_index].vec,
1024 			 &s->rdmaciq[rdmaciqqidx].rspq);
1025 	while (--rdmaqidx >= 0)
1026 		free_irq(adap->msix_info[--msi_index].vec,
1027 			 &s->rdmarxq[rdmaqidx].rspq);
1028 	while (--ofldqidx >= 0)
1029 		free_irq(adap->msix_info[--msi_index].vec,
1030 			 &s->ofldrxq[ofldqidx].rspq);
1031 	while (--ethqidx >= 0)
1032 		free_irq(adap->msix_info[--msi_index].vec,
1033 			 &s->ethrxq[ethqidx].rspq);
1034 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1035 	return err;
1036 }
1037 
1038 static void free_msix_queue_irqs(struct adapter *adap)
1039 {
1040 	int i, msi_index = 2;
1041 	struct sge *s = &adap->sge;
1042 
1043 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1044 	for_each_ethrxq(s, i)
1045 		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
1046 	for_each_ofldrxq(s, i)
1047 		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
1048 	for_each_rdmarxq(s, i)
1049 		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
1050 	for_each_rdmaciq(s, i)
1051 		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
1052 }
1053 
1054 /**
1055  *	write_rss - write the RSS table for a given port
1056  *	@pi: the port
1057  *	@queues: array of queue indices for RSS
1058  *
1059  *	Sets up the portion of the HW RSS table for the port's VI to distribute
1060  *	packets to the Rx queues in @queues.
1061  */
1062 static int write_rss(const struct port_info *pi, const u16 *queues)
1063 {
1064 	u16 *rss;
1065 	int i, err;
1066 	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
1067 
1068 	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
1069 	if (!rss)
1070 		return -ENOMEM;
1071 
1072 	/* map the queue indices to queue ids */
1073 	for (i = 0; i < pi->rss_size; i++, queues++)
1074 		rss[i] = q[*queues].rspq.abs_id;
1075 
1076 	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
1077 				  pi->rss_size, rss, pi->rss_size);
1078 	kfree(rss);
1079 	return err;
1080 }
1081 
1082 /**
1083  *	setup_rss - configure RSS
1084  *	@adap: the adapter
1085  *
1086  *	Sets up RSS for each port.
1087  */
1088 static int setup_rss(struct adapter *adap)
1089 {
1090 	int i, err;
1091 
1092 	for_each_port(adap, i) {
1093 		const struct port_info *pi = adap2pinfo(adap, i);
1094 
1095 		err = write_rss(pi, pi->rss);
1096 		if (err)
1097 			return err;
1098 	}
1099 	return 0;
1100 }
1101 
1102 /*
1103  * Return the channel of the ingress queue with the given qid.
1104  */
1105 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
1106 {
1107 	qid -= p->ingr_start;
1108 	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
1109 }
1110 
1111 /*
1112  * Wait until all NAPI handlers are descheduled.
1113  */
1114 static void quiesce_rx(struct adapter *adap)
1115 {
1116 	int i;
1117 
1118 	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1119 		struct sge_rspq *q = adap->sge.ingr_map[i];
1120 
1121 		if (q && q->handler)
1122 			napi_disable(&q->napi);
1123 	}
1124 }
1125 
1126 /*
1127  * Enable NAPI scheduling and interrupt generation for all Rx queues.
1128  */
1129 static void enable_rx(struct adapter *adap)
1130 {
1131 	int i;
1132 
1133 	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1134 		struct sge_rspq *q = adap->sge.ingr_map[i];
1135 
1136 		if (!q)
1137 			continue;
1138 		if (q->handler)
1139 			napi_enable(&q->napi);
1140 		/* 0-increment GTS to start the timer and enable interrupts */
1141 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
1142 			     SEINTARM(q->intr_params) |
1143 			     INGRESSQID(q->cntxt_id));
1144 	}
1145 }
1146 
1147 /**
1148  *	setup_sge_queues - configure SGE Tx/Rx/response queues
1149  *	@adap: the adapter
1150  *
1151  *	Determines how many sets of SGE queues to use and initializes them.
1152  *	We support multiple queue sets per port if we have MSI-X, otherwise
1153  *	just one queue set per port.
1154  */
1155 static int setup_sge_queues(struct adapter *adap)
1156 {
1157 	int err, msi_idx, i, j;
1158 	struct sge *s = &adap->sge;
1159 
1160 	bitmap_zero(s->starving_fl, MAX_EGRQ);
1161 	bitmap_zero(s->txq_maperr, MAX_EGRQ);
1162 
1163 	if (adap->flags & USING_MSIX)
1164 		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
1165 	else {
1166 		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1167 				       NULL, NULL);
1168 		if (err)
1169 			return err;
1170 		msi_idx = -((int)s->intrq.abs_id + 1);
1171 	}
1172 
1173 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1174 			       msi_idx, NULL, fwevtq_handler);
1175 	if (err) {
1176 freeout:	t4_free_sge_resources(adap);
1177 		return err;
1178 	}
1179 
1180 	for_each_port(adap, i) {
1181 		struct net_device *dev = adap->port[i];
1182 		struct port_info *pi = netdev_priv(dev);
1183 		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1184 		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1185 
1186 		for (j = 0; j < pi->nqsets; j++, q++) {
1187 			if (msi_idx > 0)
1188 				msi_idx++;
1189 			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1190 					       msi_idx, &q->fl,
1191 					       t4_ethrx_handler);
1192 			if (err)
1193 				goto freeout;
1194 			q->rspq.idx = j;
1195 			memset(&q->stats, 0, sizeof(q->stats));
1196 		}
1197 		for (j = 0; j < pi->nqsets; j++, t++) {
1198 			err = t4_sge_alloc_eth_txq(adap, t, dev,
1199 					netdev_get_tx_queue(dev, j),
1200 					s->fw_evtq.cntxt_id);
1201 			if (err)
1202 				goto freeout;
1203 		}
1204 	}
1205 
1206 	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1207 	for_each_ofldrxq(s, i) {
1208 		struct sge_ofld_rxq *q = &s->ofldrxq[i];
1209 		struct net_device *dev = adap->port[i / j];
1210 
1211 		if (msi_idx > 0)
1212 			msi_idx++;
1213 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1214 				       q->fl.size ? &q->fl : NULL,
1215 				       uldrx_handler);
1216 		if (err)
1217 			goto freeout;
1218 		memset(&q->stats, 0, sizeof(q->stats));
1219 		s->ofld_rxq[i] = q->rspq.abs_id;
1220 		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1221 					    s->fw_evtq.cntxt_id);
1222 		if (err)
1223 			goto freeout;
1224 	}
1225 
1226 	for_each_rdmarxq(s, i) {
1227 		struct sge_ofld_rxq *q = &s->rdmarxq[i];
1228 
1229 		if (msi_idx > 0)
1230 			msi_idx++;
1231 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1232 				       msi_idx, q->fl.size ? &q->fl : NULL,
1233 				       uldrx_handler);
1234 		if (err)
1235 			goto freeout;
1236 		memset(&q->stats, 0, sizeof(q->stats));
1237 		s->rdma_rxq[i] = q->rspq.abs_id;
1238 	}
1239 
1240 	for_each_rdmaciq(s, i) {
1241 		struct sge_ofld_rxq *q = &s->rdmaciq[i];
1242 
1243 		if (msi_idx > 0)
1244 			msi_idx++;
1245 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1246 				       msi_idx, q->fl.size ? &q->fl : NULL,
1247 				       uldrx_handler);
1248 		if (err)
1249 			goto freeout;
1250 		memset(&q->stats, 0, sizeof(q->stats));
1251 		s->rdma_ciq[i] = q->rspq.abs_id;
1252 	}
1253 
1254 	for_each_port(adap, i) {
1255 		/*
1256 		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1257 		 * have RDMA queues, and that's the right value.
1258 		 */
1259 		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1260 					    s->fw_evtq.cntxt_id,
1261 					    s->rdmarxq[i].rspq.cntxt_id);
1262 		if (err)
1263 			goto freeout;
1264 	}
1265 
1266 	t4_write_reg(adap, is_t4(adap->params.chip) ?
1267 				MPS_TRC_RSS_CONTROL :
1268 				MPS_T5_TRC_RSS_CONTROL,
1269 		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1270 		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1271 	return 0;
1272 }
1273 
1274 /*
1275  * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1276  * The allocated memory is cleared.
1277  */
1278 void *t4_alloc_mem(size_t size)
1279 {
1280 	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1281 
1282 	if (!p)
1283 		p = vzalloc(size);
1284 	return p;
1285 }
1286 
1287 /*
1288  * Free memory allocated through alloc_mem().
1289  */
1290 static void t4_free_mem(void *addr)
1291 {
1292 	if (is_vmalloc_addr(addr))
1293 		vfree(addr);
1294 	else
1295 		kfree(addr);
1296 }
1297 
1298 /* Send a Work Request to write the filter at a specified index.  We construct
1299  * a Firmware Filter Work Request to have the work done and put the indicated
1300  * filter into "pending" mode which will prevent any further actions against
1301  * it till we get a reply from the firmware on the completion status of the
1302  * request.
1303  */
1304 static int set_filter_wr(struct adapter *adapter, int fidx)
1305 {
1306 	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1307 	struct sk_buff *skb;
1308 	struct fw_filter_wr *fwr;
1309 	unsigned int ftid;
1310 
1311 	/* If the new filter requires loopback Destination MAC and/or VLAN
1312 	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1313 	 * the filter.
1314 	 */
1315 	if (f->fs.newdmac || f->fs.newvlan) {
1316 		/* allocate L2T entry for new filter */
1317 		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1318 		if (f->l2t == NULL)
1319 			return -EAGAIN;
1320 		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1321 					f->fs.eport, f->fs.dmac)) {
1322 			cxgb4_l2t_release(f->l2t);
1323 			f->l2t = NULL;
1324 			return -ENOMEM;
1325 		}
1326 	}
1327 
1328 	ftid = adapter->tids.ftid_base + fidx;
1329 
1330 	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1331 	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1332 	memset(fwr, 0, sizeof(*fwr));
1333 
1334 	/* It would be nice to put most of the following in t4_hw.c but most
1335 	 * of the work is translating the cxgbtool ch_filter_specification
1336 	 * into the Work Request and the definition of that structure is
1337 	 * currently in cxgbtool.h which isn't appropriate to pull into the
1338 	 * common code.  We may eventually try to come up with a more neutral
1339 	 * filter specification structure but for now it's easiest to simply
1340 	 * put this fairly direct code in line ...
1341 	 */
1342 	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1343 	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1344 	fwr->tid_to_iq =
1345 		htonl(V_FW_FILTER_WR_TID(ftid) |
1346 		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1347 		      V_FW_FILTER_WR_NOREPLY(0) |
1348 		      V_FW_FILTER_WR_IQ(f->fs.iq));
1349 	fwr->del_filter_to_l2tix =
1350 		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1351 		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1352 		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1353 		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1354 		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1355 		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1356 		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1357 		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1358 		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1359 					     f->fs.newvlan == VLAN_REWRITE) |
1360 		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1361 					    f->fs.newvlan == VLAN_REWRITE) |
1362 		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1363 		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1364 		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
1365 		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1366 	fwr->ethtype = htons(f->fs.val.ethtype);
1367 	fwr->ethtypem = htons(f->fs.mask.ethtype);
1368 	fwr->frag_to_ovlan_vldm =
1369 		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1370 		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1371 		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1372 		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1373 		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1374 		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1375 	fwr->smac_sel = 0;
1376 	fwr->rx_chan_rx_rpl_iq =
1377 		htons(V_FW_FILTER_WR_RX_CHAN(0) |
1378 		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1379 	fwr->maci_to_matchtypem =
1380 		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1381 		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1382 		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1383 		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1384 		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1385 		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1386 		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1387 		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1388 	fwr->ptcl = f->fs.val.proto;
1389 	fwr->ptclm = f->fs.mask.proto;
1390 	fwr->ttyp = f->fs.val.tos;
1391 	fwr->ttypm = f->fs.mask.tos;
1392 	fwr->ivlan = htons(f->fs.val.ivlan);
1393 	fwr->ivlanm = htons(f->fs.mask.ivlan);
1394 	fwr->ovlan = htons(f->fs.val.ovlan);
1395 	fwr->ovlanm = htons(f->fs.mask.ovlan);
1396 	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1397 	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1398 	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1399 	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1400 	fwr->lp = htons(f->fs.val.lport);
1401 	fwr->lpm = htons(f->fs.mask.lport);
1402 	fwr->fp = htons(f->fs.val.fport);
1403 	fwr->fpm = htons(f->fs.mask.fport);
1404 	if (f->fs.newsmac)
1405 		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1406 
1407 	/* Mark the filter as "pending" and ship off the Filter Work Request.
1408 	 * When we get the Work Request Reply we'll clear the pending status.
1409 	 */
1410 	f->pending = 1;
1411 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1412 	t4_ofld_send(adapter, skb);
1413 	return 0;
1414 }
1415 
1416 /* Delete the filter at a specified index.
1417  */
1418 static int del_filter_wr(struct adapter *adapter, int fidx)
1419 {
1420 	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1421 	struct sk_buff *skb;
1422 	struct fw_filter_wr *fwr;
1423 	unsigned int len, ftid;
1424 
1425 	len = sizeof(*fwr);
1426 	ftid = adapter->tids.ftid_base + fidx;
1427 
1428 	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1429 	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1430 	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1431 
1432 	/* Mark the filter as "pending" and ship off the Filter Work Request.
1433 	 * When we get the Work Request Reply we'll clear the pending status.
1434 	 */
1435 	f->pending = 1;
1436 	t4_mgmt_tx(adapter, skb);
1437 	return 0;
1438 }
1439 
1440 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1441 			     void *accel_priv, select_queue_fallback_t fallback)
1442 {
1443 	int txq;
1444 
1445 #ifdef CONFIG_CHELSIO_T4_DCB
1446 	/* If Data Center Bridging has been successfully negotiated on this
1447 	 * link then we'll use the skb's priority to map it to a TX Queue.
1448 	 * The skb's priority is determined via the VLAN Tag Priority Code
1449 	 * Point field.
1450 	 */
1451 	if (cxgb4_dcb_enabled(dev)) {
1452 		u16 vlan_tci;
1453 		int err;
1454 
1455 		err = vlan_get_tag(skb, &vlan_tci);
1456 		if (unlikely(err)) {
1457 			if (net_ratelimit())
1458 				netdev_warn(dev,
1459 					    "TX Packet without VLAN Tag on DCB Link\n");
1460 			txq = 0;
1461 		} else {
1462 			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1463 		}
1464 		return txq;
1465 	}
1466 #endif /* CONFIG_CHELSIO_T4_DCB */
1467 
1468 	if (select_queue) {
1469 		txq = (skb_rx_queue_recorded(skb)
1470 			? skb_get_rx_queue(skb)
1471 			: smp_processor_id());
1472 
1473 		while (unlikely(txq >= dev->real_num_tx_queues))
1474 			txq -= dev->real_num_tx_queues;
1475 
1476 		return txq;
1477 	}
1478 
1479 	return fallback(dev, skb) % dev->real_num_tx_queues;
1480 }
1481 
1482 static inline int is_offload(const struct adapter *adap)
1483 {
1484 	return adap->params.offload;
1485 }
1486 
1487 /*
1488  * Implementation of ethtool operations.
1489  */
1490 
1491 static u32 get_msglevel(struct net_device *dev)
1492 {
1493 	return netdev2adap(dev)->msg_enable;
1494 }
1495 
1496 static void set_msglevel(struct net_device *dev, u32 val)
1497 {
1498 	netdev2adap(dev)->msg_enable = val;
1499 }
1500 
1501 static char stats_strings[][ETH_GSTRING_LEN] = {
1502 	"TxOctetsOK         ",
1503 	"TxFramesOK         ",
1504 	"TxBroadcastFrames  ",
1505 	"TxMulticastFrames  ",
1506 	"TxUnicastFrames    ",
1507 	"TxErrorFrames      ",
1508 
1509 	"TxFrames64         ",
1510 	"TxFrames65To127    ",
1511 	"TxFrames128To255   ",
1512 	"TxFrames256To511   ",
1513 	"TxFrames512To1023  ",
1514 	"TxFrames1024To1518 ",
1515 	"TxFrames1519ToMax  ",
1516 
1517 	"TxFramesDropped    ",
1518 	"TxPauseFrames      ",
1519 	"TxPPP0Frames       ",
1520 	"TxPPP1Frames       ",
1521 	"TxPPP2Frames       ",
1522 	"TxPPP3Frames       ",
1523 	"TxPPP4Frames       ",
1524 	"TxPPP5Frames       ",
1525 	"TxPPP6Frames       ",
1526 	"TxPPP7Frames       ",
1527 
1528 	"RxOctetsOK         ",
1529 	"RxFramesOK         ",
1530 	"RxBroadcastFrames  ",
1531 	"RxMulticastFrames  ",
1532 	"RxUnicastFrames    ",
1533 
1534 	"RxFramesTooLong    ",
1535 	"RxJabberErrors     ",
1536 	"RxFCSErrors        ",
1537 	"RxLengthErrors     ",
1538 	"RxSymbolErrors     ",
1539 	"RxRuntFrames       ",
1540 
1541 	"RxFrames64         ",
1542 	"RxFrames65To127    ",
1543 	"RxFrames128To255   ",
1544 	"RxFrames256To511   ",
1545 	"RxFrames512To1023  ",
1546 	"RxFrames1024To1518 ",
1547 	"RxFrames1519ToMax  ",
1548 
1549 	"RxPauseFrames      ",
1550 	"RxPPP0Frames       ",
1551 	"RxPPP1Frames       ",
1552 	"RxPPP2Frames       ",
1553 	"RxPPP3Frames       ",
1554 	"RxPPP4Frames       ",
1555 	"RxPPP5Frames       ",
1556 	"RxPPP6Frames       ",
1557 	"RxPPP7Frames       ",
1558 
1559 	"RxBG0FramesDropped ",
1560 	"RxBG1FramesDropped ",
1561 	"RxBG2FramesDropped ",
1562 	"RxBG3FramesDropped ",
1563 	"RxBG0FramesTrunc   ",
1564 	"RxBG1FramesTrunc   ",
1565 	"RxBG2FramesTrunc   ",
1566 	"RxBG3FramesTrunc   ",
1567 
1568 	"TSO                ",
1569 	"TxCsumOffload      ",
1570 	"RxCsumGood         ",
1571 	"VLANextractions    ",
1572 	"VLANinsertions     ",
1573 	"GROpackets         ",
1574 	"GROmerged          ",
1575 	"WriteCoalSuccess   ",
1576 	"WriteCoalFail      ",
1577 };
1578 
1579 static int get_sset_count(struct net_device *dev, int sset)
1580 {
1581 	switch (sset) {
1582 	case ETH_SS_STATS:
1583 		return ARRAY_SIZE(stats_strings);
1584 	default:
1585 		return -EOPNOTSUPP;
1586 	}
1587 }
1588 
1589 #define T4_REGMAP_SIZE (160 * 1024)
1590 #define T5_REGMAP_SIZE (332 * 1024)
1591 
1592 static int get_regs_len(struct net_device *dev)
1593 {
1594 	struct adapter *adap = netdev2adap(dev);
1595 	if (is_t4(adap->params.chip))
1596 		return T4_REGMAP_SIZE;
1597 	else
1598 		return T5_REGMAP_SIZE;
1599 }
1600 
1601 static int get_eeprom_len(struct net_device *dev)
1602 {
1603 	return EEPROMSIZE;
1604 }
1605 
1606 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1607 {
1608 	struct adapter *adapter = netdev2adap(dev);
1609 
1610 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1611 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1612 	strlcpy(info->bus_info, pci_name(adapter->pdev),
1613 		sizeof(info->bus_info));
1614 
1615 	if (adapter->params.fw_vers)
1616 		snprintf(info->fw_version, sizeof(info->fw_version),
1617 			"%u.%u.%u.%u, TP %u.%u.%u.%u",
1618 			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1619 			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1620 			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1621 			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1622 			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1623 			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1624 			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1625 			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1626 }
1627 
1628 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1629 {
1630 	if (stringset == ETH_SS_STATS)
1631 		memcpy(data, stats_strings, sizeof(stats_strings));
1632 }
1633 
1634 /*
1635  * port stats maintained per queue of the port.  They should be in the same
1636  * order as in stats_strings above.
1637  */
1638 struct queue_port_stats {
1639 	u64 tso;
1640 	u64 tx_csum;
1641 	u64 rx_csum;
1642 	u64 vlan_ex;
1643 	u64 vlan_ins;
1644 	u64 gro_pkts;
1645 	u64 gro_merged;
1646 };
1647 
1648 static void collect_sge_port_stats(const struct adapter *adap,
1649 		const struct port_info *p, struct queue_port_stats *s)
1650 {
1651 	int i;
1652 	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1653 	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1654 
1655 	memset(s, 0, sizeof(*s));
1656 	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1657 		s->tso += tx->tso;
1658 		s->tx_csum += tx->tx_cso;
1659 		s->rx_csum += rx->stats.rx_cso;
1660 		s->vlan_ex += rx->stats.vlan_ex;
1661 		s->vlan_ins += tx->vlan_ins;
1662 		s->gro_pkts += rx->stats.lro_pkts;
1663 		s->gro_merged += rx->stats.lro_merged;
1664 	}
1665 }
1666 
1667 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1668 		      u64 *data)
1669 {
1670 	struct port_info *pi = netdev_priv(dev);
1671 	struct adapter *adapter = pi->adapter;
1672 	u32 val1, val2;
1673 
1674 	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1675 
1676 	data += sizeof(struct port_stats) / sizeof(u64);
1677 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1678 	data += sizeof(struct queue_port_stats) / sizeof(u64);
1679 	if (!is_t4(adapter->params.chip)) {
1680 		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1681 		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1682 		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1683 		*data = val1 - val2;
1684 		data++;
1685 		*data = val2;
1686 		data++;
1687 	} else {
1688 		memset(data, 0, 2 * sizeof(u64));
1689 		data += 2;	/* skip the two zeroed T5-only counters */
1690 	}
1691 }
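
/*
 * Layout note (derived from the code above): the ethtool stats buffer filled
 * by get_stats() mirrors stats_strings[]: the hardware per-port MAC counters
 * (struct port_stats) come first, then the per-queue-set software counters
 * (struct queue_port_stats), and finally the two SGE write-coalescing
 * counters, which are only meaningful on T5 and are reported as zero on T4.
 */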
1692 
1693 /*
1694  * Return a version number to identify the type of adapter.  The scheme is:
1695  * - bits 0..9: chip version
1696  * - bits 10..15: chip revision
1697  * - bits 16..23: register dump version
1698  */
1699 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1700 {
1701 	return CHELSIO_CHIP_VERSION(ap->params.chip) |
1702 		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1703 }
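
/*
 * Worked example of the scheme above: a chip version of 5 at revision 1 with
 * register dump version 1 yields 0x5 | (1 << 10) | (1 << 16) = 0x10405 as the
 * regs->version value.
 */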
1704 
1705 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1706 			   unsigned int end)
1707 {
1708 	u32 *p = buf + start;
1709 
1710 	for ( ; start <= end; start += sizeof(u32))
1711 		*p++ = t4_read_reg(ap, start);
1712 }
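
/*
 * Note on the dump layout: because the destination pointer starts at
 * buf + start, each register value lands at the byte offset in the dump
 * buffer equal to its register address; addresses not covered by any range
 * stay zero thanks to the memset() in get_regs() below.
 */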
1713 
1714 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1715 		     void *buf)
1716 {
1717 	static const unsigned int t4_reg_ranges[] = {
1718 		0x1008, 0x1108,
1719 		0x1180, 0x11b4,
1720 		0x11fc, 0x123c,
1721 		0x1300, 0x173c,
1722 		0x1800, 0x18fc,
1723 		0x3000, 0x30d8,
1724 		0x30e0, 0x5924,
1725 		0x5960, 0x59d4,
1726 		0x5a00, 0x5af8,
1727 		0x6000, 0x6098,
1728 		0x6100, 0x6150,
1729 		0x6200, 0x6208,
1730 		0x6240, 0x6248,
1731 		0x6280, 0x6338,
1732 		0x6370, 0x638c,
1733 		0x6400, 0x643c,
1734 		0x6500, 0x6524,
1735 		0x6a00, 0x6a38,
1736 		0x6a60, 0x6a78,
1737 		0x6b00, 0x6b84,
1738 		0x6bf0, 0x6c84,
1739 		0x6cf0, 0x6d84,
1740 		0x6df0, 0x6e84,
1741 		0x6ef0, 0x6f84,
1742 		0x6ff0, 0x7084,
1743 		0x70f0, 0x7184,
1744 		0x71f0, 0x7284,
1745 		0x72f0, 0x7384,
1746 		0x73f0, 0x7450,
1747 		0x7500, 0x7530,
1748 		0x7600, 0x761c,
1749 		0x7680, 0x76cc,
1750 		0x7700, 0x7798,
1751 		0x77c0, 0x77fc,
1752 		0x7900, 0x79fc,
1753 		0x7b00, 0x7c38,
1754 		0x7d00, 0x7efc,
1755 		0x8dc0, 0x8e1c,
1756 		0x8e30, 0x8e78,
1757 		0x8ea0, 0x8f6c,
1758 		0x8fc0, 0x9074,
1759 		0x90fc, 0x90fc,
1760 		0x9400, 0x9458,
1761 		0x9600, 0x96bc,
1762 		0x9800, 0x9808,
1763 		0x9820, 0x983c,
1764 		0x9850, 0x9864,
1765 		0x9c00, 0x9c6c,
1766 		0x9c80, 0x9cec,
1767 		0x9d00, 0x9d6c,
1768 		0x9d80, 0x9dec,
1769 		0x9e00, 0x9e6c,
1770 		0x9e80, 0x9eec,
1771 		0x9f00, 0x9f6c,
1772 		0x9f80, 0x9fec,
1773 		0xd004, 0xd03c,
1774 		0xdfc0, 0xdfe0,
1775 		0xe000, 0xea7c,
1776 		0xf000, 0x11110,
1777 		0x11118, 0x11190,
1778 		0x19040, 0x1906c,
1779 		0x19078, 0x19080,
1780 		0x1908c, 0x19124,
1781 		0x19150, 0x191b0,
1782 		0x191d0, 0x191e8,
1783 		0x19238, 0x1924c,
1784 		0x193f8, 0x19474,
1785 		0x19490, 0x194f8,
1786 		0x19800, 0x19f30,
1787 		0x1a000, 0x1a06c,
1788 		0x1a0b0, 0x1a120,
1789 		0x1a128, 0x1a138,
1790 		0x1a190, 0x1a1c4,
1791 		0x1a1fc, 0x1a1fc,
1792 		0x1e040, 0x1e04c,
1793 		0x1e284, 0x1e28c,
1794 		0x1e2c0, 0x1e2c0,
1795 		0x1e2e0, 0x1e2e0,
1796 		0x1e300, 0x1e384,
1797 		0x1e3c0, 0x1e3c8,
1798 		0x1e440, 0x1e44c,
1799 		0x1e684, 0x1e68c,
1800 		0x1e6c0, 0x1e6c0,
1801 		0x1e6e0, 0x1e6e0,
1802 		0x1e700, 0x1e784,
1803 		0x1e7c0, 0x1e7c8,
1804 		0x1e840, 0x1e84c,
1805 		0x1ea84, 0x1ea8c,
1806 		0x1eac0, 0x1eac0,
1807 		0x1eae0, 0x1eae0,
1808 		0x1eb00, 0x1eb84,
1809 		0x1ebc0, 0x1ebc8,
1810 		0x1ec40, 0x1ec4c,
1811 		0x1ee84, 0x1ee8c,
1812 		0x1eec0, 0x1eec0,
1813 		0x1eee0, 0x1eee0,
1814 		0x1ef00, 0x1ef84,
1815 		0x1efc0, 0x1efc8,
1816 		0x1f040, 0x1f04c,
1817 		0x1f284, 0x1f28c,
1818 		0x1f2c0, 0x1f2c0,
1819 		0x1f2e0, 0x1f2e0,
1820 		0x1f300, 0x1f384,
1821 		0x1f3c0, 0x1f3c8,
1822 		0x1f440, 0x1f44c,
1823 		0x1f684, 0x1f68c,
1824 		0x1f6c0, 0x1f6c0,
1825 		0x1f6e0, 0x1f6e0,
1826 		0x1f700, 0x1f784,
1827 		0x1f7c0, 0x1f7c8,
1828 		0x1f840, 0x1f84c,
1829 		0x1fa84, 0x1fa8c,
1830 		0x1fac0, 0x1fac0,
1831 		0x1fae0, 0x1fae0,
1832 		0x1fb00, 0x1fb84,
1833 		0x1fbc0, 0x1fbc8,
1834 		0x1fc40, 0x1fc4c,
1835 		0x1fe84, 0x1fe8c,
1836 		0x1fec0, 0x1fec0,
1837 		0x1fee0, 0x1fee0,
1838 		0x1ff00, 0x1ff84,
1839 		0x1ffc0, 0x1ffc8,
1840 		0x20000, 0x2002c,
1841 		0x20100, 0x2013c,
1842 		0x20190, 0x201c8,
1843 		0x20200, 0x20318,
1844 		0x20400, 0x20528,
1845 		0x20540, 0x20614,
1846 		0x21000, 0x21040,
1847 		0x2104c, 0x21060,
1848 		0x210c0, 0x210ec,
1849 		0x21200, 0x21268,
1850 		0x21270, 0x21284,
1851 		0x212fc, 0x21388,
1852 		0x21400, 0x21404,
1853 		0x21500, 0x21518,
1854 		0x2152c, 0x2153c,
1855 		0x21550, 0x21554,
1856 		0x21600, 0x21600,
1857 		0x21608, 0x21628,
1858 		0x21630, 0x2163c,
1859 		0x21700, 0x2171c,
1860 		0x21780, 0x2178c,
1861 		0x21800, 0x21c38,
1862 		0x21c80, 0x21d7c,
1863 		0x21e00, 0x21e04,
1864 		0x22000, 0x2202c,
1865 		0x22100, 0x2213c,
1866 		0x22190, 0x221c8,
1867 		0x22200, 0x22318,
1868 		0x22400, 0x22528,
1869 		0x22540, 0x22614,
1870 		0x23000, 0x23040,
1871 		0x2304c, 0x23060,
1872 		0x230c0, 0x230ec,
1873 		0x23200, 0x23268,
1874 		0x23270, 0x23284,
1875 		0x232fc, 0x23388,
1876 		0x23400, 0x23404,
1877 		0x23500, 0x23518,
1878 		0x2352c, 0x2353c,
1879 		0x23550, 0x23554,
1880 		0x23600, 0x23600,
1881 		0x23608, 0x23628,
1882 		0x23630, 0x2363c,
1883 		0x23700, 0x2371c,
1884 		0x23780, 0x2378c,
1885 		0x23800, 0x23c38,
1886 		0x23c80, 0x23d7c,
1887 		0x23e00, 0x23e04,
1888 		0x24000, 0x2402c,
1889 		0x24100, 0x2413c,
1890 		0x24190, 0x241c8,
1891 		0x24200, 0x24318,
1892 		0x24400, 0x24528,
1893 		0x24540, 0x24614,
1894 		0x25000, 0x25040,
1895 		0x2504c, 0x25060,
1896 		0x250c0, 0x250ec,
1897 		0x25200, 0x25268,
1898 		0x25270, 0x25284,
1899 		0x252fc, 0x25388,
1900 		0x25400, 0x25404,
1901 		0x25500, 0x25518,
1902 		0x2552c, 0x2553c,
1903 		0x25550, 0x25554,
1904 		0x25600, 0x25600,
1905 		0x25608, 0x25628,
1906 		0x25630, 0x2563c,
1907 		0x25700, 0x2571c,
1908 		0x25780, 0x2578c,
1909 		0x25800, 0x25c38,
1910 		0x25c80, 0x25d7c,
1911 		0x25e00, 0x25e04,
1912 		0x26000, 0x2602c,
1913 		0x26100, 0x2613c,
1914 		0x26190, 0x261c8,
1915 		0x26200, 0x26318,
1916 		0x26400, 0x26528,
1917 		0x26540, 0x26614,
1918 		0x27000, 0x27040,
1919 		0x2704c, 0x27060,
1920 		0x270c0, 0x270ec,
1921 		0x27200, 0x27268,
1922 		0x27270, 0x27284,
1923 		0x272fc, 0x27388,
1924 		0x27400, 0x27404,
1925 		0x27500, 0x27518,
1926 		0x2752c, 0x2753c,
1927 		0x27550, 0x27554,
1928 		0x27600, 0x27600,
1929 		0x27608, 0x27628,
1930 		0x27630, 0x2763c,
1931 		0x27700, 0x2771c,
1932 		0x27780, 0x2778c,
1933 		0x27800, 0x27c38,
1934 		0x27c80, 0x27d7c,
1935 		0x27e00, 0x27e04
1936 	};
1937 
1938 	static const unsigned int t5_reg_ranges[] = {
1939 		0x1008, 0x1148,
1940 		0x1180, 0x11b4,
1941 		0x11fc, 0x123c,
1942 		0x1280, 0x173c,
1943 		0x1800, 0x18fc,
1944 		0x3000, 0x3028,
1945 		0x3060, 0x30d8,
1946 		0x30e0, 0x30fc,
1947 		0x3140, 0x357c,
1948 		0x35a8, 0x35cc,
1949 		0x35ec, 0x35ec,
1950 		0x3600, 0x5624,
1951 		0x56cc, 0x575c,
1952 		0x580c, 0x5814,
1953 		0x5890, 0x58bc,
1954 		0x5940, 0x59dc,
1955 		0x59fc, 0x5a18,
1956 		0x5a60, 0x5a9c,
1957 		0x5b9c, 0x5bfc,
1958 		0x6000, 0x6040,
1959 		0x6058, 0x614c,
1960 		0x7700, 0x7798,
1961 		0x77c0, 0x78fc,
1962 		0x7b00, 0x7c54,
1963 		0x7d00, 0x7efc,
1964 		0x8dc0, 0x8de0,
1965 		0x8df8, 0x8e84,
1966 		0x8ea0, 0x8f84,
1967 		0x8fc0, 0x90f8,
1968 		0x9400, 0x9470,
1969 		0x9600, 0x96f4,
1970 		0x9800, 0x9808,
1971 		0x9820, 0x983c,
1972 		0x9850, 0x9864,
1973 		0x9c00, 0x9c6c,
1974 		0x9c80, 0x9cec,
1975 		0x9d00, 0x9d6c,
1976 		0x9d80, 0x9dec,
1977 		0x9e00, 0x9e6c,
1978 		0x9e80, 0x9eec,
1979 		0x9f00, 0x9f6c,
1980 		0x9f80, 0xa020,
1981 		0xd004, 0xd03c,
1982 		0xdfc0, 0xdfe0,
1983 		0xe000, 0x11088,
1984 		0x1109c, 0x11110,
1985 		0x11118, 0x1117c,
1986 		0x11190, 0x11204,
1987 		0x19040, 0x1906c,
1988 		0x19078, 0x19080,
1989 		0x1908c, 0x19124,
1990 		0x19150, 0x191b0,
1991 		0x191d0, 0x191e8,
1992 		0x19238, 0x19290,
1993 		0x193f8, 0x19474,
1994 		0x19490, 0x194cc,
1995 		0x194f0, 0x194f8,
1996 		0x19c00, 0x19c60,
1997 		0x19c94, 0x19e10,
1998 		0x19e50, 0x19f34,
1999 		0x19f40, 0x19f50,
2000 		0x19f90, 0x19fe4,
2001 		0x1a000, 0x1a06c,
2002 		0x1a0b0, 0x1a120,
2003 		0x1a128, 0x1a138,
2004 		0x1a190, 0x1a1c4,
2005 		0x1a1fc, 0x1a1fc,
2006 		0x1e008, 0x1e00c,
2007 		0x1e040, 0x1e04c,
2008 		0x1e284, 0x1e290,
2009 		0x1e2c0, 0x1e2c0,
2010 		0x1e2e0, 0x1e2e0,
2011 		0x1e300, 0x1e384,
2012 		0x1e3c0, 0x1e3c8,
2013 		0x1e408, 0x1e40c,
2014 		0x1e440, 0x1e44c,
2015 		0x1e684, 0x1e690,
2016 		0x1e6c0, 0x1e6c0,
2017 		0x1e6e0, 0x1e6e0,
2018 		0x1e700, 0x1e784,
2019 		0x1e7c0, 0x1e7c8,
2020 		0x1e808, 0x1e80c,
2021 		0x1e840, 0x1e84c,
2022 		0x1ea84, 0x1ea90,
2023 		0x1eac0, 0x1eac0,
2024 		0x1eae0, 0x1eae0,
2025 		0x1eb00, 0x1eb84,
2026 		0x1ebc0, 0x1ebc8,
2027 		0x1ec08, 0x1ec0c,
2028 		0x1ec40, 0x1ec4c,
2029 		0x1ee84, 0x1ee90,
2030 		0x1eec0, 0x1eec0,
2031 		0x1eee0, 0x1eee0,
2032 		0x1ef00, 0x1ef84,
2033 		0x1efc0, 0x1efc8,
2034 		0x1f008, 0x1f00c,
2035 		0x1f040, 0x1f04c,
2036 		0x1f284, 0x1f290,
2037 		0x1f2c0, 0x1f2c0,
2038 		0x1f2e0, 0x1f2e0,
2039 		0x1f300, 0x1f384,
2040 		0x1f3c0, 0x1f3c8,
2041 		0x1f408, 0x1f40c,
2042 		0x1f440, 0x1f44c,
2043 		0x1f684, 0x1f690,
2044 		0x1f6c0, 0x1f6c0,
2045 		0x1f6e0, 0x1f6e0,
2046 		0x1f700, 0x1f784,
2047 		0x1f7c0, 0x1f7c8,
2048 		0x1f808, 0x1f80c,
2049 		0x1f840, 0x1f84c,
2050 		0x1fa84, 0x1fa90,
2051 		0x1fac0, 0x1fac0,
2052 		0x1fae0, 0x1fae0,
2053 		0x1fb00, 0x1fb84,
2054 		0x1fbc0, 0x1fbc8,
2055 		0x1fc08, 0x1fc0c,
2056 		0x1fc40, 0x1fc4c,
2057 		0x1fe84, 0x1fe90,
2058 		0x1fec0, 0x1fec0,
2059 		0x1fee0, 0x1fee0,
2060 		0x1ff00, 0x1ff84,
2061 		0x1ffc0, 0x1ffc8,
2062 		0x30000, 0x30030,
2063 		0x30100, 0x30144,
2064 		0x30190, 0x301d0,
2065 		0x30200, 0x30318,
2066 		0x30400, 0x3052c,
2067 		0x30540, 0x3061c,
2068 		0x30800, 0x30834,
2069 		0x308c0, 0x30908,
2070 		0x30910, 0x309ac,
2071 		0x30a00, 0x30a04,
2072 		0x30a0c, 0x30a2c,
2073 		0x30a44, 0x30a50,
2074 		0x30a74, 0x30c24,
2075 		0x30d08, 0x30d14,
2076 		0x30d1c, 0x30d20,
2077 		0x30d3c, 0x30d50,
2078 		0x31200, 0x3120c,
2079 		0x31220, 0x31220,
2080 		0x31240, 0x31240,
2081 		0x31600, 0x31600,
2082 		0x31608, 0x3160c,
2083 		0x31a00, 0x31a1c,
2084 		0x31e04, 0x31e20,
2085 		0x31e38, 0x31e3c,
2086 		0x31e80, 0x31e80,
2087 		0x31e88, 0x31ea8,
2088 		0x31eb0, 0x31eb4,
2089 		0x31ec8, 0x31ed4,
2090 		0x31fb8, 0x32004,
2091 		0x32208, 0x3223c,
2092 		0x32600, 0x32630,
2093 		0x32a00, 0x32abc,
2094 		0x32b00, 0x32b70,
2095 		0x33000, 0x33048,
2096 		0x33060, 0x3309c,
2097 		0x330f0, 0x33148,
2098 		0x33160, 0x3319c,
2099 		0x331f0, 0x332e4,
2100 		0x332f8, 0x333e4,
2101 		0x333f8, 0x33448,
2102 		0x33460, 0x3349c,
2103 		0x334f0, 0x33548,
2104 		0x33560, 0x3359c,
2105 		0x335f0, 0x336e4,
2106 		0x336f8, 0x337e4,
2107 		0x337f8, 0x337fc,
2108 		0x33814, 0x33814,
2109 		0x3382c, 0x3382c,
2110 		0x33880, 0x3388c,
2111 		0x338e8, 0x338ec,
2112 		0x33900, 0x33948,
2113 		0x33960, 0x3399c,
2114 		0x339f0, 0x33ae4,
2115 		0x33af8, 0x33b10,
2116 		0x33b28, 0x33b28,
2117 		0x33b3c, 0x33b50,
2118 		0x33bf0, 0x33c10,
2119 		0x33c28, 0x33c28,
2120 		0x33c3c, 0x33c50,
2121 		0x33cf0, 0x33cfc,
2122 		0x34000, 0x34030,
2123 		0x34100, 0x34144,
2124 		0x34190, 0x341d0,
2125 		0x34200, 0x34318,
2126 		0x34400, 0x3452c,
2127 		0x34540, 0x3461c,
2128 		0x34800, 0x34834,
2129 		0x348c0, 0x34908,
2130 		0x34910, 0x349ac,
2131 		0x34a00, 0x34a04,
2132 		0x34a0c, 0x34a2c,
2133 		0x34a44, 0x34a50,
2134 		0x34a74, 0x34c24,
2135 		0x34d08, 0x34d14,
2136 		0x34d1c, 0x34d20,
2137 		0x34d3c, 0x34d50,
2138 		0x35200, 0x3520c,
2139 		0x35220, 0x35220,
2140 		0x35240, 0x35240,
2141 		0x35600, 0x35600,
2142 		0x35608, 0x3560c,
2143 		0x35a00, 0x35a1c,
2144 		0x35e04, 0x35e20,
2145 		0x35e38, 0x35e3c,
2146 		0x35e80, 0x35e80,
2147 		0x35e88, 0x35ea8,
2148 		0x35eb0, 0x35eb4,
2149 		0x35ec8, 0x35ed4,
2150 		0x35fb8, 0x36004,
2151 		0x36208, 0x3623c,
2152 		0x36600, 0x36630,
2153 		0x36a00, 0x36abc,
2154 		0x36b00, 0x36b70,
2155 		0x37000, 0x37048,
2156 		0x37060, 0x3709c,
2157 		0x370f0, 0x37148,
2158 		0x37160, 0x3719c,
2159 		0x371f0, 0x372e4,
2160 		0x372f8, 0x373e4,
2161 		0x373f8, 0x37448,
2162 		0x37460, 0x3749c,
2163 		0x374f0, 0x37548,
2164 		0x37560, 0x3759c,
2165 		0x375f0, 0x376e4,
2166 		0x376f8, 0x377e4,
2167 		0x377f8, 0x377fc,
2168 		0x37814, 0x37814,
2169 		0x3782c, 0x3782c,
2170 		0x37880, 0x3788c,
2171 		0x378e8, 0x378ec,
2172 		0x37900, 0x37948,
2173 		0x37960, 0x3799c,
2174 		0x379f0, 0x37ae4,
2175 		0x37af8, 0x37b10,
2176 		0x37b28, 0x37b28,
2177 		0x37b3c, 0x37b50,
2178 		0x37bf0, 0x37c10,
2179 		0x37c28, 0x37c28,
2180 		0x37c3c, 0x37c50,
2181 		0x37cf0, 0x37cfc,
2182 		0x38000, 0x38030,
2183 		0x38100, 0x38144,
2184 		0x38190, 0x381d0,
2185 		0x38200, 0x38318,
2186 		0x38400, 0x3852c,
2187 		0x38540, 0x3861c,
2188 		0x38800, 0x38834,
2189 		0x388c0, 0x38908,
2190 		0x38910, 0x389ac,
2191 		0x38a00, 0x38a04,
2192 		0x38a0c, 0x38a2c,
2193 		0x38a44, 0x38a50,
2194 		0x38a74, 0x38c24,
2195 		0x38d08, 0x38d14,
2196 		0x38d1c, 0x38d20,
2197 		0x38d3c, 0x38d50,
2198 		0x39200, 0x3920c,
2199 		0x39220, 0x39220,
2200 		0x39240, 0x39240,
2201 		0x39600, 0x39600,
2202 		0x39608, 0x3960c,
2203 		0x39a00, 0x39a1c,
2204 		0x39e04, 0x39e20,
2205 		0x39e38, 0x39e3c,
2206 		0x39e80, 0x39e80,
2207 		0x39e88, 0x39ea8,
2208 		0x39eb0, 0x39eb4,
2209 		0x39ec8, 0x39ed4,
2210 		0x39fb8, 0x3a004,
2211 		0x3a208, 0x3a23c,
2212 		0x3a600, 0x3a630,
2213 		0x3aa00, 0x3aabc,
2214 		0x3ab00, 0x3ab70,
2215 		0x3b000, 0x3b048,
2216 		0x3b060, 0x3b09c,
2217 		0x3b0f0, 0x3b148,
2218 		0x3b160, 0x3b19c,
2219 		0x3b1f0, 0x3b2e4,
2220 		0x3b2f8, 0x3b3e4,
2221 		0x3b3f8, 0x3b448,
2222 		0x3b460, 0x3b49c,
2223 		0x3b4f0, 0x3b548,
2224 		0x3b560, 0x3b59c,
2225 		0x3b5f0, 0x3b6e4,
2226 		0x3b6f8, 0x3b7e4,
2227 		0x3b7f8, 0x3b7fc,
2228 		0x3b814, 0x3b814,
2229 		0x3b82c, 0x3b82c,
2230 		0x3b880, 0x3b88c,
2231 		0x3b8e8, 0x3b8ec,
2232 		0x3b900, 0x3b948,
2233 		0x3b960, 0x3b99c,
2234 		0x3b9f0, 0x3bae4,
2235 		0x3baf8, 0x3bb10,
2236 		0x3bb28, 0x3bb28,
2237 		0x3bb3c, 0x3bb50,
2238 		0x3bbf0, 0x3bc10,
2239 		0x3bc28, 0x3bc28,
2240 		0x3bc3c, 0x3bc50,
2241 		0x3bcf0, 0x3bcfc,
2242 		0x3c000, 0x3c030,
2243 		0x3c100, 0x3c144,
2244 		0x3c190, 0x3c1d0,
2245 		0x3c200, 0x3c318,
2246 		0x3c400, 0x3c52c,
2247 		0x3c540, 0x3c61c,
2248 		0x3c800, 0x3c834,
2249 		0x3c8c0, 0x3c908,
2250 		0x3c910, 0x3c9ac,
2251 		0x3ca00, 0x3ca04,
2252 		0x3ca0c, 0x3ca2c,
2253 		0x3ca44, 0x3ca50,
2254 		0x3ca74, 0x3cc24,
2255 		0x3cd08, 0x3cd14,
2256 		0x3cd1c, 0x3cd20,
2257 		0x3cd3c, 0x3cd50,
2258 		0x3d200, 0x3d20c,
2259 		0x3d220, 0x3d220,
2260 		0x3d240, 0x3d240,
2261 		0x3d600, 0x3d600,
2262 		0x3d608, 0x3d60c,
2263 		0x3da00, 0x3da1c,
2264 		0x3de04, 0x3de20,
2265 		0x3de38, 0x3de3c,
2266 		0x3de80, 0x3de80,
2267 		0x3de88, 0x3dea8,
2268 		0x3deb0, 0x3deb4,
2269 		0x3dec8, 0x3ded4,
2270 		0x3dfb8, 0x3e004,
2271 		0x3e208, 0x3e23c,
2272 		0x3e600, 0x3e630,
2273 		0x3ea00, 0x3eabc,
2274 		0x3eb00, 0x3eb70,
2275 		0x3f000, 0x3f048,
2276 		0x3f060, 0x3f09c,
2277 		0x3f0f0, 0x3f148,
2278 		0x3f160, 0x3f19c,
2279 		0x3f1f0, 0x3f2e4,
2280 		0x3f2f8, 0x3f3e4,
2281 		0x3f3f8, 0x3f448,
2282 		0x3f460, 0x3f49c,
2283 		0x3f4f0, 0x3f548,
2284 		0x3f560, 0x3f59c,
2285 		0x3f5f0, 0x3f6e4,
2286 		0x3f6f8, 0x3f7e4,
2287 		0x3f7f8, 0x3f7fc,
2288 		0x3f814, 0x3f814,
2289 		0x3f82c, 0x3f82c,
2290 		0x3f880, 0x3f88c,
2291 		0x3f8e8, 0x3f8ec,
2292 		0x3f900, 0x3f948,
2293 		0x3f960, 0x3f99c,
2294 		0x3f9f0, 0x3fae4,
2295 		0x3faf8, 0x3fb10,
2296 		0x3fb28, 0x3fb28,
2297 		0x3fb3c, 0x3fb50,
2298 		0x3fbf0, 0x3fc10,
2299 		0x3fc28, 0x3fc28,
2300 		0x3fc3c, 0x3fc50,
2301 		0x3fcf0, 0x3fcfc,
2302 		0x40000, 0x4000c,
2303 		0x40040, 0x40068,
2304 		0x40080, 0x40144,
2305 		0x40180, 0x4018c,
2306 		0x40200, 0x40298,
2307 		0x402ac, 0x4033c,
2308 		0x403f8, 0x403fc,
2309 		0x41304, 0x413c4,
2310 		0x41400, 0x4141c,
2311 		0x41480, 0x414d0,
2312 		0x44000, 0x44078,
2313 		0x440c0, 0x44278,
2314 		0x442c0, 0x44478,
2315 		0x444c0, 0x44678,
2316 		0x446c0, 0x44878,
2317 		0x448c0, 0x449fc,
2318 		0x45000, 0x45068,
2319 		0x45080, 0x45084,
2320 		0x450a0, 0x450b0,
2321 		0x45200, 0x45268,
2322 		0x45280, 0x45284,
2323 		0x452a0, 0x452b0,
2324 		0x460c0, 0x460e4,
2325 		0x47000, 0x4708c,
2326 		0x47200, 0x47250,
2327 		0x47400, 0x47420,
2328 		0x47600, 0x47618,
2329 		0x47800, 0x47814,
2330 		0x48000, 0x4800c,
2331 		0x48040, 0x48068,
2332 		0x48080, 0x48144,
2333 		0x48180, 0x4818c,
2334 		0x48200, 0x48298,
2335 		0x482ac, 0x4833c,
2336 		0x483f8, 0x483fc,
2337 		0x49304, 0x493c4,
2338 		0x49400, 0x4941c,
2339 		0x49480, 0x494d0,
2340 		0x4c000, 0x4c078,
2341 		0x4c0c0, 0x4c278,
2342 		0x4c2c0, 0x4c478,
2343 		0x4c4c0, 0x4c678,
2344 		0x4c6c0, 0x4c878,
2345 		0x4c8c0, 0x4c9fc,
2346 		0x4d000, 0x4d068,
2347 		0x4d080, 0x4d084,
2348 		0x4d0a0, 0x4d0b0,
2349 		0x4d200, 0x4d268,
2350 		0x4d280, 0x4d284,
2351 		0x4d2a0, 0x4d2b0,
2352 		0x4e0c0, 0x4e0e4,
2353 		0x4f000, 0x4f08c,
2354 		0x4f200, 0x4f250,
2355 		0x4f400, 0x4f420,
2356 		0x4f600, 0x4f618,
2357 		0x4f800, 0x4f814,
2358 		0x50000, 0x500cc,
2359 		0x50400, 0x50400,
2360 		0x50800, 0x508cc,
2361 		0x50c00, 0x50c00,
2362 		0x51000, 0x5101c,
2363 		0x51300, 0x51308,
2364 	};
2365 
2366 	int i;
2367 	struct adapter *ap = netdev2adap(dev);
2368 	static const unsigned int *reg_ranges;
2369 	int arr_size = 0, buf_size = 0;
2370 
2371 	if (is_t4(ap->params.chip)) {
2372 		reg_ranges = &t4_reg_ranges[0];
2373 		arr_size = ARRAY_SIZE(t4_reg_ranges);
2374 		buf_size = T4_REGMAP_SIZE;
2375 	} else {
2376 		reg_ranges = &t5_reg_ranges[0];
2377 		arr_size = ARRAY_SIZE(t5_reg_ranges);
2378 		buf_size = T5_REGMAP_SIZE;
2379 	}
2380 
2381 	regs->version = mk_adap_vers(ap);
2382 
2383 	memset(buf, 0, buf_size);
2384 	for (i = 0; i < arr_size; i += 2)
2385 		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2386 }
2387 
2388 static int restart_autoneg(struct net_device *dev)
2389 {
2390 	struct port_info *p = netdev_priv(dev);
2391 
2392 	if (!netif_running(dev))
2393 		return -EAGAIN;
2394 	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2395 		return -EINVAL;
2396 	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2397 	return 0;
2398 }
2399 
2400 static int identify_port(struct net_device *dev,
2401 			 enum ethtool_phys_id_state state)
2402 {
2403 	unsigned int val;
2404 	struct adapter *adap = netdev2adap(dev);
2405 
2406 	if (state == ETHTOOL_ID_ACTIVE)
2407 		val = 0xffff;
2408 	else if (state == ETHTOOL_ID_INACTIVE)
2409 		val = 0;
2410 	else
2411 		return -EINVAL;
2412 
2413 	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2414 }
2415 
2416 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2417 {
2418 	unsigned int v = 0;
2419 
2420 	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2421 	    type == FW_PORT_TYPE_BT_XAUI) {
2422 		v |= SUPPORTED_TP;
2423 		if (caps & FW_PORT_CAP_SPEED_100M)
2424 			v |= SUPPORTED_100baseT_Full;
2425 		if (caps & FW_PORT_CAP_SPEED_1G)
2426 			v |= SUPPORTED_1000baseT_Full;
2427 		if (caps & FW_PORT_CAP_SPEED_10G)
2428 			v |= SUPPORTED_10000baseT_Full;
2429 	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2430 		v |= SUPPORTED_Backplane;
2431 		if (caps & FW_PORT_CAP_SPEED_1G)
2432 			v |= SUPPORTED_1000baseKX_Full;
2433 		if (caps & FW_PORT_CAP_SPEED_10G)
2434 			v |= SUPPORTED_10000baseKX4_Full;
2435 	} else if (type == FW_PORT_TYPE_KR)
2436 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2437 	else if (type == FW_PORT_TYPE_BP_AP)
2438 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2439 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2440 	else if (type == FW_PORT_TYPE_BP4_AP)
2441 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2442 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2443 		     SUPPORTED_10000baseKX4_Full;
2444 	else if (type == FW_PORT_TYPE_FIBER_XFI ||
2445 		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
2446 		v |= SUPPORTED_FIBRE;
2447 		if (caps & FW_PORT_CAP_SPEED_1G)
2448 			v |= SUPPORTED_1000baseT_Full;
2449 		if (caps & FW_PORT_CAP_SPEED_10G)
2450 			v |= SUPPORTED_10000baseT_Full;
2451 	} else if (type == FW_PORT_TYPE_BP40_BA)
2452 		v |= SUPPORTED_40000baseSR4_Full;
2453 
2454 	if (caps & FW_PORT_CAP_ANEG)
2455 		v |= SUPPORTED_Autoneg;
2456 	return v;
2457 }
2458 
2459 static unsigned int to_fw_linkcaps(unsigned int caps)
2460 {
2461 	unsigned int v = 0;
2462 
2463 	if (caps & ADVERTISED_100baseT_Full)
2464 		v |= FW_PORT_CAP_SPEED_100M;
2465 	if (caps & ADVERTISED_1000baseT_Full)
2466 		v |= FW_PORT_CAP_SPEED_1G;
2467 	if (caps & ADVERTISED_10000baseT_Full)
2468 		v |= FW_PORT_CAP_SPEED_10G;
2469 	if (caps & ADVERTISED_40000baseSR4_Full)
2470 		v |= FW_PORT_CAP_SPEED_40G;
2471 	return v;
2472 }
2473 
2474 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2475 {
2476 	const struct port_info *p = netdev_priv(dev);
2477 
2478 	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2479 	    p->port_type == FW_PORT_TYPE_BT_XFI ||
2480 	    p->port_type == FW_PORT_TYPE_BT_XAUI)
2481 		cmd->port = PORT_TP;
2482 	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2483 		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2484 		cmd->port = PORT_FIBRE;
2485 	else if (p->port_type == FW_PORT_TYPE_SFP ||
2486 		 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2487 		 p->port_type == FW_PORT_TYPE_QSFP) {
2488 		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2489 		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
2490 		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
2491 		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
2492 			cmd->port = PORT_FIBRE;
2493 		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2494 			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2495 			cmd->port = PORT_DA;
2496 		else
2497 			cmd->port = PORT_OTHER;
2498 	} else
2499 		cmd->port = PORT_OTHER;
2500 
2501 	if (p->mdio_addr >= 0) {
2502 		cmd->phy_address = p->mdio_addr;
2503 		cmd->transceiver = XCVR_EXTERNAL;
2504 		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2505 			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2506 	} else {
2507 		cmd->phy_address = 0;  /* not really, but no better option */
2508 		cmd->transceiver = XCVR_INTERNAL;
2509 		cmd->mdio_support = 0;
2510 	}
2511 
2512 	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2513 	cmd->advertising = from_fw_linkcaps(p->port_type,
2514 					    p->link_cfg.advertising);
2515 	ethtool_cmd_speed_set(cmd,
2516 			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2517 	cmd->duplex = DUPLEX_FULL;
2518 	cmd->autoneg = p->link_cfg.autoneg;
2519 	cmd->maxtxpkt = 0;
2520 	cmd->maxrxpkt = 0;
2521 	return 0;
2522 }
2523 
2524 static unsigned int speed_to_caps(int speed)
2525 {
2526 	if (speed == 100)
2527 		return FW_PORT_CAP_SPEED_100M;
2528 	if (speed == 1000)
2529 		return FW_PORT_CAP_SPEED_1G;
2530 	if (speed == 10000)
2531 		return FW_PORT_CAP_SPEED_10G;
2532 	if (speed == 40000)
2533 		return FW_PORT_CAP_SPEED_40G;
2534 	return 0;
2535 }
2536 
2537 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2538 {
2539 	unsigned int cap;
2540 	struct port_info *p = netdev_priv(dev);
2541 	struct link_config *lc = &p->link_cfg;
2542 	u32 speed = ethtool_cmd_speed(cmd);
2543 
2544 	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
2545 		return -EINVAL;
2546 
2547 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2548 		/*
2549 		 * PHY offers a single speed.  See if that's what's
2550 		 * being requested.
2551 		 */
2552 		if (cmd->autoneg == AUTONEG_DISABLE &&
2553 		    (lc->supported & speed_to_caps(speed)))
2554 			return 0;
2555 		return -EINVAL;
2556 	}
2557 
2558 	if (cmd->autoneg == AUTONEG_DISABLE) {
2559 		cap = speed_to_caps(speed);
2560 
2561 		if (!(lc->supported & cap) ||
2562 		    (speed == 1000) ||
2563 		    (speed == 10000) ||
2564 		    (speed == 40000))
2565 			return -EINVAL;
2566 		lc->requested_speed = cap;
2567 		lc->advertising = 0;
2568 	} else {
2569 		cap = to_fw_linkcaps(cmd->advertising);
2570 		if (!(lc->supported & cap))
2571 			return -EINVAL;
2572 		lc->requested_speed = 0;
2573 		lc->advertising = cap | FW_PORT_CAP_ANEG;
2574 	}
2575 	lc->autoneg = cmd->autoneg;
2576 
2577 	if (netif_running(dev))
2578 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2579 				     lc);
2580 	return 0;
2581 }
2582 
2583 static void get_pauseparam(struct net_device *dev,
2584 			   struct ethtool_pauseparam *epause)
2585 {
2586 	struct port_info *p = netdev_priv(dev);
2587 
2588 	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2589 	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2590 	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2591 }
2592 
2593 static int set_pauseparam(struct net_device *dev,
2594 			  struct ethtool_pauseparam *epause)
2595 {
2596 	struct port_info *p = netdev_priv(dev);
2597 	struct link_config *lc = &p->link_cfg;
2598 
2599 	if (epause->autoneg == AUTONEG_DISABLE)
2600 		lc->requested_fc = 0;
2601 	else if (lc->supported & FW_PORT_CAP_ANEG)
2602 		lc->requested_fc = PAUSE_AUTONEG;
2603 	else
2604 		return -EINVAL;
2605 
2606 	if (epause->rx_pause)
2607 		lc->requested_fc |= PAUSE_RX;
2608 	if (epause->tx_pause)
2609 		lc->requested_fc |= PAUSE_TX;
2610 	if (netif_running(dev))
2611 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2612 				     lc);
2613 	return 0;
2614 }
2615 
2616 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2617 {
2618 	const struct port_info *pi = netdev_priv(dev);
2619 	const struct sge *s = &pi->adapter->sge;
2620 
2621 	e->rx_max_pending = MAX_RX_BUFFERS;
2622 	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2623 	e->rx_jumbo_max_pending = 0;
2624 	e->tx_max_pending = MAX_TXQ_ENTRIES;
2625 
2626 	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2627 	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2628 	e->rx_jumbo_pending = 0;
2629 	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2630 }
2631 
2632 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2633 {
2634 	int i;
2635 	const struct port_info *pi = netdev_priv(dev);
2636 	struct adapter *adapter = pi->adapter;
2637 	struct sge *s = &adapter->sge;
2638 
2639 	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2640 	    e->tx_pending > MAX_TXQ_ENTRIES ||
2641 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2642 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2643 	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2644 		return -EINVAL;
2645 
2646 	if (adapter->flags & FULL_INIT_DONE)
2647 		return -EBUSY;
2648 
2649 	for (i = 0; i < pi->nqsets; ++i) {
2650 		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2651 		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2652 		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2653 	}
2654 	return 0;
2655 }
2656 
2657 static int closest_timer(const struct sge *s, int time)
2658 {
2659 	int i, delta, match = 0, min_delta = INT_MAX;
2660 
2661 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2662 		delta = time - s->timer_val[i];
2663 		if (delta < 0)
2664 			delta = -delta;
2665 		if (delta < min_delta) {
2666 			min_delta = delta;
2667 			match = i;
2668 		}
2669 	}
2670 	return match;
2671 }
2672 
2673 static int closest_thres(const struct sge *s, int thres)
2674 {
2675 	int i, delta, match = 0, min_delta = INT_MAX;
2676 
2677 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2678 		delta = thres - s->counter_val[i];
2679 		if (delta < 0)
2680 			delta = -delta;
2681 		if (delta < min_delta) {
2682 			min_delta = delta;
2683 			match = i;
2684 		}
2685 	}
2686 	return match;
2687 }
2688 
2689 /*
2690  * Return a queue's interrupt hold-off time in us.  0 means no timer.
2691  */
2692 static unsigned int qtimer_val(const struct adapter *adap,
2693 			       const struct sge_rspq *q)
2694 {
2695 	unsigned int idx = q->intr_params >> 1;
2696 
2697 	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2698 }
2699 
2700 /**
2701  *	set_rspq_intr_params - set a queue's interrupt holdoff parameters
2702  *	@q: the Rx queue
2703  *	@us: the hold-off time in us, or 0 to disable timer
2704  *	@cnt: the hold-off packet count, or 0 to disable counter
2705  *
2706  *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
2707  *	one of the two needs to be enabled for the queue to generate interrupts.
2708  */
2709 static int set_rspq_intr_params(struct sge_rspq *q,
2710 				unsigned int us, unsigned int cnt)
2711 {
2712 	struct adapter *adap = q->adap;
2713 
2714 	if ((us | cnt) == 0)
2715 		cnt = 1;
2716 
2717 	if (cnt) {
2718 		int err;
2719 		u32 v, new_idx;
2720 
2721 		new_idx = closest_thres(&adap->sge, cnt);
2722 		if (q->desc && q->pktcnt_idx != new_idx) {
2723 			/* the queue has already been created, update it */
2724 			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2725 			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2726 			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
2727 			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2728 					    &new_idx);
2729 			if (err)
2730 				return err;
2731 		}
2732 		q->pktcnt_idx = new_idx;
2733 	}
2734 
2735 	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2736 	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2737 	return 0;
2738 }
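
/*
 * Example with hypothetical values: a request of us = 5, cnt = 8 picks the
 * SGE timer whose configured timer_val[] entry is closest to 5 us and the
 * counter_val[] threshold index closest to 8 (recorded in q->pktcnt_idx),
 * then encodes the timer index in q->intr_params with QINTR_CNT_EN set.
 * A request of us = 0 uses the special timer index 6, leaving only the
 * packet counter to gate interrupts.
 */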
2739 
2740 /**
2741  * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
2742  * @dev: the network device
2743  * @us: the hold-off time in us, or 0 to disable timer
2744  * @cnt: the hold-off packet count, or 0 to disable counter
2745  *
2746  * Set the RX interrupt hold-off parameters for a network device.
2747  */
2748 static int set_rx_intr_params(struct net_device *dev,
2749 			      unsigned int us, unsigned int cnt)
2750 {
2751 	int i, err;
2752 	struct port_info *pi = netdev_priv(dev);
2753 	struct adapter *adap = pi->adapter;
2754 	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2755 
2756 	for (i = 0; i < pi->nqsets; i++, q++) {
2757 		err = set_rspq_intr_params(&q->rspq, us, cnt);
2758 		if (err)
2759 			return err;
2760 	}
2761 	return 0;
2762 }
2763 
2764 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2765 {
2766 	int i;
2767 	struct port_info *pi = netdev_priv(dev);
2768 	struct adapter *adap = pi->adapter;
2769 	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2770 
2771 	for (i = 0; i < pi->nqsets; i++, q++)
2772 		q->rspq.adaptive_rx = adaptive_rx;
2773 
2774 	return 0;
2775 }
2776 
2777 static int get_adaptive_rx_setting(struct net_device *dev)
2778 {
2779 	struct port_info *pi = netdev_priv(dev);
2780 	struct adapter *adap = pi->adapter;
2781 	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2782 
2783 	return q->rspq.adaptive_rx;
2784 }
2785 
2786 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2787 {
2788 	set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
2789 	return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2790 				  c->rx_max_coalesced_frames);
2791 }
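
/*
 * Usage sketch (assumed typical invocation, not taken from this file):
 * "ethtool -C ethX rx-usecs 5 rx-frames 8 adaptive-rx on" reaches this
 * handler with c->rx_coalesce_usecs == 5, c->rx_max_coalesced_frames == 8
 * and c->use_adaptive_rx_coalesce set, which is then applied to every Rx
 * queue of the port via set_rx_intr_params().
 */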
2792 
2793 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2794 {
2795 	const struct port_info *pi = netdev_priv(dev);
2796 	const struct adapter *adap = pi->adapter;
2797 	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2798 
2799 	c->rx_coalesce_usecs = qtimer_val(adap, rq);
2800 	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2801 		adap->sge.counter_val[rq->pktcnt_idx] : 0;
2802 	c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
2803 	return 0;
2804 }
2805 
2806 /**
2807  *	eeprom_ptov - translate a physical EEPROM address to virtual
2808  *	@phys_addr: the physical EEPROM address
2809  *	@fn: the PCI function number
2810  *	@sz: size of function-specific area
2811  *
2812  *	Translate a physical EEPROM address to virtual.  The first 1K is
2813  *	accessed through virtual addresses starting at 31K, the rest is
2814  *	accessed through virtual addresses starting at 0.
2815  *
2816  *	The mapping is as follows:
2817  *	[0..1K) -> [31K..32K)
2818  *	[1K..1K+A) -> [31K-A..31K)
2819  *	[1K+A..ES) -> [0..ES-A-1K)
2820  *
2821  *	where A = @fn * @sz, and ES = EEPROM size.
2822  */
2823 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2824 {
2825 	fn *= sz;
2826 	if (phys_addr < 1024)
2827 		return phys_addr + (31 << 10);
2828 	if (phys_addr < 1024 + fn)
2829 		return 31744 - fn + phys_addr - 1024;
2830 	if (phys_addr < EEPROMSIZE)
2831 		return phys_addr - 1024 - fn;
2832 	return -EINVAL;
2833 }
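
/*
 * Worked example of the mapping above, assuming fn * sz == 1K purely for
 * illustration: physical address 0 maps to virtual 31K (31744), physical 1K
 * maps to 30K (31744 - 1024), and physical 2K maps to virtual 0.
 */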
2834 
2835 /*
2836  * The next two routines implement eeprom read/write from physical addresses.
2837  */
2838 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2839 {
2840 	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2841 
2842 	if (vaddr >= 0)
2843 		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2844 	return vaddr < 0 ? vaddr : 0;
2845 }
2846 
2847 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2848 {
2849 	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2850 
2851 	if (vaddr >= 0)
2852 		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2853 	return vaddr < 0 ? vaddr : 0;
2854 }
2855 
2856 #define EEPROM_MAGIC 0x38E2F10C
2857 
2858 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2859 		      u8 *data)
2860 {
2861 	int i, err = 0;
2862 	struct adapter *adapter = netdev2adap(dev);
2863 
2864 	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2865 	if (!buf)
2866 		return -ENOMEM;
2867 
2868 	e->magic = EEPROM_MAGIC;
2869 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2870 		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2871 
2872 	if (!err)
2873 		memcpy(data, buf + e->offset, e->len);
2874 	kfree(buf);
2875 	return err;
2876 }
2877 
2878 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2879 		      u8 *data)
2880 {
2881 	u8 *buf;
2882 	int err = 0;
2883 	u32 aligned_offset, aligned_len, *p;
2884 	struct adapter *adapter = netdev2adap(dev);
2885 
2886 	if (eeprom->magic != EEPROM_MAGIC)
2887 		return -EINVAL;
2888 
2889 	aligned_offset = eeprom->offset & ~3;
2890 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2891 
2892 	if (adapter->fn > 0) {
2893 		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2894 
2895 		if (aligned_offset < start ||
2896 		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
2897 			return -EPERM;
2898 	}
2899 
2900 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2901 		/*
2902 		 * RMW possibly needed for first or last words.
2903 		 */
2904 		buf = kmalloc(aligned_len, GFP_KERNEL);
2905 		if (!buf)
2906 			return -ENOMEM;
2907 		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2908 		if (!err && aligned_len > 4)
2909 			err = eeprom_rd_phys(adapter,
2910 					     aligned_offset + aligned_len - 4,
2911 					     (u32 *)&buf[aligned_len - 4]);
2912 		if (err)
2913 			goto out;
2914 		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2915 	} else
2916 		buf = data;
2917 
2918 	err = t4_seeprom_wp(adapter, false);
2919 	if (err)
2920 		goto out;
2921 
2922 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2923 		err = eeprom_wr_phys(adapter, aligned_offset, *p);
2924 		aligned_offset += 4;
2925 	}
2926 
2927 	if (!err)
2928 		err = t4_seeprom_wp(adapter, true);
2929 out:
2930 	if (buf != data)
2931 		kfree(buf);
2932 	return err;
2933 }
2934 
2935 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2936 {
2937 	int ret;
2938 	const struct firmware *fw;
2939 	struct adapter *adap = netdev2adap(netdev);
2940 	unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1;
2941 
2942 	ef->data[sizeof(ef->data) - 1] = '\0';
2943 	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2944 	if (ret < 0)
2945 		return ret;
2946 
2947 	/* If the adapter has been fully initialized then we'll go ahead and
2948 	 * try to get the firmware's cooperation in upgrading to the new
2949 	 * firmware image otherwise we'll try to do the entire job from the
2950 	 * host ... and we always "force" the operation in this path.
2951 	 */
2952 	if (adap->flags & FULL_INIT_DONE)
2953 		mbox = adap->mbox;
2954 
2955 	ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
2956 	release_firmware(fw);
2957 	if (!ret)
2958 		dev_info(adap->pdev_dev, "loaded firmware %s,"
2959 			 " reload cxgb4 driver\n", ef->data);
2960 	return ret;
2961 }
2962 
2963 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2964 #define BCAST_CRC 0xa0ccc1a6
2965 
2966 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2967 {
2968 	wol->supported = WAKE_BCAST | WAKE_MAGIC;
2969 	wol->wolopts = netdev2adap(dev)->wol;
2970 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2971 }
2972 
2973 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2974 {
2975 	int err = 0;
2976 	struct port_info *pi = netdev_priv(dev);
2977 
2978 	if (wol->wolopts & ~WOL_SUPPORTED)
2979 		return -EINVAL;
2980 	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2981 			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2982 	if (wol->wolopts & WAKE_BCAST) {
2983 		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2984 					~0ULL, 0, false);
2985 		if (!err)
2986 			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2987 						~6ULL, ~0ULL, BCAST_CRC, true);
2988 	} else
2989 		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2990 	return err;
2991 }
2992 
2993 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2994 {
2995 	const struct port_info *pi = netdev_priv(dev);
2996 	netdev_features_t changed = dev->features ^ features;
2997 	int err;
2998 
2999 	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
3000 		return 0;
3001 
3002 	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
3003 			    -1, -1, -1,
3004 			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
3005 	if (unlikely(err))
3006 		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
3007 	return err;
3008 }
3009 
3010 static u32 get_rss_table_size(struct net_device *dev)
3011 {
3012 	const struct port_info *pi = netdev_priv(dev);
3013 
3014 	return pi->rss_size;
3015 }
3016 
3017 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
3018 {
3019 	const struct port_info *pi = netdev_priv(dev);
3020 	unsigned int n = pi->rss_size;
3021 
3022 	while (n--)
3023 		p[n] = pi->rss[n];
3024 	return 0;
3025 }
3026 
3027 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
3028 {
3029 	unsigned int i;
3030 	struct port_info *pi = netdev_priv(dev);
3031 
3032 	for (i = 0; i < pi->rss_size; i++)
3033 		pi->rss[i] = p[i];
3034 	if (pi->adapter->flags & FULL_INIT_DONE)
3035 		return write_rss(pi, pi->rss);
3036 	return 0;
3037 }
3038 
3039 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
3040 		     u32 *rules)
3041 {
3042 	const struct port_info *pi = netdev_priv(dev);
3043 
3044 	switch (info->cmd) {
3045 	case ETHTOOL_GRXFH: {
3046 		unsigned int v = pi->rss_mode;
3047 
3048 		info->data = 0;
3049 		switch (info->flow_type) {
3050 		case TCP_V4_FLOW:
3051 			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3052 				info->data = RXH_IP_SRC | RXH_IP_DST |
3053 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
3054 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3055 				info->data = RXH_IP_SRC | RXH_IP_DST;
3056 			break;
3057 		case UDP_V4_FLOW:
3058 			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
3059 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3060 				info->data = RXH_IP_SRC | RXH_IP_DST |
3061 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
3062 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3063 				info->data = RXH_IP_SRC | RXH_IP_DST;
3064 			break;
3065 		case SCTP_V4_FLOW:
3066 		case AH_ESP_V4_FLOW:
3067 		case IPV4_FLOW:
3068 			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3069 				info->data = RXH_IP_SRC | RXH_IP_DST;
3070 			break;
3071 		case TCP_V6_FLOW:
3072 			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3073 				info->data = RXH_IP_SRC | RXH_IP_DST |
3074 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
3075 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3076 				info->data = RXH_IP_SRC | RXH_IP_DST;
3077 			break;
3078 		case UDP_V6_FLOW:
3079 			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
3080 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3081 				info->data = RXH_IP_SRC | RXH_IP_DST |
3082 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
3083 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3084 				info->data = RXH_IP_SRC | RXH_IP_DST;
3085 			break;
3086 		case SCTP_V6_FLOW:
3087 		case AH_ESP_V6_FLOW:
3088 		case IPV6_FLOW:
3089 			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3090 				info->data = RXH_IP_SRC | RXH_IP_DST;
3091 			break;
3092 		}
3093 		return 0;
3094 	}
3095 	case ETHTOOL_GRXRINGS:
3096 		info->data = pi->nqsets;
3097 		return 0;
3098 	}
3099 	return -EOPNOTSUPP;
3100 }
3101 
3102 static const struct ethtool_ops cxgb_ethtool_ops = {
3103 	.get_settings      = get_settings,
3104 	.set_settings      = set_settings,
3105 	.get_drvinfo       = get_drvinfo,
3106 	.get_msglevel      = get_msglevel,
3107 	.set_msglevel      = set_msglevel,
3108 	.get_ringparam     = get_sge_param,
3109 	.set_ringparam     = set_sge_param,
3110 	.get_coalesce      = get_coalesce,
3111 	.set_coalesce      = set_coalesce,
3112 	.get_eeprom_len    = get_eeprom_len,
3113 	.get_eeprom        = get_eeprom,
3114 	.set_eeprom        = set_eeprom,
3115 	.get_pauseparam    = get_pauseparam,
3116 	.set_pauseparam    = set_pauseparam,
3117 	.get_link          = ethtool_op_get_link,
3118 	.get_strings       = get_strings,
3119 	.set_phys_id       = identify_port,
3120 	.nway_reset        = restart_autoneg,
3121 	.get_sset_count    = get_sset_count,
3122 	.get_ethtool_stats = get_stats,
3123 	.get_regs_len      = get_regs_len,
3124 	.get_regs          = get_regs,
3125 	.get_wol           = get_wol,
3126 	.set_wol           = set_wol,
3127 	.get_rxnfc         = get_rxnfc,
3128 	.get_rxfh_indir_size = get_rss_table_size,
3129 	.get_rxfh	   = get_rss_table,
3130 	.set_rxfh	   = set_rss_table,
3131 	.flash_device      = set_flash,
3132 };
3133 
3134 /*
3135  * debugfs support
3136  */
3137 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
3138 			loff_t *ppos)
3139 {
3140 	loff_t pos = *ppos;
3141 	loff_t avail = file_inode(file)->i_size;
3142 	unsigned int mem = (uintptr_t)file->private_data & 3;
3143 	struct adapter *adap = file->private_data - mem;
3144 	__be32 *data;
3145 	int ret;
3146 
3147 	if (pos < 0)
3148 		return -EINVAL;
3149 	if (pos >= avail)
3150 		return 0;
3151 	if (count > avail - pos)
3152 		count = avail - pos;
3153 
3154 	data = t4_alloc_mem(count);
3155 	if (!data)
3156 		return -ENOMEM;
3157 
3158 	spin_lock(&adap->win0_lock);
3159 	ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
3160 	spin_unlock(&adap->win0_lock);
3161 	if (ret) {
3162 		t4_free_mem(data);
3163 		return ret;
3164 	}
3165 	ret = copy_to_user(buf, data, count);
3166 
3167 	t4_free_mem(data);
3168 	if (ret)
3169 		return -EFAULT;
3170 
3171 	*ppos = pos + count;
3172 	return count;
3173 }
3174 
3175 static const struct file_operations mem_debugfs_fops = {
3176 	.owner   = THIS_MODULE,
3177 	.open    = simple_open,
3178 	.read    = mem_read,
3179 	.llseek  = default_llseek,
3180 };
3181 
3182 static void add_debugfs_mem(struct adapter *adap, const char *name,
3183 			    unsigned int idx, unsigned int size_mb)
3184 {
3185 	struct dentry *de;
3186 
3187 	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
3188 				 (void *)adap + idx, &mem_debugfs_fops);
3189 	if (de && de->d_inode)
3190 		de->d_inode->i_size = size_mb << 20;
3191 }
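
/*
 * Note on the pointer tagging above: the debugfs file's private_data is the
 * adapter pointer with the MEM_* index stored in its low bits (the index
 * fits in two bits and struct adapter is sufficiently aligned for this to be
 * safe).  mem_read() recovers the index with "& 3" and subtracts it to get
 * the adapter pointer back.
 */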
3192 
3193 static int setup_debugfs(struct adapter *adap)
3194 {
3195 	int i;
3196 	u32 size;
3197 
3198 	if (IS_ERR_OR_NULL(adap->debugfs_root))
3199 		return -1;
3200 
3201 	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
3202 	if (i & EDRAM0_ENABLE) {
3203 		size = t4_read_reg(adap, MA_EDRAM0_BAR);
3204 		add_debugfs_mem(adap, "edc0", MEM_EDC0,	EDRAM_SIZE_GET(size));
3205 	}
3206 	if (i & EDRAM1_ENABLE) {
3207 		size = t4_read_reg(adap, MA_EDRAM1_BAR);
3208 		add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
3209 	}
3210 	if (is_t4(adap->params.chip)) {
3211 		size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3212 		if (i & EXT_MEM_ENABLE)
3213 			add_debugfs_mem(adap, "mc", MEM_MC,
3214 					EXT_MEM_SIZE_GET(size));
3215 	} else {
3216 		if (i & EXT_MEM_ENABLE) {
3217 			size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3218 			add_debugfs_mem(adap, "mc0", MEM_MC0,
3219 					EXT_MEM_SIZE_GET(size));
3220 		}
3221 		if (i & EXT_MEM1_ENABLE) {
3222 			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
3223 			add_debugfs_mem(adap, "mc1", MEM_MC1,
3224 					EXT_MEM_SIZE_GET(size));
3225 		}
3226 	}
3227 	if (adap->l2t)
3228 		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
3229 				    &t4_l2t_fops);
3230 	return 0;
3231 }
3232 
3233 /*
3234  * upper-layer driver support
3235  */
3236 
3237 /*
3238  * Allocate an active-open TID and set it to the supplied value.
3239  */
3240 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3241 {
3242 	int atid = -1;
3243 
3244 	spin_lock_bh(&t->atid_lock);
3245 	if (t->afree) {
3246 		union aopen_entry *p = t->afree;
3247 
3248 		atid = (p - t->atid_tab) + t->atid_base;
3249 		t->afree = p->next;
3250 		p->data = data;
3251 		t->atids_in_use++;
3252 	}
3253 	spin_unlock_bh(&t->atid_lock);
3254 	return atid;
3255 }
3256 EXPORT_SYMBOL(cxgb4_alloc_atid);
3257 
3258 /*
3259  * Release an active-open TID.
3260  */
3261 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3262 {
3263 	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3264 
3265 	spin_lock_bh(&t->atid_lock);
3266 	p->next = t->afree;
3267 	t->afree = p;
3268 	t->atids_in_use--;
3269 	spin_unlock_bh(&t->atid_lock);
3270 }
3271 EXPORT_SYMBOL(cxgb4_free_atid);
3272 
3273 /*
3274  * Allocate a server TID and set it to the supplied value.
3275  */
3276 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3277 {
3278 	int stid;
3279 
3280 	spin_lock_bh(&t->stid_lock);
3281 	if (family == PF_INET) {
3282 		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3283 		if (stid < t->nstids)
3284 			__set_bit(stid, t->stid_bmap);
3285 		else
3286 			stid = -1;
3287 	} else {
3288 		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3289 		if (stid < 0)
3290 			stid = -1;
3291 	}
3292 	if (stid >= 0) {
3293 		t->stid_tab[stid].data = data;
3294 		stid += t->stid_base;
3295 		/* IPv6 requires max of 520 bits or 16 cells in TCAM
3296 		 * This is equivalent to 4 TIDs. With CLIP enabled it
3297 		 * needs 2 TIDs.
3298 		 */
3299 		if (family == PF_INET)
3300 			t->stids_in_use++;
3301 		else
3302 			t->stids_in_use += 4;
3303 	}
3304 	spin_unlock_bh(&t->stid_lock);
3305 	return stid;
3306 }
3307 EXPORT_SYMBOL(cxgb4_alloc_stid);
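
/*
 * Accounting note: for a non-IPv4 server, bitmap_find_free_region(..., 2)
 * above reserves an order-2 (four entry, four-aligned) block of the stid
 * bitmap, which is why stids_in_use is bumped by 4 in that case and why
 * cxgb4_free_stid() releases the region with order 2 as well.
 */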
3308 
3309 /* Allocate a server filter TID and set it to the supplied value.
3310  */
3311 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3312 {
3313 	int stid;
3314 
3315 	spin_lock_bh(&t->stid_lock);
3316 	if (family == PF_INET) {
3317 		stid = find_next_zero_bit(t->stid_bmap,
3318 				t->nstids + t->nsftids, t->nstids);
3319 		if (stid < (t->nstids + t->nsftids))
3320 			__set_bit(stid, t->stid_bmap);
3321 		else
3322 			stid = -1;
3323 	} else {
3324 		stid = -1;
3325 	}
3326 	if (stid >= 0) {
3327 		t->stid_tab[stid].data = data;
3328 		stid -= t->nstids;
3329 		stid += t->sftid_base;
3330 		t->stids_in_use++;
3331 	}
3332 	spin_unlock_bh(&t->stid_lock);
3333 	return stid;
3334 }
3335 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3336 
3337 /* Release a server TID.
3338  */
3339 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3340 {
3341 	/* Is it a server filter TID? */
3342 	if (t->nsftids && (stid >= t->sftid_base)) {
3343 		stid -= t->sftid_base;
3344 		stid += t->nstids;
3345 	} else {
3346 		stid -= t->stid_base;
3347 	}
3348 
3349 	spin_lock_bh(&t->stid_lock);
3350 	if (family == PF_INET)
3351 		__clear_bit(stid, t->stid_bmap);
3352 	else
3353 		bitmap_release_region(t->stid_bmap, stid, 2);
3354 	t->stid_tab[stid].data = NULL;
3355 	if (family == PF_INET)
3356 		t->stids_in_use--;
3357 	else
3358 		t->stids_in_use -= 4;
3359 	spin_unlock_bh(&t->stid_lock);
3360 }
3361 EXPORT_SYMBOL(cxgb4_free_stid);
3362 
3363 /*
3364  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
3365  */
3366 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3367 			   unsigned int tid)
3368 {
3369 	struct cpl_tid_release *req;
3370 
3371 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3372 	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3373 	INIT_TP_WR(req, tid);
3374 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3375 }
3376 
3377 /*
3378  * Queue a TID release request and if necessary schedule a work queue to
3379  * process it.
3380  */
3381 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3382 				    unsigned int tid)
3383 {
3384 	void **p = &t->tid_tab[tid];
3385 	struct adapter *adap = container_of(t, struct adapter, tids);
3386 
3387 	spin_lock_bh(&adap->tid_release_lock);
3388 	*p = adap->tid_release_head;
3389 	/* Low 2 bits encode the Tx channel number */
3390 	adap->tid_release_head = (void **)((uintptr_t)p | chan);
3391 	if (!adap->tid_release_task_busy) {
3392 		adap->tid_release_task_busy = true;
3393 		queue_work(adap->workq, &adap->tid_release_task);
3394 	}
3395 	spin_unlock_bh(&adap->tid_release_lock);
3396 }
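
/*
 * Example of the pointer tagging used above: tid_release_head is a singly
 * linked list threaded through the tid_tab[] slots themselves.  Because the
 * slots are at least 4-byte aligned, the Tx channel (0-3) can live in the
 * low two bits of each list pointer; process_tid_release_list() strips it
 * again with "(uintptr_t)p & 3".
 */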
3397 
3398 /*
3399  * Process the list of pending TID release requests.
3400  */
3401 static void process_tid_release_list(struct work_struct *work)
3402 {
3403 	struct sk_buff *skb;
3404 	struct adapter *adap;
3405 
3406 	adap = container_of(work, struct adapter, tid_release_task);
3407 
3408 	spin_lock_bh(&adap->tid_release_lock);
3409 	while (adap->tid_release_head) {
3410 		void **p = adap->tid_release_head;
3411 		unsigned int chan = (uintptr_t)p & 3;
3412 		p = (void *)p - chan;
3413 
3414 		adap->tid_release_head = *p;
3415 		*p = NULL;
3416 		spin_unlock_bh(&adap->tid_release_lock);
3417 
3418 		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3419 					 GFP_KERNEL)))
3420 			schedule_timeout_uninterruptible(1);
3421 
3422 		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3423 		t4_ofld_send(adap, skb);
3424 		spin_lock_bh(&adap->tid_release_lock);
3425 	}
3426 	adap->tid_release_task_busy = false;
3427 	spin_unlock_bh(&adap->tid_release_lock);
3428 }
3429 
3430 /*
3431  * Release a TID and inform HW.  If we are unable to allocate the release
3432  * message we defer to a work queue.
3433  */
3434 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3435 {
3436 	void *old;
3437 	struct sk_buff *skb;
3438 	struct adapter *adap = container_of(t, struct adapter, tids);
3439 
3440 	old = t->tid_tab[tid];
3441 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3442 	if (likely(skb)) {
3443 		t->tid_tab[tid] = NULL;
3444 		mk_tid_release(skb, chan, tid);
3445 		t4_ofld_send(adap, skb);
3446 	} else
3447 		cxgb4_queue_tid_release(t, chan, tid);
3448 	if (old)
3449 		atomic_dec(&t->tids_in_use);
3450 }
3451 EXPORT_SYMBOL(cxgb4_remove_tid);
3452 
3453 /*
3454  * Allocate and initialize the TID tables.  Returns 0 on success.
3455  */
3456 static int tid_init(struct tid_info *t)
3457 {
3458 	size_t size;
3459 	unsigned int stid_bmap_size;
3460 	unsigned int natids = t->natids;
3461 	struct adapter *adap = container_of(t, struct adapter, tids);
3462 
3463 	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3464 	size = t->ntids * sizeof(*t->tid_tab) +
3465 	       natids * sizeof(*t->atid_tab) +
3466 	       t->nstids * sizeof(*t->stid_tab) +
3467 	       t->nsftids * sizeof(*t->stid_tab) +
3468 	       stid_bmap_size * sizeof(long) +
3469 	       t->nftids * sizeof(*t->ftid_tab) +
3470 	       t->nsftids * sizeof(*t->ftid_tab);
3471 
3472 	t->tid_tab = t4_alloc_mem(size);
3473 	if (!t->tid_tab)
3474 		return -ENOMEM;
3475 
3476 	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3477 	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3478 	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3479 	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3480 	spin_lock_init(&t->stid_lock);
3481 	spin_lock_init(&t->atid_lock);
3482 
3483 	t->stids_in_use = 0;
3484 	t->afree = NULL;
3485 	t->atids_in_use = 0;
3486 	atomic_set(&t->tids_in_use, 0);
3487 
3488 	/* Setup the free list for atid_tab and clear the stid bitmap. */
3489 	if (natids) {
3490 		while (--natids)
3491 			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3492 		t->afree = t->atid_tab;
3493 	}
3494 	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3495 	/* Reserve stid 0 for T4/T5 adapters */
3496 	if (!t->stid_base &&
3497 	    (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3498 		__set_bit(0, t->stid_bmap);
3499 
3500 	return 0;
3501 }
3502 
3503 int cxgb4_clip_get(const struct net_device *dev,
3504 		   const struct in6_addr *lip)
3505 {
3506 	struct adapter *adap;
3507 	struct fw_clip_cmd c;
3508 
3509 	adap = netdev2adap(dev);
3510 	memset(&c, 0, sizeof(c));
3511 	c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3512 			FW_CMD_REQUEST | FW_CMD_WRITE);
3513 	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3514 	c.ip_hi = *(__be64 *)(lip->s6_addr);
3515 	c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3516 	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3517 }
3518 EXPORT_SYMBOL(cxgb4_clip_get);
3519 
3520 int cxgb4_clip_release(const struct net_device *dev,
3521 		       const struct in6_addr *lip)
3522 {
3523 	struct adapter *adap;
3524 	struct fw_clip_cmd c;
3525 
3526 	adap = netdev2adap(dev);
3527 	memset(&c, 0, sizeof(c));
3528 	c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3529 			FW_CMD_REQUEST | FW_CMD_READ);
3530 	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3531 	c.ip_hi = *(__be64 *)(lip->s6_addr);
3532 	c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3533 	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3534 }
3535 EXPORT_SYMBOL(cxgb4_clip_release);
3536 
3537 /**
3538  *	cxgb4_create_server - create an IP server
3539  *	@dev: the device
3540  *	@stid: the server TID
3541  *	@sip: local IP address to bind server to
3542  *	@sport: the server's TCP port
3543  *	@queue: queue to direct messages from this server to
3544  *
3545  *	Create an IP server for the given port and address.
3546  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
3547  */
3548 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3549 			__be32 sip, __be16 sport, __be16 vlan,
3550 			unsigned int queue)
3551 {
3552 	unsigned int chan;
3553 	struct sk_buff *skb;
3554 	struct adapter *adap;
3555 	struct cpl_pass_open_req *req;
3556 	int ret;
3557 
3558 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3559 	if (!skb)
3560 		return -ENOMEM;
3561 
3562 	adap = netdev2adap(dev);
3563 	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3564 	INIT_TP_WR(req, 0);
3565 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3566 	req->local_port = sport;
3567 	req->peer_port = htons(0);
3568 	req->local_ip = sip;
3569 	req->peer_ip = htonl(0);
3570 	chan = rxq_to_chan(&adap->sge, queue);
3571 	req->opt0 = cpu_to_be64(TX_CHAN(chan));
3572 	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3573 				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3574 	ret = t4_mgmt_tx(adap, skb);
3575 	return net_xmit_eval(ret);
3576 }
3577 EXPORT_SYMBOL(cxgb4_create_server);
3578 
3579 /**	cxgb4_create_server6 - create an IPv6 server
3580  *	@dev: the device
3581  *	@stid: the server TID
3582  *	@sip: local IPv6 address to bind server to
3583  *	@sport: the server's TCP port
3584  *	@queue: queue to direct messages from this server to
3585  *
3586  *	Create an IPv6 server for the given port and address.
3587  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
3588  */
3589 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3590 			 const struct in6_addr *sip, __be16 sport,
3591 			 unsigned int queue)
3592 {
3593 	unsigned int chan;
3594 	struct sk_buff *skb;
3595 	struct adapter *adap;
3596 	struct cpl_pass_open_req6 *req;
3597 	int ret;
3598 
3599 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3600 	if (!skb)
3601 		return -ENOMEM;
3602 
3603 	adap = netdev2adap(dev);
3604 	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3605 	INIT_TP_WR(req, 0);
3606 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3607 	req->local_port = sport;
3608 	req->peer_port = htons(0);
3609 	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3610 	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3611 	req->peer_ip_hi = cpu_to_be64(0);
3612 	req->peer_ip_lo = cpu_to_be64(0);
3613 	chan = rxq_to_chan(&adap->sge, queue);
3614 	req->opt0 = cpu_to_be64(TX_CHAN(chan));
3615 	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3616 				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3617 	ret = t4_mgmt_tx(adap, skb);
3618 	return net_xmit_eval(ret);
3619 }
3620 EXPORT_SYMBOL(cxgb4_create_server6);
3621 
3622 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3623 			unsigned int queue, bool ipv6)
3624 {
3625 	struct sk_buff *skb;
3626 	struct adapter *adap;
3627 	struct cpl_close_listsvr_req *req;
3628 	int ret;
3629 
3630 	adap = netdev2adap(dev);
3631 
3632 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3633 	if (!skb)
3634 		return -ENOMEM;
3635 
3636 	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3637 	INIT_TP_WR(req, 0);
3638 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3639 	req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3640 				LISTSVR_IPV6(0)) | QUEUENO(queue));
3641 	ret = t4_mgmt_tx(adap, skb);
3642 	return net_xmit_eval(ret);
3643 }
3644 EXPORT_SYMBOL(cxgb4_remove_server);
3645 
3646 /**
3647  *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3648  *	@mtus: the HW MTU table
3649  *	@mtu: the target MTU
3650  *	@idx: index of selected entry in the MTU table
3651  *
3652  *	Returns the index and the value in the HW MTU table that is closest to
3653  *	but does not exceed @mtu, unless @mtu is smaller than any value in the
3654  *	table, in which case that smallest available value is selected.
3655  */
3656 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3657 			    unsigned int *idx)
3658 {
3659 	unsigned int i = 0;
3660 
3661 	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3662 		++i;
3663 	if (idx)
3664 		*idx = i;
3665 	return mtus[i];
3666 }
3667 EXPORT_SYMBOL(cxgb4_best_mtu);
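
/* Worked example (with a hypothetical table): if mtus[] started with
 * { 576, 1500, 9000, ... } and @mtu were 1400, the loop above would stop
 * at index 0 since mtus[1] == 1500 already exceeds 1400, so the function
 * would return 576 with *idx == 0.
 */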
3668 
3669 /**
3670  *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3671  *     @mtus: the HW MTU table
3672  *     @header_size: Header Size
3673  *     @data_size_max: maximum Data Segment Size
3674  *     @data_size_align: desired Data Segment Size Alignment (2^N)
3675  *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3676  *
3677  *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
3678  *     MTU Table based solely on a Maximum MTU parameter, we break that
3679  *     parameter up into a Header Size and Maximum Data Segment Size, and
3680  *     provide a desired Data Segment Size Alignment.  If we find an MTU in
3681  *     the Hardware MTU Table which will result in a Data Segment Size with
3682  *     the requested alignment _and_ that MTU isn't "too far" from the
3683  *     closest MTU, then we'll return that rather than the closest MTU.
3684  */
3685 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3686 				    unsigned short header_size,
3687 				    unsigned short data_size_max,
3688 				    unsigned short data_size_align,
3689 				    unsigned int *mtu_idxp)
3690 {
3691 	unsigned short max_mtu = header_size + data_size_max;
3692 	unsigned short data_size_align_mask = data_size_align - 1;
3693 	int mtu_idx, aligned_mtu_idx;
3694 
3695 	/* Scan the MTU Table till we find an MTU which is larger than our
3696 	 * Maximum MTU or we reach the end of the table.  Along the way,
3697 	 * record the last MTU found, if any, which will result in a Data
3698 	 * Segment Length matching the requested alignment.
3699 	 */
3700 	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3701 		unsigned short data_size = mtus[mtu_idx] - header_size;
3702 
3703 		/* If this MTU minus the Header Size would result in a
3704 		 * Data Segment Size of the desired alignment, remember it.
3705 		 */
3706 		if ((data_size & data_size_align_mask) == 0)
3707 			aligned_mtu_idx = mtu_idx;
3708 
3709 		/* If we're not at the end of the Hardware MTU Table and the
3710 		 * next element is larger than our Maximum MTU, drop out of
3711 		 * the loop.
3712 		 */
3713 		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3714 			break;
3715 	}
3716 
3717 	/* If we fell out of the loop because we ran to the end of the table,
3718 	 * then we just have to use the last [largest] entry.
3719 	 */
3720 	if (mtu_idx == NMTUS)
3721 		mtu_idx--;
3722 
3723 	/* If we found an MTU which resulted in the requested Data Segment
3724 	 * Length alignment and that's "not far" from the largest MTU which is
3725 	 * less than or equal to the maximum MTU, then use that.
3726 	 */
3727 	if (aligned_mtu_idx >= 0 &&
3728 	    mtu_idx - aligned_mtu_idx <= 1)
3729 		mtu_idx = aligned_mtu_idx;
3730 
3731 	/* If the caller has passed in an MTU Index pointer, pass the
3732 	 * MTU Index back.  Return the MTU value.
3733 	 */
3734 	if (mtu_idxp)
3735 		*mtu_idxp = mtu_idx;
3736 	return mtus[mtu_idx];
3737 }
3738 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
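
/* Worked example (hypothetical numbers): with header_size 40,
 * data_size_max 1460 and data_size_align 512, max_mtu is 1500.  A table
 * entry of 1064 gives a Data Segment Size of 1024, which is 512-aligned,
 * so if the largest entry not exceeding 1500 is at most one index past
 * that entry, the 1064 entry is returned instead of the strictly closest
 * MTU.
 */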
3739 
3740 /**
3741  *	cxgb4_port_chan - get the HW channel of a port
3742  *	@dev: the net device for the port
3743  *
3744  *	Return the HW Tx channel of the given port.
3745  */
3746 unsigned int cxgb4_port_chan(const struct net_device *dev)
3747 {
3748 	return netdev2pinfo(dev)->tx_chan;
3749 }
3750 EXPORT_SYMBOL(cxgb4_port_chan);
3751 
3752 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3753 {
3754 	struct adapter *adap = netdev2adap(dev);
3755 	u32 v1, v2, lp_count, hp_count;
3756 
3757 	v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3758 	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3759 	if (is_t4(adap->params.chip)) {
3760 		lp_count = G_LP_COUNT(v1);
3761 		hp_count = G_HP_COUNT(v1);
3762 	} else {
3763 		lp_count = G_LP_COUNT_T5(v1);
3764 		hp_count = G_HP_COUNT_T5(v2);
3765 	}
3766 	return lpfifo ? lp_count : hp_count;
3767 }
3768 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3769 
3770 /**
3771  *	cxgb4_port_viid - get the VI id of a port
3772  *	@dev: the net device for the port
3773  *
3774  *	Return the VI id of the given port.
3775  */
3776 unsigned int cxgb4_port_viid(const struct net_device *dev)
3777 {
3778 	return netdev2pinfo(dev)->viid;
3779 }
3780 EXPORT_SYMBOL(cxgb4_port_viid);
3781 
3782 /**
3783  *	cxgb4_port_idx - get the index of a port
3784  *	@dev: the net device for the port
3785  *
3786  *	Return the index of the given port.
3787  */
3788 unsigned int cxgb4_port_idx(const struct net_device *dev)
3789 {
3790 	return netdev2pinfo(dev)->port_id;
3791 }
3792 EXPORT_SYMBOL(cxgb4_port_idx);
3793 
3794 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3795 			 struct tp_tcp_stats *v6)
3796 {
3797 	struct adapter *adap = pci_get_drvdata(pdev);
3798 
3799 	spin_lock(&adap->stats_lock);
3800 	t4_tp_get_tcp_stats(adap, v4, v6);
3801 	spin_unlock(&adap->stats_lock);
3802 }
3803 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3804 
3805 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3806 		      const unsigned int *pgsz_order)
3807 {
3808 	struct adapter *adap = netdev2adap(dev);
3809 
3810 	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3811 	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3812 		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3813 		     HPZ3(pgsz_order[3]));
3814 }
3815 EXPORT_SYMBOL(cxgb4_iscsi_init);
3816 
3817 int cxgb4_flush_eq_cache(struct net_device *dev)
3818 {
3819 	struct adapter *adap = netdev2adap(dev);
3820 	int ret;
3821 
3822 	ret = t4_fwaddrspace_write(adap, adap->mbox,
3823 				   0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3824 	return ret;
3825 }
3826 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3827 
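/* Read back an egress queue's current producer and consumer indices from
 * its doorbell queue context in EDC0.  The context lives at the base
 * address held in A_SGE_DBQ_CTXT_BADDR plus 24 * qid + 8; in the 64-bit
 * big-endian word read there, the consumer index sits at bits 40:25 and
 * the producer index at bits 24:9, which is what the shifts below extract.
 */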
3828 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3829 {
3830 	u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3831 	__be64 indices;
3832 	int ret;
3833 
3834 	spin_lock(&adap->win0_lock);
3835 	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3836 			   sizeof(indices), (__be32 *)&indices,
3837 			   T4_MEMORY_READ);
3838 	spin_unlock(&adap->win0_lock);
3839 	if (!ret) {
3840 		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3841 		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3842 	}
3843 	return ret;
3844 }
3845 
3846 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3847 			u16 size)
3848 {
3849 	struct adapter *adap = netdev2adap(dev);
3850 	u16 hw_pidx, hw_cidx;
3851 	int ret;
3852 
3853 	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3854 	if (ret)
3855 		goto out;
3856 
3857 	if (pidx != hw_pidx) {
3858 		u16 delta;
3859 
3860 		if (pidx >= hw_pidx)
3861 			delta = pidx - hw_pidx;
3862 		else
3863 			delta = size - hw_pidx + pidx;
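		/* e.g. if size were 1024, hw_pidx 1000 and pidx 10, the
		 * producer index has wrapped around, so delta = 1024 - 1000
		 * + 10 = 34 descriptors still need to be signalled to HW.
		 */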
3864 		wmb();
3865 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3866 			     QID(qid) | PIDX(delta));
3867 	}
3868 out:
3869 	return ret;
3870 }
3871 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3872 
3873 void cxgb4_disable_db_coalescing(struct net_device *dev)
3874 {
3875 	struct adapter *adap;
3876 
3877 	adap = netdev2adap(dev);
3878 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3879 			 F_NOCOALESCE);
3880 }
3881 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3882 
3883 void cxgb4_enable_db_coalescing(struct net_device *dev)
3884 {
3885 	struct adapter *adap;
3886 
3887 	adap = netdev2adap(dev);
3888 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3889 }
3890 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3891 
3892 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3893 {
3894 	struct adapter *adap;
3895 	u32 offset, memtype, memaddr;
3896 	u32 edc0_size, edc1_size, mc0_size, mc1_size;
3897 	u32 edc0_end, edc1_end, mc0_end, mc1_end;
3898 	int ret;
3899 
3900 	adap = netdev2adap(dev);
3901 
3902 	offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3903 
3904 	/* Figure out where the offset lands in the Memory Type/Address scheme.
3905 	 * This code assumes that the memory is laid out starting at offset 0
3906 	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3907 	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
3908 	 * MC0, and some have both MC0 and MC1.
3909 	 */
3910 	edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
3911 	edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
3912 	mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;
3913 
3914 	edc0_end = edc0_size;
3915 	edc1_end = edc0_end + edc1_size;
3916 	mc0_end = edc1_end + mc0_size;
3917 
3918 	if (offset < edc0_end) {
3919 		memtype = MEM_EDC0;
3920 		memaddr = offset;
3921 	} else if (offset < edc1_end) {
3922 		memtype = MEM_EDC1;
3923 		memaddr = offset - edc0_end;
3924 	} else {
3925 		if (offset < mc0_end) {
3926 			memtype = MEM_MC0;
3927 			memaddr = offset - edc1_end;
3928 		} else if (is_t4(adap->params.chip)) {
3929 			/* T4 only has a single memory channel */
3930 			goto err;
3931 		} else {
3932 			mc1_size = EXT_MEM_SIZE_GET(
3933 					t4_read_reg(adap,
3934 						    MA_EXT_MEMORY1_BAR)) << 20;
3935 			mc1_end = mc0_end + mc1_size;
3936 			if (offset < mc1_end) {
3937 				memtype = MEM_MC1;
3938 				memaddr = offset - mc0_end;
3939 			} else {
3940 				/* offset beyond the end of any memory */
3941 				goto err;
3942 			}
3943 		}
3944 	}
3945 
3946 	spin_lock(&adap->win0_lock);
3947 	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3948 	spin_unlock(&adap->win0_lock);
3949 	return ret;
3950 
3951 err:
3952 	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3953 		stag, offset);
3954 	return -EINVAL;
3955 }
3956 EXPORT_SYMBOL(cxgb4_read_tpte);
3957 
3958 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3959 {
3960 	u32 hi, lo;
3961 	struct adapter *adap;
3962 
3963 	adap = netdev2adap(dev);
3964 	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
3965 	hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
3966 
3967 	return ((u64)hi << 32) | (u64)lo;
3968 }
3969 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3970 
3971 static struct pci_driver cxgb4_driver;
3972 
3973 static void check_neigh_update(struct neighbour *neigh)
3974 {
3975 	const struct device *parent;
3976 	const struct net_device *netdev = neigh->dev;
3977 
3978 	if (netdev->priv_flags & IFF_802_1Q_VLAN)
3979 		netdev = vlan_dev_real_dev(netdev);
3980 	parent = netdev->dev.parent;
3981 	if (parent && parent->driver == &cxgb4_driver.driver)
3982 		t4_l2t_update(dev_get_drvdata(parent), neigh);
3983 }
3984 
3985 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3986 		       void *data)
3987 {
3988 	switch (event) {
3989 	case NETEVENT_NEIGH_UPDATE:
3990 		check_neigh_update(data);
3991 		break;
3992 	case NETEVENT_REDIRECT:
3993 	default:
3994 		break;
3995 	}
3996 	return 0;
3997 }
3998 
3999 static bool netevent_registered;
4000 static struct notifier_block cxgb4_netevent_nb = {
4001 	.notifier_call = netevent_cb
4002 };
4003 
4004 static void drain_db_fifo(struct adapter *adap, int usecs)
4005 {
4006 	u32 v1, v2, lp_count, hp_count;
4007 
4008 	do {
4009 		v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
4010 		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
4011 		if (is_t4(adap->params.chip)) {
4012 			lp_count = G_LP_COUNT(v1);
4013 			hp_count = G_HP_COUNT(v1);
4014 		} else {
4015 			lp_count = G_LP_COUNT_T5(v1);
4016 			hp_count = G_HP_COUNT_T5(v2);
4017 		}
4018 
4019 		if (lp_count == 0 && hp_count == 0)
4020 			break;
4021 		set_current_state(TASK_UNINTERRUPTIBLE);
4022 		schedule_timeout(usecs_to_jiffies(usecs));
4023 	} while (1);
4024 }
4025 
4026 static void disable_txq_db(struct sge_txq *q)
4027 {
4028 	unsigned long flags;
4029 
4030 	spin_lock_irqsave(&q->db_lock, flags);
4031 	q->db_disabled = 1;
4032 	spin_unlock_irqrestore(&q->db_lock, flags);
4033 }
4034 
4035 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
4036 {
4037 	spin_lock_irq(&q->db_lock);
4038 	if (q->db_pidx_inc) {
4039 		/* Make sure that all writes to the TX descriptors
4040 		 * are committed before we tell HW about them.
4041 		 */
4042 		wmb();
4043 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4044 			     QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
4045 		q->db_pidx_inc = 0;
4046 	}
4047 	q->db_disabled = 0;
4048 	spin_unlock_irq(&q->db_lock);
4049 }
4050 
4051 static void disable_dbs(struct adapter *adap)
4052 {
4053 	int i;
4054 
4055 	for_each_ethrxq(&adap->sge, i)
4056 		disable_txq_db(&adap->sge.ethtxq[i].q);
4057 	for_each_ofldrxq(&adap->sge, i)
4058 		disable_txq_db(&adap->sge.ofldtxq[i].q);
4059 	for_each_port(adap, i)
4060 		disable_txq_db(&adap->sge.ctrlq[i].q);
4061 }
4062 
4063 static void enable_dbs(struct adapter *adap)
4064 {
4065 	int i;
4066 
4067 	for_each_ethrxq(&adap->sge, i)
4068 		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
4069 	for_each_ofldrxq(&adap->sge, i)
4070 		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
4071 	for_each_port(adap, i)
4072 		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
4073 }
4074 
4075 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
4076 {
4077 	if (adap->uld_handle[CXGB4_ULD_RDMA])
4078 		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
4079 				cmd);
4080 }
4081 
4082 static void process_db_full(struct work_struct *work)
4083 {
4084 	struct adapter *adap;
4085 
4086 	adap = container_of(work, struct adapter, db_full_task);
4087 
4088 	drain_db_fifo(adap, dbfifo_drain_delay);
4089 	enable_dbs(adap);
4090 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4091 	t4_set_reg_field(adap, SGE_INT_ENABLE3,
4092 			 DBFIFO_HP_INT | DBFIFO_LP_INT,
4093 			 DBFIFO_HP_INT | DBFIFO_LP_INT);
4094 }
4095 
4096 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
4097 {
4098 	u16 hw_pidx, hw_cidx;
4099 	int ret;
4100 
4101 	spin_lock_irq(&q->db_lock);
4102 	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
4103 	if (ret)
4104 		goto out;
4105 	if (q->db_pidx != hw_pidx) {
4106 		u16 delta;
4107 
4108 		if (q->db_pidx >= hw_pidx)
4109 			delta = q->db_pidx - hw_pidx;
4110 		else
4111 			delta = q->size - hw_pidx + q->db_pidx;
4112 		wmb();
4113 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4114 			     QID(q->cntxt_id) | PIDX(delta));
4115 	}
4116 out:
4117 	q->db_disabled = 0;
4118 	q->db_pidx_inc = 0;
4119 	spin_unlock_irq(&q->db_lock);
4120 	if (ret)
4121 		CH_WARN(adap, "DB drop recovery failed.\n");
4122 }
4123 static void recover_all_queues(struct adapter *adap)
4124 {
4125 	int i;
4126 
4127 	for_each_ethrxq(&adap->sge, i)
4128 		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
4129 	for_each_ofldrxq(&adap->sge, i)
4130 		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
4131 	for_each_port(adap, i)
4132 		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
4133 }
4134 
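/*
 * Recover from a dropped doorbell.  On T4 we drain the doorbell FIFO,
 * let the RDMA ULD flush its own state, resynchronize every egress
 * queue's producer index with the hardware copy and then re-enable
 * doorbells.  On T5 and later the hardware records which doorbell was
 * dropped, so we only need to replay that queue's doorbell write via its
 * BAR2 user doorbell address.
 */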
4135 static void process_db_drop(struct work_struct *work)
4136 {
4137 	struct adapter *adap;
4138 
4139 	adap = container_of(work, struct adapter, db_drop_task);
4140 
4141 	if (is_t4(adap->params.chip)) {
4142 		drain_db_fifo(adap, dbfifo_drain_delay);
4143 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
4144 		drain_db_fifo(adap, dbfifo_drain_delay);
4145 		recover_all_queues(adap);
4146 		drain_db_fifo(adap, dbfifo_drain_delay);
4147 		enable_dbs(adap);
4148 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4149 	} else {
4150 		u32 dropped_db = t4_read_reg(adap, 0x010ac);
4151 		u16 qid = (dropped_db >> 15) & 0x1ffff;
4152 		u16 pidx_inc = dropped_db & 0x1fff;
4153 		unsigned int s_qpp;
4154 		unsigned short udb_density;
4155 		unsigned long qpshift;
4156 		int page;
4157 		u32 udb;
4158 
4159 		dev_warn(adap->pdev_dev,
4160 			 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
4161 			 dropped_db, qid,
4162 			 (dropped_db >> 14) & 1,
4163 			 (dropped_db >> 13) & 1,
4164 			 pidx_inc);
4165 
4166 		drain_db_fifo(adap, 1);
4167 
4168 		s_qpp = QUEUESPERPAGEPF1 * adap->fn;
4169 		udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
4170 				SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
4171 		qpshift = PAGE_SHIFT - ilog2(udb_density);
4172 		udb = qid << qpshift;
4173 		udb &= PAGE_MASK;
4174 		page = udb / PAGE_SIZE;
4175 		udb += (qid - (page * udb_density)) * 128;
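		/* e.g. with a (hypothetical) 4KB page and udb_density of 8,
		 * qpshift is 12 - 3 = 9; for qid 10 this yields udb = 5120,
		 * rounded down to page 1 (offset 4096), plus (10 - 8) * 128
		 * = 256, so the doorbell written below lands at BAR2 offset
		 * 4352 + 8.
		 */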
4176 
4177 		writel(PIDX(pidx_inc),  adap->bar2 + udb + 8);
4178 
4179 		/* Re-enable BAR2 WC */
4180 		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4181 	}
4182 
4183 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
4184 }
4185 
4186 void t4_db_full(struct adapter *adap)
4187 {
4188 	if (is_t4(adap->params.chip)) {
4189 		disable_dbs(adap);
4190 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4191 		t4_set_reg_field(adap, SGE_INT_ENABLE3,
4192 				 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
4193 		queue_work(adap->workq, &adap->db_full_task);
4194 	}
4195 }
4196 
4197 void t4_db_dropped(struct adapter *adap)
4198 {
4199 	if (is_t4(adap->params.chip)) {
4200 		disable_dbs(adap);
4201 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4202 	}
4203 	queue_work(adap->workq, &adap->db_drop_task);
4204 }
4205 
4206 static void uld_attach(struct adapter *adap, unsigned int uld)
4207 {
4208 	void *handle;
4209 	struct cxgb4_lld_info lli;
4210 	unsigned short i;
4211 
4212 	lli.pdev = adap->pdev;
4213 	lli.pf = adap->fn;
4214 	lli.l2t = adap->l2t;
4215 	lli.tids = &adap->tids;
4216 	lli.ports = adap->port;
4217 	lli.vr = &adap->vres;
4218 	lli.mtus = adap->params.mtus;
4219 	if (uld == CXGB4_ULD_RDMA) {
4220 		lli.rxq_ids = adap->sge.rdma_rxq;
4221 		lli.ciq_ids = adap->sge.rdma_ciq;
4222 		lli.nrxq = adap->sge.rdmaqs;
4223 		lli.nciq = adap->sge.rdmaciqs;
4224 	} else if (uld == CXGB4_ULD_ISCSI) {
4225 		lli.rxq_ids = adap->sge.ofld_rxq;
4226 		lli.nrxq = adap->sge.ofldqsets;
4227 	}
4228 	lli.ntxq = adap->sge.ofldqsets;
4229 	lli.nchan = adap->params.nports;
4230 	lli.nports = adap->params.nports;
4231 	lli.wr_cred = adap->params.ofldq_wr_cred;
4232 	lli.adapter_type = adap->params.chip;
4233 	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
4234 	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
4235 	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
4236 			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
4237 			(adap->fn * 4));
4238 	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
4239 			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
4240 			(adap->fn * 4));
4241 	lli.filt_mode = adap->params.tp.vlan_pri_map;
4242 	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4243 	for (i = 0; i < NCHAN; i++)
4244 		lli.tx_modq[i] = i;
4245 	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4246 	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4247 	lli.fw_vers = adap->params.fw_vers;
4248 	lli.dbfifo_int_thresh = dbfifo_int_thresh;
4249 	lli.sge_ingpadboundary = adap->sge.fl_align;
4250 	lli.sge_egrstatuspagesize = adap->sge.stat_len;
4251 	lli.sge_pktshift = adap->sge.pktshift;
4252 	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4253 	lli.max_ordird_qp = adap->params.max_ordird_qp;
4254 	lli.max_ird_adapter = adap->params.max_ird_adapter;
4255 	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
4256 
4257 	handle = ulds[uld].add(&lli);
4258 	if (IS_ERR(handle)) {
4259 		dev_warn(adap->pdev_dev,
4260 			 "could not attach to the %s driver, error %ld\n",
4261 			 uld_str[uld], PTR_ERR(handle));
4262 		return;
4263 	}
4264 
4265 	adap->uld_handle[uld] = handle;
4266 
4267 	if (!netevent_registered) {
4268 		register_netevent_notifier(&cxgb4_netevent_nb);
4269 		netevent_registered = true;
4270 	}
4271 
4272 	if (adap->flags & FULL_INIT_DONE)
4273 		ulds[uld].state_change(handle, CXGB4_STATE_UP);
4274 }
4275 
4276 static void attach_ulds(struct adapter *adap)
4277 {
4278 	unsigned int i;
4279 
4280 	spin_lock(&adap_rcu_lock);
4281 	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4282 	spin_unlock(&adap_rcu_lock);
4283 
4284 	mutex_lock(&uld_mutex);
4285 	list_add_tail(&adap->list_node, &adapter_list);
4286 	for (i = 0; i < CXGB4_ULD_MAX; i++)
4287 		if (ulds[i].add)
4288 			uld_attach(adap, i);
4289 	mutex_unlock(&uld_mutex);
4290 }
4291 
4292 static void detach_ulds(struct adapter *adap)
4293 {
4294 	unsigned int i;
4295 
4296 	mutex_lock(&uld_mutex);
4297 	list_del(&adap->list_node);
4298 	for (i = 0; i < CXGB4_ULD_MAX; i++)
4299 		if (adap->uld_handle[i]) {
4300 			ulds[i].state_change(adap->uld_handle[i],
4301 					     CXGB4_STATE_DETACH);
4302 			adap->uld_handle[i] = NULL;
4303 		}
4304 	if (netevent_registered && list_empty(&adapter_list)) {
4305 		unregister_netevent_notifier(&cxgb4_netevent_nb);
4306 		netevent_registered = false;
4307 	}
4308 	mutex_unlock(&uld_mutex);
4309 
4310 	spin_lock(&adap_rcu_lock);
4311 	list_del_rcu(&adap->rcu_node);
4312 	spin_unlock(&adap_rcu_lock);
4313 }
4314 
4315 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4316 {
4317 	unsigned int i;
4318 
4319 	mutex_lock(&uld_mutex);
4320 	for (i = 0; i < CXGB4_ULD_MAX; i++)
4321 		if (adap->uld_handle[i])
4322 			ulds[i].state_change(adap->uld_handle[i], new_state);
4323 	mutex_unlock(&uld_mutex);
4324 }
4325 
4326 /**
4327  *	cxgb4_register_uld - register an upper-layer driver
4328  *	@type: the ULD type
4329  *	@p: the ULD methods
4330  *
4331  *	Registers an upper-layer driver with this driver and notifies the ULD
4332  *	about any presently available devices that support its type.  Returns
4333  *	%-EBUSY if a ULD of the same type is already registered.
4334  */
4335 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4336 {
4337 	int ret = 0;
4338 	struct adapter *adap;
4339 
4340 	if (type >= CXGB4_ULD_MAX)
4341 		return -EINVAL;
4342 	mutex_lock(&uld_mutex);
4343 	if (ulds[type].add) {
4344 		ret = -EBUSY;
4345 		goto out;
4346 	}
4347 	ulds[type] = *p;
4348 	list_for_each_entry(adap, &adapter_list, list_node)
4349 		uld_attach(adap, type);
4350 out:	mutex_unlock(&uld_mutex);
4351 	return ret;
4352 }
4353 EXPORT_SYMBOL(cxgb4_register_uld);
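
/* A minimal (hypothetical) registration sketch for an upper-layer driver:
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.add          = my_uld_add,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *
 * The .add callback is invoked once per existing adapter (via uld_attach()
 * above) and must return an opaque handle or an ERR_PTR() value; the
 * my_uld_* names are illustrative only.
 */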
4354 
4355 /**
4356  *	cxgb4_unregister_uld - unregister an upper-layer driver
4357  *	@type: the ULD type
4358  *
4359  *	Unregisters an existing upper-layer driver.
4360  */
4361 int cxgb4_unregister_uld(enum cxgb4_uld type)
4362 {
4363 	struct adapter *adap;
4364 
4365 	if (type >= CXGB4_ULD_MAX)
4366 		return -EINVAL;
4367 	mutex_lock(&uld_mutex);
4368 	list_for_each_entry(adap, &adapter_list, list_node)
4369 		adap->uld_handle[type] = NULL;
4370 	ulds[type].add = NULL;
4371 	mutex_unlock(&uld_mutex);
4372 	return 0;
4373 }
4374 EXPORT_SYMBOL(cxgb4_unregister_uld);
4375 
4376 /* Check if the netdev on which the event occurred belongs to us or not.
4377  * Return success (true) if it does belong, otherwise failure (false).
4378  * Called with rcu_read_lock() held.
4379  */
4380 #if IS_ENABLED(CONFIG_IPV6)
4381 static bool cxgb4_netdev(const struct net_device *netdev)
4382 {
4383 	struct adapter *adap;
4384 	int i;
4385 
4386 	list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4387 		for (i = 0; i < MAX_NPORTS; i++)
4388 			if (adap->port[i] == netdev)
4389 				return true;
4390 	return false;
4391 }
4392 
4393 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4394 		    unsigned long event)
4395 {
4396 	int ret = NOTIFY_DONE;
4397 
4398 	rcu_read_lock();
4399 	if (cxgb4_netdev(event_dev)) {
4400 		switch (event) {
4401 		case NETDEV_UP:
4402 			ret = cxgb4_clip_get(event_dev,
4403 				(const struct in6_addr *)ifa->addr.s6_addr);
4404 			if (ret < 0) {
4405 				rcu_read_unlock();
4406 				return ret;
4407 			}
4408 			ret = NOTIFY_OK;
4409 			break;
4410 		case NETDEV_DOWN:
4411 			cxgb4_clip_release(event_dev,
4412 				(const struct in6_addr *)ifa->addr.s6_addr);
4413 			ret = NOTIFY_OK;
4414 			break;
4415 		default:
4416 			break;
4417 		}
4418 	}
4419 	rcu_read_unlock();
4420 	return ret;
4421 }
4422 
4423 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4424 		unsigned long event, void *data)
4425 {
4426 	struct inet6_ifaddr *ifa = data;
4427 	struct net_device *event_dev;
4428 	int ret = NOTIFY_DONE;
4429 	struct bonding *bond = netdev_priv(ifa->idev->dev);
4430 	struct list_head *iter;
4431 	struct slave *slave;
4432 	struct pci_dev *first_pdev = NULL;
4433 
4434 	if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4435 		event_dev = vlan_dev_real_dev(ifa->idev->dev);
4436 		ret = clip_add(event_dev, ifa, event);
4437 	} else if (ifa->idev->dev->flags & IFF_MASTER) {
4438 		/* It is possible that two different adapters are bonded in one
4439 		 * bond. We need to find such different adapters and add the
4440 		 * CLIP entry to each of them only once.
4441 		 */
4442 		bond_for_each_slave(bond, slave, iter) {
4443 			if (!first_pdev) {
4444 				ret = clip_add(slave->dev, ifa, event);
4445 				/* Only initialize first_pdev if clip_add
4446 				 * succeeded, since that means it is our device
4447 				 */
4448 				if (ret == NOTIFY_OK)
4449 					first_pdev = to_pci_dev(
4450 							slave->dev->dev.parent);
4451 			} else if (first_pdev !=
4452 				   to_pci_dev(slave->dev->dev.parent))
4453 					ret = clip_add(slave->dev, ifa, event);
4454 		}
4455 	} else
4456 		ret = clip_add(ifa->idev->dev, ifa, event);
4457 
4458 	return ret;
4459 }
4460 
4461 static struct notifier_block cxgb4_inet6addr_notifier = {
4462 	.notifier_call = cxgb4_inet6addr_handler
4463 };
4464 
4465 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4466  * a physical device.
4467  * The physical device reference is needed to send the actual CLIP command.
4468  */
4469 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4470 {
4471 	struct inet6_dev *idev = NULL;
4472 	struct inet6_ifaddr *ifa;
4473 	int ret = 0;
4474 
4475 	idev = __in6_dev_get(root_dev);
4476 	if (!idev)
4477 		return ret;
4478 
4479 	read_lock_bh(&idev->lock);
4480 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
4481 		ret = cxgb4_clip_get(dev,
4482 				(const struct in6_addr *)ifa->addr.s6_addr);
4483 		if (ret < 0)
4484 			break;
4485 	}
4486 	read_unlock_bh(&idev->lock);
4487 
4488 	return ret;
4489 }
4490 
4491 static int update_root_dev_clip(struct net_device *dev)
4492 {
4493 	struct net_device *root_dev = NULL;
4494 	int i, ret = 0;
4495 
4496 	/* First populate the real net device's IPv6 addresses */
4497 	ret = update_dev_clip(dev, dev);
4498 	if (ret)
4499 		return ret;
4500 
4501 	/* Parse all bond and vlan devices layered on top of the physical dev */
4502 	root_dev = netdev_master_upper_dev_get_rcu(dev);
4503 	if (root_dev) {
4504 		ret = update_dev_clip(root_dev, dev);
4505 		if (ret)
4506 			return ret;
4507 	}
4508 
4509 	for (i = 0; i < VLAN_N_VID; i++) {
4510 		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4511 		if (!root_dev)
4512 			continue;
4513 
4514 		ret = update_dev_clip(root_dev, dev);
4515 		if (ret)
4516 			break;
4517 	}
4518 	return ret;
4519 }
4520 
4521 static void update_clip(const struct adapter *adap)
4522 {
4523 	int i;
4524 	struct net_device *dev;
4525 	int ret;
4526 
4527 	rcu_read_lock();
4528 
4529 	for (i = 0; i < MAX_NPORTS; i++) {
4530 		dev = adap->port[i];
4531 		ret = 0;
4532 
4533 		if (dev)
4534 			ret = update_root_dev_clip(dev);
4535 
4536 		if (ret < 0)
4537 			break;
4538 	}
4539 	rcu_read_unlock();
4540 }
4541 #endif /* IS_ENABLED(CONFIG_IPV6) */
4542 
4543 /**
4544  *	cxgb_up - enable the adapter
4545  *	@adap: adapter being enabled
4546  *
4547  *	Called when the first port is enabled, this function performs the
4548  *	actions necessary to make an adapter operational, such as completing
4549  *	the initialization of HW modules, and enabling interrupts.
4550  *
4551  *	Must be called with the rtnl lock held.
4552  */
4553 static int cxgb_up(struct adapter *adap)
4554 {
4555 	int err;
4556 
4557 	err = setup_sge_queues(adap);
4558 	if (err)
4559 		goto out;
4560 	err = setup_rss(adap);
4561 	if (err)
4562 		goto freeq;
4563 
4564 	if (adap->flags & USING_MSIX) {
4565 		name_msix_vecs(adap);
4566 		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4567 				  adap->msix_info[0].desc, adap);
4568 		if (err)
4569 			goto irq_err;
4570 
4571 		err = request_msix_queue_irqs(adap);
4572 		if (err) {
4573 			free_irq(adap->msix_info[0].vec, adap);
4574 			goto irq_err;
4575 		}
4576 	} else {
4577 		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4578 				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4579 				  adap->port[0]->name, adap);
4580 		if (err)
4581 			goto irq_err;
4582 	}
4583 
4584 	mutex_lock(&uld_mutex);
4585 	enable_rx(adap);
4586 	t4_sge_start(adap);
4587 	t4_intr_enable(adap);
4588 	adap->flags |= FULL_INIT_DONE;
4589 	mutex_unlock(&uld_mutex);
4590 
4591 	notify_ulds(adap, CXGB4_STATE_UP);
4592 #if IS_ENABLED(CONFIG_IPV6)
4593 	update_clip(adap);
4594 #endif
4595  out:
4596 	return err;
4597  irq_err:
4598 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4599  freeq:
4600 	t4_free_sge_resources(adap);
4601 	goto out;
4602 }
4603 
4604 static void cxgb_down(struct adapter *adapter)
4605 {
4606 	t4_intr_disable(adapter);
4607 	cancel_work_sync(&adapter->tid_release_task);
4608 	cancel_work_sync(&adapter->db_full_task);
4609 	cancel_work_sync(&adapter->db_drop_task);
4610 	adapter->tid_release_task_busy = false;
4611 	adapter->tid_release_head = NULL;
4612 
4613 	if (adapter->flags & USING_MSIX) {
4614 		free_msix_queue_irqs(adapter);
4615 		free_irq(adapter->msix_info[0].vec, adapter);
4616 	} else
4617 		free_irq(adapter->pdev->irq, adapter);
4618 	quiesce_rx(adapter);
4619 	t4_sge_stop(adapter);
4620 	t4_free_sge_resources(adapter);
4621 	adapter->flags &= ~FULL_INIT_DONE;
4622 }
4623 
4624 /*
4625  * net_device operations
4626  */
4627 static int cxgb_open(struct net_device *dev)
4628 {
4629 	int err;
4630 	struct port_info *pi = netdev_priv(dev);
4631 	struct adapter *adapter = pi->adapter;
4632 
4633 	netif_carrier_off(dev);
4634 
4635 	if (!(adapter->flags & FULL_INIT_DONE)) {
4636 		err = cxgb_up(adapter);
4637 		if (err < 0)
4638 			return err;
4639 	}
4640 
4641 	err = link_start(dev);
4642 	if (!err)
4643 		netif_tx_start_all_queues(dev);
4644 	return err;
4645 }
4646 
4647 static int cxgb_close(struct net_device *dev)
4648 {
4649 	struct port_info *pi = netdev_priv(dev);
4650 	struct adapter *adapter = pi->adapter;
4651 
4652 	netif_tx_stop_all_queues(dev);
4653 	netif_carrier_off(dev);
4654 	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4655 }
4656 
4657 /* Return an error number if the indicated filter isn't writable ...
4658  */
4659 static int writable_filter(struct filter_entry *f)
4660 {
4661 	if (f->locked)
4662 		return -EPERM;
4663 	if (f->pending)
4664 		return -EBUSY;
4665 
4666 	return 0;
4667 }
4668 
4669 /* Delete the filter at the specified index (if valid).  This checks for all
4670  * the common problems with doing this, like the filter being locked, currently
4671  * pending in another operation, etc.
4672  */
4673 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4674 {
4675 	struct filter_entry *f;
4676 	int ret;
4677 
4678 	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4679 		return -EINVAL;
4680 
4681 	f = &adapter->tids.ftid_tab[fidx];
4682 	ret = writable_filter(f);
4683 	if (ret)
4684 		return ret;
4685 	if (f->valid)
4686 		return del_filter_wr(adapter, fidx);
4687 
4688 	return 0;
4689 }
4690 
4691 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4692 		__be32 sip, __be16 sport, __be16 vlan,
4693 		unsigned int queue, unsigned char port, unsigned char mask)
4694 {
4695 	int ret;
4696 	struct filter_entry *f;
4697 	struct adapter *adap;
4698 	int i;
4699 	u8 *val;
4700 
4701 	adap = netdev2adap(dev);
4702 
4703 	/* Adjust stid to correct filter index */
4704 	stid -= adap->tids.sftid_base;
4705 	stid += adap->tids.nftids;
4706 
4707 	/* Check to make sure the filter requested is writable ...
4708 	 */
4709 	f = &adap->tids.ftid_tab[stid];
4710 	ret = writable_filter(f);
4711 	if (ret)
4712 		return ret;
4713 
4714 	/* Clear out any old resources being used by the filter before
4715 	 * we start constructing the new filter.
4716 	 */
4717 	if (f->valid)
4718 		clear_filter(adap, f);
4719 
4720 	/* Clear out filter specifications */
4721 	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4722 	f->fs.val.lport = cpu_to_be16(sport);
4723 	f->fs.mask.lport  = ~0;
4724 	val = (u8 *)&sip;
4725 	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4726 		for (i = 0; i < 4; i++) {
4727 			f->fs.val.lip[i] = val[i];
4728 			f->fs.mask.lip[i] = ~0;
4729 		}
4730 		if (adap->params.tp.vlan_pri_map & F_PORT) {
4731 			f->fs.val.iport = port;
4732 			f->fs.mask.iport = mask;
4733 		}
4734 	}
4735 
4736 	if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4737 		f->fs.val.proto = IPPROTO_TCP;
4738 		f->fs.mask.proto = ~0;
4739 	}
4740 
4741 	f->fs.dirsteer = 1;
4742 	f->fs.iq = queue;
4743 	/* Mark filter as locked */
4744 	f->locked = 1;
4745 	f->fs.rpttid = 1;
4746 
4747 	ret = set_filter_wr(adap, stid);
4748 	if (ret) {
4749 		clear_filter(adap, f);
4750 		return ret;
4751 	}
4752 
4753 	return 0;
4754 }
4755 EXPORT_SYMBOL(cxgb4_create_server_filter);
4756 
4757 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4758 		unsigned int queue, bool ipv6)
4759 {
4760 	int ret;
4761 	struct filter_entry *f;
4762 	struct adapter *adap;
4763 
4764 	adap = netdev2adap(dev);
4765 
4766 	/* Adjust stid to correct filter index */
4767 	stid -= adap->tids.sftid_base;
4768 	stid += adap->tids.nftids;
4769 
4770 	f = &adap->tids.ftid_tab[stid];
4771 	/* Unlock the filter */
4772 	f->locked = 0;
4773 
4774 	ret = delete_filter(adap, stid);
4775 	if (ret)
4776 		return ret;
4777 
4778 	return 0;
4779 }
4780 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4781 
4782 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4783 						struct rtnl_link_stats64 *ns)
4784 {
4785 	struct port_stats stats;
4786 	struct port_info *p = netdev_priv(dev);
4787 	struct adapter *adapter = p->adapter;
4788 
4789 	/* Block retrieving statistics during EEH error
4790 	 * recovery. Otherwise, the recovery might fail
4791 	 * and the PCI device will be removed permanently
4792 	 */
4793 	spin_lock(&adapter->stats_lock);
4794 	if (!netif_device_present(dev)) {
4795 		spin_unlock(&adapter->stats_lock);
4796 		return ns;
4797 	}
4798 	t4_get_port_stats(adapter, p->tx_chan, &stats);
4799 	spin_unlock(&adapter->stats_lock);
4800 
4801 	ns->tx_bytes   = stats.tx_octets;
4802 	ns->tx_packets = stats.tx_frames;
4803 	ns->rx_bytes   = stats.rx_octets;
4804 	ns->rx_packets = stats.rx_frames;
4805 	ns->multicast  = stats.rx_mcast_frames;
4806 
4807 	/* detailed rx_errors */
4808 	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4809 			       stats.rx_runt;
4810 	ns->rx_over_errors   = 0;
4811 	ns->rx_crc_errors    = stats.rx_fcs_err;
4812 	ns->rx_frame_errors  = stats.rx_symbol_err;
4813 	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
4814 			       stats.rx_ovflow2 + stats.rx_ovflow3 +
4815 			       stats.rx_trunc0 + stats.rx_trunc1 +
4816 			       stats.rx_trunc2 + stats.rx_trunc3;
4817 	ns->rx_missed_errors = 0;
4818 
4819 	/* detailed tx_errors */
4820 	ns->tx_aborted_errors   = 0;
4821 	ns->tx_carrier_errors   = 0;
4822 	ns->tx_fifo_errors      = 0;
4823 	ns->tx_heartbeat_errors = 0;
4824 	ns->tx_window_errors    = 0;
4825 
4826 	ns->tx_errors = stats.tx_error_frames;
4827 	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4828 		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4829 	return ns;
4830 }
4831 
4832 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4833 {
4834 	unsigned int mbox;
4835 	int ret = 0, prtad, devad;
4836 	struct port_info *pi = netdev_priv(dev);
4837 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4838 
4839 	switch (cmd) {
4840 	case SIOCGMIIPHY:
4841 		if (pi->mdio_addr < 0)
4842 			return -EOPNOTSUPP;
4843 		data->phy_id = pi->mdio_addr;
4844 		break;
4845 	case SIOCGMIIREG:
4846 	case SIOCSMIIREG:
4847 		if (mdio_phy_id_is_c45(data->phy_id)) {
4848 			prtad = mdio_phy_id_prtad(data->phy_id);
4849 			devad = mdio_phy_id_devad(data->phy_id);
4850 		} else if (data->phy_id < 32) {
4851 			prtad = data->phy_id;
4852 			devad = 0;
4853 			data->reg_num &= 0x1f;
4854 		} else
4855 			return -EINVAL;
4856 
4857 		mbox = pi->adapter->fn;
4858 		if (cmd == SIOCGMIIREG)
4859 			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4860 					 data->reg_num, &data->val_out);
4861 		else
4862 			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4863 					 data->reg_num, data->val_in);
4864 		break;
4865 	default:
4866 		return -EOPNOTSUPP;
4867 	}
4868 	return ret;
4869 }
4870 
4871 static void cxgb_set_rxmode(struct net_device *dev)
4872 {
4873 	/* unfortunately we can't return errors to the stack */
4874 	set_rxmode(dev, -1, false);
4875 }
4876 
4877 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4878 {
4879 	int ret;
4880 	struct port_info *pi = netdev_priv(dev);
4881 
4882 	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
4883 		return -EINVAL;
4884 	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4885 			    -1, -1, -1, true);
4886 	if (!ret)
4887 		dev->mtu = new_mtu;
4888 	return ret;
4889 }
4890 
4891 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4892 {
4893 	int ret;
4894 	struct sockaddr *addr = p;
4895 	struct port_info *pi = netdev_priv(dev);
4896 
4897 	if (!is_valid_ether_addr(addr->sa_data))
4898 		return -EADDRNOTAVAIL;
4899 
4900 	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4901 			    pi->xact_addr_filt, addr->sa_data, true, true);
4902 	if (ret < 0)
4903 		return ret;
4904 
4905 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4906 	pi->xact_addr_filt = ret;
4907 	return 0;
4908 }
4909 
4910 #ifdef CONFIG_NET_POLL_CONTROLLER
4911 static void cxgb_netpoll(struct net_device *dev)
4912 {
4913 	struct port_info *pi = netdev_priv(dev);
4914 	struct adapter *adap = pi->adapter;
4915 
4916 	if (adap->flags & USING_MSIX) {
4917 		int i;
4918 		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4919 
4920 		for (i = pi->nqsets; i; i--, rx++)
4921 			t4_sge_intr_msix(0, &rx->rspq);
4922 	} else
4923 		t4_intr_handler(adap)(0, adap);
4924 }
4925 #endif
4926 
4927 static const struct net_device_ops cxgb4_netdev_ops = {
4928 	.ndo_open             = cxgb_open,
4929 	.ndo_stop             = cxgb_close,
4930 	.ndo_start_xmit       = t4_eth_xmit,
4931 	.ndo_select_queue     =	cxgb_select_queue,
4932 	.ndo_get_stats64      = cxgb_get_stats,
4933 	.ndo_set_rx_mode      = cxgb_set_rxmode,
4934 	.ndo_set_mac_address  = cxgb_set_mac_addr,
4935 	.ndo_set_features     = cxgb_set_features,
4936 	.ndo_validate_addr    = eth_validate_addr,
4937 	.ndo_do_ioctl         = cxgb_ioctl,
4938 	.ndo_change_mtu       = cxgb_change_mtu,
4939 #ifdef CONFIG_NET_POLL_CONTROLLER
4940 	.ndo_poll_controller  = cxgb_netpoll,
4941 #endif
4942 };
4943 
4944 void t4_fatal_err(struct adapter *adap)
4945 {
4946 	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4947 	t4_intr_disable(adap);
4948 	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4949 }
4950 
4951 /* Return the specified PCI-E Configuration Space register from our Physical
4952  * Function.  We try first via a Firmware LDST Command since we prefer to let
4953  * the firmware own all of these registers, but if that fails we go for it
4954  * directly ourselves.
4955  */
4956 static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4957 {
4958 	struct fw_ldst_cmd ldst_cmd;
4959 	u32 val;
4960 	int ret;
4961 
4962 	/* Construct and send the Firmware LDST Command to retrieve the
4963 	 * specified PCI-E Configuration Space register.
4964 	 */
4965 	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4966 	ldst_cmd.op_to_addrspace =
4967 		htonl(FW_CMD_OP(FW_LDST_CMD) |
4968 		      FW_CMD_REQUEST |
4969 		      FW_CMD_READ |
4970 		      FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
4971 	ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4972 	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
4973 	ldst_cmd.u.pcie.ctrl_to_fn =
4974 		(FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
4975 	ldst_cmd.u.pcie.r = reg;
4976 	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4977 			 &ldst_cmd);
4978 
4979 	/* If the LDST Command succeeded, extract the returned register
4980 	 * value.  Otherwise read it directly ourselves.
4981 	 */
4982 	if (ret == 0)
4983 		val = ntohl(ldst_cmd.u.pcie.data[0]);
4984 	else
4985 		t4_hw_pci_read_cfg4(adap, reg, &val);
4986 
4987 	return val;
4988 }
4989 
4990 static void setup_memwin(struct adapter *adap)
4991 {
4992 	u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
4993 
4994 	if (is_t4(adap->params.chip)) {
4995 		u32 bar0;
4996 
4997 		/* Truncation intentional: we only read the bottom 32-bits of
4998 		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
4999 		 * mechanism to read BAR0 instead of using
5000 		 * pci_resource_start() because we could be operating from
5001 		 * within a Virtual Machine which is trapping our accesses to
5002 		 * our Configuration Space and we need to set up the PCI-E
5003 		 * Memory Window decoders with the actual addresses which will
5004 		 * be coming across the PCI-E link.
5005 		 */
5006 		bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
5007 		bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
5008 		adap->t4_bar0 = bar0;
5009 
5010 		mem_win0_base = bar0 + MEMWIN0_BASE;
5011 		mem_win1_base = bar0 + MEMWIN1_BASE;
5012 		mem_win2_base = bar0 + MEMWIN2_BASE;
5013 		mem_win2_aperture = MEMWIN2_APERTURE;
5014 	} else {
5015 		/* For T5, only relative offset inside the PCIe BAR is passed */
5016 		mem_win0_base = MEMWIN0_BASE;
5017 		mem_win1_base = MEMWIN1_BASE;
5018 		mem_win2_base = MEMWIN2_BASE_T5;
5019 		mem_win2_aperture = MEMWIN2_APERTURE_T5;
5020 	}
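	/* Each PCIE_MEM_ACCESS_BASE_WIN register below packs the window base
	 * address, the backing BAR (BIR) and the aperture size encoded as
	 * log2 of the size in KB; e.g. a (hypothetical) 64KB aperture would
	 * be programmed as WINDOW(ilog2(64 * 1024) - 10) = WINDOW(6).
	 */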
5021 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
5022 		     mem_win0_base | BIR(0) |
5023 		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
5024 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
5025 		     mem_win1_base | BIR(0) |
5026 		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
5027 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
5028 		     mem_win2_base | BIR(0) |
5029 		     WINDOW(ilog2(mem_win2_aperture) - 10));
5030 	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
5031 }
5032 
5033 static void setup_memwin_rdma(struct adapter *adap)
5034 {
5035 	if (adap->vres.ocq.size) {
5036 		u32 start;
5037 		unsigned int sz_kb;
5038 
5039 		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
5040 		start &= PCI_BASE_ADDRESS_MEM_MASK;
5041 		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
5042 		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
5043 		t4_write_reg(adap,
5044 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
5045 			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
5046 		t4_write_reg(adap,
5047 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
5048 			     adap->vres.ocq.start);
5049 		t4_read_reg(adap,
5050 			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
5051 	}
5052 }
5053 
5054 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
5055 {
5056 	u32 v;
5057 	int ret;
5058 
5059 	/* get device capabilities */
5060 	memset(c, 0, sizeof(*c));
5061 	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5062 			       FW_CMD_REQUEST | FW_CMD_READ);
5063 	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
5064 	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
5065 	if (ret < 0)
5066 		return ret;
5067 
5068 	/* select capabilities we'll be using */
5069 	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5070 		if (!vf_acls)
5071 			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5072 		else
5073 			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5074 	} else if (vf_acls) {
5075 		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
5076 		return ret;
5077 	}
5078 	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5079 			       FW_CMD_REQUEST | FW_CMD_WRITE);
5080 	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
5081 	if (ret < 0)
5082 		return ret;
5083 
5084 	ret = t4_config_glbl_rss(adap, adap->fn,
5085 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5086 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5087 				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
5088 	if (ret < 0)
5089 		return ret;
5090 
5091 	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
5092 			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
5093 	if (ret < 0)
5094 		return ret;
5095 
5096 	t4_sge_init(adap);
5097 
5098 	/* tweak some settings */
5099 	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
5100 	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
5101 	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
5102 	v = t4_read_reg(adap, TP_PIO_DATA);
5103 	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
5104 
5105 	/* first 4 Tx modulation queues point to consecutive Tx channels */
5106 	adap->params.tp.tx_modq_map = 0xE4;
5107 	t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
5108 		     V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
5109 
5110 	/* associate each Tx modulation queue with consecutive Tx channels */
5111 	v = 0x84218421;
5112 	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5113 			  &v, 1, A_TP_TX_SCHED_HDR);
5114 	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5115 			  &v, 1, A_TP_TX_SCHED_FIFO);
5116 	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5117 			  &v, 1, A_TP_TX_SCHED_PCMD);
5118 
5119 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
5120 	if (is_offload(adap)) {
5121 		t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
5122 			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5123 			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5124 			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5125 			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5126 		t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
5127 			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5128 			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5129 			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5130 			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5131 	}
5132 
5133 	/* get basic stuff going */
5134 	return t4_early_init(adap, adap->fn);
5135 }
5136 
5137 /*
5138  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
5139  */
5140 #define MAX_ATIDS 8192U
5141 
5142 /*
5143  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5144  *
5145  * If the firmware we're dealing with has Configuration File support, then
5146  * we use that to perform all configuration
5147  */
5148 
5149 /*
5150  * Tweak configuration based on module parameters, etc.  Most of these have
5151  * defaults assigned to them by Firmware Configuration Files (if we're using
5152  * them) but need to be explicitly set if we're using hard-coded
5153  * initialization.  But even in the case of using Firmware Configuration
5154  * Files, we'd like to expose the ability to change these via module
5155  * parameters so these are essentially common tweaks/settings for
5156  * Configuration Files and hard-coded initialization ...
5157  */
5158 static int adap_init0_tweaks(struct adapter *adapter)
5159 {
5160 	/*
5161 	 * Fix up various Host-Dependent Parameters like Page Size, Cache
5162 	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
5163 	 * 64B Cache Line Size ...
5164 	 */
5165 	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
5166 
5167 	/*
5168 	 * Process module parameters which affect early initialization.
5169 	 */
5170 	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
5171 		dev_err(&adapter->pdev->dev,
5172 			"Ignoring illegal rx_dma_offset=%d, using 2\n",
5173 			rx_dma_offset);
5174 		rx_dma_offset = 2;
5175 	}
5176 	t4_set_reg_field(adapter, SGE_CONTROL,
5177 			 PKTSHIFT_MASK,
5178 			 PKTSHIFT(rx_dma_offset));
5179 
5180 	/*
5181 	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
5182 	 * adds the pseudo header itself.
5183 	 */
5184 	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
5185 			       CSUM_HAS_PSEUDO_HDR, 0);
5186 
5187 	return 0;
5188 }
5189 
5190 /*
5191  * Attempt to initialize the adapter via a Firmware Configuration File.
5192  */
5193 static int adap_init0_config(struct adapter *adapter, int reset)
5194 {
5195 	struct fw_caps_config_cmd caps_cmd;
5196 	const struct firmware *cf;
5197 	unsigned long mtype = 0, maddr = 0;
5198 	u32 finiver, finicsum, cfcsum;
5199 	int ret;
5200 	int config_issued = 0;
5201 	char *fw_config_file, fw_config_file_path[256];
5202 	char *config_name = NULL;
5203 
5204 	/*
5205 	 * Reset device if necessary.
5206 	 */
5207 	if (reset) {
5208 		ret = t4_fw_reset(adapter, adapter->mbox,
5209 				  PIORSTMODE | PIORST);
5210 		if (ret < 0)
5211 			goto bye;
5212 	}
5213 
5214 	/*
5215 	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
5216 	 * then use that.  Otherwise, use the configuration file stored
5217 	 * in the adapter flash ...
5218 	 */
5219 	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
5220 	case CHELSIO_T4:
5221 		fw_config_file = FW4_CFNAME;
5222 		break;
5223 	case CHELSIO_T5:
5224 		fw_config_file = FW5_CFNAME;
5225 		break;
5226 	default:
5227 		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
5228 		       adapter->pdev->device);
5229 		ret = -EINVAL;
5230 		goto bye;
5231 	}
5232 
5233 	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
5234 	if (ret < 0) {
5235 		config_name = "On FLASH";
5236 		mtype = FW_MEMTYPE_CF_FLASH;
5237 		maddr = t4_flash_cfg_addr(adapter);
5238 	} else {
5239 		u32 params[7], val[7];
5240 
5241 		sprintf(fw_config_file_path,
5242 			"/lib/firmware/%s", fw_config_file);
5243 		config_name = fw_config_file_path;
5244 
5245 		if (cf->size >= FLASH_CFG_MAX_SIZE)
5246 			ret = -ENOMEM;
5247 		else {
5248 			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5249 			     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5250 			ret = t4_query_params(adapter, adapter->mbox,
5251 					      adapter->fn, 0, 1, params, val);
5252 			if (ret == 0) {
5253 				/*
5254 				 * For t4_memory_rw() below addresses and
5255 				 * sizes have to be in terms of multiples of 4
5256 				 * bytes.  So, if the Configuration File isn't
5257 				 * a multiple of 4 bytes in length we'll have
5258 				 * to write that out separately since we can't
5259 				 * guarantee that the bytes following the
5260 				 * residual byte in the buffer returned by
5261 				 * request_firmware() are zeroed out ...
5262 				 */
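				/* For example, a 1029-byte Configuration
				 * File is written below as a 1028-byte bulk
				 * transfer followed by a final 4-byte word
				 * whose three trailing bytes are zeroed.
				 */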
5263 				size_t resid = cf->size & 0x3;
5264 				size_t size = cf->size & ~0x3;
5265 				__be32 *data = (__be32 *)cf->data;
5266 
5267 				mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
5268 				maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
5269 
5270 				spin_lock(&adapter->win0_lock);
5271 				ret = t4_memory_rw(adapter, 0, mtype, maddr,
5272 						   size, data, T4_MEMORY_WRITE);
5273 				if (ret == 0 && resid != 0) {
5274 					union {
5275 						__be32 word;
5276 						char buf[4];
5277 					} last;
5278 					int i;
5279 
5280 					last.word = data[size >> 2];
5281 					for (i = resid; i < 4; i++)
5282 						last.buf[i] = 0;
5283 					ret = t4_memory_rw(adapter, 0, mtype,
5284 							   maddr + size,
5285 							   4, &last.word,
5286 							   T4_MEMORY_WRITE);
5287 				}
5288 				spin_unlock(&adapter->win0_lock);
5289 			}
5290 		}
5291 
5292 		release_firmware(cf);
5293 		if (ret)
5294 			goto bye;
5295 	}
5296 
5297 	/*
5298 	 * Issue a Capability Configuration command to the firmware to get it
5299 	 * to parse the Configuration File.  We don't use t4_fw_config_file()
5300 	 * because we want the ability to modify various features after we've
5301 	 * processed the configuration file ...
5302 	 */
5303 	memset(&caps_cmd, 0, sizeof(caps_cmd));
5304 	caps_cmd.op_to_write =
5305 		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5306 		      FW_CMD_REQUEST |
5307 		      FW_CMD_READ);
5308 	caps_cmd.cfvalid_to_len16 =
5309 		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
5310 		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
5311 		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
5312 		      FW_LEN16(caps_cmd));
5313 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5314 			 &caps_cmd);
5315 
5316 	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
5317 	 * Configuration File in FLASH), our last gasp effort is to use the
5318 	 * Firmware Configuration File which is embedded in the firmware.  A
5319 	 * very few early versions of the firmware didn't have one embedded
5320 	 * but we can ignore those.
5321 	 */
5322 	if (ret == -ENOENT) {
5323 		memset(&caps_cmd, 0, sizeof(caps_cmd));
5324 		caps_cmd.op_to_write =
5325 			htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5326 					FW_CMD_REQUEST |
5327 					FW_CMD_READ);
5328 		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5329 		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
5330 				sizeof(caps_cmd), &caps_cmd);
5331 		config_name = "Firmware Default";
5332 	}
5333 
5334 	config_issued = 1;
5335 	if (ret < 0)
5336 		goto bye;
5337 
5338 	finiver = ntohl(caps_cmd.finiver);
5339 	finicsum = ntohl(caps_cmd.finicsum);
5340 	cfcsum = ntohl(caps_cmd.cfcsum);
5341 	if (finicsum != cfcsum)
5342 		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
5343 			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
5344 			 finicsum, cfcsum);
5345 
5346 	/*
5347 	 * And now tell the firmware to use the configuration we just loaded.
5348 	 */
5349 	caps_cmd.op_to_write =
5350 		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5351 		      FW_CMD_REQUEST |
5352 		      FW_CMD_WRITE);
5353 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5354 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5355 			 NULL);
5356 	if (ret < 0)
5357 		goto bye;
5358 
5359 	/*
5360 	 * Tweak configuration based on system architecture, module
5361 	 * parameters, etc.
5362 	 */
5363 	ret = adap_init0_tweaks(adapter);
5364 	if (ret < 0)
5365 		goto bye;
5366 
5367 	/*
5368 	 * And finally tell the firmware to initialize itself using the
5369 	 * parameters from the Configuration File.
5370 	 */
5371 	ret = t4_fw_initialize(adapter, adapter->mbox);
5372 	if (ret < 0)
5373 		goto bye;
5374 
5375 	/*
5376 	 * Return successfully and note that we're operating with parameters
5377 	 * not supplied by the driver, rather than from hard-wired
5378 	 * initialization constants buried in the driver.
5379 	 */
5380 	adapter->flags |= USING_SOFT_PARAMS;
5381 	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
5382 		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5383 		 config_name, finiver, cfcsum);
5384 	return 0;
5385 
5386 	/*
5387 	 * Something bad happened.  Return the error ...  (If the "error"
5388 	 * is that there's no Configuration File on the adapter we don't
5389 	 * want to issue a warning since this is fairly common.)
5390 	 */
5391 bye:
5392 	if (config_issued && ret != -ENOENT)
5393 		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5394 			 config_name, -ret);
5395 	return ret;
5396 }
5397 
5398 /*
5399  * Attempt to initialize the adapter via hard-coded, driver supplied
5400  * parameters ...
5401  */
5402 static int adap_init0_no_config(struct adapter *adapter, int reset)
5403 {
5404 	struct sge *s = &adapter->sge;
5405 	struct fw_caps_config_cmd caps_cmd;
5406 	u32 v;
5407 	int i, ret;
5408 
5409 	/*
5410 	 * Reset device if necessary
5411 	 */
5412 	if (reset) {
5413 		ret = t4_fw_reset(adapter, adapter->mbox,
5414 				  PIORSTMODE | PIORST);
5415 		if (ret < 0)
5416 			goto bye;
5417 	}
5418 
5419 	/*
5420 	 * Get device capabilities and select which we'll be using.
5421 	 */
5422 	memset(&caps_cmd, 0, sizeof(caps_cmd));
5423 	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5424 				     FW_CMD_REQUEST | FW_CMD_READ);
5425 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5426 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5427 			 &caps_cmd);
5428 	if (ret < 0)
5429 		goto bye;
5430 
5431 	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5432 		if (!vf_acls)
5433 			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5434 		else
5435 			caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5436 	} else if (vf_acls) {
5437 		dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
5438 		goto bye;
5439 	}
5440 	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5441 			      FW_CMD_REQUEST | FW_CMD_WRITE);
5442 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5443 			 NULL);
5444 	if (ret < 0)
5445 		goto bye;
5446 
5447 	/*
5448 	 * Tweak configuration based on system architecture, module
5449 	 * parameters, etc.
5450 	 */
5451 	ret = adap_init0_tweaks(adapter);
5452 	if (ret < 0)
5453 		goto bye;
5454 
5455 	/*
5456 	 * Select RSS Global Mode we want to use.  We use "Basic Virtual"
5457 	 * mode which maps each Virtual Interface to its own section of
5458 	 * the RSS Table and we turn on all map and hash enables ...
5459 	 */
5460 	adapter->flags |= RSS_TNLALLLOOKUP;
5461 	ret = t4_config_glbl_rss(adapter, adapter->mbox,
5462 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5463 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5464 				 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
5465 				 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5466 					FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
5467 	if (ret < 0)
5468 		goto bye;
5469 
5470 	/*
5471 	 * Set up our own fundamental resource provisioning ...
5472 	 */
5473 	ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5474 			  PFRES_NEQ, PFRES_NETHCTRL,
5475 			  PFRES_NIQFLINT, PFRES_NIQ,
5476 			  PFRES_TC, PFRES_NVI,
5477 			  FW_PFVF_CMD_CMASK_MASK,
5478 			  pfvfres_pmask(adapter, adapter->fn, 0),
5479 			  PFRES_NEXACTF,
5480 			  PFRES_R_CAPS, PFRES_WX_CAPS);
5481 	if (ret < 0)
5482 		goto bye;
5483 
5484 	/*
5485 	 * Perform low level SGE initialization.  We need to do this before we
5486 	 * send the firmware the INITIALIZE command because that will cause
5487 	 * any other PF Drivers which are waiting for the Master
5488 	 * Initialization to proceed forward.
5489 	 */
5490 	for (i = 0; i < SGE_NTIMERS - 1; i++)
5491 		s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5492 	s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5493 	s->counter_val[0] = 1;
5494 	for (i = 1; i < SGE_NCOUNTERS; i++)
5495 		s->counter_val[i] = min(intr_cnt[i - 1],
5496 					THRESHOLD_0_GET(THRESHOLD_0_MASK));
5497 	t4_sge_init(adapter);
5498 
5499 #ifdef CONFIG_PCI_IOV
5500 	/*
5501 	 * Provision resource limits for Virtual Functions.  We currently
5502 	 * grant them all the same static resource limits except for the Port
5503 	 * Access Rights Mask which we're assigning based on the PF.  All of
5504 	 * the static provisioning stuff for both the PF and VF really needs
5505 	 * to be managed in a persistent manner for each device which the
5506 	 * firmware controls.
5507 	 */
5508 	{
5509 		int pf, vf;
5510 
5511 		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
5512 			if (num_vf[pf] <= 0)
5513 				continue;
5514 
5515 			/* VF numbering starts at 1! */
5516 			for (vf = 1; vf <= num_vf[pf]; vf++) {
5517 				ret = t4_cfg_pfvf(adapter, adapter->mbox,
5518 						  pf, vf,
5519 						  VFRES_NEQ, VFRES_NETHCTRL,
5520 						  VFRES_NIQFLINT, VFRES_NIQ,
5521 						  VFRES_TC, VFRES_NVI,
5522 						  FW_PFVF_CMD_CMASK_MASK,
5523 						  pfvfres_pmask(
5524 						  adapter, pf, vf),
5525 						  VFRES_NEXACTF,
5526 						  VFRES_R_CAPS, VFRES_WX_CAPS);
5527 				if (ret < 0)
5528 					dev_warn(adapter->pdev_dev,
5529 						 "failed to "\
5530 						 "provision pf/vf=%d/%d; "
5531 						 "err=%d\n", pf, vf, ret);
5532 			}
5533 		}
5534 	}
5535 #endif
5536 
5537 	/*
5538 	 * Set up the default filter mode.  Later we'll want to implement this
5539 	 * via a firmware command, etc. ...  This needs to be done before the
5540 	 * firmware initialization command ...  If the selected set of fields
5541 	 * isn't equal to the default value, we'll need to make sure that the
5542 	 * field selections will fit in the 36-bit budget.
5543 	 */
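	/* For example, selecting VLAN (17 bits), VNIC_ID (17 bits) and PORT
	 * (3 bits) needs 37 bits and is rejected below, while VLAN, PORT,
	 * TOS (8 bits) and MPSHITTYPE (3 bits) need 31 bits and fit within
	 * the 36-bit budget.
	 */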
5544 	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5545 		int j, bits = 0;
5546 
5547 		for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5548 			switch (tp_vlan_pri_map & (1 << j)) {
5549 			case 0:
5550 				/* compressed filter field not enabled */
5551 				break;
5552 			case FCOE_MASK:
5553 				bits +=  1;
5554 				break;
5555 			case PORT_MASK:
5556 				bits +=  3;
5557 				break;
5558 			case VNIC_ID_MASK:
5559 				bits += 17;
5560 				break;
5561 			case VLAN_MASK:
5562 				bits += 17;
5563 				break;
5564 			case TOS_MASK:
5565 				bits +=  8;
5566 				break;
5567 			case PROTOCOL_MASK:
5568 				bits +=  8;
5569 				break;
5570 			case ETHERTYPE_MASK:
5571 				bits += 16;
5572 				break;
5573 			case MACMATCH_MASK:
5574 				bits +=  9;
5575 				break;
5576 			case MPSHITTYPE_MASK:
5577 				bits +=  3;
5578 				break;
5579 			case FRAGMENTATION_MASK:
5580 				bits +=  1;
5581 				break;
5582 			}
5583 
5584 		if (bits > 36) {
5585 			dev_err(adapter->pdev_dev,
5586 				"tp_vlan_pri_map=%#x needs %d bits > 36;"\
5587 				" using %#x\n", tp_vlan_pri_map, bits,
5588 				TP_VLAN_PRI_MAP_DEFAULT);
5589 			tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5590 		}
5591 	}
5592 	v = tp_vlan_pri_map;
5593 	t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5594 			  &v, 1, TP_VLAN_PRI_MAP);
5595 
5596 	/*
5597 	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
5598 	 * to support any of the compressed filter fields above.  Newer
5599 	 * versions of the firmware do this automatically but it doesn't hurt
5600 	 * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
5601 	 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5602 	 * since the firmware automatically turns this on and off when we have
5603 	 * a non-zero number of filters active (since it does have a
5604 	 * performance impact).
5605 	 */
5606 	if (tp_vlan_pri_map)
5607 		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5608 				 FIVETUPLELOOKUP_MASK,
5609 				 FIVETUPLELOOKUP_MASK);
5610 
5611 	/*
5612 	 * Tweak some settings.
5613 	 */
5614 	t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5615 		     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5616 		     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5617 		     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5618 
5619 	/*
5620 	 * Get basic stuff going by issuing the Firmware Initialize command.
5621 	 * Note that this _must_ be after all PFVF commands ...
5622 	 */
5623 	ret = t4_fw_initialize(adapter, adapter->mbox);
5624 	if (ret < 0)
5625 		goto bye;
5626 
5627 	/*
5628 	 * Return successfully!
5629 	 */
5630 	dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5631 		 "driver parameters\n");
5632 	return 0;
5633 
5634 	/*
5635 	 * Something bad happened.  Return the error ...
5636 	 */
5637 bye:
5638 	return ret;
5639 }
5640 
5641 static struct fw_info fw_info_array[] = {
5642 	{
5643 		.chip = CHELSIO_T4,
5644 		.fs_name = FW4_CFNAME,
5645 		.fw_mod_name = FW4_FNAME,
5646 		.fw_hdr = {
5647 			.chip = FW_HDR_CHIP_T4,
5648 			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5649 			.intfver_nic = FW_INTFVER(T4, NIC),
5650 			.intfver_vnic = FW_INTFVER(T4, VNIC),
5651 			.intfver_ri = FW_INTFVER(T4, RI),
5652 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
5653 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
5654 		},
5655 	}, {
5656 		.chip = CHELSIO_T5,
5657 		.fs_name = FW5_CFNAME,
5658 		.fw_mod_name = FW5_FNAME,
5659 		.fw_hdr = {
5660 			.chip = FW_HDR_CHIP_T5,
5661 			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5662 			.intfver_nic = FW_INTFVER(T5, NIC),
5663 			.intfver_vnic = FW_INTFVER(T5, VNIC),
5664 			.intfver_ri = FW_INTFVER(T5, RI),
5665 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
5666 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
5667 		},
5668 	}
5669 };
5670 
5671 static struct fw_info *find_fw_info(int chip)
5672 {
5673 	int i;
5674 
5675 	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5676 		if (fw_info_array[i].chip == chip)
5677 			return &fw_info_array[i];
5678 	}
5679 	return NULL;
5680 }
5681 
5682 /*
5683  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5684  */
5685 static int adap_init0(struct adapter *adap)
5686 {
5687 	int ret;
5688 	u32 v, port_vec;
5689 	enum dev_state state;
5690 	u32 params[7], val[7];
5691 	struct fw_caps_config_cmd caps_cmd;
5692 	int reset = 1;
5693 
5694 	/*
5695 	 * Contact FW, advertising Master capability (and potentially forcing
5696 	 * ourselves as the Master PF if our module parameter force_init is
5697 	 * set).
5698 	 */
5699 	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5700 			  force_init ? MASTER_MUST : MASTER_MAY,
5701 			  &state);
5702 	if (ret < 0) {
5703 		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5704 			ret);
5705 		return ret;
5706 	}
5707 	if (ret == adap->mbox)
5708 		adap->flags |= MASTER_PF;
5709 	if (force_init && state == DEV_STATE_INIT)
5710 		state = DEV_STATE_UNINIT;
5711 
5712 	/*
5713 	 * If we're the Master PF Driver and the device is uninitialized,
5714 	 * then let's consider upgrading the firmware ...  (We always want
5715 	 * to check the firmware version number in order to A. get it for
5716 	 * later reporting and B. to warn if the currently loaded firmware
5717 	 * is excessively mismatched relative to the driver.)
5718 	 */
5719 	t4_get_fw_version(adap, &adap->params.fw_vers);
5720 	t4_get_tp_version(adap, &adap->params.tp_vers);
5721 	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5722 		struct fw_info *fw_info;
5723 		struct fw_hdr *card_fw;
5724 		const struct firmware *fw;
5725 		const u8 *fw_data = NULL;
5726 		unsigned int fw_size = 0;
5727 
5728 		/* This is the firmware whose headers the driver was compiled
5729 		 * against
5730 		 */
5731 		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5732 		if (fw_info == NULL) {
5733 			dev_err(adap->pdev_dev,
5734 				"unable to get firmware info for chip %d.\n",
5735 				CHELSIO_CHIP_VERSION(adap->params.chip));
5736 			return -EINVAL;
5737 		}
5738 
5739 		/* allocate memory to read the header of the firmware on the
5740 		 * card
5741 		 */
5742 		card_fw = t4_alloc_mem(sizeof(*card_fw));
5743 
5744 		/* Get FW from /lib/firmware/ */
5745 		ret = request_firmware(&fw, fw_info->fw_mod_name,
5746 				       adap->pdev_dev);
5747 		if (ret < 0) {
5748 			dev_err(adap->pdev_dev,
5749 				"unable to load firmware image %s, error %d\n",
5750 				fw_info->fw_mod_name, ret);
5751 		} else {
5752 			fw_data = fw->data;
5753 			fw_size = fw->size;
5754 		}
5755 
5756 		/* upgrade FW logic */
5757 		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5758 				 state, &reset);
5759 
5760 		/* Cleaning up */
5761 		if (fw != NULL)
5762 			release_firmware(fw);
5763 		t4_free_mem(card_fw);
5764 
5765 		if (ret < 0)
5766 			goto bye;
5767 	}
5768 
5769 	/*
5770 	 * Grab VPD parameters.  This should be done after we establish a
5771 	 * connection to the firmware since some of the VPD parameters
5772 	 * (notably the Core Clock frequency) are retrieved via requests to
5773 	 * the firmware.  On the other hand, we need these fairly early on
5774 	 * so we do this right after getting ahold of the firmware.
5775 	 */
5776 	ret = get_vpd_params(adap, &adap->params.vpd);
5777 	if (ret < 0)
5778 		goto bye;
5779 
5780 	/*
5781 	 * Find out what ports are available to us.  Note that we need to do
5782 	 * this before calling adap_init0_no_config() since it needs nports
5783 	 * and portvec ...
5784 	 */
5785 	v =
5786 	    FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5787 	    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5788 	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5789 	if (ret < 0)
5790 		goto bye;
5791 
5792 	adap->params.nports = hweight32(port_vec);
5793 	adap->params.portvec = port_vec;
5794 
5795 	/*
5796 	 * If the firmware is initialized already (and we're not forcing a
5797 	 * master initialization), note that we're living with existing
5798 	 * adapter parameters.  Otherwise, it's time to try initializing the
5799 	 * adapter ...
5800 	 */
5801 	if (state == DEV_STATE_INIT) {
5802 		dev_info(adap->pdev_dev, "Coming up as %s: "\
5803 			 "Adapter already initialized\n",
5804 			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5805 		adap->flags |= USING_SOFT_PARAMS;
5806 	} else {
5807 		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5808 			 "Initializing adapter\n");
5809 
5810 		/*
5811 		 * If the firmware doesn't support Configuration
5812 		 * Files, warn the user and exit.
5813 		 */
5814 		if (ret < 0)
5815 			dev_warn(adap->pdev_dev, "Firmware doesn't support "
5816 				 "configuration file.\n");
5817 		if (force_old_init)
5818 			ret = adap_init0_no_config(adap, reset);
5819 		else {
5820 			/*
5821 			 * Find out whether we're dealing with a version of
5822 			 * the firmware which has configuration file support.
5823 			 */
5824 			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5825 				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5826 			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5827 					      params, val);
5828 
5829 			/*
5830 			 * If the firmware doesn't support Configuration
5831 			 * Files, use the old Driver-based, hard-wired
5832 			 * initialization.  Otherwise, try using the
5833 			 * Configuration File support and fall back to the
5834 			 * Driver-based initialization if there's no
5835 			 * Configuration File found.
5836 			 */
5837 			if (ret < 0)
5838 				ret = adap_init0_no_config(adap, reset);
5839 			else {
5840 				/*
5841 				 * The firmware provides us with a memory
5842 				 * buffer where we can load a Configuration
5843 				 * File from the host if we want to override
5844 				 * the Configuration File in flash.
5845 				 */
5846 
5847 				ret = adap_init0_config(adap, reset);
5848 				if (ret == -ENOENT) {
5849 					dev_info(adap->pdev_dev,
5850 					    "No Configuration File present "
5851 					    "on adapter. Using hard-wired "
5852 					    "configuration parameters.\n");
5853 					ret = adap_init0_no_config(adap, reset);
5854 				}
5855 			}
5856 		}
5857 		if (ret < 0) {
5858 			dev_err(adap->pdev_dev,
5859 				"could not initialize adapter, error %d\n",
5860 				-ret);
5861 			goto bye;
5862 		}
5863 	}
5864 
5865 	/*
5866 	 * If we're living with non-hard-coded parameters (either from a
5867 	 * Firmware Configuration File or values programmed by a different PF
5868 	 * Driver), give the SGE code a chance to pull in anything that it
5869 	 * needs ...  Note that this must be called after we retrieve our VPD
5870 	 * parameters in order to know how to convert core ticks to seconds.
5871 	 */
5872 	if (adap->flags & USING_SOFT_PARAMS) {
5873 		ret = t4_sge_init(adap);
5874 		if (ret < 0)
5875 			goto bye;
5876 	}
5877 
5878 	if (is_bypass_device(adap->pdev->device))
5879 		adap->params.bypass = 1;
5880 
5881 	/*
5882 	 * Grab some of our basic fundamental operating parameters.
5883 	 */
5884 #define FW_PARAM_DEV(param) \
5885 	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5886 	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5887 
5888 #define FW_PARAM_PFVF(param) \
5889 	FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5890 	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
5891 	FW_PARAMS_PARAM_Y(0) | \
5892 	FW_PARAMS_PARAM_Z(0)
5893 
5894 	params[0] = FW_PARAM_PFVF(EQ_START);
5895 	params[1] = FW_PARAM_PFVF(L2T_START);
5896 	params[2] = FW_PARAM_PFVF(L2T_END);
5897 	params[3] = FW_PARAM_PFVF(FILTER_START);
5898 	params[4] = FW_PARAM_PFVF(FILTER_END);
5899 	params[5] = FW_PARAM_PFVF(IQFLINT_START);
5900 	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5901 	if (ret < 0)
5902 		goto bye;
5903 	adap->sge.egr_start = val[0];
5904 	adap->l2t_start = val[1];
5905 	adap->l2t_end = val[2];
5906 	adap->tids.ftid_base = val[3];
5907 	adap->tids.nftids = val[4] - val[3] + 1;
5908 	adap->sge.ingr_start = val[5];
5909 
5910 	/* query params related to active filter region */
5911 	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5912 	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5913 	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5914 	/* If Active filter size is set we enable establishing
5915 	 * offload connection through firmware work request
5916 	 */
5917 	if ((val[0] != val[1]) && (ret >= 0)) {
5918 		adap->flags |= FW_OFLD_CONN;
5919 		adap->tids.aftid_base = val[0];
5920 		adap->tids.aftid_end = val[1];
5921 	}
5922 
5923 	/* If we're running on newer firmware, let it know that we're
5924 	 * prepared to deal with encapsulated CPL messages.  Older
5925 	 * firmware won't understand this and we'll just get
5926 	 * unencapsulated messages ...
5927 	 */
5928 	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5929 	val[0] = 1;
5930 	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5931 
5932 	/*
5933 	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5934 	 * capability.  Earlier versions of the firmware didn't have the
5935 	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5936 	 * permission to use ULPTX MEMWRITE DSGL.
5937 	 */
5938 	if (is_t4(adap->params.chip)) {
5939 		adap->params.ulptx_memwrite_dsgl = false;
5940 	} else {
5941 		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5942 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5943 				      1, params, val);
5944 		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5945 	}
5946 
5947 	/*
5948 	 * Get device capabilities so we can determine what resources we need
5949 	 * to manage.
5950 	 */
5951 	memset(&caps_cmd, 0, sizeof(caps_cmd));
5952 	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5953 				     FW_CMD_REQUEST | FW_CMD_READ);
5954 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5955 	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5956 			 &caps_cmd);
5957 	if (ret < 0)
5958 		goto bye;
5959 
5960 	if (caps_cmd.ofldcaps) {
5961 		/* query offload-related parameters */
5962 		params[0] = FW_PARAM_DEV(NTID);
5963 		params[1] = FW_PARAM_PFVF(SERVER_START);
5964 		params[2] = FW_PARAM_PFVF(SERVER_END);
5965 		params[3] = FW_PARAM_PFVF(TDDP_START);
5966 		params[4] = FW_PARAM_PFVF(TDDP_END);
5967 		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5968 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5969 				      params, val);
5970 		if (ret < 0)
5971 			goto bye;
5972 		adap->tids.ntids = val[0];
5973 		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5974 		adap->tids.stid_base = val[1];
5975 		adap->tids.nstids = val[2] - val[1] + 1;
5976 		/*
5977 		 * Set up the server filter region.  Divide the available filter
5978 		 * region into two parts.  Regular filters get 1/3rd and server
5979 		 * filters get 2/3rd of it.  This is only enabled if the
5980 		 * workaround path is enabled.
5981 		 * 1. Regular filters.
5982 		 * 2. Server filters: these are special filters which are used
5983 		 * to redirect SYN packets to the offload queue.
5984 		 */
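		/* For example, with nftids == 300 the split below leaves
		 * DIV_ROUND_UP(300, 3) == 100 IDs for regular filters and
		 * the remaining 200 IDs for server filters.
		 */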
5985 		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5986 			adap->tids.sftid_base = adap->tids.ftid_base +
5987 					DIV_ROUND_UP(adap->tids.nftids, 3);
5988 			adap->tids.nsftids = adap->tids.nftids -
5989 					 DIV_ROUND_UP(adap->tids.nftids, 3);
5990 			adap->tids.nftids = adap->tids.sftid_base -
5991 						adap->tids.ftid_base;
5992 		}
5993 		adap->vres.ddp.start = val[3];
5994 		adap->vres.ddp.size = val[4] - val[3] + 1;
5995 		adap->params.ofldq_wr_cred = val[5];
5996 
5997 		adap->params.offload = 1;
5998 	}
5999 	if (caps_cmd.rdmacaps) {
6000 		params[0] = FW_PARAM_PFVF(STAG_START);
6001 		params[1] = FW_PARAM_PFVF(STAG_END);
6002 		params[2] = FW_PARAM_PFVF(RQ_START);
6003 		params[3] = FW_PARAM_PFVF(RQ_END);
6004 		params[4] = FW_PARAM_PFVF(PBL_START);
6005 		params[5] = FW_PARAM_PFVF(PBL_END);
6006 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
6007 				      params, val);
6008 		if (ret < 0)
6009 			goto bye;
6010 		adap->vres.stag.start = val[0];
6011 		adap->vres.stag.size = val[1] - val[0] + 1;
6012 		adap->vres.rq.start = val[2];
6013 		adap->vres.rq.size = val[3] - val[2] + 1;
6014 		adap->vres.pbl.start = val[4];
6015 		adap->vres.pbl.size = val[5] - val[4] + 1;
6016 
6017 		params[0] = FW_PARAM_PFVF(SQRQ_START);
6018 		params[1] = FW_PARAM_PFVF(SQRQ_END);
6019 		params[2] = FW_PARAM_PFVF(CQ_START);
6020 		params[3] = FW_PARAM_PFVF(CQ_END);
6021 		params[4] = FW_PARAM_PFVF(OCQ_START);
6022 		params[5] = FW_PARAM_PFVF(OCQ_END);
6023 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
6024 				      val);
6025 		if (ret < 0)
6026 			goto bye;
6027 		adap->vres.qp.start = val[0];
6028 		adap->vres.qp.size = val[1] - val[0] + 1;
6029 		adap->vres.cq.start = val[2];
6030 		adap->vres.cq.size = val[3] - val[2] + 1;
6031 		adap->vres.ocq.start = val[4];
6032 		adap->vres.ocq.size = val[5] - val[4] + 1;
6033 
6034 		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
6035 		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
6036 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
6037 				      val);
6038 		if (ret < 0) {
6039 			adap->params.max_ordird_qp = 8;
6040 			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
6041 			ret = 0;
6042 		} else {
6043 			adap->params.max_ordird_qp = val[0];
6044 			adap->params.max_ird_adapter = val[1];
6045 		}
6046 		dev_info(adap->pdev_dev,
6047 			 "max_ordird_qp %d max_ird_adapter %d\n",
6048 			 adap->params.max_ordird_qp,
6049 			 adap->params.max_ird_adapter);
6050 	}
6051 	if (caps_cmd.iscsicaps) {
6052 		params[0] = FW_PARAM_PFVF(ISCSI_START);
6053 		params[1] = FW_PARAM_PFVF(ISCSI_END);
6054 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
6055 				      params, val);
6056 		if (ret < 0)
6057 			goto bye;
6058 		adap->vres.iscsi.start = val[0];
6059 		adap->vres.iscsi.size = val[1] - val[0] + 1;
6060 	}
6061 #undef FW_PARAM_PFVF
6062 #undef FW_PARAM_DEV
6063 
6064 	/* The MTU/MSS Table is initialized by now, so load its values.  If
6065 	 * we're initializing the adapter, then we'll make any modifications
6066 	 * we want to the MTU/MSS Table and also initialize the congestion
6067 	 * parameters.
6068 	 */
6069 	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
6070 	if (state != DEV_STATE_INIT) {
6071 		int i;
6072 
6073 		/* The default MTU Table contains values 1492 and 1500.
6074 		 * However, for TCP, it's better to have two values which are
6075 		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
6076 		 * This allows us to have a TCP Data Payload which is a
6077 		 * multiple of 8 regardless of what combination of TCP Options
6078 		 * are in use (always a multiple of 4 bytes) which is
6079 		 * important for performance reasons.  For instance, if no
6080 		 * options are in use, then we have a 20-byte IP header and a
6081 		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
6082 		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
6083 		 * which is not a multiple of 8.  So using an MSS of 1488 in
6084 		 * this case results in a TCP Data Payload of 1448 bytes which
6085 		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
6086 		 * Stamps have been negotiated, then an MTU of 1500 bytes
6087 		 * results in a TCP Data Payload of 1448 bytes which, as
6088 		 * above, is a multiple of 8 bytes ...
6089 		 */
6090 		for (i = 0; i < NMTUS; i++)
6091 			if (adap->params.mtus[i] == 1492) {
6092 				adap->params.mtus[i] = 1488;
6093 				break;
6094 			}
6095 
6096 		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6097 			     adap->params.b_wnd);
6098 	}
6099 	t4_init_tp_params(adap);
6100 	adap->flags |= FW_OK;
6101 	return 0;
6102 
6103 	/*
6104 	 * Something bad happened.  If a command timed out or failed with EIO,
6105 	 * the FW is not operating within its spec or something catastrophic
6106 	 * happened to the HW/FW; stop issuing commands.
6107 	 */
6108 bye:
6109 	if (ret != -ETIMEDOUT && ret != -EIO)
6110 		t4_fw_bye(adap, adap->mbox);
6111 	return ret;
6112 }
6113 
6114 /* EEH callbacks */
6115 
6116 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
6117 					 pci_channel_state_t state)
6118 {
6119 	int i;
6120 	struct adapter *adap = pci_get_drvdata(pdev);
6121 
6122 	if (!adap)
6123 		goto out;
6124 
6125 	rtnl_lock();
6126 	adap->flags &= ~FW_OK;
6127 	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
6128 	spin_lock(&adap->stats_lock);
6129 	for_each_port(adap, i) {
6130 		struct net_device *dev = adap->port[i];
6131 
6132 		netif_device_detach(dev);
6133 		netif_carrier_off(dev);
6134 	}
6135 	spin_unlock(&adap->stats_lock);
6136 	if (adap->flags & FULL_INIT_DONE)
6137 		cxgb_down(adap);
6138 	rtnl_unlock();
6139 	if ((adap->flags & DEV_ENABLED)) {
6140 		pci_disable_device(pdev);
6141 		adap->flags &= ~DEV_ENABLED;
6142 	}
6143 out:	return state == pci_channel_io_perm_failure ?
6144 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
6145 }
6146 
6147 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
6148 {
6149 	int i, ret;
6150 	struct fw_caps_config_cmd c;
6151 	struct adapter *adap = pci_get_drvdata(pdev);
6152 
6153 	if (!adap) {
6154 		pci_restore_state(pdev);
6155 		pci_save_state(pdev);
6156 		return PCI_ERS_RESULT_RECOVERED;
6157 	}
6158 
6159 	if (!(adap->flags & DEV_ENABLED)) {
6160 		if (pci_enable_device(pdev)) {
6161 			dev_err(&pdev->dev, "Cannot reenable PCI "
6162 					    "device after reset\n");
6163 			return PCI_ERS_RESULT_DISCONNECT;
6164 		}
6165 		adap->flags |= DEV_ENABLED;
6166 	}
6167 
6168 	pci_set_master(pdev);
6169 	pci_restore_state(pdev);
6170 	pci_save_state(pdev);
6171 	pci_cleanup_aer_uncorrect_error_status(pdev);
6172 
6173 	if (t4_wait_dev_ready(adap->regs) < 0)
6174 		return PCI_ERS_RESULT_DISCONNECT;
6175 	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
6176 		return PCI_ERS_RESULT_DISCONNECT;
6177 	adap->flags |= FW_OK;
6178 	if (adap_init1(adap, &c))
6179 		return PCI_ERS_RESULT_DISCONNECT;
6180 
6181 	for_each_port(adap, i) {
6182 		struct port_info *p = adap2pinfo(adap, i);
6183 
6184 		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
6185 				  NULL, NULL);
6186 		if (ret < 0)
6187 			return PCI_ERS_RESULT_DISCONNECT;
6188 		p->viid = ret;
6189 		p->xact_addr_filt = -1;
6190 	}
6191 
6192 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6193 		     adap->params.b_wnd);
6194 	setup_memwin(adap);
6195 	if (cxgb_up(adap))
6196 		return PCI_ERS_RESULT_DISCONNECT;
6197 	return PCI_ERS_RESULT_RECOVERED;
6198 }
6199 
6200 static void eeh_resume(struct pci_dev *pdev)
6201 {
6202 	int i;
6203 	struct adapter *adap = pci_get_drvdata(pdev);
6204 
6205 	if (!adap)
6206 		return;
6207 
6208 	rtnl_lock();
6209 	for_each_port(adap, i) {
6210 		struct net_device *dev = adap->port[i];
6211 
6212 		if (netif_running(dev)) {
6213 			link_start(dev);
6214 			cxgb_set_rxmode(dev);
6215 		}
6216 		netif_device_attach(dev);
6217 	}
6218 	rtnl_unlock();
6219 }
6220 
6221 static const struct pci_error_handlers cxgb4_eeh = {
6222 	.error_detected = eeh_err_detected,
6223 	.slot_reset     = eeh_slot_reset,
6224 	.resume         = eeh_resume,
6225 };
6226 
6227 static inline bool is_x_10g_port(const struct link_config *lc)
6228 {
6229 	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
6230 	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
6231 }
6232 
6233 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
6234 			     unsigned int us, unsigned int cnt,
6235 			     unsigned int size, unsigned int iqe_size)
6236 {
6237 	q->adap = adap;
6238 	set_rspq_intr_params(q, us, cnt);
6239 	q->iqe_len = iqe_size;
6240 	q->size = size;
6241 }
6242 
6243 /*
6244  * Perform default configuration of DMA queues depending on the number and type
6245  * of ports we found and the number of available CPUs.  Most settings can be
6246  * modified by the admin prior to actual use.
6247  */
6248 static void cfg_queues(struct adapter *adap)
6249 {
6250 	struct sge *s = &adap->sge;
6251 	int i, n10g = 0, qidx = 0;
6252 #ifndef CONFIG_CHELSIO_T4_DCB
6253 	int q10g = 0;
6254 #endif
6255 	int ciq_size;
6256 
6257 	for_each_port(adap, i)
6258 		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
6259 #ifdef CONFIG_CHELSIO_T4_DCB
6260 	/* For Data Center Bridging support we need to be able to support up
6261 	 * to 8 Traffic Priorities; each of which will be assigned to its
6262 	 * own TX Queue in order to prevent Head-Of-Line Blocking.
6263 	 */
6264 	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
6265 		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
6266 			MAX_ETH_QSETS, adap->params.nports * 8);
6267 		BUG_ON(1);
6268 	}
6269 
6270 	for_each_port(adap, i) {
6271 		struct port_info *pi = adap2pinfo(adap, i);
6272 
6273 		pi->first_qset = qidx;
6274 		pi->nqsets = 8;
6275 		qidx += pi->nqsets;
6276 	}
6277 #else /* !CONFIG_CHELSIO_T4_DCB */
6278 	/*
6279 	 * We default to 1 queue per non-10G port and up to # of cores queues
6280 	 * per 10G port.
6281 	 */
6282 	if (n10g)
6283 		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
6284 	if (q10g > netif_get_num_default_rss_queues())
6285 		q10g = netif_get_num_default_rss_queues();
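	/* For example, a 4-port adapter with two 10G ports gives each slower
	 * port one queue set below, and the two 10G ports split the
	 * remaining MAX_ETH_QSETS - 2 queue sets, further capped by the
	 * kernel's default RSS queue count.
	 */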
6286 
6287 	for_each_port(adap, i) {
6288 		struct port_info *pi = adap2pinfo(adap, i);
6289 
6290 		pi->first_qset = qidx;
6291 		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
6292 		qidx += pi->nqsets;
6293 	}
6294 #endif /* !CONFIG_CHELSIO_T4_DCB */
6295 
6296 	s->ethqsets = qidx;
6297 	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
6298 
6299 	if (is_offload(adap)) {
6300 		/*
6301 		 * For offload we use 1 queue/channel if all ports are up to 1G,
6302 		 * otherwise we divide all available queues amongst the channels
6303 		 * capped by the number of available cores.
6304 		 */
6305 		if (n10g) {
6306 			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
6307 				  num_online_cpus());
6308 			s->ofldqsets = roundup(i, adap->params.nports);
6309 		} else
6310 			s->ofldqsets = adap->params.nports;
6311 		/* For RDMA one Rx queue per channel suffices */
6312 		s->rdmaqs = adap->params.nports;
6313 		s->rdmaciqs = adap->params.nports;
6314 	}
6315 
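	/* Ethernet Rx queue defaults: a 5 us interrupt holdoff timer, a
	 * packet count threshold of 10, 1024 response queue entries of 64
	 * bytes each, and a 72-entry free list (see init_rspq() above for
	 * the parameter order).
	 */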
6316 	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
6317 		struct sge_eth_rxq *r = &s->ethrxq[i];
6318 
6319 		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
6320 		r->fl.size = 72;
6321 	}
6322 
6323 	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
6324 		s->ethtxq[i].q.size = 1024;
6325 
6326 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
6327 		s->ctrlq[i].q.size = 512;
6328 
6329 	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
6330 		s->ofldtxq[i].q.size = 1024;
6331 
6332 	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
6333 		struct sge_ofld_rxq *r = &s->ofldrxq[i];
6334 
6335 		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
6336 		r->rspq.uld = CXGB4_ULD_ISCSI;
6337 		r->fl.size = 72;
6338 	}
6339 
6340 	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
6341 		struct sge_ofld_rxq *r = &s->rdmarxq[i];
6342 
6343 		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
6344 		r->rspq.uld = CXGB4_ULD_RDMA;
6345 		r->fl.size = 72;
6346 	}
6347 
6348 	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
6349 	if (ciq_size > SGE_MAX_IQ_SIZE) {
6350 		CH_WARN(adap, "CIQ size too small for available IQs\n");
6351 		ciq_size = SGE_MAX_IQ_SIZE;
6352 	}
6353 
6354 	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
6355 		struct sge_ofld_rxq *r = &s->rdmaciq[i];
6356 
6357 		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
6358 		r->rspq.uld = CXGB4_ULD_RDMA;
6359 	}
6360 
6361 	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
6362 	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
6363 }
6364 
6365 /*
6366  * Reduce the number of Ethernet queues across all ports to at most n.
6367  * n provides at least one queue per port.
6368  */
6369 static void reduce_ethqs(struct adapter *adap, int n)
6370 {
6371 	int i;
6372 	struct port_info *pi;
6373 
6374 	while (n < adap->sge.ethqsets)
6375 		for_each_port(adap, i) {
6376 			pi = adap2pinfo(adap, i);
6377 			if (pi->nqsets > 1) {
6378 				pi->nqsets--;
6379 				adap->sge.ethqsets--;
6380 				if (adap->sge.ethqsets <= n)
6381 					break;
6382 			}
6383 		}
6384 
6385 	n = 0;
6386 	for_each_port(adap, i) {
6387 		pi = adap2pinfo(adap, i);
6388 		pi->first_qset = n;
6389 		n += pi->nqsets;
6390 	}
6391 }
6392 
6393 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
6394 #define EXTRA_VECS 2
6395 
6396 static int enable_msix(struct adapter *adap)
6397 {
6398 	int ofld_need = 0;
6399 	int i, want, need;
6400 	struct sge *s = &adap->sge;
6401 	unsigned int nchan = adap->params.nports;
6402 	struct msix_entry entries[MAX_INGQ + 1];
6403 
6404 	for (i = 0; i < ARRAY_SIZE(entries); ++i)
6405 		entries[i].entry = i;
6406 
6407 	want = s->max_ethqsets + EXTRA_VECS;
6408 	if (is_offload(adap)) {
6409 		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
6410 		/* need nchan for each possible ULD */
6411 		ofld_need = 3 * nchan;
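		/* The three per-channel ULD groups counted here correspond
		 * to the offload (iSCSI), RDMA and RDMA CIQ queue sets added
		 * to "want" above.
		 */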
6412 	}
6413 #ifdef CONFIG_CHELSIO_T4_DCB
6414 	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
6415 	 * each port.
6416 	 */
6417 	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
6418 #else
6419 	need = adap->params.nports + EXTRA_VECS + ofld_need;
6420 #endif
6421 	want = pci_enable_msix_range(adap->pdev, entries, need, want);
6422 	if (want < 0)
6423 		return want;
6424 
6425 	/*
6426 	 * Distribute available vectors to the various queue groups.
6427 	 * Every group gets its minimum requirement and NIC gets top
6428 	 * priority for leftovers.
6429 	 */
6430 	i = want - EXTRA_VECS - ofld_need;
6431 	if (i < s->max_ethqsets) {
6432 		s->max_ethqsets = i;
6433 		if (i < s->ethqsets)
6434 			reduce_ethqs(adap, i);
6435 	}
6436 	if (is_offload(adap)) {
6437 		i = want - EXTRA_VECS - s->max_ethqsets;
6438 		i -= ofld_need - nchan;
6439 		s->ofldqsets = (i / nchan) * nchan;  /* round down */
6440 	}
6441 	for (i = 0; i < want; ++i)
6442 		adap->msix_info[i].vec = entries[i].vector;
6443 
6444 	return 0;
6445 }
6446 
6447 #undef EXTRA_VECS
6448 
6449 static int init_rss(struct adapter *adap)
6450 {
6451 	unsigned int i, j;
6452 
6453 	for_each_port(adap, i) {
6454 		struct port_info *pi = adap2pinfo(adap, i);
6455 
6456 		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6457 		if (!pi->rss)
6458 			return -ENOMEM;
6459 		for (j = 0; j < pi->rss_size; j++)
6460 			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
6461 	}
6462 	return 0;
6463 }
6464 
6465 static void print_port_info(const struct net_device *dev)
6466 {
6467 	char buf[80];
6468 	char *bufp = buf;
6469 	const char *spd = "";
6470 	const struct port_info *pi = netdev_priv(dev);
6471 	const struct adapter *adap = pi->adapter;
6472 
6473 	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
6474 		spd = " 2.5 GT/s";
6475 	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
6476 		spd = " 5 GT/s";
6477 	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
6478 		spd = " 8 GT/s";
6479 
6480 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
6481 		bufp += sprintf(bufp, "100/");
6482 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
6483 		bufp += sprintf(bufp, "1000/");
6484 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
6485 		bufp += sprintf(bufp, "10G/");
6486 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
6487 		bufp += sprintf(bufp, "40G/");
6488 	if (bufp != buf)
6489 		--bufp;
6490 	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6491 
6492 	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
6493 		    adap->params.vpd.id,
6494 		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
6495 		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
6496 		    (adap->flags & USING_MSIX) ? " MSI-X" :
6497 		    (adap->flags & USING_MSI) ? " MSI" : "");
6498 	netdev_info(dev, "S/N: %s, P/N: %s\n",
6499 		    adap->params.vpd.sn, adap->params.vpd.pn);
6500 }
6501 
6502 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
6503 {
6504 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
6505 }
6506 
6507 /*
6508  * Free the following resources:
6509  * - memory used for tables
6510  * - MSI/MSI-X
6511  * - net devices
6512  * - resources FW is holding for us
6513  */
6514 static void free_some_resources(struct adapter *adapter)
6515 {
6516 	unsigned int i;
6517 
6518 	t4_free_mem(adapter->l2t);
6519 	t4_free_mem(adapter->tids.tid_tab);
6520 	disable_msi(adapter);
6521 
6522 	for_each_port(adapter, i)
6523 		if (adapter->port[i]) {
6524 			kfree(adap2pinfo(adapter, i)->rss);
6525 			free_netdev(adapter->port[i]);
6526 		}
6527 	if (adapter->flags & FW_OK)
6528 		t4_fw_bye(adapter, adapter->fn);
6529 }
6530 
6531 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
6532 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6533 		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6534 #define SEGMENT_SIZE 128
6535 
6536 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6537 {
6538 	int func, i, err, s_qpp, qpp, num_seg;
6539 	struct port_info *pi;
6540 	bool highdma = false;
6541 	struct adapter *adapter = NULL;
6542 	void __iomem *regs;
6543 
6544 	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6545 
6546 	err = pci_request_regions(pdev, KBUILD_MODNAME);
6547 	if (err) {
6548 		/* Just info, some other driver may have claimed the device. */
6549 		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6550 		return err;
6551 	}
6552 
6553 	err = pci_enable_device(pdev);
6554 	if (err) {
6555 		dev_err(&pdev->dev, "cannot enable PCI device\n");
6556 		goto out_release_regions;
6557 	}
6558 
6559 	regs = pci_ioremap_bar(pdev, 0);
6560 	if (!regs) {
6561 		dev_err(&pdev->dev, "cannot map device registers\n");
6562 		err = -ENOMEM;
6563 		goto out_disable_device;
6564 	}
6565 
6566 	err = t4_wait_dev_ready(regs);
6567 	if (err < 0)
6568 		goto out_unmap_bar0;
6569 
6570 	/* We control everything through one PF */
6571 	func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
6572 	if (func != ent->driver_data) {
6573 		iounmap(regs);
6574 		pci_disable_device(pdev);
6575 		pci_save_state(pdev);        /* to restore SR-IOV later */
6576 		goto sriov;
6577 	}
6578 
6579 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6580 		highdma = true;
6581 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6582 		if (err) {
6583 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6584 				"coherent allocations\n");
6585 			goto out_unmap_bar0;
6586 		}
6587 	} else {
6588 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6589 		if (err) {
6590 			dev_err(&pdev->dev, "no usable DMA configuration\n");
6591 			goto out_unmap_bar0;
6592 		}
6593 	}
6594 
6595 	pci_enable_pcie_error_reporting(pdev);
6596 	enable_pcie_relaxed_ordering(pdev);
6597 	pci_set_master(pdev);
6598 	pci_save_state(pdev);
6599 
6600 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6601 	if (!adapter) {
6602 		err = -ENOMEM;
6603 		goto out_unmap_bar0;
6604 	}
6605 
6606 	adapter->workq = create_singlethread_workqueue("cxgb4");
6607 	if (!adapter->workq) {
6608 		err = -ENOMEM;
6609 		goto out_free_adapter;
6610 	}
6611 
6612 	/* PCI device has been enabled */
6613 	adapter->flags |= DEV_ENABLED;
6614 
6615 	adapter->regs = regs;
6616 	adapter->pdev = pdev;
6617 	adapter->pdev_dev = &pdev->dev;
6618 	adapter->mbox = func;
6619 	adapter->fn = func;
6620 	adapter->msg_enable = dflt_msg_enable;
6621 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6622 
6623 	spin_lock_init(&adapter->stats_lock);
6624 	spin_lock_init(&adapter->tid_release_lock);
6625 	spin_lock_init(&adapter->win0_lock);
6626 
6627 	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6628 	INIT_WORK(&adapter->db_full_task, process_db_full);
6629 	INIT_WORK(&adapter->db_drop_task, process_db_drop);
6630 
6631 	err = t4_prep_adapter(adapter);
6632 	if (err)
6633 		goto out_free_adapter;
6634 
6635 
6636 	if (!is_t4(adapter->params.chip)) {
6637 		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
6638 		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
6639 		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
6640 		num_seg = PAGE_SIZE / SEGMENT_SIZE;
6641 
6642 		/* Each segment is 128B in size.  Write coalescing is enabled
6643 		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
6644 		 * for the queue is less than the number of segments that can
6645 		 * be accommodated in a page.
6646 		 */
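		/* For example, with a typical 4KB PAGE_SIZE num_seg is
		 * 4096 / 128 == 32, so qpp must not exceed 32 for the BAR2
		 * mapping below to proceed.
		 */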
6647 		if (qpp > num_seg) {
6648 			dev_err(&pdev->dev,
6649 				"Incorrect number of egress queues per page\n");
6650 			err = -EINVAL;
6651 			goto out_free_adapter;
6652 		}
6653 		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6654 		pci_resource_len(pdev, 2));
6655 		if (!adapter->bar2) {
6656 			dev_err(&pdev->dev, "cannot map device bar2 region\n");
6657 			err = -ENOMEM;
6658 			goto out_free_adapter;
6659 		}
6660 	}
6661 
6662 	setup_memwin(adapter);
6663 	err = adap_init0(adapter);
6664 	setup_memwin_rdma(adapter);
6665 	if (err)
6666 		goto out_unmap_bar;
6667 
6668 	for_each_port(adapter, i) {
6669 		struct net_device *netdev;
6670 
6671 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
6672 					   MAX_ETH_QSETS);
6673 		if (!netdev) {
6674 			err = -ENOMEM;
6675 			goto out_free_dev;
6676 		}
6677 
6678 		SET_NETDEV_DEV(netdev, &pdev->dev);
6679 
6680 		adapter->port[i] = netdev;
6681 		pi = netdev_priv(netdev);
6682 		pi->adapter = adapter;
6683 		pi->xact_addr_filt = -1;
6684 		pi->port_id = i;
6685 		netdev->irq = pdev->irq;
6686 
6687 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6688 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6689 			NETIF_F_RXCSUM | NETIF_F_RXHASH |
6690 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
6691 		if (highdma)
6692 			netdev->hw_features |= NETIF_F_HIGHDMA;
6693 		netdev->features |= netdev->hw_features;
6694 		netdev->vlan_features = netdev->features & VLAN_FEAT;
6695 
6696 		netdev->priv_flags |= IFF_UNICAST_FLT;
6697 
6698 		netdev->netdev_ops = &cxgb4_netdev_ops;
6699 #ifdef CONFIG_CHELSIO_T4_DCB
6700 		netdev->dcbnl_ops = &cxgb4_dcb_ops;
6701 		cxgb4_dcb_state_init(netdev);
6702 #endif
6703 		netdev->ethtool_ops = &cxgb_ethtool_ops;
6704 	}
6705 
6706 	pci_set_drvdata(pdev, adapter);
6707 
6708 	if (adapter->flags & FW_OK) {
6709 		err = t4_port_init(adapter, func, func, 0);
6710 		if (err)
6711 			goto out_free_dev;
6712 	}
6713 
6714 	/*
6715 	 * Configure queues and allocate tables now, they can be needed as
6716 	 * soon as the first register_netdev completes.
6717 	 */
6718 	cfg_queues(adapter);
6719 
6720 	adapter->l2t = t4_init_l2t();
6721 	if (!adapter->l2t) {
6722 		/* We tolerate a lack of L2T, giving up some functionality */
6723 		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6724 		adapter->params.offload = 0;
6725 	}
6726 
6727 	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6728 		dev_warn(&pdev->dev, "could not allocate TID table, "
6729 			 "continuing\n");
6730 		adapter->params.offload = 0;
6731 	}
6732 
6733 	/* See what interrupts we'll be using */
6734 	if (msi > 1 && enable_msix(adapter) == 0)
6735 		adapter->flags |= USING_MSIX;
6736 	else if (msi > 0 && pci_enable_msi(pdev) == 0)
6737 		adapter->flags |= USING_MSI;
6738 
6739 	err = init_rss(adapter);
6740 	if (err)
6741 		goto out_free_dev;
6742 
6743 	/*
6744 	 * The card is now ready to go.  If any errors occur during device
6745 	 * registration we do not fail the whole card but rather proceed only
6746 	 * with the ports we manage to register successfully.  However we must
6747 	 * register at least one net device.
6748 	 */
6749 	for_each_port(adapter, i) {
6750 		pi = adap2pinfo(adapter, i);
6751 		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6752 		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6753 
6754 		err = register_netdev(adapter->port[i]);
6755 		if (err)
6756 			break;
6757 		adapter->chan_map[pi->tx_chan] = i;
6758 		print_port_info(adapter->port[i]);
6759 	}
6760 	if (i == 0) {
6761 		dev_err(&pdev->dev, "could not register any net devices\n");
6762 		goto out_free_dev;
6763 	}
6764 	if (err) {
6765 		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6766 		err = 0;
6767 	}
6768 
6769 	if (cxgb4_debugfs_root) {
6770 		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6771 							   cxgb4_debugfs_root);
6772 		setup_debugfs(adapter);
6773 	}
6774 
6775 	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6776 	pdev->needs_freset = 1;
6777 
6778 	if (is_offload(adapter))
6779 		attach_ulds(adapter);
6780 
6781 sriov:
6782 #ifdef CONFIG_PCI_IOV
6783 	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
6784 		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6785 			dev_info(&pdev->dev,
6786 				 "instantiated %u virtual functions\n",
6787 				 num_vf[func]);
6788 #endif
6789 	return 0;
6790 
6791  out_free_dev:
6792 	free_some_resources(adapter);
6793  out_unmap_bar:
6794 	if (!is_t4(adapter->params.chip))
6795 		iounmap(adapter->bar2);
6796  out_free_adapter:
6797 	if (adapter->workq)
6798 		destroy_workqueue(adapter->workq);
6799 
6800 	kfree(adapter);
6801  out_unmap_bar0:
6802 	iounmap(regs);
6803  out_disable_device:
6804 	pci_disable_pcie_error_reporting(pdev);
6805 	pci_disable_device(pdev);
6806  out_release_regions:
6807 	pci_release_regions(pdev);
6808 	return err;
6809 }
6810 
6811 static void remove_one(struct pci_dev *pdev)
6812 {
6813 	struct adapter *adapter = pci_get_drvdata(pdev);
6814 
6815 #ifdef CONFIG_PCI_IOV
6816 	pci_disable_sriov(pdev);
6817 
6818 #endif
6819 
6820 	if (adapter) {
6821 		int i;
6822 
6823 		/* Tear down per-adapter Work Queue first since it can contain
6824 		 * references to our adapter data structure.
6825 		 */
6826 		destroy_workqueue(adapter->workq);
6827 
6828 		if (is_offload(adapter))
6829 			detach_ulds(adapter);
6830 
6831 		for_each_port(adapter, i)
6832 			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6833 				unregister_netdev(adapter->port[i]);
6834 
6835 		debugfs_remove_recursive(adapter->debugfs_root);
6836 
6837 		/* If we allocated filters, free up state associated with any
6838 		 * valid filters ...
6839 		 */
6840 		if (adapter->tids.ftid_tab) {
6841 			struct filter_entry *f = &adapter->tids.ftid_tab[0];
6842 			for (i = 0; i < (adapter->tids.nftids +
6843 					adapter->tids.nsftids); i++, f++)
6844 				if (f->valid)
6845 					clear_filter(adapter, f);
6846 		}
6847 
6848 		if (adapter->flags & FULL_INIT_DONE)
6849 			cxgb_down(adapter);
6850 
6851 		free_some_resources(adapter);
6852 		iounmap(adapter->regs);
6853 		if (!is_t4(adapter->params.chip))
6854 			iounmap(adapter->bar2);
6855 		pci_disable_pcie_error_reporting(pdev);
6856 		if ((adapter->flags & DEV_ENABLED)) {
6857 			pci_disable_device(pdev);
6858 			adapter->flags &= ~DEV_ENABLED;
6859 		}
6860 		pci_release_regions(pdev);
6861 		synchronize_rcu();
6862 		kfree(adapter);
6863 	} else
6864 		pci_release_regions(pdev);
6865 }
6866 
6867 static struct pci_driver cxgb4_driver = {
6868 	.name     = KBUILD_MODNAME,
6869 	.id_table = cxgb4_pci_tbl,
6870 	.probe    = init_one,
6871 	.remove   = remove_one,
6872 	.shutdown = remove_one,
6873 	.err_handler = &cxgb4_eeh,
6874 };
6875 
6876 static int __init cxgb4_init_module(void)
6877 {
6878 	int ret;
6879 
6880 	/* Debugfs support is optional, just warn if this fails */
6881 	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6882 	if (!cxgb4_debugfs_root)
6883 		pr_warn("could not create debugfs entry, continuing\n");
6884 
6885 	ret = pci_register_driver(&cxgb4_driver);
6886 	if (ret < 0)
6887 		debugfs_remove(cxgb4_debugfs_root);
6888 
6889 #if IS_ENABLED(CONFIG_IPV6)
6890 	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6891 #endif
6892 
6893 	return ret;
6894 }
6895 
6896 static void __exit cxgb4_cleanup_module(void)
6897 {
6898 #if IS_ENABLED(CONFIG_IPV6)
6899 	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6900 #endif
6901 	pci_unregister_driver(&cxgb4_driver);
6902 	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
6903 }
6904 
6905 module_init(cxgb4_init_module);
6906 module_exit(cxgb4_cleanup_module);
6907