/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_H__
#define __CXGB4_H__

#include "t4_hw.h"

#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <asm/io.h>
#include "t4_chip_type.h"
#include "cxgb4_uld.h"

#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
extern struct list_head adapter_list;
extern struct mutex uld_mutex;

enum {
	MAX_NPORTS	= 4,     /* max # of ports */
	SERNUM_LEN	= 24,    /* Serial # length */
	EC_LEN		= 16,    /* E/C length */
	ID_LEN		= 16,    /* ID length */
	PN_LEN		= 16,    /* Part Number length */
	MACADDR_LEN	= 12,    /* MAC Address length */
};

enum {
	T4_REGMAP_SIZE = (160 * 1024),
	T5_REGMAP_SIZE = (332 * 1024),
};

enum {
	MEM_EDC0,
	MEM_EDC1,
	MEM_MC,
	MEM_MC0 = MEM_MC,
	MEM_MC1
};

enum {
	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE     = 0x1b800,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN1_BASE_T5  = 0x52000,
	MEMWIN2_APERTURE = 65536,
	MEMWIN2_BASE     = 0x30000,
	MEMWIN2_APERTURE_T5 = 131072,
	MEMWIN2_BASE_T5  = 0x60000,
};

enum dev_master {
	MASTER_CANT,
	MASTER_MAY,
	MASTER_MUST
};

enum dev_state {
	DEV_STATE_UNINIT,
	DEV_STATE_INIT,
	DEV_STATE_ERR
};

enum {
	PAUSE_RX      = 1 << 0,
	PAUSE_TX      = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};

struct port_stats {
	u64 tx_octets;            /* total # of octets in good frames */
	u64 tx_frames;            /* all good frames */
	u64 tx_bcast_frames;      /* all broadcast frames */
	u64 tx_mcast_frames;      /* all multicast frames */
	u64 tx_ucast_frames;      /* all unicast frames */
	u64 tx_error_frames;      /* all error frames */

	u64 tx_frames_64;         /* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 tx_drop;              /* # of dropped Tx frames */
	u64 tx_pause;             /* # of transmitted pause frames */
	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

	u64 rx_octets;            /* total # of octets in good frames */
	u64 rx_frames;            /* all good frames */
	u64 rx_bcast_frames;      /* all broadcast frames */
	u64 rx_mcast_frames;      /* all multicast frames */
	u64 rx_ucast_frames;      /* all unicast frames */
	u64 rx_too_long;          /* # of frames exceeding MTU */
	u64 rx_jabber;            /* # of jabber frames */
	u64 rx_fcs_err;           /* # of received frames with bad FCS */
	u64 rx_len_err;           /* # of received frames with length error */
	u64 rx_symbol_err;        /* symbol errors */
	u64 rx_runt;              /* # of short frames */

	u64 rx_frames_64;         /* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_pause;             /* # of received pause frames */
	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
	u64 rx_ppp7;              /* # of received PPP prio 7 frames */

	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};

struct lb_port_stats {
	u64 octets;
	u64 frames;
	u64 bcast_frames;
	u64 mcast_frames;
	u64 ucast_frames;
	u64 error_frames;

	u64 frames_64;
	u64 frames_65_127;
	u64 frames_128_255;
	u64 frames_256_511;
	u64 frames_512_1023;
	u64 frames_1024_1518;
	u64 frames_1519_max;

	u64 drop;

	u64 ovflow0;
	u64 ovflow1;
	u64 ovflow2;
	u64 ovflow3;
	u64 trunc0;
	u64 trunc1;
	u64 trunc2;
	u64 trunc3;
};

struct tp_tcp_stats {
	u32 tcp_out_rsts;
	u64 tcp_in_segs;
	u64 tcp_out_segs;
	u64 tcp_retrans_segs;
};

struct tp_usm_stats {
	u32 frames;
	u32 drops;
	u64 octets;
};

struct tp_fcoe_stats {
	u32 frames_ddp;
	u32 frames_drop;
	u64 octets_ddp;
};

struct tp_err_stats {
	u32 mac_in_errs[4];
	u32 hdr_in_errs[4];
	u32 tcp_in_errs[4];
	u32 tnl_cong_drops[4];
	u32 ofld_chan_drops[4];
	u32 tnl_tx_drops[4];
	u32 ofld_vlan_drops[4];
	u32 tcp6_in_errs[4];
	u32 ofld_no_neigh;
	u32 ofld_cong_defer;
};

struct tp_cpl_stats {
	u32 req[4];
	u32 rsp[4];
};

struct tp_rdma_stats {
	u32 rqe_dfr_pkt;
	u32 rqe_dfr_mod;
};

struct sge_params {
	u32 hps;			/* host page size for our PF/VF */
	u32 eq_qpp;			/* egress queues/page for our PF/VF */
	u32 iq_qpp;			/* ingress queues/page for our PF/VF */
};

struct tp_params {
	unsigned int tre;            /* log2 of core clocks per TP tick */
	unsigned int la_mask;        /* what events are recorded by TP LA */
	unsigned short tx_modq_map;  /* TX modulation scheduler queue to */
				     /* channel map */

	uint32_t dack_re;            /* DACK timer resolution */
	unsigned short tx_modq[NCHAN];	/* channel to modulation queue map */

	u32 vlan_pri_map;               /* cached TP_VLAN_PRI_MAP */
	u32 ingress_config;             /* cached TP_INGRESS_CONFIG */

	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
	 * subset of the set of fields which may be present in the Compressed
	 * Filter Tuple portion of filters and TCP TCB connections.  The
	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
	 * Since a variable number of fields may or may not be present, their
	 * shifted field positions within the Compressed Filter Tuple may
	 * vary, or not even be present if the field isn't selected in
	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
	 * places we store their offsets here, or a -1 if the field isn't
	 * present.
	 */
	int vlan_shift;
	int vnic_shift;
	int port_shift;
	int protocol_shift;
};
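
/* Illustrative sketch (not part of the driver): composing a Compressed
 * Filter Tuple value with the shifts above.  Here "tp" is assumed to point
 * at an initialized struct tp_params and "iport" is a hypothetical ingress
 * port number; each shift must be tested against -1 before use, since the
 * field may not be selected in TP_VLAN_PRI_MAP:
 *
 *	u64 ftuple = 0;
 *
 *	if (tp->port_shift >= 0)
 *		ftuple |= (u64)iport << tp->port_shift;
 *	if (tp->protocol_shift >= 0)
 *		ftuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
 */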

struct vpd_params {
	unsigned int cclk;
	u8 ec[EC_LEN + 1];
	u8 sn[SERNUM_LEN + 1];
	u8 id[ID_LEN + 1];
	u8 pn[PN_LEN + 1];
	u8 na[MACADDR_LEN + 1];
};

struct pci_params {
	unsigned char speed;
	unsigned char width;
};

struct devlog_params {
	u32 memtype;                    /* which memory (EDC0, EDC1, MC) */
	u32 start;                      /* start of log in firmware memory */
	u32 size;                       /* size of log */
};

/* Stores chip specific parameters */
struct arch_specific_params {
	u8 nchan;
	u8 pm_stats_cnt;
	u8 cng_ch_bits_log;		/* congestion channel map bits width */
	u16 mps_rplc_size;
	u16 vfcount;
	u32 sge_fl_db;
	u16 mps_tcam_size;
};

struct adapter_params {
	struct sge_params sge;
	struct tp_params  tp;
	struct vpd_params vpd;
	struct pci_params pci;
	struct devlog_params devlog;
	enum pcie_memwin drv_memwin;

	unsigned int cim_la_size;

	unsigned int sf_size;             /* serial flash size in bytes */
	unsigned int sf_nsec;             /* # of flash sectors */
	unsigned int sf_fw_start;         /* start of FW image in flash */

	unsigned int fw_vers;
	unsigned int bs_vers;		/* bootstrap version */
	unsigned int tp_vers;
	unsigned int er_vers;		/* expansion ROM version */
	u8 api_vers[7];

	unsigned short mtus[NMTUS];
	unsigned short a_wnd[NCCTRL_WIN];
	unsigned short b_wnd[NCCTRL_WIN];

	unsigned char nports;             /* # of ethernet ports */
	unsigned char portvec;
	enum chip_type chip;               /* chip code */
	struct arch_specific_params arch;  /* chip specific params */
	unsigned char offload;
	unsigned char crypto;		/* HW capability for crypto */

	unsigned char bypass;

	unsigned int ofldq_wr_cred;
	bool ulptx_memwrite_dsgl;          /* use of T5 DSGL allowed */

	unsigned int nsched_cls;          /* number of traffic classes */
	unsigned int max_ordird_qp;       /* Max read depth per RDMA QP */
	unsigned int max_ird_adapter;     /* Max read depth per adapter */
	bool fr_nsmr_tpte_wr_support;	  /* FW support for FR_NSMR_TPTE_WR */
};

/* State needed to monitor the forward progress of SGE Ingress DMA activities
 * and possible hangs.
 */
struct sge_idma_monitor_state {
	unsigned int idma_1s_thresh;	/* 1s threshold in Core Clock ticks */
	unsigned int idma_stalled[2];	/* synthesized stalled timers in HZ */
	unsigned int idma_state[2];	/* IDMA Hang detect state */
	unsigned int idma_qid[2];	/* IDMA Hung Ingress Queue ID */
	unsigned int idma_warn[2];	/* time to warning in HZ */
};

/* Firmware Mailbox Command/Reply log.  All values are in Host-Endian format.
 * The access and execute times are signed in order to accommodate negative
 * error returns.
 */
struct mbox_cmd {
	u64 cmd[MBOX_LEN / 8];		/* a Firmware Mailbox Command/Reply */
	u64 timestamp;			/* OS-dependent timestamp */
	u32 seqno;			/* sequence number */
	s16 access;			/* time (ms) to access mailbox */
	s16 execute;			/* time (ms) to execute */
};

struct mbox_cmd_log {
	unsigned int size;		/* number of entries in the log */
	unsigned int cursor;		/* next position in the log to write */
	u32 seqno;			/* next sequence number */
	/* variable length mailbox command log starts here */
};

/* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
 * return a pointer to the specified entry.
 */
static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
						  unsigned int entry_idx)
{
	return &((struct mbox_cmd *)&(log)[1])[entry_idx];
}
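
/* Usage sketch (illustrative only, not driver code): walking a mailbox
 * command log oldest-entry-first.  The log is a circular buffer of
 * log->size entries and log->cursor is the next slot to be written, so the
 * oldest entry is assumed to live at log->cursor; never-used slots are
 * skipped here by checking for a zero timestamp:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < log->size; i++) {
 *		struct mbox_cmd *entry =
 *			mbox_cmd_log_entry(log, (log->cursor + i) % log->size);
 *
 *		if (!entry->timestamp)
 *			continue;
 *		// entry->cmd[] holds the raw FW command/reply flits
 *	}
 */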

#include "t4fw_api.h"

#define FW_VERSION(chip) ( \
		FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
		FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
		FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
		FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)

struct fw_info {
	u8 chip;
	char *fs_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;
};

struct trace_params {
	u32 data[TRACE_LEN / 4];
	u32 mask[TRACE_LEN / 4];
	unsigned short snap_len;
	unsigned short min_len;
	unsigned char skip_ofst;
	unsigned char skip_len;
	unsigned char invert;
	unsigned char port;
};

struct link_config {
	unsigned short supported;        /* link capabilities */
	unsigned short advertising;      /* advertised capabilities */
	unsigned short lp_advertising;   /* peer advertised capabilities */
	unsigned int   requested_speed;  /* speed user has requested */
	unsigned int   speed;            /* actual link speed */
	unsigned char  requested_fc;     /* flow control user has requested */
	unsigned char  fc;               /* actual link flow control */
	unsigned char  autoneg;          /* autonegotiating? */
	unsigned char  link_ok;          /* link up? */
	unsigned char  link_down_rc;     /* link down reason */
};

#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)

enum {
	MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
	MAX_OFLD_QSETS = 16,          /* # of offload Tx, iscsi Rx queue sets */
	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
};

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};

enum {
	INGQ_EXTRAS = 2,        /* firmware event queue and */
				/*   forwarded interrupts */
	MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
};

struct adapter;
struct sge_rspq;

#include "cxgb4_dcb.h"

#ifdef CONFIG_CHELSIO_T4_FCOE
#include "cxgb4_fcoe.h"
#endif /* CONFIG_CHELSIO_T4_FCOE */

struct port_info {
	struct adapter *adapter;
	u16    viid;
	s16    xact_addr_filt;        /* index of exact MAC address filter */
	u16    rss_size;              /* size of VI's RSS table slice */
	s8     mdio_addr;
	enum fw_port_type port_type;
	u8     mod_type;
	u8     port_id;
	u8     tx_chan;
	u8     lport;                 /* associated offload logical port */
	u8     nqsets;                /* # of qsets */
	u8     first_qset;            /* index of first qset */
	u8     rss_mode;
	struct link_config link_cfg;
	u16   *rss;
	struct port_stats stats_base;
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_dcb_info dcb;     /* Data Center Bridging support */
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	struct cxgb_fcoe fcoe;
#endif /* CONFIG_CHELSIO_T4_FCOE */
	bool rxtstamp;  /* Enable TS */
	struct hwtstamp_config tstamp_config;
	struct sched_table *sched_tbl;
};

struct dentry;
struct work_struct;

enum {                                 /* adapter flags */
	FULL_INIT_DONE     = (1 << 0),
	DEV_ENABLED        = (1 << 1),
	USING_MSI          = (1 << 2),
	USING_MSIX         = (1 << 3),
	FW_OK              = (1 << 4),
	RSS_TNLALLLOOKUP   = (1 << 5),
	USING_SOFT_PARAMS  = (1 << 6),
	MASTER_PF          = (1 << 7),
	FW_OFLD_CONN       = (1 << 9),
};

enum {
	ULP_CRYPTO_LOOKASIDE = 1 << 0,
};

struct rx_sw_desc;

struct sge_fl {                     /* SGE free-buffer queue state */
	unsigned int avail;         /* # of available Rx buffers */
	unsigned int pend_cred;     /* new buffers since last FL DB ring */
	unsigned int cidx;          /* consumer index */
	unsigned int pidx;          /* producer index */
	unsigned long alloc_failed; /* # of times buffer allocation failed */
	unsigned long large_alloc_failed;
	unsigned long mapping_err;  /* # of RX Buffer DMA Mapping failures */
	unsigned long low;          /* # of times momentarily starving */
	unsigned long starving;
	/* RO fields */
	unsigned int cntxt_id;      /* SGE context id for the free list */
	unsigned int size;          /* capacity of free list */
	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
	__be64 *desc;               /* address of HW Rx descriptor ring */
	dma_addr_t addr;            /* bus address of HW ring start */
	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
};

/* A packet gather list */
struct pkt_gl {
	u64 sgetstamp;		    /* SGE Time Stamp for Ingress Packet */
	struct page_frag frags[MAX_SKB_FRAGS];
	void *va;                         /* virtual address of first byte */
	unsigned int nfrags;              /* # of fragments */
	unsigned int tot_len;             /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);
typedef void (*rspq_flush_handler_t)(struct sge_rspq *q);
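
/* Minimal rspq_handler_t sketch (illustrative only): "rsp" points at the
 * response descriptor and "gl" is the packet gather list, or NULL for a
 * response carrying no packet data.  A real handler, such as
 * t4_ethrx_handler() declared later in this header, decodes the CPL
 * message in the descriptor before acting on the payload:
 *
 *	static int example_rspq_handler(struct sge_rspq *q, const __be64 *rsp,
 *					const struct pkt_gl *gl)
 *	{
 *		if (!gl)
 *			return 0;	// no packet data to consume
 *		// ... inspect *rsp and the gl->frags[] payload ...
 *		return 0;
 *	}
 */
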
/* LRO related declarations for ULD */
struct t4_lro_mgr {
#define MAX_LRO_SESSIONS		64
	u8 lro_session_cnt;         /* # of sessions to aggregate */
	unsigned long lro_pkts;     /* # of LRO super packets */
	unsigned long lro_merged;   /* # of wire packets merged by LRO */
	struct sk_buff_head lroq;   /* list of aggregated sessions */
};

struct sge_rspq {                   /* state for an SGE response queue */
	struct napi_struct napi;
	const __be64 *cur_desc;     /* current descriptor in queue */
	unsigned int cidx;          /* consumer index */
	u8 gen;                     /* current generation bit */
	u8 intr_params;             /* interrupt holdoff parameters */
	u8 next_intr_params;        /* holdoff params for next interrupt */
	u8 adaptive_rx;
	u8 pktcnt_idx;              /* interrupt packet threshold */
	u8 uld;                     /* ULD handling this queue */
	u8 idx;                     /* queue index within its group */
	int offset;                 /* offset into current Rx buffer */
	u16 cntxt_id;               /* SGE context id for the response q */
	u16 abs_id;                 /* absolute SGE id for the response q */
	__be64 *desc;               /* address of HW response ring */
	dma_addr_t phys_addr;       /* physical address of the ring */
	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
	unsigned int iqe_len;       /* entry size */
	unsigned int size;          /* capacity of response queue */
	struct adapter *adap;
	struct net_device *netdev;  /* associated net device */
	rspq_handler_t handler;
	rspq_flush_handler_t flush_handler;
	struct t4_lro_mgr lro_mgr;
#ifdef CONFIG_NET_RX_BUSY_POLL
#define CXGB_POLL_STATE_IDLE		0
#define CXGB_POLL_STATE_NAPI		BIT(0) /* NAPI owns this poll */
#define CXGB_POLL_STATE_POLL		BIT(1) /* poll owns this poll */
#define CXGB_POLL_STATE_NAPI_YIELD	BIT(2) /* NAPI yielded this poll */
#define CXGB_POLL_STATE_POLL_YIELD	BIT(3) /* poll yielded this poll */
#define CXGB_POLL_YIELD			(CXGB_POLL_STATE_NAPI_YIELD |   \
					 CXGB_POLL_STATE_POLL_YIELD)
#define CXGB_POLL_LOCKED		(CXGB_POLL_STATE_NAPI |         \
					 CXGB_POLL_STATE_POLL)
#define CXGB_POLL_USER_PEND		(CXGB_POLL_STATE_POLL |         \
					 CXGB_POLL_STATE_POLL_YIELD)
	unsigned int bpoll_state;
	spinlock_t bpoll_lock;		/* lock for busy poll */
#endif /* CONFIG_NET_RX_BUSY_POLL */

};

struct sge_eth_stats {              /* Ethernet queue statistics */
	unsigned long pkts;         /* # of ethernet packets */
	unsigned long lro_pkts;     /* # of LRO super packets */
	unsigned long lro_merged;   /* # of wire packets merged by LRO */
	unsigned long rx_cso;       /* # of Rx checksum offloads */
	unsigned long vlan_ex;      /* # of Rx VLAN extractions */
	unsigned long rx_drops;     /* # of packets dropped due to no mem */
};

struct sge_eth_rxq {                /* SW Ethernet Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_eth_stats stats;
} ____cacheline_aligned_in_smp;

struct sge_ofld_stats {             /* offload queue statistics */
	unsigned long pkts;         /* # of packets */
	unsigned long imm;          /* # of immediate-data packets */
	unsigned long an;           /* # of asynchronous notifications */
	unsigned long nomem;        /* # of responses deferred due to no mem */
};

struct sge_ofld_rxq {               /* SW offload Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_ofld_stats stats;
} ____cacheline_aligned_in_smp;

struct tx_desc {
	__be64 flit[8];
};

struct tx_sw_desc;

struct sge_txq {
	unsigned int  in_use;       /* # of in-use Tx descriptors */
	unsigned int  size;         /* # of descriptors */
	unsigned int  cidx;         /* SW consumer index */
	unsigned int  pidx;         /* producer index */
	unsigned long stops;        /* # of times q has been stopped */
	unsigned long restarts;     /* # of queue restarts */
	unsigned int  cntxt_id;     /* SGE context id for the Tx q */
	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
	struct sge_qstat *stat;     /* queue status entry */
	dma_addr_t    phys_addr;    /* physical address of the ring */
	spinlock_t db_lock;
	int db_disabled;
	unsigned short db_pidx;
	unsigned short db_pidx_inc;
	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
};

struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct netdev_queue *txq;   /* associated netdev TX queue */
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_prio;		    /* DCB Priority bound to queue */
#endif
	unsigned long tso;          /* # of TSO requests */
	unsigned long tx_cso;       /* # of Tx checksum offloads */
	unsigned long vlan_ins;     /* # of Tx VLAN insertions */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	bool service_ofldq_running; /* service_ofldq() is processing sendq */
	u8 full;                    /* the Tx ring is full */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
} ____cacheline_aligned_in_smp;

struct sge_uld_rxq_info {
	char name[IFNAMSIZ];	/* name of ULD driver */
	struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */
	u16 *msix_tbl;		/* msix_tbl for uld */
	u16 *rspq_id;		/* response queue id's of rxq */
	u16 nrxq;		/* # of ingress uld queues */
	u16 nciq;		/* # of completion queues */
	u8 uld;			/* uld type */
};

struct sge {
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
	struct sge_uld_rxq_info **uld_rxq_info;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;

	u16 max_ethqsets;           /* # of available Ethernet queue sets */
	u16 ethqsets;               /* # of active Ethernet queue sets */
	u16 ethtxq_rover;           /* Tx queue to clean up next */
	u16 ofldqsets;              /* # of active ofld queue sets */
	u16 nqs_per_uld;	    /* # of Rx queues per ULD */
	u16 timer_val[SGE_NTIMERS];
	u8 counter_val[SGE_NCOUNTERS];
	u32 fl_pg_order;            /* large page allocation size */
	u32 stat_len;               /* length of status page at ring end */
	u32 pktshift;               /* padding between CPL & packet data */
	u32 fl_align;               /* response queue message alignment */
	u32 fl_starve_thres;        /* Free List starvation threshold */

	struct sge_idma_monitor_state idma_monitor;
	unsigned int egr_start;
	unsigned int egr_sz;
	unsigned int ingr_start;
	unsigned int ingr_sz;
	void **egr_map;    /* qid->queue egress queue map */
	struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
	unsigned long *starving_fl;
	unsigned long *txq_maperr;
	unsigned long *blocked_fl;
	struct timer_list rx_timer; /* refills starving FLs */
	struct timer_list tx_timer; /* checks Tx queues */
};

#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldtxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)

struct l2t_data;

#ifdef CONFIG_PCI_IOV

/* T4 supports SRIOV on PF0-3 and T5 on PF0-7.  However, the Serial
 * Configuration initialization for T5 only has SR-IOV functionality enabled
 * on PF0-3 in order to simplify everything.
 */
#define NUM_OF_PF_WITH_SRIOV 4

#endif

struct doorbell_stats {
	u32 db_drop;
	u32 db_empty;
	u32 db_full;
};

struct hash_mac_addr {
	struct list_head list;
	u8 addr[ETH_ALEN];
};

struct uld_msix_bmap {
	unsigned long *msix_bmap;
	unsigned int mapsize;
	spinlock_t lock; /* lock for acquiring bitmap */
};

struct uld_msix_info {
	unsigned short vec;
	char desc[IFNAMSIZ + 10];
	unsigned int idx;
};

struct vf_info {
	unsigned char vf_mac_addr[ETH_ALEN];
	bool pf_set_mac;
};

struct adapter {
	void __iomem *regs;
	void __iomem *bar2;
	u32 t4_bar0;
	struct pci_dev *pdev;
	struct device *pdev_dev;
	const char *name;
	unsigned int mbox;
	unsigned int pf;
	unsigned int flags;
	unsigned int adap_idx;
	enum chip_type chip;

	int msg_enable;

	struct adapter_params params;
	struct cxgb4_virt_res vres;
	unsigned int swintr;

	struct {
		unsigned short vec;
		char desc[IFNAMSIZ + 10];
	} msix_info[MAX_INGQ + 1];
	struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
	struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
	int msi_idx;

	struct doorbell_stats db_stats;
	struct sge sge;

	struct net_device *port[MAX_NPORTS];
	u8 chan_map[NCHAN];                   /* channel -> port map */

	struct vf_info *vfinfo;
	u8 num_vfs;

	u32 filter_mode;
	unsigned int l2t_start;
	unsigned int l2t_end;
	struct l2t_data *l2t;
	unsigned int clipt_start;
	unsigned int clipt_end;
	struct clip_tbl *clipt;
	struct cxgb4_uld_info *uld;
	void *uld_handle[CXGB4_ULD_MAX];
	unsigned int num_uld;
	unsigned int num_ofld_uld;
	struct list_head list_node;
	struct list_head rcu_node;
	struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */

	void *iscsi_ppm;

	struct tid_info tids;
	void **tid_release_head;
	spinlock_t tid_release_lock;
	struct workqueue_struct *workq;
	struct work_struct tid_release_task;
	struct work_struct db_full_task;
	struct work_struct db_drop_task;
	bool tid_release_task_busy;

	/* support for mailbox command/reply logging */
#define T4_OS_LOG_MBOX_CMDS 256
	struct mbox_cmd_log *mbox_log;

	struct mutex uld_mutex;

	struct dentry *debugfs_root;
	bool use_bd;     /* Use SGE Back Door intfc for reading SGE Contexts */
	bool trace_rss;	/* true: a different RSS flit is used for each of
			 * the 4 trace filters; false: the default RSS flit
			 * is used for all 4 filters.
			 */

	spinlock_t stats_lock;
	spinlock_t win0_lock ____cacheline_aligned_in_smp;

	/* TC u32 offload */
	struct cxgb4_tc_u32_table *tc_u32;
};

/* Support for "sched-class" command to allow a TX Scheduling Class to be
 * programmed with various parameters.
 */
struct ch_sched_params {
	s8   type;                     /* packet or flow */
	union {
		struct {
			s8   level;    /* scheduler hierarchy level */
			s8   mode;     /* per-class or per-flow */
			s8   rateunit; /* bit or packet rate */
			s8   ratemode; /* %port relative or kbps absolute */
			s8   channel;  /* scheduler channel [0..N] */
			s8   class;    /* scheduler class [0..N] */
			s32  minrate;  /* minimum rate */
			s32  maxrate;  /* maximum rate */
			s16  weight;   /* percent weight */
			s16  pktsize;  /* average packet size */
		} params;
	} u;
};

enum {
	SCHED_CLASS_TYPE_PACKET = 0,    /* class type */
};

enum {
	SCHED_CLASS_LEVEL_CL_RL = 0,    /* class rate limiter */
};

enum {
	SCHED_CLASS_MODE_CLASS = 0,     /* per-class scheduling */
};

enum {
	SCHED_CLASS_RATEUNIT_BITS = 0,  /* bit rate scheduling */
};

enum {
	SCHED_CLASS_RATEMODE_ABS = 1,   /* Kb/s */
};
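
/* Illustrative sketch (values are examples, not from the driver): a
 * class-level rate limiter capped at an absolute 100 Mb/s, built from the
 * constants above:
 *
 *	struct ch_sched_params p = {
 *		.type              = SCHED_CLASS_TYPE_PACKET,
 *		.u.params.level    = SCHED_CLASS_LEVEL_CL_RL,
 *		.u.params.mode     = SCHED_CLASS_MODE_CLASS,
 *		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
 *		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
 *		.u.params.channel  = 0,
 *		.u.params.class    = 0,
 *		.u.params.maxrate  = 100000,	// Kb/s when ratemode is ABS
 *	};
 */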

/* Support for "sched_queue" command to allow one or more NIC TX Queues
 * to be bound to a TX Scheduling Class.
 */
struct ch_sched_queue {
	s8   queue;    /* queue index */
	s8   class;    /* class index */
};
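
/* Illustrative sketch: binding Ethernet TX queue 0 to scheduling class 0.
 * How the structure reaches the scheduler binding code is outside this
 * header:
 *
 *	struct ch_sched_queue qe = {
 *		.queue = 0,
 *		.class = 0,
 *	};
 */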

/* Defined bit width of user definable filter tuples
 */
#define ETHTYPE_BITWIDTH 16
#define FRAG_BITWIDTH 1
#define MACIDX_BITWIDTH 9
#define FCOE_BITWIDTH 1
#define IPORT_BITWIDTH 3
#define MATCHTYPE_BITWIDTH 3
#define PROTO_BITWIDTH 8
#define TOS_BITWIDTH 8
#define PF_BITWIDTH 8
#define VF_BITWIDTH 8
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16

/* Filter matching rules.  These consist of a set of ingress packet field
 * (value, mask) tuples.  The associated ingress packet field matches the
 * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
 * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
 * matches an ingress packet when all of the individual field matching
 * rules are true.
 *
 * Partial field masks are always valid, however, while it may be easy to
 * understand their meanings for some fields (e.g. IP address to match a
 * subnet), for others making sensible partial masks is less intuitive (e.g.
 * MPS match type) ...
 *
 * Most of the following data structures are modeled on T4 capabilities.
 * Drivers for earlier chips use the subsets which make sense for those chips.
 * We really need to come up with a hardware-independent mechanism to
 * represent hardware filter capabilities ...
 */
struct ch_filter_tuple {
	/* Compressed header matching field rules.  The TP_VLAN_PRI_MAP
	 * register selects which of these fields will participate in the
	 * filter match rules -- up to a maximum of 36 bits.  Because
	 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
	 * set of fields.
	 */
	uint32_t ethtype:ETHTYPE_BITWIDTH;      /* Ethernet type */
	uint32_t frag:FRAG_BITWIDTH;            /* IP fragmentation header */
	uint32_t ivlan_vld:1;                   /* inner VLAN valid */
	uint32_t ovlan_vld:1;                   /* outer VLAN valid */
	uint32_t pfvf_vld:1;                    /* PF/VF valid */
	uint32_t macidx:MACIDX_BITWIDTH;        /* exact match MAC index */
	uint32_t fcoe:FCOE_BITWIDTH;            /* FCoE packet */
	uint32_t iport:IPORT_BITWIDTH;          /* ingress port */
	uint32_t matchtype:MATCHTYPE_BITWIDTH;  /* MPS match type */
	uint32_t proto:PROTO_BITWIDTH;          /* protocol type */
	uint32_t tos:TOS_BITWIDTH;              /* TOS/Traffic Type */
	uint32_t pf:PF_BITWIDTH;                /* PCI-E PF ID */
	uint32_t vf:VF_BITWIDTH;                /* PCI-E VF ID */
	uint32_t ivlan:IVLAN_BITWIDTH;          /* inner VLAN */
	uint32_t ovlan:OVLAN_BITWIDTH;          /* outer VLAN */

	/* Uncompressed header matching field rules.  These are always
	 * available for field rules.
	 */
	uint8_t lip[16];        /* local IP address (IPv4 in [3:0]) */
	uint8_t fip[16];        /* foreign IP address (IPv4 in [3:0]) */
	uint16_t lport;         /* local port */
	uint16_t fport;         /* foreign port */
};

/* A filter ioctl command.
 */
struct ch_filter_specification {
	/* Administrative fields for filter.
	 */
	uint32_t hitcnts:1;     /* count filter hits in TCB */
	uint32_t prio:1;        /* filter has priority over active/server */

	/* Fundamental filter typing.  This is the one element of filter
	 * matching that doesn't exist as a (value, mask) tuple.
	 */
	uint32_t type:1;        /* 0 => IPv4, 1 => IPv6 */

	/* Packet dispatch information.  Ingress packets which match the
	 * filter rules will be dropped, passed to the host or switched back
	 * out as egress packets.
	 */
	uint32_t action:2;      /* drop, pass, switch */

	uint32_t rpttid:1;      /* report TID in RSS hash field */

	uint32_t dirsteer:1;    /* 0 => RSS, 1 => steer to iq */
	uint32_t iq:10;         /* ingress queue */

	uint32_t maskhash:1;    /* dirsteer=0: store RSS hash in TCB */
	uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
				/*             1 => TCB contains IQ ID */

	/* Switch proxy/rewrite fields.  An ingress packet which matches a
	 * filter with "switch" set will be looped back out as an egress
	 * packet -- potentially with some Ethernet header rewriting.
	 */
	uint32_t eport:2;       /* egress port to switch packet out */
	uint32_t newdmac:1;     /* rewrite destination MAC address */
	uint32_t newsmac:1;     /* rewrite source MAC address */
	uint32_t newvlan:2;     /* rewrite VLAN Tag */
	uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
	uint8_t smac[ETH_ALEN]; /* new source MAC address */
	uint16_t vlan;          /* VLAN Tag to insert */

	/* Filter rule value/mask pairs.
	 */
	struct ch_filter_tuple val;
	struct ch_filter_tuple mask;
};

enum {
	FILTER_PASS = 0,        /* default */
	FILTER_DROP,
	FILTER_SWITCH
};

enum {
	VLAN_NOCHANGE = 0,      /* default */
	VLAN_REMOVE,
	VLAN_INSERT,
	VLAN_REWRITE
};
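
/* Illustrative sketch (not a rule shipped with the driver): a filter that
 * drops IPv4 TCP packets arriving on ingress port 0, expressed as the
 * (value, mask) tuples described above.  An all-ones mask fully qualifies
 * a field; the bit-fields truncate it to the field's width:
 *
 *	struct ch_filter_specification fs = {
 *		.action     = FILTER_DROP,
 *		.type       = 0,		// 0 => IPv4
 *		.val.proto  = IPPROTO_TCP,
 *		.mask.proto = ~0,
 *		.val.iport  = 0,
 *		.mask.iport = ~0,
 *	};
 */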

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter. */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct filter_ctx *ctx; /* Caller's completion hook */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */
	struct net_device *dev; /* Associated net device */
	u32 tid;                /* This will store the actual tid */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

static inline int is_pci_uld(const struct adapter *adap)
{
	return adap->params.crypto;
}

static inline int is_uld(const struct adapter *adap)
{
	return (adap->params.offload || adap->params.crypto);
}

static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
	return readl(adap->regs + reg_addr);
}

static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
	writel(val, adap->regs + reg_addr);
}

#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif

static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
	return readq(adap->regs + reg_addr);
}

static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
	writeq(val, adap->regs + reg_addr);
}

/**
 * t4_set_hw_addr - store a port's MAC address in SW
 * @adapter: the adapter
 * @port_idx: the port index
 * @hw_addr: the Ethernet address
 *
 * Store the Ethernet address of the given port in SW.  Called by the common
 * code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
				  u8 hw_addr[])
{
	ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
	ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
	return netdev_priv(adap->port[idx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
{
	spin_lock_init(&q->bpoll_lock);
	q->bpoll_state = CXGB_POLL_STATE_IDLE;
}

static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
{
	bool rc = true;

	spin_lock(&q->bpoll_lock);
	if (q->bpoll_state & CXGB_POLL_LOCKED) {
		q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
		rc = false;
	} else {
		q->bpoll_state = CXGB_POLL_STATE_NAPI;
	}
	spin_unlock(&q->bpoll_lock);
	return rc;
}

static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
{
	bool rc = false;

	spin_lock(&q->bpoll_lock);
	if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
		rc = true;
	q->bpoll_state = CXGB_POLL_STATE_IDLE;
	spin_unlock(&q->bpoll_lock);
	return rc;
}

static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
{
	bool rc = true;

	spin_lock_bh(&q->bpoll_lock);
	if (q->bpoll_state & CXGB_POLL_LOCKED) {
		q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
		rc = false;
	} else {
		q->bpoll_state |= CXGB_POLL_STATE_POLL;
	}
	spin_unlock_bh(&q->bpoll_lock);
	return rc;
}

static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
{
	bool rc = false;

	spin_lock_bh(&q->bpoll_lock);
	if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
		rc = true;
	q->bpoll_state = CXGB_POLL_STATE_IDLE;
	spin_unlock_bh(&q->bpoll_lock);
	return rc;
}

static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
{
	return q->bpoll_state & CXGB_POLL_USER_PEND;
}
#else
static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
{
}

static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
{
	return true;
}

static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
{
	return false;
}

static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
{
	return false;
}

static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
{
	return false;
}

static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
{
	return false;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/* Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}

/* Return a queue's interrupt hold-off time in us.  0 means no timer. */
static inline unsigned int qtimer_val(const struct adapter *adap,
				      const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}

/* driver version & name used for ethtool_drvinfo */
extern char cxgb4_driver_name[];
extern const char cxgb4_driver_version[];

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void *t4_alloc_mem(size_t size);

void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd,
		     rspq_flush_handler_t flush_handler, int cong);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid);
int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
			unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
			  struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
int cxgb_busy_poll(struct napi_struct *napi);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
extern int dbfifo_int_thresh;

#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; ++iter)

static inline int is_bypass(struct adapter *adap)
{
	return adap->params.bypass;
}

static inline int is_bypass_device(int device)
{
	/* this should be set based upon device capabilities */
	switch (device) {
	case 0x440b:
	case 0x440c:
		return 1;
	default:
		return 0;
	}
}

static inline int is_10gbt_device(int device)
{
	/* this should be set based upon device capabilities */
	switch (device) {
	case 0x4409:
	case 0x4486:
		return 1;

	default:
		return 0;
	}
}

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}

static inline unsigned int us_to_core_ticks(const struct adapter *adap,
					    unsigned int us)
{
	return (us * adap->params.vpd.cclk) / 1000;
}

static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
					    unsigned int ticks)
{
	/* add Core Clock / 2 to round ticks to nearest uS */
	return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
		adapter->params.vpd.cclk);
}
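
/* Worked example (hypothetical clock, assuming vpd.cclk is stored in kHz
 * as the /1000 conversions above imply): with a 200 MHz core clock,
 * cclk == 200000, so core_ticks_per_usec() returns 200,
 * us_to_core_ticks(adap, 5) returns 1000, and core_ticks_to_us(adap, 1000)
 * rounds back to 5 us.
 */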
1363 
1364 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
1365 		      u32 val);
1366 
1367 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
1368 			    int size, void *rpl, bool sleep_ok, int timeout);
1369 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
1370 		    void *rpl, bool sleep_ok);
1371 
t4_wr_mbox_timeout(struct adapter * adap,int mbox,const void * cmd,int size,void * rpl,int timeout)1372 static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
1373 				     const void *cmd, int size, void *rpl,
1374 				     int timeout)
1375 {
1376 	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
1377 				       timeout);
1378 }
1379 
t4_wr_mbox(struct adapter * adap,int mbox,const void * cmd,int size,void * rpl)1380 static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
1381 			     int size, void *rpl)
1382 {
1383 	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
1384 }
1385 
t4_wr_mbox_ns(struct adapter * adap,int mbox,const void * cmd,int size,void * rpl)1386 static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
1387 				int size, void *rpl)
1388 {
1389 	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
1390 }
1391 
1392 /**
1393  *	hash_mac_addr - return the hash value of a MAC address
1394  *	@addr: the 48-bit Ethernet MAC address
1395  *
1396  *	Hashes a MAC address according to the hash function used by HW inexact
1397  *	(hash) address matching.
1398  */
hash_mac_addr(const u8 * addr)1399 static inline int hash_mac_addr(const u8 *addr)
1400 {
1401 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1402 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1403 
1404 	a ^= b;
1405 	a ^= (a >> 12);
1406 	a ^= (a >> 6);
1407 	return a & 0x3f;
1408 }
1409 
1410 int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
1411 			       unsigned int cnt);
init_rspq(struct adapter * adap,struct sge_rspq * q,unsigned int us,unsigned int cnt,unsigned int size,unsigned int iqe_size)1412 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
1413 			     unsigned int us, unsigned int cnt,
1414 			     unsigned int size, unsigned int iqe_size)
1415 {
1416 	q->adap = adap;
1417 	cxgb4_set_rspq_intr_params(q, us, cnt);
1418 	q->iqe_len = iqe_size;
1419 	q->size = size;
1420 }
1421 
1422 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
1423 		       unsigned int data_reg, const u32 *vals,
1424 		       unsigned int nregs, unsigned int start_idx);
1425 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
1426 		      unsigned int data_reg, u32 *vals, unsigned int nregs,
1427 		      unsigned int start_idx);
1428 void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val);
1429 
1430 struct fw_filter_wr;
1431 
1432 void t4_intr_enable(struct adapter *adapter);
1433 void t4_intr_disable(struct adapter *adapter);
1434 int t4_slow_intr_handler(struct adapter *adapter);
1435 
1436 int t4_wait_dev_ready(void __iomem *regs);
1437 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
1438 		  struct link_config *lc);
1439 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
1440 
1441 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
1442 u32 t4_get_util_window(struct adapter *adap);
1443 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window);
1444 
1445 #define T4_MEMORY_WRITE	0
1446 #define T4_MEMORY_READ	1
1447 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
1448 		 void *buf, int dir);
t4_memory_write(struct adapter * adap,int mtype,u32 addr,u32 len,__be32 * buf)1449 static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
1450 				  u32 len, __be32 *buf)
1451 {
1452 	return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0);
1453 }
1454 
1455 unsigned int t4_get_regs_len(struct adapter *adapter);
1456 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
1457 
1458 int t4_seeprom_wp(struct adapter *adapter, bool enable);
1459 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
1460 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
1461 int t4_read_flash(struct adapter *adapter, unsigned int addr,
1462 		  unsigned int nwords, u32 *data, int byte_oriented);
1463 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
1464 int t4_load_phy_fw(struct adapter *adap,
1465 		   int win, spinlock_t *lock,
1466 		   int (*phy_fw_version)(const u8 *, size_t),
1467 		   const u8 *phy_fw_data, size_t phy_fw_size);
1468 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
1469 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
1470 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
1471 		  const u8 *fw_data, unsigned int size, int force);
1472 int t4_fl_pkt_align(struct adapter *adap);
1473 unsigned int t4_flash_cfg_addr(struct adapter *adapter);
1474 int t4_check_fw_version(struct adapter *adap);
1475 int t4_get_fw_version(struct adapter *adapter, u32 *vers);
1476 int t4_get_bs_version(struct adapter *adapter, u32 *vers);
1477 int t4_get_tp_version(struct adapter *adapter, u32 *vers);
1478 int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
1479 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1480 	       const u8 *fw_data, unsigned int fw_size,
1481 	       struct fw_hdr *card_fw, enum dev_state state, int *reset);
1482 int t4_prep_adapter(struct adapter *adapter);
1483 
1484 enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
1485 int t4_bar2_sge_qregs(struct adapter *adapter,
1486 		      unsigned int qid,
1487 		      enum t4_bar2_qtype qtype,
1488 		      int user,
1489 		      u64 *pbar2_qoffset,
1490 		      unsigned int *pbar2_qid);

unsigned int qtimer_val(const struct adapter *adap,
			const struct sge_rspq *q);

int t4_init_devlog_params(struct adapter *adapter);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_init_portinfo(struct port_info *pi, int mbox,
		     int port, int pf, int vf, u8 mac[]);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags);
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq);
int t4_read_rss(struct adapter *adapter, u16 *entries);
void t4_read_rss_key(struct adapter *adapter, u32 *key);
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp);
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh);
u32 t4_read_rss_pf_map(struct adapter *adapter);
u32 t4_read_rss_pf_mask(struct adapter *adapter);
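
/* Usage sketch (illustrative): reading and rewriting the global RSS key,
 * which t4_read_rss_key()/t4_write_rss_key() handle as ten 32-bit words.
 * Passing a negative idx so that no key index is reprogrammed is an
 * assumption of this sketch.
 *
 *	u32 key[10];
 *
 *	t4_read_rss_key(adapter, key);
 *	key[0] ^= 0x1;		(example perturbation)
 *	t4_write_rss_key(adapter, key, -1);
 */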

unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
		    size_t n);
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
		    size_t n);
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp);
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp);
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr);
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_get_port_stats_offset(struct adapter *adap, int idx,
			      struct port_stats *stats,
			      struct port_stats *offset);
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val);
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6);
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta);
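
/* Usage sketch (illustrative): snapshotting the TP statistics declared
 * above, e.g. the TCP counters for both address families in a single call.
 *
 *	struct tp_tcp_stats v4, v6;
 *
 *	t4_tp_get_tcp_stats(adap, &v4, &v6);
 */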

void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);

void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);

void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable);

int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state);
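
/* Usage sketch (illustrative): attaching to the firmware.  A PF that is
 * willing but not required to be master passes MASTER_MAY; on success the
 * return value is the master's mailbox and *state reports whether the
 * device still needs initialization.
 *
 *	enum dev_state state;
 *	int ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY,
 *			      &state);
 *
 *	if (ret >= 0 && state == DEV_STATE_UNINIT)
 *		... configure and t4_fw_initialize() the device ...
 */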
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val);
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw);
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val, int timeout);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val);
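
/* Usage sketch (illustrative): reading one firmware parameter and writing
 * it back.  The parameter id would normally be built from the FW_PARAMS_*
 * encodings; it is elided here.
 *
 *	u32 param = ...;	(device/parameter encoding, elided)
 *	u32 val;
 *
 *	if (!t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val))
 *		t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
 */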
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size);
int t4_free_vi(struct adapter *adap, unsigned int mbox,
	       unsigned int pf, unsigned int vf,
	       unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
		     unsigned int viid, unsigned int naddr,
		     const u8 **addr, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en);
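
/* Usage sketch (illustrative): bringing a virtual interface up for normal
 * unicast traffic -- program the rx mode, then enable both directions.
 * The MTU is an example value; passing -1 for an rx-mode argument leaves
 * that setting unchanged.
 *
 *	t4_set_rxmode(adap, adap->mbox, viid, 1500, 0, 0, 1, -1, true);
 *	t4_enable_vi(adap, adap->mbox, viid, true, true);
 */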
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks);
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val);
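
/* Usage sketch (illustrative): reading a clause-45 PHY register over MDIO.
 * MDIO_MMD_PMAPMD and MDIO_STAT1 come from <linux/mdio.h>, which users of
 * this sketch are assumed to include; the PHY address 0 is an example.
 *
 *	u16 stat;
 *
 *	if (!t4_mdio_rd(adap, adap->mbox, 0, MDIO_MMD_PMAPMD, MDIO_STAT1,
 *			&stat))
 *		... inspect stat ...
 */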
int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid);
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
			int filter_index, int enable);
void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
			 int filter_index, int *enabled);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val);
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
		    int rateunit, int ratemode, int channel, int class,
		    int minrate, int maxrate, int weight, int pktsize);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
void t4_free_mem(void *addr);
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma);
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks);
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
		      unsigned int naddr, u8 *addr);
void t4_uld_mem_free(struct adapter *adap);
int t4_uld_mem_alloc(struct adapter *adap);
void t4_uld_clean_up(struct adapter *adap);
void t4_register_netevent_notifier(void);
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
#endif /* __CXGB4_H__ */