1 /*
2  * Copyright(c) 2015 - 2017 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 /*
49  * This file contains all of the code that is specific to the HFI chip
50  */
51 
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56 
57 #include "hfi.h"
58 #include "trace.h"
59 #include "mad.h"
60 #include "pio.h"
61 #include "sdma.h"
62 #include "eprom.h"
63 #include "efivar.h"
64 #include "platform.h"
65 #include "aspm.h"
66 #include "affinity.h"
67 #include "debugfs.h"
68 
69 #define NUM_IB_PORTS 1
70 
71 uint kdeth_qp;
72 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
73 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
74 
75 uint num_vls = HFI1_MAX_VLS_SUPPORTED;
76 module_param(num_vls, uint, S_IRUGO);
77 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
78 
79 /*
80  * Default time to aggregate two 10K packets from the idle state
81  * (timer not running). The timer starts at the end of the first packet,
82  * so only the time for one 10K packet and header plus a bit extra is needed.
83  * 10 * 1024 + 64 header bytes = 10304 bytes
84  * 10304 bytes / 12.5 GB/s = 824.32 ns
85  */
86 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
87 module_param(rcv_intr_timeout, uint, S_IRUGO);
88 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
89 
90 uint rcv_intr_count = 16; /* same as qib */
91 module_param(rcv_intr_count, uint, S_IRUGO);
92 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
93 
94 ushort link_crc_mask = SUPPORTED_CRCS;
95 module_param(link_crc_mask, ushort, S_IRUGO);
96 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
97 
98 uint loopback;
99 module_param_named(loopback, loopback, uint, S_IRUGO);
100 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
101 
102 /* Other driver tunables */
103 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
104 static ushort crc_14b_sideband = 1;
105 static uint use_flr = 1;
106 uint quick_linkup; /* skip LNI */
107 
108 struct flag_table {
109 	u64 flag;	/* the flag */
110 	char *str;	/* description string */
111 	u16 extra;	/* extra information */
112 	u16 unused0;
113 	u32 unused1;
114 };
115 
116 /* str must be a string constant */
117 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
118 #define FLAG_ENTRY0(str, flag) {flag, str, 0}
119 
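/*
 * Editorial sketch (not part of the original driver): a flag_table is
 * typically consumed by walking it and appending the description of every
 * bit set in an error CSR value.  The helper name below is hypothetical;
 * the driver's real formatting routines appear later in this file and may
 * differ in detail.
 */
static inline size_t example_format_err_flags(char *buf, size_t len, u64 reg,
					      const struct flag_table *table,
					      size_t nentries)
{
	size_t i, used = 0;

	for (i = 0; i < nentries && used < len; i++)
		if (reg & table[i].flag)
			used += scnprintf(buf + used, len - used, "%s%s",
					  used ? "," : "", table[i].str);
	return used;
}
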
120 /* Send Error Consequences */
121 #define SEC_WRITE_DROPPED	0x1
122 #define SEC_PACKET_DROPPED	0x2
123 #define SEC_SC_HALTED		0x4	/* per-context only */
124 #define SEC_SPC_FREEZE		0x8	/* per-HFI only */
125 
126 #define DEFAULT_KRCVQS		  2
127 #define MIN_KERNEL_KCTXTS         2
128 #define FIRST_KERNEL_KCTXT        1
129 
130 /*
131  * RSM instance allocation
132  *   0 - Verbs
133  *   1 - User Fecn Handling
134  *   2 - Vnic
135  */
136 #define RSM_INS_VERBS             0
137 #define RSM_INS_FECN              1
138 #define RSM_INS_VNIC              2
139 
140 /* Bit offset into the GUID which carries HFI id information */
141 #define GUID_HFI_INDEX_SHIFT     39
142 
143 /* extract the emulation revision */
144 #define emulator_rev(dd) ((dd)->irev >> 8)
145 /* parallel and serial emulation versions are 3 and 4 respectively */
146 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
147 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
148 
149 /* RSM fields for Verbs */
150 /* packet type */
151 #define IB_PACKET_TYPE         2ull
152 #define QW_SHIFT               6ull
153 /* QPN[7..1] */
154 #define QPN_WIDTH              7ull
155 
156 /* LRH.BTH: QW 0, OFFSET 48 - for match */
157 #define LRH_BTH_QW             0ull
158 #define LRH_BTH_BIT_OFFSET     48ull
159 #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
160 #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
161 #define LRH_BTH_SELECT
162 #define LRH_BTH_MASK           3ull
163 #define LRH_BTH_VALUE          2ull
164 
165 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
166 #define LRH_SC_QW              0ull
167 #define LRH_SC_BIT_OFFSET      56ull
168 #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
169 #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
170 #define LRH_SC_MASK            128ull
171 #define LRH_SC_VALUE           0ull
172 
173 /* SC[n..0] QW 0, OFFSET 60 - for select */
174 #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
175 
176 /* QPN[m+n:1] QW 1, OFFSET 1 */
177 #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
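
/*
 * Editorial worked example of the encoding above: each match/select offset
 * packs a quad-word index in the upper bits and a bit offset within that QW
 * in the lower bits, so
 *
 *	QPN_SELECT_OFFSET    = (1 << QW_SHIFT) | 1  = 0x41  ("QW 1, bit 1")
 *	LRH_BTH_MATCH_OFFSET = (0 << QW_SHIFT) | 48 = 0x30  ("QW 0, bit 48")
 */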
178 
179 /* RSM fields for Vnic */
180 /* L2_TYPE: QW 0, OFFSET 61 - for match */
181 #define L2_TYPE_QW             0ull
182 #define L2_TYPE_BIT_OFFSET     61ull
183 #define L2_TYPE_OFFSET(off)    ((L2_TYPE_QW << QW_SHIFT) | (off))
184 #define L2_TYPE_MATCH_OFFSET   L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
185 #define L2_TYPE_MASK           3ull
186 #define L2_16B_VALUE           2ull
187 
188 /* L4_TYPE QW 1, OFFSET 0 - for match */
189 #define L4_TYPE_QW              1ull
190 #define L4_TYPE_BIT_OFFSET      0ull
191 #define L4_TYPE_OFFSET(off)     ((L4_TYPE_QW << QW_SHIFT) | (off))
192 #define L4_TYPE_MATCH_OFFSET    L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
193 #define L4_16B_TYPE_MASK        0xFFull
194 #define L4_16B_ETH_VALUE        0x78ull
195 
196 /* 16B VESWID - for select */
197 #define L4_16B_HDR_VESWID_OFFSET  ((2 << QW_SHIFT) | (16ull))
198 /* 16B ENTROPY - for select */
199 #define L2_16B_ENTROPY_OFFSET     ((1 << QW_SHIFT) | (32ull))
200 
201 /* defines to build power on SC2VL table */
202 #define SC2VL_VAL( \
203 	num, \
204 	sc0, sc0val, \
205 	sc1, sc1val, \
206 	sc2, sc2val, \
207 	sc3, sc3val, \
208 	sc4, sc4val, \
209 	sc5, sc5val, \
210 	sc6, sc6val, \
211 	sc7, sc7val) \
212 ( \
213 	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
214 	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
215 	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
216 	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
217 	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
218 	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
219 	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
220 	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
221 )
222 
223 #define DC_SC_VL_VAL( \
224 	range, \
225 	e0, e0val, \
226 	e1, e1val, \
227 	e2, e2val, \
228 	e3, e3val, \
229 	e4, e4val, \
230 	e5, e5val, \
231 	e6, e6val, \
232 	e7, e7val, \
233 	e8, e8val, \
234 	e9, e9val, \
235 	e10, e10val, \
236 	e11, e11val, \
237 	e12, e12val, \
238 	e13, e13val, \
239 	e14, e14val, \
240 	e15, e15val) \
241 ( \
242 	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
243 	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
244 	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
245 	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
246 	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
247 	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
248 	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
249 	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
250 	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
251 	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
252 	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
253 	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
254 	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
255 	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
256 	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
257 	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
258 )
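
/*
 * Hedged usage sketch (editorial, not taken from this file): the power-on
 * SC2VL setup elsewhere in the driver builds CSR values with these macros
 * roughly as follows.  The register names and the identity SC-to-VL mapping
 * shown here are assumptions for illustration only.
 *
 *	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(0,
 *			0, 0, 1, 1, 2, 2, 3, 3,
 *			4, 4, 5, 5, 6, 6, 7, 7));
 *	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(15_0,
 *			0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
 *			8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0));
 *
 * Each (sc, val) or (entry, val) pair shifts the VL value into the field
 * named by the pasted token.
 */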
259 
260 /* all CceStatus sub-block freeze bits */
261 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
262 			| CCE_STATUS_RXE_FROZE_SMASK \
263 			| CCE_STATUS_TXE_FROZE_SMASK \
264 			| CCE_STATUS_TXE_PIO_FROZE_SMASK)
265 /* all CceStatus sub-block TXE pause bits */
266 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
267 			| CCE_STATUS_TXE_PAUSED_SMASK \
268 			| CCE_STATUS_SDMA_PAUSED_SMASK)
269 /* all CceStatus sub-block RXE pause bits */
270 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
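
/*
 * Editorial note: these aggregate masks are typically used to poll the
 * CceStatus CSR during freeze/unfreeze handling, along the lines of this
 * hypothetical sketch (the real wait loop in this driver may differ):
 *
 *	while ((read_csr(dd, CCE_STATUS) & ALL_FROZE) != ALL_FROZE)
 *		udelay(2);
 */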
271 
272 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
273 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
274 
275 /*
276  * CCE Error flags.
277  */
278 static struct flag_table cce_err_status_flags[] = {
279 /* 0*/	FLAG_ENTRY0("CceCsrParityErr",
280 		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
281 /* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
282 		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
283 /* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
284 		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
285 /* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
286 		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
287 /* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
288 		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
289 /* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
290 		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
291 /* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
292 		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
293 /* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
294 		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
295 /* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
296 		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
297 /* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
298 	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
299 /*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
300 	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
301 /*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
302 	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
303 /*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
304 		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
305 /*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
306 		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
307 /*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
308 		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
309 /*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
310 		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
311 /*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
312 		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
313 /*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
314 		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
315 /*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
316 		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
317 /*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
318 		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
319 /*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
320 		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
321 /*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
322 		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
323 /*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
324 		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
325 /*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
326 		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
327 /*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
328 		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
329 /*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
330 		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
331 /*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
332 		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
333 /*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
334 		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
335 /*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
336 		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
337 /*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
338 		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
339 /*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
340 		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
341 /*31*/	FLAG_ENTRY0("LATriggered",
342 		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
343 /*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
344 		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
345 /*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
346 		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
347 /*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
348 		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
349 /*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
350 		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
351 /*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
352 		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
353 /*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
354 		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
355 /*38*/	FLAG_ENTRY0("CceIntMapCorErr",
356 		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
357 /*39*/	FLAG_ENTRY0("CceIntMapUncErr",
358 		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
359 /*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
360 		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
361 /*41-63 reserved*/
362 };
363 
364 /*
365  * Misc Error flags
366  */
367 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
368 static struct flag_table misc_err_status_flags[] = {
369 /* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
370 /* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
371 /* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
372 /* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
373 /* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
374 /* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
375 /* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
376 /* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
377 /* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
378 /* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
379 /*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
380 /*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
381 /*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
382 };
383 
384 /*
385  * TXE PIO Error flags and consequences
386  */
387 static struct flag_table pio_err_status_flags[] = {
388 /* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
389 	SEC_WRITE_DROPPED,
390 	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
391 /* 1*/	FLAG_ENTRY("PioWriteAddrParity",
392 	SEC_SPC_FREEZE,
393 	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
394 /* 2*/	FLAG_ENTRY("PioCsrParity",
395 	SEC_SPC_FREEZE,
396 	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
397 /* 3*/	FLAG_ENTRY("PioSbMemFifo0",
398 	SEC_SPC_FREEZE,
399 	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
400 /* 4*/	FLAG_ENTRY("PioSbMemFifo1",
401 	SEC_SPC_FREEZE,
402 	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
403 /* 5*/	FLAG_ENTRY("PioPccFifoParity",
404 	SEC_SPC_FREEZE,
405 	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
406 /* 6*/	FLAG_ENTRY("PioPecFifoParity",
407 	SEC_SPC_FREEZE,
408 	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
409 /* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
410 	SEC_SPC_FREEZE,
411 	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
412 /* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
413 	SEC_SPC_FREEZE,
414 	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
415 /* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
416 	SEC_SPC_FREEZE,
417 	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
418 /*10*/	FLAG_ENTRY("PioSmPktResetParity",
419 	SEC_SPC_FREEZE,
420 	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
421 /*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
422 	SEC_SPC_FREEZE,
423 	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
424 /*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
425 	SEC_SPC_FREEZE,
426 	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
427 /*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
428 	0,
429 	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
430 /*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
431 	0,
432 	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
433 /*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
434 	SEC_SPC_FREEZE,
435 	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
436 /*16*/	FLAG_ENTRY("PioPpmcPblFifo",
437 	SEC_SPC_FREEZE,
438 	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
439 /*17*/	FLAG_ENTRY("PioInitSmIn",
440 	0,
441 	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
442 /*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
443 	SEC_SPC_FREEZE,
444 	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
445 /*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
446 	SEC_SPC_FREEZE,
447 	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
448 /*20*/	FLAG_ENTRY("PioHostAddrMemCor",
449 	0,
450 	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
451 /*21*/	FLAG_ENTRY("PioWriteDataParity",
452 	SEC_SPC_FREEZE,
453 	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
454 /*22*/	FLAG_ENTRY("PioStateMachine",
455 	SEC_SPC_FREEZE,
456 	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
457 /*23*/	FLAG_ENTRY("PioWriteQwValidParity",
458 	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
459 	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
460 /*24*/	FLAG_ENTRY("PioBlockQwCountParity",
461 	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
462 	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
463 /*25*/	FLAG_ENTRY("PioVlfVlLenParity",
464 	SEC_SPC_FREEZE,
465 	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
466 /*26*/	FLAG_ENTRY("PioVlfSopParity",
467 	SEC_SPC_FREEZE,
468 	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
469 /*27*/	FLAG_ENTRY("PioVlFifoParity",
470 	SEC_SPC_FREEZE,
471 	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
472 /*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
473 	SEC_SPC_FREEZE,
474 	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
475 /*29*/	FLAG_ENTRY("PioPpmcSopLen",
476 	SEC_SPC_FREEZE,
477 	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
478 /*30-31 reserved*/
479 /*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
480 	SEC_SPC_FREEZE,
481 	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
482 /*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
483 	SEC_SPC_FREEZE,
484 	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
485 /*34*/	FLAG_ENTRY("PioPccSopHeadParity",
486 	SEC_SPC_FREEZE,
487 	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
488 /*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
489 	SEC_SPC_FREEZE,
490 	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
491 /*36-63 reserved*/
492 };
493 
494 /* TXE PIO errors that cause an SPC freeze */
495 #define ALL_PIO_FREEZE_ERR \
496 	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
497 	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
498 	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
499 	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
500 	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
501 	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
502 	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
503 	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
504 	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
505 	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
506 	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
507 	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
508 	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
509 	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
510 	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
511 	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
512 	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
513 	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
514 	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
515 	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
516 	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
517 	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
518 	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
519 	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
520 	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
521 	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
522 	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
523 	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
524 	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
525 
526 /*
527  * TXE SDMA Error flags
528  */
529 static struct flag_table sdma_err_status_flags[] = {
530 /* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
531 		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
532 /* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
533 		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
534 /* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
535 		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
536 /* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
537 		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
538 /*04-63 reserved*/
539 };
540 
541 /* TXE SDMA errors that cause an SPC freeze */
542 #define ALL_SDMA_FREEZE_ERR  \
543 		(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
544 		| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
545 		| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
546 
547 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
548 #define PORT_DISCARD_EGRESS_ERRS \
549 	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
550 	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
551 	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
552 
553 /*
554  * TXE Egress Error flags
555  */
556 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
557 static struct flag_table egress_err_status_flags[] = {
558 /* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
559 /* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
560 /* 2 reserved */
561 /* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
562 		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
563 /* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
564 /* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
565 /* 6 reserved */
566 /* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
567 		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
568 /* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
569 		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
570 /* 9-10 reserved */
571 /*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
572 		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
573 /*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
574 /*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
575 /*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
576 /*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
577 /*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
578 		SEES(TX_SDMA0_DISALLOWED_PACKET)),
579 /*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
580 		SEES(TX_SDMA1_DISALLOWED_PACKET)),
581 /*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
582 		SEES(TX_SDMA2_DISALLOWED_PACKET)),
583 /*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
584 		SEES(TX_SDMA3_DISALLOWED_PACKET)),
585 /*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
586 		SEES(TX_SDMA4_DISALLOWED_PACKET)),
587 /*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
588 		SEES(TX_SDMA5_DISALLOWED_PACKET)),
589 /*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
590 		SEES(TX_SDMA6_DISALLOWED_PACKET)),
591 /*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
592 		SEES(TX_SDMA7_DISALLOWED_PACKET)),
593 /*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
594 		SEES(TX_SDMA8_DISALLOWED_PACKET)),
595 /*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
596 		SEES(TX_SDMA9_DISALLOWED_PACKET)),
597 /*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
598 		SEES(TX_SDMA10_DISALLOWED_PACKET)),
599 /*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
600 		SEES(TX_SDMA11_DISALLOWED_PACKET)),
601 /*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
602 		SEES(TX_SDMA12_DISALLOWED_PACKET)),
603 /*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
604 		SEES(TX_SDMA13_DISALLOWED_PACKET)),
605 /*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
606 		SEES(TX_SDMA14_DISALLOWED_PACKET)),
607 /*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
608 		SEES(TX_SDMA15_DISALLOWED_PACKET)),
609 /*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
610 		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
611 /*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
612 		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
613 /*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
614 		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
615 /*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
616 		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
617 /*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
618 		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
619 /*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
620 		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
621 /*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
622 		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
623 /*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
624 		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
625 /*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
626 		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
627 /*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
628 /*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
629 /*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
630 /*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
631 /*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
632 /*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
633 /*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
634 /*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
635 /*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
636 /*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
637 /*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
638 /*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
639 /*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
640 /*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
641 /*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
642 /*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
643 /*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
644 /*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
645 /*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
646 /*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
647 /*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
648 /*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
649 		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
650 /*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
651 		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
652 };
653 
654 /*
655  * TXE Egress Error Info flags
656  */
657 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
658 static struct flag_table egress_err_info_flags[] = {
659 /* 0*/	FLAG_ENTRY0("Reserved", 0ull),
660 /* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
661 /* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
662 /* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
663 /* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
664 /* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
665 /* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
666 /* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
667 /* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
668 /* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
669 /*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
670 /*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
671 /*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
672 /*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
673 /*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
674 /*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
675 /*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
676 /*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
677 /*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
678 /*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
679 /*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
680 /*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
681 };
682 
683 /* TXE Egress errors that cause an SPC freeze */
684 #define ALL_TXE_EGRESS_FREEZE_ERR \
685 	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
686 	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
687 	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
688 	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
689 	| SEES(TX_LAUNCH_CSR_PARITY) \
690 	| SEES(TX_SBRD_CTL_CSR_PARITY) \
691 	| SEES(TX_CONFIG_PARITY) \
692 	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
693 	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
694 	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
695 	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
696 	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
697 	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
698 	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
699 	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
700 	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
701 	| SEES(TX_CREDIT_RETURN_PARITY))
702 
703 /*
704  * TXE Send error flags
705  */
706 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
707 static struct flag_table send_err_status_flags[] = {
708 /* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
709 /* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
710 /* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
711 };
712 
713 /*
714  * TXE Send Context Error flags and consequences
715  */
716 static struct flag_table sc_err_status_flags[] = {
717 /* 0*/	FLAG_ENTRY("InconsistentSop",
718 		SEC_PACKET_DROPPED | SEC_SC_HALTED,
719 		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
720 /* 1*/	FLAG_ENTRY("DisallowedPacket",
721 		SEC_PACKET_DROPPED | SEC_SC_HALTED,
722 		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
723 /* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
724 		SEC_WRITE_DROPPED | SEC_SC_HALTED,
725 		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
726 /* 3*/	FLAG_ENTRY("WriteOverflow",
727 		SEC_WRITE_DROPPED | SEC_SC_HALTED,
728 		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
729 /* 4*/	FLAG_ENTRY("WriteOutOfBounds",
730 		SEC_WRITE_DROPPED | SEC_SC_HALTED,
731 		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
732 /* 5-63 reserved*/
733 };
734 
735 /*
736  * RXE Receive Error flags
737  */
738 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
739 static struct flag_table rxe_err_status_flags[] = {
740 /* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
741 /* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
742 /* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
743 /* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
744 /* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
745 /* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
746 /* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
747 /* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
748 /* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
749 /* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
750 /*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
751 /*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
752 /*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
753 /*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
754 /*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
755 /*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
756 /*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
757 		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
758 /*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
759 /*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
760 /*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
761 		RXES(RBUF_BLOCK_LIST_READ_UNC)),
762 /*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
763 		RXES(RBUF_BLOCK_LIST_READ_COR)),
764 /*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
765 		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
766 /*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
767 		RXES(RBUF_CSR_QENT_CNT_PARITY)),
768 /*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
769 		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
770 /*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
771 		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
772 /*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
773 /*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
774 /*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
775 		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
776 /*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
777 /*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
778 /*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
779 /*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
780 /*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
781 /*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
782 /*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
783 /*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
784 		RXES(RBUF_FL_INITDONE_PARITY)),
785 /*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
786 		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
787 /*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
788 /*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
789 /*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
790 /*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
791 		RXES(LOOKUP_DES_PART1_UNC_COR)),
792 /*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
793 		RXES(LOOKUP_DES_PART2_PARITY)),
794 /*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
795 /*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
796 /*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
797 /*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
798 /*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
799 /*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
800 /*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
801 /*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
802 /*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
803 /*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
804 /*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
805 /*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
806 /*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
807 /*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
808 /*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
809 /*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
810 /*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
811 /*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
812 /*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
813 /*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
814 /*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
815 /*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
816 };
817 
818 /* RXE errors that will trigger an SPC freeze */
819 #define ALL_RXE_FREEZE_ERR  \
820 	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
821 	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
822 	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
823 	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
824 	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
825 	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
826 	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
827 	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
828 	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
829 	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
830 	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
831 	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
832 	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
833 	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
834 	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
835 	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
836 	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
837 	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
838 	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
839 	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
840 	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
841 	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
842 	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
843 	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
844 	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
845 	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
846 	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
847 	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
848 	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
849 	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
850 	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
851 	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
852 	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
853 	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
854 	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
855 	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
856 	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
857 	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
858 	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
859 	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
860 	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
861 	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
862 	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
863 	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
864 
865 #define RXE_FREEZE_ABORT_MASK \
866 	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
867 	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
868 	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
869 
870 /*
871  * DCC Error Flags
872  */
873 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
874 static struct flag_table dcc_err_flags[] = {
875 	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
876 	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
877 	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
878 	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
879 	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
880 	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
881 	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
882 	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
883 	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
884 	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
885 	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
886 	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
887 	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
888 	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
889 	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
890 	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
891 	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
892 	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
893 	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
894 	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
895 	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
896 	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
897 	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
898 	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
899 	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
900 	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
901 	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
902 	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
903 	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
904 	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
905 	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
906 	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
907 	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
908 	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
909 	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
910 	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
911 	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
912 	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
913 	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
914 	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
915 	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
916 	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
917 	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
918 	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
919 	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
920 	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
921 };
922 
923 /*
924  * LCB error flags
925  */
926 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
927 static struct flag_table lcb_err_flags[] = {
928 /* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
929 /* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
930 /* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
931 /* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
932 		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
933 /* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
934 /* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
935 /* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
936 /* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
937 /* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
938 /* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
939 /*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
940 /*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
941 /*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
942 /*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
943 		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
944 /*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
945 /*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
946 /*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
947 /*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
948 /*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
949 /*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
950 		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
951 /*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
952 /*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
953 /*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
954 /*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
955 /*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
956 /*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
957 /*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
958 		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
959 /*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
960 /*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
961 		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
962 /*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
963 		LCBE(REDUNDANT_FLIT_PARITY_ERR))
964 };
965 
966 /*
967  * DC8051 Error Flags
968  */
969 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
970 static struct flag_table dc8051_err_flags[] = {
971 	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
972 	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
973 	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
974 	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
975 	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
976 	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
977 	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
978 	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
979 	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
980 		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
981 	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
982 };
983 
984 /*
985  * DC8051 Information Error flags
986  *
987  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
988  */
989 static struct flag_table dc8051_info_err_flags[] = {
990 	FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
991 	FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
992 	FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
993 	FLAG_ENTRY0("Serdes internal loopback failure",
994 		    FAILED_SERDES_INTERNAL_LOOPBACK),
995 	FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
996 	FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
997 	FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
998 	FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
999 	FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
1000 	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
1001 	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
1002 	FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
1003 	FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
1004 	FLAG_ENTRY0("External Device Request Timeout",
1005 		    EXTERNAL_DEVICE_REQ_TIMEOUT),
1006 };
1007 
1008 /*
1009  * DC8051 Information Host Information flags
1010  *
1011  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
1012  */
1013 static struct flag_table dc8051_info_host_msg_flags[] = {
1014 	FLAG_ENTRY0("Host request done", 0x0001),
1015 	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
1016 	FLAG_ENTRY0("BC SMA message", 0x0004),
1017 	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
1018 	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
1019 	FLAG_ENTRY0("External device config request", 0x0020),
1020 	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
1021 	FLAG_ENTRY0("LinkUp achieved", 0x0080),
1022 	FLAG_ENTRY0("Link going down", 0x0100),
1023 	FLAG_ENTRY0("Link width downgraded", 0x0200),
1024 };
1025 
1026 static u32 encoded_size(u32 size);
1027 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1028 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1029 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1030 			       u8 *continuous);
1031 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1032 				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1033 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1034 				      u8 *remote_tx_rate, u16 *link_widths);
1035 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
1036 				     u8 *flag_bits, u16 *link_widths);
1037 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1038 				  u8 *device_rev);
1039 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1040 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1041 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1042 			    u8 *tx_polarity_inversion,
1043 			    u8 *rx_polarity_inversion, u8 *max_rate);
1044 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1045 				unsigned int context, u64 err_status);
1046 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1047 static void handle_dcc_err(struct hfi1_devdata *dd,
1048 			   unsigned int context, u64 err_status);
1049 static void handle_lcb_err(struct hfi1_devdata *dd,
1050 			   unsigned int context, u64 err_status);
1051 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1052 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1053 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1054 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1055 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1056 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1057 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1058 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1059 static void set_partition_keys(struct hfi1_pportdata *ppd);
1060 static const char *link_state_name(u32 state);
1061 static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1062 					  u32 state);
1063 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1064 			   u64 *out_data);
1065 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1066 static int thermal_init(struct hfi1_devdata *dd);
1067 
1068 static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1069 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1070 					    int msecs);
1071 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1072 				  int msecs);
1073 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
1074 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
1075 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1076 				   int msecs);
1077 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
1078 					 int msecs);
1079 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1080 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1081 static void handle_temp_err(struct hfi1_devdata *dd);
1082 static void dc_shutdown(struct hfi1_devdata *dd);
1083 static void dc_start(struct hfi1_devdata *dd);
1084 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1085 			   unsigned int *np);
1086 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1087 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1088 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1089 
1090 /*
1091  * Error interrupt table entry.  This is used as input to the interrupt
1092  * "clear down" routine used for all second tier error interrupt registers.
1093  * Second tier interrupt registers have a single bit representing them
1094  * in the top-level CceIntStatus.
1095  */
1096 struct err_reg_info {
1097 	u32 status;		/* status CSR offset */
1098 	u32 clear;		/* clear CSR offset */
1099 	u32 mask;		/* mask CSR offset */
1100 	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1101 	const char *desc;
1102 };
1103 
1104 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1105 #define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1106 #define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1107 
1108 /*
1109  * Helpers for building HFI and DC error interrupt table entries.  Different
1110  * helpers are needed because of inconsistent register names.
1111  */
1112 #define EE(reg, handler, desc) \
1113 	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1114 		handler, desc }
1115 #define DC_EE1(reg, handler, desc) \
1116 	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1117 #define DC_EE2(reg, handler, desc) \
1118 	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1119 
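/*
 * Hedged sketch (editorial addition): the generic "clear down" flow that
 * consumes an err_reg_info entry reads the second-tier status CSR,
 * acknowledges it through the clear CSR, then dispatches to the handler.
 * The helper name is hypothetical; the real routine in this file may differ.
 */
static inline void example_clear_down_err(struct hfi1_devdata *dd,
					  const struct err_reg_info *eri,
					  u32 source)
{
	u64 reg = read_csr(dd, eri->status);

	if (!reg)
		return;
	write_csr(dd, eri->clear, reg);	/* ack exactly the bits we saw */
	if (eri->handler)
		eri->handler(dd, source, reg);
}
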
1120 /*
1121  * Table of the "misc" grouping of error interrupts.  Each entry refers to
1122  * another register containing more information.
1123  */
1124 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1125 /* 0*/	EE(CCE_ERR,		handle_cce_err,    "CceErr"),
1126 /* 1*/	EE(RCV_ERR,		handle_rxe_err,    "RxeErr"),
1127 /* 2*/	EE(MISC_ERR,	handle_misc_err,   "MiscErr"),
1128 /* 3*/	{ 0, 0, 0, NULL }, /* reserved */
1129 /* 4*/	EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1130 /* 5*/	EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1131 /* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1132 /* 7*/	EE(SEND_ERR,	handle_txe_err,    "TxeErr")
1133 	/* the rest are reserved */
1134 };
1135 
1136 /*
1137  * Index into the Various section of the interrupt sources
1138  * corresponding to the Critical Temperature interrupt.
1139  */
1140 #define TCRIT_INT_SOURCE 4
1141 
1142 /*
1143  * SDMA error interrupt entry - refers to another register containing more
1144  * information.
1145  */
1146 static const struct err_reg_info sdma_eng_err =
1147 	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1148 
1149 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1150 /* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
1151 /* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
1152 /* 2*/	EE(ASIC_QSFP1,	handle_qsfp_int,	"QSFP1"),
1153 /* 3*/	EE(ASIC_QSFP2,	handle_qsfp_int,	"QSFP2"),
1154 /* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
1155 	/* rest are reserved */
1156 };
1157 
1158 /*
1159  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1160  * register cannot be derived from the MTU value because 10K is not
1161  * a power of 2. Therefore, we need a constant. Everything else can
1162  * be calculated.
1163  */
1164 #define DCC_CFG_PORT_MTU_CAP_10240 7
1165 
1166 /*
1167  * Table of the DC grouping of error interrupts.  Each entry refers to
1168  * another register containing more information.
1169  */
1170 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1171 /* 0*/	DC_EE1(DCC_ERR,		handle_dcc_err,	       "DCC Err"),
1172 /* 1*/	DC_EE2(DC_LCB_ERR,	handle_lcb_err,	       "LCB Err"),
1173 /* 2*/	DC_EE2(DC_DC8051_ERR,	handle_8051_interrupt, "DC8051 Interrupt"),
1174 /* 3*/	/* dc_lbm_int - special, see is_dc_int() */
1175 	/* the rest are reserved */
1176 };
1177 
1178 struct cntr_entry {
1179 	/*
1180 	 * counter name
1181 	 */
1182 	char *name;
1183 
1184 	/*
1185 	 * csr to read for name (if applicable)
1186 	 */
1187 	u64 csr;
1188 
1189 	/*
1190 	 * offset into dd or ppd to store the counter's value
1191 	 */
1192 	int offset;
1193 
1194 	/*
1195 	 * flags
1196 	 */
1197 	u8 flags;
1198 
1199 	/*
1200 	 * accessor for stat element, context either dd or ppd
1201 	 */
1202 	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1203 		       int mode, u64 data);
1204 };
1205 
1206 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1207 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1208 
1209 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1210 { \
1211 	name, \
1212 	csr, \
1213 	offset, \
1214 	flags, \
1215 	accessor \
1216 }
1217 
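/*
 * Editorial example: the wrappers below simply fill in a cntr_entry.  For
 * instance (name and index chosen for illustration only),
 *
 *	RXE32_DEV_CNTR_ELEM(RxTidFullErr, 1, CNTR_NORMAL)
 *
 * expands to roughly
 *
 *	{ "RxTidFullErr", 1 * 8 + RCV_COUNTER_ARRAY32, 0,
 *	  CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr }
 *
 * i.e. the counter index becomes a byte offset into the 32-bit RXE counter
 * array, read through the 32-bit device accessor.
 */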
1218 /* 32bit RXE */
1219 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1220 CNTR_ELEM(#name, \
1221 	  (counter * 8 + RCV_COUNTER_ARRAY32), \
1222 	  0, flags | CNTR_32BIT, \
1223 	  port_access_u32_csr)
1224 
1225 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1226 CNTR_ELEM(#name, \
1227 	  (counter * 8 + RCV_COUNTER_ARRAY32), \
1228 	  0, flags | CNTR_32BIT, \
1229 	  dev_access_u32_csr)
1230 
1231 /* 64bit RXE */
1232 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1233 CNTR_ELEM(#name, \
1234 	  (counter * 8 + RCV_COUNTER_ARRAY64), \
1235 	  0, flags, \
1236 	  port_access_u64_csr)
1237 
1238 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1239 CNTR_ELEM(#name, \
1240 	  (counter * 8 + RCV_COUNTER_ARRAY64), \
1241 	  0, flags, \
1242 	  dev_access_u64_csr)
1243 
1244 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1245 #define OVR_ELM(ctx) \
1246 CNTR_ELEM("RcvHdrOvr" #ctx, \
1247 	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1248 	  0, CNTR_NORMAL, port_access_u64_csr)
1249 
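/*
 * Example expansion, for reference only: OVR_LBL(0) pastes to
 * C_RCV_HDR_OVF_0, and OVR_ELM(0) builds the matching table entry
 *
 *	CNTR_ELEM("RcvHdrOvr0",
 *		  (RCV_HDR_OVFL_CNT + 0 * 0x100),
 *		  0, CNTR_NORMAL, port_access_u64_csr)
 *
 * i.e. one overflow counter per receive context, spaced 0x100 apart in
 * CSR space and read through port_access_u64_csr.
 */
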
1250 /* 32bit TXE */
1251 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1252 CNTR_ELEM(#name, \
1253 	  (counter * 8 + SEND_COUNTER_ARRAY32), \
1254 	  0, flags | CNTR_32BIT, \
1255 	  port_access_u32_csr)
1256 
1257 /* 64bit TXE */
1258 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1259 CNTR_ELEM(#name, \
1260 	  (counter * 8 + SEND_COUNTER_ARRAY64), \
1261 	  0, flags, \
1262 	  port_access_u64_csr)
1263 
1264 #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1265 CNTR_ELEM(#name, \
1266 	  counter * 8 + SEND_COUNTER_ARRAY64, \
1267 	  0, \
1268 	  flags, \
1269 	  dev_access_u64_csr)
1270 
1271 /* CCE */
1272 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1273 CNTR_ELEM(#name, \
1274 	  (counter * 8 + CCE_COUNTER_ARRAY32), \
1275 	  0, flags | CNTR_32BIT, \
1276 	  dev_access_u32_csr)
1277 
1278 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1279 CNTR_ELEM(#name, \
1280 	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1281 	  0, flags | CNTR_32BIT, \
1282 	  dev_access_u32_csr)
1283 
1284 /* DC */
1285 #define DC_PERF_CNTR(name, counter, flags) \
1286 CNTR_ELEM(#name, \
1287 	  counter, \
1288 	  0, \
1289 	  flags, \
1290 	  dev_access_u64_csr)
1291 
1292 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1293 CNTR_ELEM(#name, \
1294 	  counter, \
1295 	  0, \
1296 	  flags, \
1297 	  dc_access_lcb_cntr)
1298 
1299 /* ibp counters */
1300 #define SW_IBP_CNTR(name, cntr) \
1301 CNTR_ELEM(#name, \
1302 	  0, \
1303 	  0, \
1304 	  CNTR_SYNTH, \
1305 	  access_ibp_##cntr)
1306 
1307 /**
1308  * hfi1_addr_from_offset - return addr for readq/writeq
1309  * @dd - the dd device
1310  * @offset - the offset of the CSR within bar0
1311  *
1312  * This routine selects the appropriate base address
1313  * based on the indicated offset.
1314  */
1315 static inline void __iomem *hfi1_addr_from_offset(
1316 	const struct hfi1_devdata *dd,
1317 	u32 offset)
1318 {
1319 	if (offset >= dd->base2_start)
1320 		return dd->kregbase2 + (offset - dd->base2_start);
1321 	return dd->kregbase1 + offset;
1322 }
1323 
1324 /**
1325  * read_csr - read CSR at the indicated offset
1326  * @dd - the dd device
1327  * @offset - the offset of the CSR within bar0
1328  *
1329  * Return: the value read or all FF's if there
1330  * is no mapping
1331  */
1332 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1333 {
1334 	if (dd->flags & HFI1_PRESENT)
1335 		return readq(hfi1_addr_from_offset(dd, offset));
1336 	return -1;
1337 }
1338 
1339 /**
1340  * write_csr - write CSR at the indicated offset
1341  * @dd - the dd device
1342  * @offset - the offset of the CSR within bar0
1343  * @value - value to write
1344  */
1345 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1346 {
1347 	if (dd->flags & HFI1_PRESENT) {
1348 		void __iomem *base = hfi1_addr_from_offset(dd, offset);
1349 
1350 		/* avoid write to RcvArray */
1351 		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1352 			return;
1353 		writeq(value, base);
1354 	}
1355 }
1356 
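/*
 * Minimal usage sketch (hypothetical helper, not referenced elsewhere in
 * the driver): a read-modify-write of a CSR using the accessors above.
 * If the device is not present, read_csr() returns all FF's and
 * write_csr() silently drops the write.
 */
static inline void csr_set_bits_example(const struct hfi1_devdata *dd,
					u32 offset, u64 bits)
{
	u64 val = read_csr(dd, offset);

	write_csr(dd, offset, val | bits);
}
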
1357 /**
1358  * get_csr_addr - return the iomem address for offset
1359  * @dd - the dd device
1360  * @offset - the offset of the CSR within bar0
1361  *
1362  * Return: The iomem address to use in subsequent
1363  * writeq/readq operations.
1364  */
1365 void __iomem *get_csr_addr(
1366 	const struct hfi1_devdata *dd,
1367 	u32 offset)
1368 {
1369 	if (dd->flags & HFI1_PRESENT)
1370 		return hfi1_addr_from_offset(dd, offset);
1371 	return NULL;
1372 }
1373 
1374 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1375 				 int mode, u64 value)
1376 {
1377 	u64 ret;
1378 
1379 	if (mode == CNTR_MODE_R) {
1380 		ret = read_csr(dd, csr);
1381 	} else if (mode == CNTR_MODE_W) {
1382 		write_csr(dd, csr, value);
1383 		ret = value;
1384 	} else {
1385 		dd_dev_err(dd, "Invalid cntr register access mode");
1386 		return 0;
1387 	}
1388 
1389 	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1390 	return ret;
1391 }
1392 
1393 /* Dev Access */
1394 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1395 			      void *context, int vl, int mode, u64 data)
1396 {
1397 	struct hfi1_devdata *dd = context;
1398 	u64 csr = entry->csr;
1399 
1400 	if (entry->flags & CNTR_SDMA) {
1401 		if (vl == CNTR_INVALID_VL)
1402 			return 0;
1403 		csr += 0x100 * vl;
1404 	} else {
1405 		if (vl != CNTR_INVALID_VL)
1406 			return 0;
1407 	}
1408 	return read_write_csr(dd, csr, mode, data);
1409 }
1410 
1411 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1412 			      void *context, int idx, int mode, u64 data)
1413 {
1414 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1415 
1416 	if (dd->per_sdma && idx < dd->num_sdma)
1417 		return dd->per_sdma[idx].err_cnt;
1418 	return 0;
1419 }
1420 
1421 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1422 			      void *context, int idx, int mode, u64 data)
1423 {
1424 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1425 
1426 	if (dd->per_sdma && idx < dd->num_sdma)
1427 		return dd->per_sdma[idx].sdma_int_cnt;
1428 	return 0;
1429 }
1430 
1431 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1432 				   void *context, int idx, int mode, u64 data)
1433 {
1434 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1435 
1436 	if (dd->per_sdma && idx < dd->num_sdma)
1437 		return dd->per_sdma[idx].idle_int_cnt;
1438 	return 0;
1439 }
1440 
1441 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1442 				       void *context, int idx, int mode,
1443 				       u64 data)
1444 {
1445 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1446 
1447 	if (dd->per_sdma && idx < dd->num_sdma)
1448 		return dd->per_sdma[idx].progress_int_cnt;
1449 	return 0;
1450 }
1451 
1452 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1453 			      int vl, int mode, u64 data)
1454 {
1455 	struct hfi1_devdata *dd = context;
1456 
1457 	u64 val = 0;
1458 	u64 csr = entry->csr;
1459 
1460 	if (entry->flags & CNTR_VL) {
1461 		if (vl == CNTR_INVALID_VL)
1462 			return 0;
1463 		csr += 8 * vl;
1464 	} else {
1465 		if (vl != CNTR_INVALID_VL)
1466 			return 0;
1467 	}
1468 
1469 	val = read_write_csr(dd, csr, mode, data);
1470 	return val;
1471 }
1472 
1473 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1474 			      int vl, int mode, u64 data)
1475 {
1476 	struct hfi1_devdata *dd = context;
1477 	u32 csr = entry->csr;
1478 	int ret = 0;
1479 
1480 	if (vl != CNTR_INVALID_VL)
1481 		return 0;
1482 	if (mode == CNTR_MODE_R)
1483 		ret = read_lcb_csr(dd, csr, &data);
1484 	else if (mode == CNTR_MODE_W)
1485 		ret = write_lcb_csr(dd, csr, data);
1486 
1487 	if (ret) {
1488 		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1489 		return 0;
1490 	}
1491 
1492 	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1493 	return data;
1494 }
1495 
1496 /* Port Access */
1497 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1498 			       int vl, int mode, u64 data)
1499 {
1500 	struct hfi1_pportdata *ppd = context;
1501 
1502 	if (vl != CNTR_INVALID_VL)
1503 		return 0;
1504 	return read_write_csr(ppd->dd, entry->csr, mode, data);
1505 }
1506 
1507 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1508 			       void *context, int vl, int mode, u64 data)
1509 {
1510 	struct hfi1_pportdata *ppd = context;
1511 	u64 val;
1512 	u64 csr = entry->csr;
1513 
1514 	if (entry->flags & CNTR_VL) {
1515 		if (vl == CNTR_INVALID_VL)
1516 			return 0;
1517 		csr += 8 * vl;
1518 	} else {
1519 		if (vl != CNTR_INVALID_VL)
1520 			return 0;
1521 	}
1522 	val = read_write_csr(ppd->dd, csr, mode, data);
1523 	return val;
1524 }
1525 
1526 /* Software defined */
1527 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1528 				u64 data)
1529 {
1530 	u64 ret;
1531 
1532 	if (mode == CNTR_MODE_R) {
1533 		ret = *cntr;
1534 	} else if (mode == CNTR_MODE_W) {
1535 		*cntr = data;
1536 		ret = data;
1537 	} else {
1538 		dd_dev_err(dd, "Invalid cntr sw access mode");
1539 		return 0;
1540 	}
1541 
1542 	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1543 
1544 	return ret;
1545 }
1546 
1547 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1548 				 int vl, int mode, u64 data)
1549 {
1550 	struct hfi1_pportdata *ppd = context;
1551 
1552 	if (vl != CNTR_INVALID_VL)
1553 		return 0;
1554 	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1555 }
1556 
1557 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1558 				 int vl, int mode, u64 data)
1559 {
1560 	struct hfi1_pportdata *ppd = context;
1561 
1562 	if (vl != CNTR_INVALID_VL)
1563 		return 0;
1564 	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1565 }
1566 
1567 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1568 				       void *context, int vl, int mode,
1569 				       u64 data)
1570 {
1571 	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1572 
1573 	if (vl != CNTR_INVALID_VL)
1574 		return 0;
1575 	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1576 }
1577 
1578 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1579 				   void *context, int vl, int mode, u64 data)
1580 {
1581 	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1582 	u64 zero = 0;
1583 	u64 *counter;
1584 
1585 	if (vl == CNTR_INVALID_VL)
1586 		counter = &ppd->port_xmit_discards;
1587 	else if (vl >= 0 && vl < C_VL_COUNT)
1588 		counter = &ppd->port_xmit_discards_vl[vl];
1589 	else
1590 		counter = &zero;
1591 
1592 	return read_write_sw(ppd->dd, counter, mode, data);
1593 }
1594 
1595 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1596 				       void *context, int vl, int mode,
1597 				       u64 data)
1598 {
1599 	struct hfi1_pportdata *ppd = context;
1600 
1601 	if (vl != CNTR_INVALID_VL)
1602 		return 0;
1603 
1604 	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1605 			     mode, data);
1606 }
1607 
1608 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1609 				      void *context, int vl, int mode, u64 data)
1610 {
1611 	struct hfi1_pportdata *ppd = context;
1612 
1613 	if (vl != CNTR_INVALID_VL)
1614 		return 0;
1615 
1616 	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1617 			     mode, data);
1618 }
1619 
1620 u64 get_all_cpu_total(u64 __percpu *cntr)
1621 {
1622 	int cpu;
1623 	u64 counter = 0;
1624 
1625 	for_each_possible_cpu(cpu)
1626 		counter += *per_cpu_ptr(cntr, cpu);
1627 	return counter;
1628 }
1629 
1630 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1631 			  u64 __percpu *cntr,
1632 			  int vl, int mode, u64 data)
1633 {
1634 	u64 ret = 0;
1635 
1636 	if (vl != CNTR_INVALID_VL)
1637 		return 0;
1638 
1639 	if (mode == CNTR_MODE_R) {
1640 		ret = get_all_cpu_total(cntr) - *z_val;
1641 	} else if (mode == CNTR_MODE_W) {
1642 		/* A write can only zero the counter */
1643 		if (data == 0)
1644 			*z_val = get_all_cpu_total(cntr);
1645 		else
1646 			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1647 	} else {
1648 		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1649 		return 0;
1650 	}
1651 
1652 	return ret;
1653 }
1654 
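/*
 * Minimal usage sketch (hypothetical helper and counter): hot paths bump
 * their local per-CPU copy; reads above sum all CPUs and subtract the
 * zero snapshot, and a write of 0 "zeroes" the counter by retaking that
 * snapshot rather than touching the per-CPU values.
 */
static inline void percpu_cntr_inc_example(u64 __percpu *cntr)
{
	this_cpu_inc(*cntr);
}
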
1655 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1656 			      void *context, int vl, int mode, u64 data)
1657 {
1658 	struct hfi1_devdata *dd = context;
1659 
1660 	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1661 			      mode, data);
1662 }
1663 
1664 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1665 				   void *context, int vl, int mode, u64 data)
1666 {
1667 	struct hfi1_devdata *dd = context;
1668 
1669 	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1670 			      mode, data);
1671 }
1672 
1673 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1674 			      void *context, int vl, int mode, u64 data)
1675 {
1676 	struct hfi1_devdata *dd = context;
1677 
1678 	return dd->verbs_dev.n_piowait;
1679 }
1680 
1681 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1682 			       void *context, int vl, int mode, u64 data)
1683 {
1684 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1685 
1686 	return dd->verbs_dev.n_piodrain;
1687 }
1688 
1689 static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
1690 				   void *context, int vl, int mode, u64 data)
1691 {
1692 	struct hfi1_devdata *dd = context;
1693 
1694 	return dd->ctx0_seq_drop;
1695 }
1696 
1697 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1698 			      void *context, int vl, int mode, u64 data)
1699 {
1700 	struct hfi1_devdata *dd = context;
1701 
1702 	return dd->verbs_dev.n_txwait;
1703 }
1704 
1705 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1706 			       void *context, int vl, int mode, u64 data)
1707 {
1708 	struct hfi1_devdata *dd = context;
1709 
1710 	return dd->verbs_dev.n_kmem_wait;
1711 }
1712 
1713 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1714 				   void *context, int vl, int mode, u64 data)
1715 {
1716 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1717 
1718 	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1719 			      mode, data);
1720 }
1721 
1722 /* Software counters for the error status bits within MISC_ERR_STATUS */
1723 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1724 					     void *context, int vl, int mode,
1725 					     u64 data)
1726 {
1727 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1728 
1729 	return dd->misc_err_status_cnt[12];
1730 }
1731 
1732 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1733 					  void *context, int vl, int mode,
1734 					  u64 data)
1735 {
1736 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1737 
1738 	return dd->misc_err_status_cnt[11];
1739 }
1740 
1741 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1742 					       void *context, int vl, int mode,
1743 					       u64 data)
1744 {
1745 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1746 
1747 	return dd->misc_err_status_cnt[10];
1748 }
1749 
1750 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1751 						 void *context, int vl,
1752 						 int mode, u64 data)
1753 {
1754 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1755 
1756 	return dd->misc_err_status_cnt[9];
1757 }
1758 
1759 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1760 					   void *context, int vl, int mode,
1761 					   u64 data)
1762 {
1763 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1764 
1765 	return dd->misc_err_status_cnt[8];
1766 }
1767 
1768 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1769 				const struct cntr_entry *entry,
1770 				void *context, int vl, int mode, u64 data)
1771 {
1772 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1773 
1774 	return dd->misc_err_status_cnt[7];
1775 }
1776 
1777 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1778 						void *context, int vl,
1779 						int mode, u64 data)
1780 {
1781 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1782 
1783 	return dd->misc_err_status_cnt[6];
1784 }
1785 
1786 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1787 					      void *context, int vl, int mode,
1788 					      u64 data)
1789 {
1790 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1791 
1792 	return dd->misc_err_status_cnt[5];
1793 }
1794 
1795 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1796 					    void *context, int vl, int mode,
1797 					    u64 data)
1798 {
1799 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1800 
1801 	return dd->misc_err_status_cnt[4];
1802 }
1803 
1804 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1805 						 void *context, int vl,
1806 						 int mode, u64 data)
1807 {
1808 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1809 
1810 	return dd->misc_err_status_cnt[3];
1811 }
1812 
1813 static u64 access_misc_csr_write_bad_addr_err_cnt(
1814 				const struct cntr_entry *entry,
1815 				void *context, int vl, int mode, u64 data)
1816 {
1817 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1818 
1819 	return dd->misc_err_status_cnt[2];
1820 }
1821 
1822 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1823 						 void *context, int vl,
1824 						 int mode, u64 data)
1825 {
1826 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1827 
1828 	return dd->misc_err_status_cnt[1];
1829 }
1830 
1831 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1832 					  void *context, int vl, int mode,
1833 					  u64 data)
1834 {
1835 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1836 
1837 	return dd->misc_err_status_cnt[0];
1838 }
1839 
1840 /*
1841  * Software counter for the aggregate of
1842  * individual CceErrStatus counters
1843  */
1844 static u64 access_sw_cce_err_status_aggregated_cnt(
1845 				const struct cntr_entry *entry,
1846 				void *context, int vl, int mode, u64 data)
1847 {
1848 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1849 
1850 	return dd->sw_cce_err_status_aggregate;
1851 }
1852 
1853 /*
1854  * Software counters corresponding to each of the
1855  * error status bits within CceErrStatus
1856  */
1857 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1858 					      void *context, int vl, int mode,
1859 					      u64 data)
1860 {
1861 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1862 
1863 	return dd->cce_err_status_cnt[40];
1864 }
1865 
1866 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1867 					  void *context, int vl, int mode,
1868 					  u64 data)
1869 {
1870 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1871 
1872 	return dd->cce_err_status_cnt[39];
1873 }
1874 
1875 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1876 					  void *context, int vl, int mode,
1877 					  u64 data)
1878 {
1879 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1880 
1881 	return dd->cce_err_status_cnt[38];
1882 }
1883 
1884 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1885 					     void *context, int vl, int mode,
1886 					     u64 data)
1887 {
1888 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1889 
1890 	return dd->cce_err_status_cnt[37];
1891 }
1892 
1893 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1894 					     void *context, int vl, int mode,
1895 					     u64 data)
1896 {
1897 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1898 
1899 	return dd->cce_err_status_cnt[36];
1900 }
1901 
1902 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1903 				const struct cntr_entry *entry,
1904 				void *context, int vl, int mode, u64 data)
1905 {
1906 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1907 
1908 	return dd->cce_err_status_cnt[35];
1909 }
1910 
1911 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1912 				const struct cntr_entry *entry,
1913 				void *context, int vl, int mode, u64 data)
1914 {
1915 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1916 
1917 	return dd->cce_err_status_cnt[34];
1918 }
1919 
1920 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1921 						 void *context, int vl,
1922 						 int mode, u64 data)
1923 {
1924 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1925 
1926 	return dd->cce_err_status_cnt[33];
1927 }
1928 
1929 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1930 						void *context, int vl, int mode,
1931 						u64 data)
1932 {
1933 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1934 
1935 	return dd->cce_err_status_cnt[32];
1936 }
1937 
1938 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1939 				   void *context, int vl, int mode, u64 data)
1940 {
1941 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1942 
1943 	return dd->cce_err_status_cnt[31];
1944 }
1945 
1946 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1947 					       void *context, int vl, int mode,
1948 					       u64 data)
1949 {
1950 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1951 
1952 	return dd->cce_err_status_cnt[30];
1953 }
1954 
1955 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1956 					      void *context, int vl, int mode,
1957 					      u64 data)
1958 {
1959 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1960 
1961 	return dd->cce_err_status_cnt[29];
1962 }
1963 
1964 static u64 access_pcic_transmit_back_parity_err_cnt(
1965 				const struct cntr_entry *entry,
1966 				void *context, int vl, int mode, u64 data)
1967 {
1968 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1969 
1970 	return dd->cce_err_status_cnt[28];
1971 }
1972 
1973 static u64 access_pcic_transmit_front_parity_err_cnt(
1974 				const struct cntr_entry *entry,
1975 				void *context, int vl, int mode, u64 data)
1976 {
1977 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1978 
1979 	return dd->cce_err_status_cnt[27];
1980 }
1981 
1982 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1983 					     void *context, int vl, int mode,
1984 					     u64 data)
1985 {
1986 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1987 
1988 	return dd->cce_err_status_cnt[26];
1989 }
1990 
1991 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1992 					    void *context, int vl, int mode,
1993 					    u64 data)
1994 {
1995 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1996 
1997 	return dd->cce_err_status_cnt[25];
1998 }
1999 
2000 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
2001 					      void *context, int vl, int mode,
2002 					      u64 data)
2003 {
2004 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2005 
2006 	return dd->cce_err_status_cnt[24];
2007 }
2008 
2009 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
2010 					     void *context, int vl, int mode,
2011 					     u64 data)
2012 {
2013 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2014 
2015 	return dd->cce_err_status_cnt[23];
2016 }
2017 
2018 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2019 						 void *context, int vl,
2020 						 int mode, u64 data)
2021 {
2022 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2023 
2024 	return dd->cce_err_status_cnt[22];
2025 }
2026 
2027 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2028 					 void *context, int vl, int mode,
2029 					 u64 data)
2030 {
2031 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2032 
2033 	return dd->cce_err_status_cnt[21];
2034 }
2035 
2036 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2037 				const struct cntr_entry *entry,
2038 				void *context, int vl, int mode, u64 data)
2039 {
2040 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2041 
2042 	return dd->cce_err_status_cnt[20];
2043 }
2044 
2045 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2046 						 void *context, int vl,
2047 						 int mode, u64 data)
2048 {
2049 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2050 
2051 	return dd->cce_err_status_cnt[19];
2052 }
2053 
2054 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2055 					     void *context, int vl, int mode,
2056 					     u64 data)
2057 {
2058 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2059 
2060 	return dd->cce_err_status_cnt[18];
2061 }
2062 
2063 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2064 					    void *context, int vl, int mode,
2065 					    u64 data)
2066 {
2067 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2068 
2069 	return dd->cce_err_status_cnt[17];
2070 }
2071 
2072 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2073 					      void *context, int vl, int mode,
2074 					      u64 data)
2075 {
2076 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2077 
2078 	return dd->cce_err_status_cnt[16];
2079 }
2080 
2081 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2082 					     void *context, int vl, int mode,
2083 					     u64 data)
2084 {
2085 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2086 
2087 	return dd->cce_err_status_cnt[15];
2088 }
2089 
2090 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2091 						 void *context, int vl,
2092 						 int mode, u64 data)
2093 {
2094 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2095 
2096 	return dd->cce_err_status_cnt[14];
2097 }
2098 
2099 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2100 					     void *context, int vl, int mode,
2101 					     u64 data)
2102 {
2103 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2104 
2105 	return dd->cce_err_status_cnt[13];
2106 }
2107 
2108 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2109 				const struct cntr_entry *entry,
2110 				void *context, int vl, int mode, u64 data)
2111 {
2112 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2113 
2114 	return dd->cce_err_status_cnt[12];
2115 }
2116 
2117 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2118 				const struct cntr_entry *entry,
2119 				void *context, int vl, int mode, u64 data)
2120 {
2121 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2122 
2123 	return dd->cce_err_status_cnt[11];
2124 }
2125 
2126 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2127 				const struct cntr_entry *entry,
2128 				void *context, int vl, int mode, u64 data)
2129 {
2130 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2131 
2132 	return dd->cce_err_status_cnt[10];
2133 }
2134 
2135 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2136 				const struct cntr_entry *entry,
2137 				void *context, int vl, int mode, u64 data)
2138 {
2139 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140 
2141 	return dd->cce_err_status_cnt[9];
2142 }
2143 
2144 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2145 				const struct cntr_entry *entry,
2146 				void *context, int vl, int mode, u64 data)
2147 {
2148 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149 
2150 	return dd->cce_err_status_cnt[8];
2151 }
2152 
2153 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2154 						 void *context, int vl,
2155 						 int mode, u64 data)
2156 {
2157 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158 
2159 	return dd->cce_err_status_cnt[7];
2160 }
2161 
2162 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2163 				const struct cntr_entry *entry,
2164 				void *context, int vl, int mode, u64 data)
2165 {
2166 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167 
2168 	return dd->cce_err_status_cnt[6];
2169 }
2170 
2171 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2172 					       void *context, int vl, int mode,
2173 					       u64 data)
2174 {
2175 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176 
2177 	return dd->cce_err_status_cnt[5];
2178 }
2179 
2180 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2181 					  void *context, int vl, int mode,
2182 					  u64 data)
2183 {
2184 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185 
2186 	return dd->cce_err_status_cnt[4];
2187 }
2188 
2189 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2190 				const struct cntr_entry *entry,
2191 				void *context, int vl, int mode, u64 data)
2192 {
2193 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194 
2195 	return dd->cce_err_status_cnt[3];
2196 }
2197 
2198 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2199 						 void *context, int vl,
2200 						 int mode, u64 data)
2201 {
2202 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203 
2204 	return dd->cce_err_status_cnt[2];
2205 }
2206 
2207 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2208 						void *context, int vl,
2209 						int mode, u64 data)
2210 {
2211 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212 
2213 	return dd->cce_err_status_cnt[1];
2214 }
2215 
2216 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2217 					 void *context, int vl, int mode,
2218 					 u64 data)
2219 {
2220 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2221 
2222 	return dd->cce_err_status_cnt[0];
2223 }
2224 
2225 /*
2226  * Software counters corresponding to each of the
2227  * error status bits within RcvErrStatus
2228  */
2229 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2230 					void *context, int vl, int mode,
2231 					u64 data)
2232 {
2233 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2234 
2235 	return dd->rcv_err_status_cnt[63];
2236 }
2237 
2238 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2239 						void *context, int vl,
2240 						int mode, u64 data)
2241 {
2242 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2243 
2244 	return dd->rcv_err_status_cnt[62];
2245 }
2246 
2247 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2248 					       void *context, int vl, int mode,
2249 					       u64 data)
2250 {
2251 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2252 
2253 	return dd->rcv_err_status_cnt[61];
2254 }
2255 
2256 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2257 					 void *context, int vl, int mode,
2258 					 u64 data)
2259 {
2260 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2261 
2262 	return dd->rcv_err_status_cnt[60];
2263 }
2264 
2265 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2266 						 void *context, int vl,
2267 						 int mode, u64 data)
2268 {
2269 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2270 
2271 	return dd->rcv_err_status_cnt[59];
2272 }
2273 
2274 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2275 						 void *context, int vl,
2276 						 int mode, u64 data)
2277 {
2278 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2279 
2280 	return dd->rcv_err_status_cnt[58];
2281 }
2282 
2283 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2284 					    void *context, int vl, int mode,
2285 					    u64 data)
2286 {
2287 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2288 
2289 	return dd->rcv_err_status_cnt[57];
2290 }
2291 
2292 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2293 					   void *context, int vl, int mode,
2294 					   u64 data)
2295 {
2296 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2297 
2298 	return dd->rcv_err_status_cnt[56];
2299 }
2300 
2301 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2302 					   void *context, int vl, int mode,
2303 					   u64 data)
2304 {
2305 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2306 
2307 	return dd->rcv_err_status_cnt[55];
2308 }
2309 
2310 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2311 				const struct cntr_entry *entry,
2312 				void *context, int vl, int mode, u64 data)
2313 {
2314 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2315 
2316 	return dd->rcv_err_status_cnt[54];
2317 }
2318 
2319 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2320 				const struct cntr_entry *entry,
2321 				void *context, int vl, int mode, u64 data)
2322 {
2323 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2324 
2325 	return dd->rcv_err_status_cnt[53];
2326 }
2327 
2328 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2329 						 void *context, int vl,
2330 						 int mode, u64 data)
2331 {
2332 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2333 
2334 	return dd->rcv_err_status_cnt[52];
2335 }
2336 
2337 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2338 						 void *context, int vl,
2339 						 int mode, u64 data)
2340 {
2341 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2342 
2343 	return dd->rcv_err_status_cnt[51];
2344 }
2345 
2346 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2347 						 void *context, int vl,
2348 						 int mode, u64 data)
2349 {
2350 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2351 
2352 	return dd->rcv_err_status_cnt[50];
2353 }
2354 
2355 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2356 						 void *context, int vl,
2357 						 int mode, u64 data)
2358 {
2359 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2360 
2361 	return dd->rcv_err_status_cnt[49];
2362 }
2363 
2364 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2365 						 void *context, int vl,
2366 						 int mode, u64 data)
2367 {
2368 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2369 
2370 	return dd->rcv_err_status_cnt[48];
2371 }
2372 
2373 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2374 						 void *context, int vl,
2375 						 int mode, u64 data)
2376 {
2377 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2378 
2379 	return dd->rcv_err_status_cnt[47];
2380 }
2381 
2382 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2383 					 void *context, int vl, int mode,
2384 					 u64 data)
2385 {
2386 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2387 
2388 	return dd->rcv_err_status_cnt[46];
2389 }
2390 
2391 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2392 				const struct cntr_entry *entry,
2393 				void *context, int vl, int mode, u64 data)
2394 {
2395 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2396 
2397 	return dd->rcv_err_status_cnt[45];
2398 }
2399 
2400 static u64 access_rx_lookup_csr_parity_err_cnt(
2401 				const struct cntr_entry *entry,
2402 				void *context, int vl, int mode, u64 data)
2403 {
2404 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2405 
2406 	return dd->rcv_err_status_cnt[44];
2407 }
2408 
2409 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2410 				const struct cntr_entry *entry,
2411 				void *context, int vl, int mode, u64 data)
2412 {
2413 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2414 
2415 	return dd->rcv_err_status_cnt[43];
2416 }
2417 
2418 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2419 				const struct cntr_entry *entry,
2420 				void *context, int vl, int mode, u64 data)
2421 {
2422 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2423 
2424 	return dd->rcv_err_status_cnt[42];
2425 }
2426 
2427 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2428 				const struct cntr_entry *entry,
2429 				void *context, int vl, int mode, u64 data)
2430 {
2431 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2432 
2433 	return dd->rcv_err_status_cnt[41];
2434 }
2435 
2436 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2437 				const struct cntr_entry *entry,
2438 				void *context, int vl, int mode, u64 data)
2439 {
2440 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2441 
2442 	return dd->rcv_err_status_cnt[40];
2443 }
2444 
2445 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2446 				const struct cntr_entry *entry,
2447 				void *context, int vl, int mode, u64 data)
2448 {
2449 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2450 
2451 	return dd->rcv_err_status_cnt[39];
2452 }
2453 
2454 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2455 				const struct cntr_entry *entry,
2456 				void *context, int vl, int mode, u64 data)
2457 {
2458 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2459 
2460 	return dd->rcv_err_status_cnt[38];
2461 }
2462 
2463 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2464 				const struct cntr_entry *entry,
2465 				void *context, int vl, int mode, u64 data)
2466 {
2467 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2468 
2469 	return dd->rcv_err_status_cnt[37];
2470 }
2471 
2472 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2473 				const struct cntr_entry *entry,
2474 				void *context, int vl, int mode, u64 data)
2475 {
2476 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2477 
2478 	return dd->rcv_err_status_cnt[36];
2479 }
2480 
2481 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2482 				const struct cntr_entry *entry,
2483 				void *context, int vl, int mode, u64 data)
2484 {
2485 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2486 
2487 	return dd->rcv_err_status_cnt[35];
2488 }
2489 
2490 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2491 				const struct cntr_entry *entry,
2492 				void *context, int vl, int mode, u64 data)
2493 {
2494 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2495 
2496 	return dd->rcv_err_status_cnt[34];
2497 }
2498 
2499 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2500 				const struct cntr_entry *entry,
2501 				void *context, int vl, int mode, u64 data)
2502 {
2503 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2504 
2505 	return dd->rcv_err_status_cnt[33];
2506 }
2507 
2508 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2509 					void *context, int vl, int mode,
2510 					u64 data)
2511 {
2512 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2513 
2514 	return dd->rcv_err_status_cnt[32];
2515 }
2516 
2517 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2518 				       void *context, int vl, int mode,
2519 				       u64 data)
2520 {
2521 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2522 
2523 	return dd->rcv_err_status_cnt[31];
2524 }
2525 
2526 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2527 					  void *context, int vl, int mode,
2528 					  u64 data)
2529 {
2530 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2531 
2532 	return dd->rcv_err_status_cnt[30];
2533 }
2534 
2535 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2536 					     void *context, int vl, int mode,
2537 					     u64 data)
2538 {
2539 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2540 
2541 	return dd->rcv_err_status_cnt[29];
2542 }
2543 
2544 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2545 						 void *context, int vl,
2546 						 int mode, u64 data)
2547 {
2548 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2549 
2550 	return dd->rcv_err_status_cnt[28];
2551 }
2552 
2553 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2554 				const struct cntr_entry *entry,
2555 				void *context, int vl, int mode, u64 data)
2556 {
2557 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2558 
2559 	return dd->rcv_err_status_cnt[27];
2560 }
2561 
2562 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2563 				const struct cntr_entry *entry,
2564 				void *context, int vl, int mode, u64 data)
2565 {
2566 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2567 
2568 	return dd->rcv_err_status_cnt[26];
2569 }
2570 
2571 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2572 				const struct cntr_entry *entry,
2573 				void *context, int vl, int mode, u64 data)
2574 {
2575 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2576 
2577 	return dd->rcv_err_status_cnt[25];
2578 }
2579 
2580 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2581 				const struct cntr_entry *entry,
2582 				void *context, int vl, int mode, u64 data)
2583 {
2584 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2585 
2586 	return dd->rcv_err_status_cnt[24];
2587 }
2588 
2589 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2590 				const struct cntr_entry *entry,
2591 				void *context, int vl, int mode, u64 data)
2592 {
2593 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2594 
2595 	return dd->rcv_err_status_cnt[23];
2596 }
2597 
2598 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2599 				const struct cntr_entry *entry,
2600 				void *context, int vl, int mode, u64 data)
2601 {
2602 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2603 
2604 	return dd->rcv_err_status_cnt[22];
2605 }
2606 
2607 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2608 				const struct cntr_entry *entry,
2609 				void *context, int vl, int mode, u64 data)
2610 {
2611 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2612 
2613 	return dd->rcv_err_status_cnt[21];
2614 }
2615 
2616 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2617 				const struct cntr_entry *entry,
2618 				void *context, int vl, int mode, u64 data)
2619 {
2620 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2621 
2622 	return dd->rcv_err_status_cnt[20];
2623 }
2624 
2625 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2626 				const struct cntr_entry *entry,
2627 				void *context, int vl, int mode, u64 data)
2628 {
2629 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2630 
2631 	return dd->rcv_err_status_cnt[19];
2632 }
2633 
2634 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2635 						 void *context, int vl,
2636 						 int mode, u64 data)
2637 {
2638 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2639 
2640 	return dd->rcv_err_status_cnt[18];
2641 }
2642 
2643 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2644 						 void *context, int vl,
2645 						 int mode, u64 data)
2646 {
2647 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2648 
2649 	return dd->rcv_err_status_cnt[17];
2650 }
2651 
2652 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2653 				const struct cntr_entry *entry,
2654 				void *context, int vl, int mode, u64 data)
2655 {
2656 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2657 
2658 	return dd->rcv_err_status_cnt[16];
2659 }
2660 
2661 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2662 				const struct cntr_entry *entry,
2663 				void *context, int vl, int mode, u64 data)
2664 {
2665 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2666 
2667 	return dd->rcv_err_status_cnt[15];
2668 }
2669 
2670 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2671 						void *context, int vl,
2672 						int mode, u64 data)
2673 {
2674 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2675 
2676 	return dd->rcv_err_status_cnt[14];
2677 }
2678 
2679 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2680 						void *context, int vl,
2681 						int mode, u64 data)
2682 {
2683 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2684 
2685 	return dd->rcv_err_status_cnt[13];
2686 }
2687 
2688 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2689 					      void *context, int vl, int mode,
2690 					      u64 data)
2691 {
2692 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2693 
2694 	return dd->rcv_err_status_cnt[12];
2695 }
2696 
2697 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2698 					  void *context, int vl, int mode,
2699 					  u64 data)
2700 {
2701 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2702 
2703 	return dd->rcv_err_status_cnt[11];
2704 }
2705 
2706 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2707 					  void *context, int vl, int mode,
2708 					  u64 data)
2709 {
2710 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2711 
2712 	return dd->rcv_err_status_cnt[10];
2713 }
2714 
2715 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2716 					       void *context, int vl, int mode,
2717 					       u64 data)
2718 {
2719 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720 
2721 	return dd->rcv_err_status_cnt[9];
2722 }
2723 
2724 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2725 					    void *context, int vl, int mode,
2726 					    u64 data)
2727 {
2728 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729 
2730 	return dd->rcv_err_status_cnt[8];
2731 }
2732 
2733 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2734 				const struct cntr_entry *entry,
2735 				void *context, int vl, int mode, u64 data)
2736 {
2737 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738 
2739 	return dd->rcv_err_status_cnt[7];
2740 }
2741 
2742 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2743 				const struct cntr_entry *entry,
2744 				void *context, int vl, int mode, u64 data)
2745 {
2746 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747 
2748 	return dd->rcv_err_status_cnt[6];
2749 }
2750 
2751 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2752 					  void *context, int vl, int mode,
2753 					  u64 data)
2754 {
2755 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756 
2757 	return dd->rcv_err_status_cnt[5];
2758 }
2759 
2760 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2761 					  void *context, int vl, int mode,
2762 					  u64 data)
2763 {
2764 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765 
2766 	return dd->rcv_err_status_cnt[4];
2767 }
2768 
2769 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2770 					 void *context, int vl, int mode,
2771 					 u64 data)
2772 {
2773 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774 
2775 	return dd->rcv_err_status_cnt[3];
2776 }
2777 
2778 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2779 					 void *context, int vl, int mode,
2780 					 u64 data)
2781 {
2782 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783 
2784 	return dd->rcv_err_status_cnt[2];
2785 }
2786 
2787 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2788 					    void *context, int vl, int mode,
2789 					    u64 data)
2790 {
2791 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792 
2793 	return dd->rcv_err_status_cnt[1];
2794 }
2795 
2796 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2797 					 void *context, int vl, int mode,
2798 					 u64 data)
2799 {
2800 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2801 
2802 	return dd->rcv_err_status_cnt[0];
2803 }
2804 
2805 /*
2806  * Software counters corresponding to each of the
2807  * error status bits within SendPioErrStatus
2808  */
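/*
 * Note: each accessor in this and the following error-status groups simply
 * reports the software counter kept for one status bit; the array index
 * matches the bit position, listed from the highest bit down to bit 0.
 */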
2809 static u64 access_pio_pec_sop_head_parity_err_cnt(
2810 				const struct cntr_entry *entry,
2811 				void *context, int vl, int mode, u64 data)
2812 {
2813 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2814 
2815 	return dd->send_pio_err_status_cnt[35];
2816 }
2817 
2818 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2819 				const struct cntr_entry *entry,
2820 				void *context, int vl, int mode, u64 data)
2821 {
2822 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2823 
2824 	return dd->send_pio_err_status_cnt[34];
2825 }
2826 
2827 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2828 				const struct cntr_entry *entry,
2829 				void *context, int vl, int mode, u64 data)
2830 {
2831 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2832 
2833 	return dd->send_pio_err_status_cnt[33];
2834 }
2835 
2836 static u64 access_pio_current_free_cnt_parity_err_cnt(
2837 				const struct cntr_entry *entry,
2838 				void *context, int vl, int mode, u64 data)
2839 {
2840 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2841 
2842 	return dd->send_pio_err_status_cnt[32];
2843 }
2844 
2845 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2846 					  void *context, int vl, int mode,
2847 					  u64 data)
2848 {
2849 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2850 
2851 	return dd->send_pio_err_status_cnt[31];
2852 }
2853 
2854 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2855 					  void *context, int vl, int mode,
2856 					  u64 data)
2857 {
2858 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2859 
2860 	return dd->send_pio_err_status_cnt[30];
2861 }
2862 
2863 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2864 					   void *context, int vl, int mode,
2865 					   u64 data)
2866 {
2867 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2868 
2869 	return dd->send_pio_err_status_cnt[29];
2870 }
2871 
2872 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2873 				const struct cntr_entry *entry,
2874 				void *context, int vl, int mode, u64 data)
2875 {
2876 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2877 
2878 	return dd->send_pio_err_status_cnt[28];
2879 }
2880 
2881 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2882 					     void *context, int vl, int mode,
2883 					     u64 data)
2884 {
2885 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2886 
2887 	return dd->send_pio_err_status_cnt[27];
2888 }
2889 
2890 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2891 					     void *context, int vl, int mode,
2892 					     u64 data)
2893 {
2894 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2895 
2896 	return dd->send_pio_err_status_cnt[26];
2897 }
2898 
2899 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2900 						void *context, int vl,
2901 						int mode, u64 data)
2902 {
2903 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2904 
2905 	return dd->send_pio_err_status_cnt[25];
2906 }
2907 
2908 static u64 access_pio_block_qw_count_parity_err_cnt(
2909 				const struct cntr_entry *entry,
2910 				void *context, int vl, int mode, u64 data)
2911 {
2912 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2913 
2914 	return dd->send_pio_err_status_cnt[24];
2915 }
2916 
2917 static u64 access_pio_write_qw_valid_parity_err_cnt(
2918 				const struct cntr_entry *entry,
2919 				void *context, int vl, int mode, u64 data)
2920 {
2921 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2922 
2923 	return dd->send_pio_err_status_cnt[23];
2924 }
2925 
2926 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2927 					    void *context, int vl, int mode,
2928 					    u64 data)
2929 {
2930 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2931 
2932 	return dd->send_pio_err_status_cnt[22];
2933 }
2934 
2935 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2936 						void *context, int vl,
2937 						int mode, u64 data)
2938 {
2939 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2940 
2941 	return dd->send_pio_err_status_cnt[21];
2942 }
2943 
2944 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2945 						void *context, int vl,
2946 						int mode, u64 data)
2947 {
2948 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2949 
2950 	return dd->send_pio_err_status_cnt[20];
2951 }
2952 
2953 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2954 						void *context, int vl,
2955 						int mode, u64 data)
2956 {
2957 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2958 
2959 	return dd->send_pio_err_status_cnt[19];
2960 }
2961 
2962 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2963 				const struct cntr_entry *entry,
2964 				void *context, int vl, int mode, u64 data)
2965 {
2966 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2967 
2968 	return dd->send_pio_err_status_cnt[18];
2969 }
2970 
2971 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2972 					 void *context, int vl, int mode,
2973 					 u64 data)
2974 {
2975 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2976 
2977 	return dd->send_pio_err_status_cnt[17];
2978 }
2979 
2980 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2981 					    void *context, int vl, int mode,
2982 					    u64 data)
2983 {
2984 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2985 
2986 	return dd->send_pio_err_status_cnt[16];
2987 }
2988 
2989 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2990 				const struct cntr_entry *entry,
2991 				void *context, int vl, int mode, u64 data)
2992 {
2993 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2994 
2995 	return dd->send_pio_err_status_cnt[15];
2996 }
2997 
2998 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2999 				const struct cntr_entry *entry,
3000 				void *context, int vl, int mode, u64 data)
3001 {
3002 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3003 
3004 	return dd->send_pio_err_status_cnt[14];
3005 }
3006 
3007 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
3008 				const struct cntr_entry *entry,
3009 				void *context, int vl, int mode, u64 data)
3010 {
3011 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3012 
3013 	return dd->send_pio_err_status_cnt[13];
3014 }
3015 
3016 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3017 				const struct cntr_entry *entry,
3018 				void *context, int vl, int mode, u64 data)
3019 {
3020 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3021 
3022 	return dd->send_pio_err_status_cnt[12];
3023 }
3024 
3025 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3026 				const struct cntr_entry *entry,
3027 				void *context, int vl, int mode, u64 data)
3028 {
3029 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3030 
3031 	return dd->send_pio_err_status_cnt[11];
3032 }
3033 
3034 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3035 				const struct cntr_entry *entry,
3036 				void *context, int vl, int mode, u64 data)
3037 {
3038 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3039 
3040 	return dd->send_pio_err_status_cnt[10];
3041 }
3042 
3043 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3044 				const struct cntr_entry *entry,
3045 				void *context, int vl, int mode, u64 data)
3046 {
3047 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048 
3049 	return dd->send_pio_err_status_cnt[9];
3050 }
3051 
3052 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3053 				const struct cntr_entry *entry,
3054 				void *context, int vl, int mode, u64 data)
3055 {
3056 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3057 
3058 	return dd->send_pio_err_status_cnt[8];
3059 }
3060 
3061 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3062 				const struct cntr_entry *entry,
3063 				void *context, int vl, int mode, u64 data)
3064 {
3065 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3066 
3067 	return dd->send_pio_err_status_cnt[7];
3068 }
3069 
3070 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3071 					      void *context, int vl, int mode,
3072 					      u64 data)
3073 {
3074 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3075 
3076 	return dd->send_pio_err_status_cnt[6];
3077 }
3078 
3079 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3080 					      void *context, int vl, int mode,
3081 					      u64 data)
3082 {
3083 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3084 
3085 	return dd->send_pio_err_status_cnt[5];
3086 }
3087 
3088 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3089 					   void *context, int vl, int mode,
3090 					   u64 data)
3091 {
3092 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3093 
3094 	return dd->send_pio_err_status_cnt[4];
3095 }
3096 
3097 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3098 					   void *context, int vl, int mode,
3099 					   u64 data)
3100 {
3101 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3102 
3103 	return dd->send_pio_err_status_cnt[3];
3104 }
3105 
3106 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3107 					 void *context, int vl, int mode,
3108 					 u64 data)
3109 {
3110 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3111 
3112 	return dd->send_pio_err_status_cnt[2];
3113 }
3114 
3115 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3116 						void *context, int vl,
3117 						int mode, u64 data)
3118 {
3119 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3120 
3121 	return dd->send_pio_err_status_cnt[1];
3122 }
3123 
3124 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3125 					     void *context, int vl, int mode,
3126 					     u64 data)
3127 {
3128 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3129 
3130 	return dd->send_pio_err_status_cnt[0];
3131 }
3132 
3133 /*
3134  * Software counters corresponding to each of the
3135  * error status bits within SendDmaErrStatus
3136  */
3137 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3138 				const struct cntr_entry *entry,
3139 				void *context, int vl, int mode, u64 data)
3140 {
3141 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142 
3143 	return dd->send_dma_err_status_cnt[3];
3144 }
3145 
3146 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3147 				const struct cntr_entry *entry,
3148 				void *context, int vl, int mode, u64 data)
3149 {
3150 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151 
3152 	return dd->send_dma_err_status_cnt[2];
3153 }
3154 
3155 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3156 					  void *context, int vl, int mode,
3157 					  u64 data)
3158 {
3159 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160 
3161 	return dd->send_dma_err_status_cnt[1];
3162 }
3163 
3164 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3165 				       void *context, int vl, int mode,
3166 				       u64 data)
3167 {
3168 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3169 
3170 	return dd->send_dma_err_status_cnt[0];
3171 }
3172 
3173 /*
3174  * Software counters corresponding to each of the
3175  * error status bits within SendEgressErrStatus
3176  */
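/* One software counter per SendEgressErrStatus bit, indexed [63] down to [0] below. */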
3177 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3178 				const struct cntr_entry *entry,
3179 				void *context, int vl, int mode, u64 data)
3180 {
3181 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3182 
3183 	return dd->send_egress_err_status_cnt[63];
3184 }
3185 
3186 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3187 				const struct cntr_entry *entry,
3188 				void *context, int vl, int mode, u64 data)
3189 {
3190 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3191 
3192 	return dd->send_egress_err_status_cnt[62];
3193 }
3194 
3195 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3196 					     void *context, int vl, int mode,
3197 					     u64 data)
3198 {
3199 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3200 
3201 	return dd->send_egress_err_status_cnt[61];
3202 }
3203 
3204 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3205 						 void *context, int vl,
3206 						 int mode, u64 data)
3207 {
3208 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3209 
3210 	return dd->send_egress_err_status_cnt[60];
3211 }
3212 
3213 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3214 				const struct cntr_entry *entry,
3215 				void *context, int vl, int mode, u64 data)
3216 {
3217 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3218 
3219 	return dd->send_egress_err_status_cnt[59];
3220 }
3221 
3222 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3223 					void *context, int vl, int mode,
3224 					u64 data)
3225 {
3226 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3227 
3228 	return dd->send_egress_err_status_cnt[58];
3229 }
3230 
3231 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3232 					    void *context, int vl, int mode,
3233 					    u64 data)
3234 {
3235 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3236 
3237 	return dd->send_egress_err_status_cnt[57];
3238 }
3239 
3240 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3241 					      void *context, int vl, int mode,
3242 					      u64 data)
3243 {
3244 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3245 
3246 	return dd->send_egress_err_status_cnt[56];
3247 }
3248 
3249 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3250 					      void *context, int vl, int mode,
3251 					      u64 data)
3252 {
3253 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3254 
3255 	return dd->send_egress_err_status_cnt[55];
3256 }
3257 
3258 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3259 					      void *context, int vl, int mode,
3260 					      u64 data)
3261 {
3262 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3263 
3264 	return dd->send_egress_err_status_cnt[54];
3265 }
3266 
3267 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3268 					      void *context, int vl, int mode,
3269 					      u64 data)
3270 {
3271 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3272 
3273 	return dd->send_egress_err_status_cnt[53];
3274 }
3275 
3276 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3277 					      void *context, int vl, int mode,
3278 					      u64 data)
3279 {
3280 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3281 
3282 	return dd->send_egress_err_status_cnt[52];
3283 }
3284 
3285 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3286 					      void *context, int vl, int mode,
3287 					      u64 data)
3288 {
3289 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3290 
3291 	return dd->send_egress_err_status_cnt[51];
3292 }
3293 
3294 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3295 					      void *context, int vl, int mode,
3296 					      u64 data)
3297 {
3298 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3299 
3300 	return dd->send_egress_err_status_cnt[50];
3301 }
3302 
3303 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3304 					      void *context, int vl, int mode,
3305 					      u64 data)
3306 {
3307 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3308 
3309 	return dd->send_egress_err_status_cnt[49];
3310 }
3311 
3312 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3313 					      void *context, int vl, int mode,
3314 					      u64 data)
3315 {
3316 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3317 
3318 	return dd->send_egress_err_status_cnt[48];
3319 }
3320 
3321 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3322 					      void *context, int vl, int mode,
3323 					      u64 data)
3324 {
3325 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3326 
3327 	return dd->send_egress_err_status_cnt[47];
3328 }
3329 
3330 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3331 					    void *context, int vl, int mode,
3332 					    u64 data)
3333 {
3334 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3335 
3336 	return dd->send_egress_err_status_cnt[46];
3337 }
3338 
3339 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3340 					     void *context, int vl, int mode,
3341 					     u64 data)
3342 {
3343 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3344 
3345 	return dd->send_egress_err_status_cnt[45];
3346 }
3347 
3348 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3349 						 void *context, int vl,
3350 						 int mode, u64 data)
3351 {
3352 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3353 
3354 	return dd->send_egress_err_status_cnt[44];
3355 }
3356 
3357 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3358 				const struct cntr_entry *entry,
3359 				void *context, int vl, int mode, u64 data)
3360 {
3361 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3362 
3363 	return dd->send_egress_err_status_cnt[43];
3364 }
3365 
3366 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3367 					void *context, int vl, int mode,
3368 					u64 data)
3369 {
3370 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3371 
3372 	return dd->send_egress_err_status_cnt[42];
3373 }
3374 
3375 static u64 access_tx_credit_return_partiy_err_cnt(
3376 				const struct cntr_entry *entry,
3377 				void *context, int vl, int mode, u64 data)
3378 {
3379 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3380 
3381 	return dd->send_egress_err_status_cnt[41];
3382 }
3383 
3384 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3385 				const struct cntr_entry *entry,
3386 				void *context, int vl, int mode, u64 data)
3387 {
3388 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3389 
3390 	return dd->send_egress_err_status_cnt[40];
3391 }
3392 
3393 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3394 				const struct cntr_entry *entry,
3395 				void *context, int vl, int mode, u64 data)
3396 {
3397 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3398 
3399 	return dd->send_egress_err_status_cnt[39];
3400 }
3401 
3402 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3403 				const struct cntr_entry *entry,
3404 				void *context, int vl, int mode, u64 data)
3405 {
3406 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3407 
3408 	return dd->send_egress_err_status_cnt[38];
3409 }
3410 
3411 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3412 				const struct cntr_entry *entry,
3413 				void *context, int vl, int mode, u64 data)
3414 {
3415 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3416 
3417 	return dd->send_egress_err_status_cnt[37];
3418 }
3419 
3420 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3421 				const struct cntr_entry *entry,
3422 				void *context, int vl, int mode, u64 data)
3423 {
3424 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3425 
3426 	return dd->send_egress_err_status_cnt[36];
3427 }
3428 
3429 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3430 				const struct cntr_entry *entry,
3431 				void *context, int vl, int mode, u64 data)
3432 {
3433 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3434 
3435 	return dd->send_egress_err_status_cnt[35];
3436 }
3437 
3438 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3439 				const struct cntr_entry *entry,
3440 				void *context, int vl, int mode, u64 data)
3441 {
3442 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3443 
3444 	return dd->send_egress_err_status_cnt[34];
3445 }
3446 
3447 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3448 				const struct cntr_entry *entry,
3449 				void *context, int vl, int mode, u64 data)
3450 {
3451 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3452 
3453 	return dd->send_egress_err_status_cnt[33];
3454 }
3455 
3456 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3457 				const struct cntr_entry *entry,
3458 				void *context, int vl, int mode, u64 data)
3459 {
3460 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3461 
3462 	return dd->send_egress_err_status_cnt[32];
3463 }
3464 
3465 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3466 				const struct cntr_entry *entry,
3467 				void *context, int vl, int mode, u64 data)
3468 {
3469 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3470 
3471 	return dd->send_egress_err_status_cnt[31];
3472 }
3473 
3474 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3475 				const struct cntr_entry *entry,
3476 				void *context, int vl, int mode, u64 data)
3477 {
3478 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3479 
3480 	return dd->send_egress_err_status_cnt[30];
3481 }
3482 
3483 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3484 				const struct cntr_entry *entry,
3485 				void *context, int vl, int mode, u64 data)
3486 {
3487 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3488 
3489 	return dd->send_egress_err_status_cnt[29];
3490 }
3491 
3492 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3493 				const struct cntr_entry *entry,
3494 				void *context, int vl, int mode, u64 data)
3495 {
3496 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3497 
3498 	return dd->send_egress_err_status_cnt[28];
3499 }
3500 
3501 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3502 				const struct cntr_entry *entry,
3503 				void *context, int vl, int mode, u64 data)
3504 {
3505 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3506 
3507 	return dd->send_egress_err_status_cnt[27];
3508 }
3509 
3510 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3511 				const struct cntr_entry *entry,
3512 				void *context, int vl, int mode, u64 data)
3513 {
3514 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3515 
3516 	return dd->send_egress_err_status_cnt[26];
3517 }
3518 
3519 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3520 				const struct cntr_entry *entry,
3521 				void *context, int vl, int mode, u64 data)
3522 {
3523 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3524 
3525 	return dd->send_egress_err_status_cnt[25];
3526 }
3527 
3528 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3529 				const struct cntr_entry *entry,
3530 				void *context, int vl, int mode, u64 data)
3531 {
3532 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3533 
3534 	return dd->send_egress_err_status_cnt[24];
3535 }
3536 
3537 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3538 				const struct cntr_entry *entry,
3539 				void *context, int vl, int mode, u64 data)
3540 {
3541 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3542 
3543 	return dd->send_egress_err_status_cnt[23];
3544 }
3545 
3546 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3547 				const struct cntr_entry *entry,
3548 				void *context, int vl, int mode, u64 data)
3549 {
3550 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3551 
3552 	return dd->send_egress_err_status_cnt[22];
3553 }
3554 
3555 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3556 				const struct cntr_entry *entry,
3557 				void *context, int vl, int mode, u64 data)
3558 {
3559 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3560 
3561 	return dd->send_egress_err_status_cnt[21];
3562 }
3563 
3564 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3565 				const struct cntr_entry *entry,
3566 				void *context, int vl, int mode, u64 data)
3567 {
3568 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3569 
3570 	return dd->send_egress_err_status_cnt[20];
3571 }
3572 
3573 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3574 				const struct cntr_entry *entry,
3575 				void *context, int vl, int mode, u64 data)
3576 {
3577 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3578 
3579 	return dd->send_egress_err_status_cnt[19];
3580 }
3581 
3582 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3583 				const struct cntr_entry *entry,
3584 				void *context, int vl, int mode, u64 data)
3585 {
3586 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3587 
3588 	return dd->send_egress_err_status_cnt[18];
3589 }
3590 
3591 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3592 				const struct cntr_entry *entry,
3593 				void *context, int vl, int mode, u64 data)
3594 {
3595 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3596 
3597 	return dd->send_egress_err_status_cnt[17];
3598 }
3599 
3600 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3601 				const struct cntr_entry *entry,
3602 				void *context, int vl, int mode, u64 data)
3603 {
3604 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3605 
3606 	return dd->send_egress_err_status_cnt[16];
3607 }
3608 
3609 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3610 					   void *context, int vl, int mode,
3611 					   u64 data)
3612 {
3613 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3614 
3615 	return dd->send_egress_err_status_cnt[15];
3616 }
3617 
3618 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3619 						 void *context, int vl,
3620 						 int mode, u64 data)
3621 {
3622 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3623 
3624 	return dd->send_egress_err_status_cnt[14];
3625 }
3626 
3627 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3628 					       void *context, int vl, int mode,
3629 					       u64 data)
3630 {
3631 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3632 
3633 	return dd->send_egress_err_status_cnt[13];
3634 }
3635 
3636 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3637 					void *context, int vl, int mode,
3638 					u64 data)
3639 {
3640 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3641 
3642 	return dd->send_egress_err_status_cnt[12];
3643 }
3644 
3645 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3646 				const struct cntr_entry *entry,
3647 				void *context, int vl, int mode, u64 data)
3648 {
3649 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3650 
3651 	return dd->send_egress_err_status_cnt[11];
3652 }
3653 
3654 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3655 					     void *context, int vl, int mode,
3656 					     u64 data)
3657 {
3658 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3659 
3660 	return dd->send_egress_err_status_cnt[10];
3661 }
3662 
3663 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3664 					    void *context, int vl, int mode,
3665 					    u64 data)
3666 {
3667 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3668 
3669 	return dd->send_egress_err_status_cnt[9];
3670 }
3671 
3672 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3673 				const struct cntr_entry *entry,
3674 				void *context, int vl, int mode, u64 data)
3675 {
3676 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3677 
3678 	return dd->send_egress_err_status_cnt[8];
3679 }
3680 
3681 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3682 				const struct cntr_entry *entry,
3683 				void *context, int vl, int mode, u64 data)
3684 {
3685 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3686 
3687 	return dd->send_egress_err_status_cnt[7];
3688 }
3689 
3690 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3691 					    void *context, int vl, int mode,
3692 					    u64 data)
3693 {
3694 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3695 
3696 	return dd->send_egress_err_status_cnt[6];
3697 }
3698 
3699 static u64 access_tx_incorrect_link_state_err_cnt(
3700 				const struct cntr_entry *entry,
3701 				void *context, int vl, int mode, u64 data)
3702 {
3703 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3704 
3705 	return dd->send_egress_err_status_cnt[5];
3706 }
3707 
3708 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3709 				      void *context, int vl, int mode,
3710 				      u64 data)
3711 {
3712 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3713 
3714 	return dd->send_egress_err_status_cnt[4];
3715 }
3716 
3717 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3718 				const struct cntr_entry *entry,
3719 				void *context, int vl, int mode, u64 data)
3720 {
3721 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3722 
3723 	return dd->send_egress_err_status_cnt[3];
3724 }
3725 
3726 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3727 					    void *context, int vl, int mode,
3728 					    u64 data)
3729 {
3730 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3731 
3732 	return dd->send_egress_err_status_cnt[2];
3733 }
3734 
3735 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3736 				const struct cntr_entry *entry,
3737 				void *context, int vl, int mode, u64 data)
3738 {
3739 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740 
3741 	return dd->send_egress_err_status_cnt[1];
3742 }
3743 
3744 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3745 				const struct cntr_entry *entry,
3746 				void *context, int vl, int mode, u64 data)
3747 {
3748 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3749 
3750 	return dd->send_egress_err_status_cnt[0];
3751 }
3752 
3753 /*
3754  * Software counters corresponding to each of the
3755  * error status bits within SendErrStatus
3756  */
3757 static u64 access_send_csr_write_bad_addr_err_cnt(
3758 				const struct cntr_entry *entry,
3759 				void *context, int vl, int mode, u64 data)
3760 {
3761 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3762 
3763 	return dd->send_err_status_cnt[2];
3764 }
3765 
3766 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3767 						 void *context, int vl,
3768 						 int mode, u64 data)
3769 {
3770 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3771 
3772 	return dd->send_err_status_cnt[1];
3773 }
3774 
3775 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3776 				      void *context, int vl, int mode,
3777 				      u64 data)
3778 {
3779 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3780 
3781 	return dd->send_err_status_cnt[0];
3782 }
3783 
3784 /*
3785  * Software counters corresponding to each of the
3786  * error status bits within SendCtxtErrStatus
3787  */
3788 static u64 access_pio_write_out_of_bounds_err_cnt(
3789 				const struct cntr_entry *entry,
3790 				void *context, int vl, int mode, u64 data)
3791 {
3792 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793 
3794 	return dd->sw_ctxt_err_status_cnt[4];
3795 }
3796 
3797 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3798 					     void *context, int vl, int mode,
3799 					     u64 data)
3800 {
3801 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802 
3803 	return dd->sw_ctxt_err_status_cnt[3];
3804 }
3805 
3806 static u64 access_pio_write_crosses_boundary_err_cnt(
3807 				const struct cntr_entry *entry,
3808 				void *context, int vl, int mode, u64 data)
3809 {
3810 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811 
3812 	return dd->sw_ctxt_err_status_cnt[2];
3813 }
3814 
3815 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3816 						void *context, int vl,
3817 						int mode, u64 data)
3818 {
3819 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820 
3821 	return dd->sw_ctxt_err_status_cnt[1];
3822 }
3823 
3824 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3825 					       void *context, int vl, int mode,
3826 					       u64 data)
3827 {
3828 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3829 
3830 	return dd->sw_ctxt_err_status_cnt[0];
3831 }
3832 
3833 /*
3834  * Software counters corresponding to each of the
3835  * error status bits within SendDmaEngErrStatus
3836  */
3837 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3838 				const struct cntr_entry *entry,
3839 				void *context, int vl, int mode, u64 data)
3840 {
3841 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3842 
3843 	return dd->sw_send_dma_eng_err_status_cnt[23];
3844 }
3845 
3846 static u64 access_sdma_header_storage_cor_err_cnt(
3847 				const struct cntr_entry *entry,
3848 				void *context, int vl, int mode, u64 data)
3849 {
3850 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3851 
3852 	return dd->sw_send_dma_eng_err_status_cnt[22];
3853 }
3854 
3855 static u64 access_sdma_packet_tracking_cor_err_cnt(
3856 				const struct cntr_entry *entry,
3857 				void *context, int vl, int mode, u64 data)
3858 {
3859 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3860 
3861 	return dd->sw_send_dma_eng_err_status_cnt[21];
3862 }
3863 
3864 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3865 					    void *context, int vl, int mode,
3866 					    u64 data)
3867 {
3868 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3869 
3870 	return dd->sw_send_dma_eng_err_status_cnt[20];
3871 }
3872 
3873 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3874 					      void *context, int vl, int mode,
3875 					      u64 data)
3876 {
3877 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3878 
3879 	return dd->sw_send_dma_eng_err_status_cnt[19];
3880 }
3881 
3882 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3883 				const struct cntr_entry *entry,
3884 				void *context, int vl, int mode, u64 data)
3885 {
3886 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3887 
3888 	return dd->sw_send_dma_eng_err_status_cnt[18];
3889 }
3890 
3891 static u64 access_sdma_header_storage_unc_err_cnt(
3892 				const struct cntr_entry *entry,
3893 				void *context, int vl, int mode, u64 data)
3894 {
3895 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3896 
3897 	return dd->sw_send_dma_eng_err_status_cnt[17];
3898 }
3899 
3900 static u64 access_sdma_packet_tracking_unc_err_cnt(
3901 				const struct cntr_entry *entry,
3902 				void *context, int vl, int mode, u64 data)
3903 {
3904 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3905 
3906 	return dd->sw_send_dma_eng_err_status_cnt[16];
3907 }
3908 
3909 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3910 					    void *context, int vl, int mode,
3911 					    u64 data)
3912 {
3913 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3914 
3915 	return dd->sw_send_dma_eng_err_status_cnt[15];
3916 }
3917 
3918 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3919 					      void *context, int vl, int mode,
3920 					      u64 data)
3921 {
3922 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3923 
3924 	return dd->sw_send_dma_eng_err_status_cnt[14];
3925 }
3926 
3927 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3928 				       void *context, int vl, int mode,
3929 				       u64 data)
3930 {
3931 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3932 
3933 	return dd->sw_send_dma_eng_err_status_cnt[13];
3934 }
3935 
3936 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3937 					     void *context, int vl, int mode,
3938 					     u64 data)
3939 {
3940 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3941 
3942 	return dd->sw_send_dma_eng_err_status_cnt[12];
3943 }
3944 
3945 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3946 					      void *context, int vl, int mode,
3947 					      u64 data)
3948 {
3949 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3950 
3951 	return dd->sw_send_dma_eng_err_status_cnt[11];
3952 }
3953 
3954 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3955 					     void *context, int vl, int mode,
3956 					     u64 data)
3957 {
3958 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3959 
3960 	return dd->sw_send_dma_eng_err_status_cnt[10];
3961 }
3962 
3963 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3964 					  void *context, int vl, int mode,
3965 					  u64 data)
3966 {
3967 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3968 
3969 	return dd->sw_send_dma_eng_err_status_cnt[9];
3970 }
3971 
3972 static u64 access_sdma_packet_desc_overflow_err_cnt(
3973 				const struct cntr_entry *entry,
3974 				void *context, int vl, int mode, u64 data)
3975 {
3976 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3977 
3978 	return dd->sw_send_dma_eng_err_status_cnt[8];
3979 }
3980 
3981 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3982 					       void *context, int vl,
3983 					       int mode, u64 data)
3984 {
3985 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3986 
3987 	return dd->sw_send_dma_eng_err_status_cnt[7];
3988 }
3989 
3990 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3991 				    void *context, int vl, int mode, u64 data)
3992 {
3993 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3994 
3995 	return dd->sw_send_dma_eng_err_status_cnt[6];
3996 }
3997 
3998 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3999 					void *context, int vl, int mode,
4000 					u64 data)
4001 {
4002 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4003 
4004 	return dd->sw_send_dma_eng_err_status_cnt[5];
4005 }
4006 
4007 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
4008 					  void *context, int vl, int mode,
4009 					  u64 data)
4010 {
4011 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4012 
4013 	return dd->sw_send_dma_eng_err_status_cnt[4];
4014 }
4015 
4016 static u64 access_sdma_tail_out_of_bounds_err_cnt(
4017 				const struct cntr_entry *entry,
4018 				void *context, int vl, int mode, u64 data)
4019 {
4020 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4021 
4022 	return dd->sw_send_dma_eng_err_status_cnt[3];
4023 }
4024 
4025 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4026 					void *context, int vl, int mode,
4027 					u64 data)
4028 {
4029 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4030 
4031 	return dd->sw_send_dma_eng_err_status_cnt[2];
4032 }
4033 
4034 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4035 					    void *context, int vl, int mode,
4036 					    u64 data)
4037 {
4038 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4039 
4040 	return dd->sw_send_dma_eng_err_status_cnt[1];
4041 }
4042 
4043 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4044 					void *context, int vl, int mode,
4045 					u64 data)
4046 {
4047 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4048 
4049 	return dd->sw_send_dma_eng_err_status_cnt[0];
4050 }
4051 
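/*
 * DC receive errors: the hardware count read from entry->csr is
 * combined with the software-counted bypass packet errors on a read
 * (saturating at CNTR_MAX); a write clears the software count.
 */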
4052 static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4053 				 void *context, int vl, int mode,
4054 				 u64 data)
4055 {
4056 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4057 
4058 	u64 val = 0;
4059 	u64 csr = entry->csr;
4060 
4061 	val = read_write_csr(dd, csr, mode, data);
4062 	if (mode == CNTR_MODE_R) {
4063 		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4064 			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4065 	} else if (mode == CNTR_MODE_W) {
4066 		dd->sw_rcv_bypass_packet_errors = 0;
4067 	} else {
4068 		dd_dev_err(dd, "Invalid cntr register access mode");
4069 		return 0;
4070 	}
4071 	return val;
4072 }
4073 
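/*
 * Generate accessors for the per-CPU software counters kept in
 * ppd->ibport_data.rvp: read_write_cpu() applies the requested
 * read/write against the per-CPU counter and its z_* baseline.
 */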
4074 #define def_access_sw_cpu(cntr) \
4075 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,		      \
4076 			      void *context, int vl, int mode, u64 data)      \
4077 {									      \
4078 	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
4079 	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
4080 			      ppd->ibport_data.rvp.cntr, vl,		      \
4081 			      mode, data);				      \
4082 }
4083 
4084 def_access_sw_cpu(rc_acks);
4085 def_access_sw_cpu(rc_qacks);
4086 def_access_sw_cpu(rc_delayed_comp);
4087 
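/*
 * Generate accessors for the software IB protocol counters in
 * ppd->ibport_data.rvp.  These are port-wide counts, so per-VL
 * queries return 0.
 */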
4088 #define def_access_ibp_counter(cntr) \
4089 static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
4090 				void *context, int vl, int mode, u64 data)    \
4091 {									      \
4092 	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
4093 									      \
4094 	if (vl != CNTR_INVALID_VL)					      \
4095 		return 0;						      \
4096 									      \
4097 	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
4098 			     mode, data);				      \
4099 }
4100 
4101 def_access_ibp_counter(loop_pkts);
4102 def_access_ibp_counter(rc_resends);
4103 def_access_ibp_counter(rnr_naks);
4104 def_access_ibp_counter(other_naks);
4105 def_access_ibp_counter(rc_timeouts);
4106 def_access_ibp_counter(pkt_drops);
4107 def_access_ibp_counter(dmawait);
4108 def_access_ibp_counter(rc_seqnak);
4109 def_access_ibp_counter(rc_dupreq);
4110 def_access_ibp_counter(rdma_seq);
4111 def_access_ibp_counter(unaligned);
4112 def_access_ibp_counter(seq_naks);
4113 
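/*
 * Device counter table, indexed by the C_* device counter enums.
 * Hardware-backed entries name their CSR directly; CNTR_ELEM()
 * entries supply their own access routine.
 */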
4114 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4115 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4116 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4117 			CNTR_NORMAL),
4118 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4119 			CNTR_NORMAL),
4120 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4121 			RCV_TID_FLOW_GEN_MISMATCH_CNT,
4122 			CNTR_NORMAL),
4123 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4124 			CNTR_NORMAL),
4125 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4126 			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4127 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4128 			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4129 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4130 			CNTR_NORMAL),
4131 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4132 			CNTR_NORMAL),
4133 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4134 			CNTR_NORMAL),
4135 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4136 			CNTR_NORMAL),
4137 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4138 			CNTR_NORMAL),
4139 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4140 			CNTR_NORMAL),
4141 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4142 			CCE_RCV_URGENT_INT_CNT,	CNTR_NORMAL),
4143 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4144 			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4145 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4146 			      CNTR_SYNTH),
4147 [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4148 			    access_dc_rcv_err_cnt),
4149 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4150 				 CNTR_SYNTH),
4151 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4152 				  CNTR_SYNTH),
4153 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4154 				  CNTR_SYNTH),
4155 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4156 				   DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4157 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4158 				  DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4159 				  CNTR_SYNTH),
4160 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4161 				DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4162 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4163 			       CNTR_SYNTH),
4164 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4165 			      CNTR_SYNTH),
4166 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4167 			       CNTR_SYNTH),
4168 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4169 				 CNTR_SYNTH),
4170 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4171 				CNTR_SYNTH),
4172 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4173 				CNTR_SYNTH),
4174 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4175 			       CNTR_SYNTH),
4176 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4177 				 CNTR_SYNTH | CNTR_VL),
4178 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4179 				CNTR_SYNTH | CNTR_VL),
4180 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4181 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4182 				 CNTR_SYNTH | CNTR_VL),
4183 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4184 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4185 				 CNTR_SYNTH | CNTR_VL),
4186 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4187 			      CNTR_SYNTH),
4188 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4189 				 CNTR_SYNTH | CNTR_VL),
4190 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4191 				CNTR_SYNTH),
4192 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4193 				   CNTR_SYNTH | CNTR_VL),
4194 [C_DC_TOTAL_CRC] =
4195 	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4196 			 CNTR_SYNTH),
4197 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4198 				  CNTR_SYNTH),
4199 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4200 				  CNTR_SYNTH),
4201 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4202 				  CNTR_SYNTH),
4203 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4204 				  CNTR_SYNTH),
4205 [C_DC_CRC_MULT_LN] =
4206 	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4207 			 CNTR_SYNTH),
4208 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4209 				    CNTR_SYNTH),
4210 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4211 				    CNTR_SYNTH),
4212 [C_DC_SEQ_CRC_CNT] =
4213 	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4214 			 CNTR_SYNTH),
4215 [C_DC_ESC0_ONLY_CNT] =
4216 	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4217 			 CNTR_SYNTH),
4218 [C_DC_ESC0_PLUS1_CNT] =
4219 	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4220 			 CNTR_SYNTH),
4221 [C_DC_ESC0_PLUS2_CNT] =
4222 	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4223 			 CNTR_SYNTH),
4224 [C_DC_REINIT_FROM_PEER_CNT] =
4225 	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4226 			 CNTR_SYNTH),
4227 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4228 				  CNTR_SYNTH),
4229 [C_DC_MISC_FLG_CNT] =
4230 	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4231 			 CNTR_SYNTH),
4232 [C_DC_PRF_GOOD_LTP_CNT] =
4233 	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4234 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4235 	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4236 			 CNTR_SYNTH),
4237 [C_DC_PRF_RX_FLIT_CNT] =
4238 	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4239 [C_DC_PRF_TX_FLIT_CNT] =
4240 	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4241 [C_DC_PRF_CLK_CNTR] =
4242 	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4243 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4244 	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4245 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4246 	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4247 			 CNTR_SYNTH),
4248 [C_DC_PG_STS_TX_SBE_CNT] =
4249 	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4250 [C_DC_PG_STS_TX_MBE_CNT] =
4251 	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4252 			 CNTR_SYNTH),
4253 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4254 			    access_sw_cpu_intr),
4255 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4256 			    access_sw_cpu_rcv_limit),
4257 [C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
4258 			    access_sw_ctx0_seq_drop),
4259 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4260 			    access_sw_vtx_wait),
4261 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4262 			    access_sw_pio_wait),
4263 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4264 			    access_sw_pio_drain),
4265 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4266 			    access_sw_kmem_wait),
4267 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4268 			    access_sw_send_schedule),
4269 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4270 				      SEND_DMA_DESC_FETCHED_CNT, 0,
4271 				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4272 				      dev_access_u32_csr),
4273 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4274 			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4275 			     access_sde_int_cnt),
4276 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4277 			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4278 			     access_sde_err_cnt),
4279 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4280 				  CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4281 				  access_sde_idle_int_cnt),
4282 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4283 				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4284 				      access_sde_progress_int_cnt),
4285 /* MISC_ERR_STATUS */
4286 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4287 				CNTR_NORMAL,
4288 				access_misc_pll_lock_fail_err_cnt),
4289 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4290 				CNTR_NORMAL,
4291 				access_misc_mbist_fail_err_cnt),
4292 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4293 				CNTR_NORMAL,
4294 				access_misc_invalid_eep_cmd_err_cnt),
4295 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4296 				CNTR_NORMAL,
4297 				access_misc_efuse_done_parity_err_cnt),
4298 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4299 				CNTR_NORMAL,
4300 				access_misc_efuse_write_err_cnt),
4301 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4302 				0, CNTR_NORMAL,
4303 				access_misc_efuse_read_bad_addr_err_cnt),
4304 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4305 				CNTR_NORMAL,
4306 				access_misc_efuse_csr_parity_err_cnt),
4307 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4308 				CNTR_NORMAL,
4309 				access_misc_fw_auth_failed_err_cnt),
4310 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4311 				CNTR_NORMAL,
4312 				access_misc_key_mismatch_err_cnt),
4313 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4314 				CNTR_NORMAL,
4315 				access_misc_sbus_write_failed_err_cnt),
4316 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4317 				CNTR_NORMAL,
4318 				access_misc_csr_write_bad_addr_err_cnt),
4319 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4320 				CNTR_NORMAL,
4321 				access_misc_csr_read_bad_addr_err_cnt),
4322 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4323 				CNTR_NORMAL,
4324 				access_misc_csr_parity_err_cnt),
4325 /* CceErrStatus */
4326 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4327 				CNTR_NORMAL,
4328 				access_sw_cce_err_status_aggregated_cnt),
4329 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4330 				CNTR_NORMAL,
4331 				access_cce_msix_csr_parity_err_cnt),
4332 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4333 				CNTR_NORMAL,
4334 				access_cce_int_map_unc_err_cnt),
4335 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4336 				CNTR_NORMAL,
4337 				access_cce_int_map_cor_err_cnt),
4338 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4339 				CNTR_NORMAL,
4340 				access_cce_msix_table_unc_err_cnt),
4341 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4342 				CNTR_NORMAL,
4343 				access_cce_msix_table_cor_err_cnt),
4344 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4345 				0, CNTR_NORMAL,
4346 				access_cce_rxdma_conv_fifo_parity_err_cnt),
4347 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4348 				0, CNTR_NORMAL,
4349 				access_cce_rcpl_async_fifo_parity_err_cnt),
4350 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4351 				CNTR_NORMAL,
4352 				access_cce_seg_write_bad_addr_err_cnt),
4353 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4354 				CNTR_NORMAL,
4355 				access_cce_seg_read_bad_addr_err_cnt),
4356 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4357 				CNTR_NORMAL,
4358 				access_la_triggered_cnt),
4359 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4360 				CNTR_NORMAL,
4361 				access_cce_trgt_cpl_timeout_err_cnt),
4362 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4363 				CNTR_NORMAL,
4364 				access_pcic_receive_parity_err_cnt),
4365 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4366 				CNTR_NORMAL,
4367 				access_pcic_transmit_back_parity_err_cnt),
4368 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4369 				0, CNTR_NORMAL,
4370 				access_pcic_transmit_front_parity_err_cnt),
4371 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4372 				CNTR_NORMAL,
4373 				access_pcic_cpl_dat_q_unc_err_cnt),
4374 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4375 				CNTR_NORMAL,
4376 				access_pcic_cpl_hd_q_unc_err_cnt),
4377 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4378 				CNTR_NORMAL,
4379 				access_pcic_post_dat_q_unc_err_cnt),
4380 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4381 				CNTR_NORMAL,
4382 				access_pcic_post_hd_q_unc_err_cnt),
4383 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4384 				CNTR_NORMAL,
4385 				access_pcic_retry_sot_mem_unc_err_cnt),
4386 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4387 				CNTR_NORMAL,
4388 				access_pcic_retry_mem_unc_err),
4389 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4390 				CNTR_NORMAL,
4391 				access_pcic_n_post_dat_q_parity_err_cnt),
4392 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4393 				CNTR_NORMAL,
4394 				access_pcic_n_post_h_q_parity_err_cnt),
4395 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4396 				CNTR_NORMAL,
4397 				access_pcic_cpl_dat_q_cor_err_cnt),
4398 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4399 				CNTR_NORMAL,
4400 				access_pcic_cpl_hd_q_cor_err_cnt),
4401 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4402 				CNTR_NORMAL,
4403 				access_pcic_post_dat_q_cor_err_cnt),
4404 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4405 				CNTR_NORMAL,
4406 				access_pcic_post_hd_q_cor_err_cnt),
4407 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4408 				CNTR_NORMAL,
4409 				access_pcic_retry_sot_mem_cor_err_cnt),
4410 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4411 				CNTR_NORMAL,
4412 				access_pcic_retry_mem_cor_err_cnt),
4413 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4414 				"CceCli1AsyncFifoDbgParityError", 0, 0,
4415 				CNTR_NORMAL,
4416 				access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4417 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4418 				"CceCli1AsyncFifoRxdmaParityError", 0, 0,
4419 				CNTR_NORMAL,
4420 				access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4421 				),
4422 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4423 			"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4424 			CNTR_NORMAL,
4425 			access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4426 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4427 			"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4428 			CNTR_NORMAL,
4429 			access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4430 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4431 			0, CNTR_NORMAL,
4432 			access_cce_cli2_async_fifo_parity_err_cnt),
4433 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4434 			CNTR_NORMAL,
4435 			access_cce_csr_cfg_bus_parity_err_cnt),
4436 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4437 			0, CNTR_NORMAL,
4438 			access_cce_cli0_async_fifo_parity_err_cnt),
4439 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4440 			CNTR_NORMAL,
4441 			access_cce_rspd_data_parity_err_cnt),
4442 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4443 			CNTR_NORMAL,
4444 			access_cce_trgt_access_err_cnt),
4445 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4446 			0, CNTR_NORMAL,
4447 			access_cce_trgt_async_fifo_parity_err_cnt),
4448 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4449 			CNTR_NORMAL,
4450 			access_cce_csr_write_bad_addr_err_cnt),
4451 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4452 			CNTR_NORMAL,
4453 			access_cce_csr_read_bad_addr_err_cnt),
4454 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4455 			CNTR_NORMAL,
4456 			access_ccs_csr_parity_err_cnt),
4457 
4458 /* RcvErrStatus */
4459 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4460 			CNTR_NORMAL,
4461 			access_rx_csr_parity_err_cnt),
4462 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4463 			CNTR_NORMAL,
4464 			access_rx_csr_write_bad_addr_err_cnt),
4465 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4466 			CNTR_NORMAL,
4467 			access_rx_csr_read_bad_addr_err_cnt),
4468 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4469 			CNTR_NORMAL,
4470 			access_rx_dma_csr_unc_err_cnt),
4471 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4472 			CNTR_NORMAL,
4473 			access_rx_dma_dq_fsm_encoding_err_cnt),
4474 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4475 			CNTR_NORMAL,
4476 			access_rx_dma_eq_fsm_encoding_err_cnt),
4477 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4478 			CNTR_NORMAL,
4479 			access_rx_dma_csr_parity_err_cnt),
4480 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4481 			CNTR_NORMAL,
4482 			access_rx_rbuf_data_cor_err_cnt),
4483 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4484 			CNTR_NORMAL,
4485 			access_rx_rbuf_data_unc_err_cnt),
4486 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4487 			CNTR_NORMAL,
4488 			access_rx_dma_data_fifo_rd_cor_err_cnt),
4489 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4490 			CNTR_NORMAL,
4491 			access_rx_dma_data_fifo_rd_unc_err_cnt),
4492 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4493 			CNTR_NORMAL,
4494 			access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4495 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4496 			CNTR_NORMAL,
4497 			access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4498 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4499 			CNTR_NORMAL,
4500 			access_rx_rbuf_desc_part2_cor_err_cnt),
4501 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4502 			CNTR_NORMAL,
4503 			access_rx_rbuf_desc_part2_unc_err_cnt),
4504 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4505 			CNTR_NORMAL,
4506 			access_rx_rbuf_desc_part1_cor_err_cnt),
4507 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4508 			CNTR_NORMAL,
4509 			access_rx_rbuf_desc_part1_unc_err_cnt),
4510 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4511 			CNTR_NORMAL,
4512 			access_rx_hq_intr_fsm_err_cnt),
4513 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4514 			CNTR_NORMAL,
4515 			access_rx_hq_intr_csr_parity_err_cnt),
4516 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4517 			CNTR_NORMAL,
4518 			access_rx_lookup_csr_parity_err_cnt),
4519 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4520 			CNTR_NORMAL,
4521 			access_rx_lookup_rcv_array_cor_err_cnt),
4522 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4523 			CNTR_NORMAL,
4524 			access_rx_lookup_rcv_array_unc_err_cnt),
4525 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4526 			0, CNTR_NORMAL,
4527 			access_rx_lookup_des_part2_parity_err_cnt),
4528 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4529 			0, CNTR_NORMAL,
4530 			access_rx_lookup_des_part1_unc_cor_err_cnt),
4531 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4532 			CNTR_NORMAL,
4533 			access_rx_lookup_des_part1_unc_err_cnt),
4534 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4535 			CNTR_NORMAL,
4536 			access_rx_rbuf_next_free_buf_cor_err_cnt),
4537 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4538 			CNTR_NORMAL,
4539 			access_rx_rbuf_next_free_buf_unc_err_cnt),
4540 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4541 			"RxRbufFlInitWrAddrParityErr", 0, 0,
4542 			CNTR_NORMAL,
4543 			access_rbuf_fl_init_wr_addr_parity_err_cnt),
4544 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4545 			0, CNTR_NORMAL,
4546 			access_rx_rbuf_fl_initdone_parity_err_cnt),
4547 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4548 			0, CNTR_NORMAL,
4549 			access_rx_rbuf_fl_write_addr_parity_err_cnt),
4550 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4551 			CNTR_NORMAL,
4552 			access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4553 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4554 			CNTR_NORMAL,
4555 			access_rx_rbuf_empty_err_cnt),
4556 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4557 			CNTR_NORMAL,
4558 			access_rx_rbuf_full_err_cnt),
4559 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4560 			CNTR_NORMAL,
4561 			access_rbuf_bad_lookup_err_cnt),
4562 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4563 			CNTR_NORMAL,
4564 			access_rbuf_ctx_id_parity_err_cnt),
4565 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4566 			CNTR_NORMAL,
4567 			access_rbuf_csr_qeopdw_parity_err_cnt),
4568 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4569 			"RxRbufCsrQNumOfPktParityErr", 0, 0,
4570 			CNTR_NORMAL,
4571 			access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4572 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4573 			"RxRbufCsrQTlPtrParityErr", 0, 0,
4574 			CNTR_NORMAL,
4575 			access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4576 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4577 			0, CNTR_NORMAL,
4578 			access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4579 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4580 			0, CNTR_NORMAL,
4581 			access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4582 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4583 			0, 0, CNTR_NORMAL,
4584 			access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4585 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4586 			0, CNTR_NORMAL,
4587 			access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4588 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4589 			"RxRbufCsrQHeadBufNumParityErr", 0, 0,
4590 			CNTR_NORMAL,
4591 			access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4592 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4593 			0, CNTR_NORMAL,
4594 			access_rx_rbuf_block_list_read_cor_err_cnt),
4595 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4596 			0, CNTR_NORMAL,
4597 			access_rx_rbuf_block_list_read_unc_err_cnt),
4598 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4599 			CNTR_NORMAL,
4600 			access_rx_rbuf_lookup_des_cor_err_cnt),
4601 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4602 			CNTR_NORMAL,
4603 			access_rx_rbuf_lookup_des_unc_err_cnt),
4604 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4605 			"RxRbufLookupDesRegUncCorErr", 0, 0,
4606 			CNTR_NORMAL,
4607 			access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4608 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4609 			CNTR_NORMAL,
4610 			access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4611 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4612 			CNTR_NORMAL,
4613 			access_rx_rbuf_free_list_cor_err_cnt),
4614 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4615 			CNTR_NORMAL,
4616 			access_rx_rbuf_free_list_unc_err_cnt),
4617 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4618 			CNTR_NORMAL,
4619 			access_rx_rcv_fsm_encoding_err_cnt),
4620 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4621 			CNTR_NORMAL,
4622 			access_rx_dma_flag_cor_err_cnt),
4623 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4624 			CNTR_NORMAL,
4625 			access_rx_dma_flag_unc_err_cnt),
4626 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4627 			CNTR_NORMAL,
4628 			access_rx_dc_sop_eop_parity_err_cnt),
4629 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4630 			CNTR_NORMAL,
4631 			access_rx_rcv_csr_parity_err_cnt),
4632 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4633 			CNTR_NORMAL,
4634 			access_rx_rcv_qp_map_table_cor_err_cnt),
4635 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4636 			CNTR_NORMAL,
4637 			access_rx_rcv_qp_map_table_unc_err_cnt),
4638 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4639 			CNTR_NORMAL,
4640 			access_rx_rcv_data_cor_err_cnt),
4641 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4642 			CNTR_NORMAL,
4643 			access_rx_rcv_data_unc_err_cnt),
4644 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4645 			CNTR_NORMAL,
4646 			access_rx_rcv_hdr_cor_err_cnt),
4647 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4648 			CNTR_NORMAL,
4649 			access_rx_rcv_hdr_unc_err_cnt),
4650 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4651 			CNTR_NORMAL,
4652 			access_rx_dc_intf_parity_err_cnt),
4653 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4654 			CNTR_NORMAL,
4655 			access_rx_dma_csr_cor_err_cnt),
4656 /* SendPioErrStatus */
4657 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4658 			CNTR_NORMAL,
4659 			access_pio_pec_sop_head_parity_err_cnt),
4660 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4661 			CNTR_NORMAL,
4662 			access_pio_pcc_sop_head_parity_err_cnt),
4663 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4664 			0, 0, CNTR_NORMAL,
4665 			access_pio_last_returned_cnt_parity_err_cnt),
4666 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4667 			0, CNTR_NORMAL,
4668 			access_pio_current_free_cnt_parity_err_cnt),
4669 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4670 			CNTR_NORMAL,
4671 			access_pio_reserved_31_err_cnt),
4672 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4673 			CNTR_NORMAL,
4674 			access_pio_reserved_30_err_cnt),
4675 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4676 			CNTR_NORMAL,
4677 			access_pio_ppmc_sop_len_err_cnt),
4678 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4679 			CNTR_NORMAL,
4680 			access_pio_ppmc_bqc_mem_parity_err_cnt),
4681 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4682 			CNTR_NORMAL,
4683 			access_pio_vl_fifo_parity_err_cnt),
4684 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4685 			CNTR_NORMAL,
4686 			access_pio_vlf_sop_parity_err_cnt),
4687 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4688 			CNTR_NORMAL,
4689 			access_pio_vlf_v1_len_parity_err_cnt),
4690 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4691 			CNTR_NORMAL,
4692 			access_pio_block_qw_count_parity_err_cnt),
4693 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4694 			CNTR_NORMAL,
4695 			access_pio_write_qw_valid_parity_err_cnt),
4696 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4697 			CNTR_NORMAL,
4698 			access_pio_state_machine_err_cnt),
4699 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4700 			CNTR_NORMAL,
4701 			access_pio_write_data_parity_err_cnt),
4702 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4703 			CNTR_NORMAL,
4704 			access_pio_host_addr_mem_cor_err_cnt),
4705 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4706 			CNTR_NORMAL,
4707 			access_pio_host_addr_mem_unc_err_cnt),
4708 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4709 			CNTR_NORMAL,
4710 			access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4711 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4712 			CNTR_NORMAL,
4713 			access_pio_init_sm_in_err_cnt),
4714 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4715 			CNTR_NORMAL,
4716 			access_pio_ppmc_pbl_fifo_err_cnt),
4717 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4718 			0, CNTR_NORMAL,
4719 			access_pio_credit_ret_fifo_parity_err_cnt),
4720 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4721 			CNTR_NORMAL,
4722 			access_pio_v1_len_mem_bank1_cor_err_cnt),
4723 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4724 			CNTR_NORMAL,
4725 			access_pio_v1_len_mem_bank0_cor_err_cnt),
4726 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4727 			CNTR_NORMAL,
4728 			access_pio_v1_len_mem_bank1_unc_err_cnt),
4729 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4730 			CNTR_NORMAL,
4731 			access_pio_v1_len_mem_bank0_unc_err_cnt),
4732 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4733 			CNTR_NORMAL,
4734 			access_pio_sm_pkt_reset_parity_err_cnt),
4735 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4736 			CNTR_NORMAL,
4737 			access_pio_pkt_evict_fifo_parity_err_cnt),
4738 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4739 			"PioSbrdctrlCrrelFifoParityErr", 0, 0,
4740 			CNTR_NORMAL,
4741 			access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4742 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4743 			CNTR_NORMAL,
4744 			access_pio_sbrdctl_crrel_parity_err_cnt),
4745 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4746 			CNTR_NORMAL,
4747 			access_pio_pec_fifo_parity_err_cnt),
4748 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4749 			CNTR_NORMAL,
4750 			access_pio_pcc_fifo_parity_err_cnt),
4751 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4752 			CNTR_NORMAL,
4753 			access_pio_sb_mem_fifo1_err_cnt),
4754 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4755 			CNTR_NORMAL,
4756 			access_pio_sb_mem_fifo0_err_cnt),
4757 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4758 			CNTR_NORMAL,
4759 			access_pio_csr_parity_err_cnt),
4760 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4761 			CNTR_NORMAL,
4762 			access_pio_write_addr_parity_err_cnt),
4763 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4764 			CNTR_NORMAL,
4765 			access_pio_write_bad_ctxt_err_cnt),
4766 /* SendDmaErrStatus */
4767 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4768 			0, CNTR_NORMAL,
4769 			access_sdma_pcie_req_tracking_cor_err_cnt),
4770 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4771 			0, CNTR_NORMAL,
4772 			access_sdma_pcie_req_tracking_unc_err_cnt),
4773 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4774 			CNTR_NORMAL,
4775 			access_sdma_csr_parity_err_cnt),
4776 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4777 			CNTR_NORMAL,
4778 			access_sdma_rpy_tag_err_cnt),
4779 /* SendEgressErrStatus */
4780 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4781 			CNTR_NORMAL,
4782 			access_tx_read_pio_memory_csr_unc_err_cnt),
4783 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4784 			0, CNTR_NORMAL,
4785 			access_tx_read_sdma_memory_csr_err_cnt),
4786 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4787 			CNTR_NORMAL,
4788 			access_tx_egress_fifo_cor_err_cnt),
4789 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4790 			CNTR_NORMAL,
4791 			access_tx_read_pio_memory_cor_err_cnt),
4792 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4793 			CNTR_NORMAL,
4794 			access_tx_read_sdma_memory_cor_err_cnt),
4795 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4796 			CNTR_NORMAL,
4797 			access_tx_sb_hdr_cor_err_cnt),
4798 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4799 			CNTR_NORMAL,
4800 			access_tx_credit_overrun_err_cnt),
4801 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4802 			CNTR_NORMAL,
4803 			access_tx_launch_fifo8_cor_err_cnt),
4804 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4805 			CNTR_NORMAL,
4806 			access_tx_launch_fifo7_cor_err_cnt),
4807 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4808 			CNTR_NORMAL,
4809 			access_tx_launch_fifo6_cor_err_cnt),
4810 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4811 			CNTR_NORMAL,
4812 			access_tx_launch_fifo5_cor_err_cnt),
4813 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4814 			CNTR_NORMAL,
4815 			access_tx_launch_fifo4_cor_err_cnt),
4816 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4817 			CNTR_NORMAL,
4818 			access_tx_launch_fifo3_cor_err_cnt),
4819 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4820 			CNTR_NORMAL,
4821 			access_tx_launch_fifo2_cor_err_cnt),
4822 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4823 			CNTR_NORMAL,
4824 			access_tx_launch_fifo1_cor_err_cnt),
4825 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4826 			CNTR_NORMAL,
4827 			access_tx_launch_fifo0_cor_err_cnt),
4828 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4829 			CNTR_NORMAL,
4830 			access_tx_credit_return_vl_err_cnt),
4831 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4832 			CNTR_NORMAL,
4833 			access_tx_hcrc_insertion_err_cnt),
4834 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4835 			CNTR_NORMAL,
4836 			access_tx_egress_fifo_unc_err_cnt),
4837 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4838 			CNTR_NORMAL,
4839 			access_tx_read_pio_memory_unc_err_cnt),
4840 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4841 			CNTR_NORMAL,
4842 			access_tx_read_sdma_memory_unc_err_cnt),
4843 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4844 			CNTR_NORMAL,
4845 			access_tx_sb_hdr_unc_err_cnt),
4846 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4847 			CNTR_NORMAL,
4848 			access_tx_credit_return_partiy_err_cnt),
4849 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4850 			0, 0, CNTR_NORMAL,
4851 			access_tx_launch_fifo8_unc_or_parity_err_cnt),
4852 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4853 			0, 0, CNTR_NORMAL,
4854 			access_tx_launch_fifo7_unc_or_parity_err_cnt),
4855 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4856 			0, 0, CNTR_NORMAL,
4857 			access_tx_launch_fifo6_unc_or_parity_err_cnt),
4858 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4859 			0, 0, CNTR_NORMAL,
4860 			access_tx_launch_fifo5_unc_or_parity_err_cnt),
4861 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4862 			0, 0, CNTR_NORMAL,
4863 			access_tx_launch_fifo4_unc_or_parity_err_cnt),
4864 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4865 			0, 0, CNTR_NORMAL,
4866 			access_tx_launch_fifo3_unc_or_parity_err_cnt),
4867 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4868 			0, 0, CNTR_NORMAL,
4869 			access_tx_launch_fifo2_unc_or_parity_err_cnt),
4870 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4871 			0, 0, CNTR_NORMAL,
4872 			access_tx_launch_fifo1_unc_or_parity_err_cnt),
4873 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4874 			0, 0, CNTR_NORMAL,
4875 			access_tx_launch_fifo0_unc_or_parity_err_cnt),
4876 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4877 			0, 0, CNTR_NORMAL,
4878 			access_tx_sdma15_disallowed_packet_err_cnt),
4879 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4880 			0, 0, CNTR_NORMAL,
4881 			access_tx_sdma14_disallowed_packet_err_cnt),
4882 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4883 			0, 0, CNTR_NORMAL,
4884 			access_tx_sdma13_disallowed_packet_err_cnt),
4885 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4886 			0, 0, CNTR_NORMAL,
4887 			access_tx_sdma12_disallowed_packet_err_cnt),
4888 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4889 			0, 0, CNTR_NORMAL,
4890 			access_tx_sdma11_disallowed_packet_err_cnt),
4891 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4892 			0, 0, CNTR_NORMAL,
4893 			access_tx_sdma10_disallowed_packet_err_cnt),
4894 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4895 			0, 0, CNTR_NORMAL,
4896 			access_tx_sdma9_disallowed_packet_err_cnt),
4897 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4898 			0, 0, CNTR_NORMAL,
4899 			access_tx_sdma8_disallowed_packet_err_cnt),
4900 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4901 			0, 0, CNTR_NORMAL,
4902 			access_tx_sdma7_disallowed_packet_err_cnt),
4903 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4904 			0, 0, CNTR_NORMAL,
4905 			access_tx_sdma6_disallowed_packet_err_cnt),
4906 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4907 			0, 0, CNTR_NORMAL,
4908 			access_tx_sdma5_disallowed_packet_err_cnt),
4909 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4910 			0, 0, CNTR_NORMAL,
4911 			access_tx_sdma4_disallowed_packet_err_cnt),
4912 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4913 			0, 0, CNTR_NORMAL,
4914 			access_tx_sdma3_disallowed_packet_err_cnt),
4915 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4916 			0, 0, CNTR_NORMAL,
4917 			access_tx_sdma2_disallowed_packet_err_cnt),
4918 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4919 			0, 0, CNTR_NORMAL,
4920 			access_tx_sdma1_disallowed_packet_err_cnt),
4921 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4922 			0, 0, CNTR_NORMAL,
4923 			access_tx_sdma0_disallowed_packet_err_cnt),
4924 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4925 			CNTR_NORMAL,
4926 			access_tx_config_parity_err_cnt),
4927 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4928 			CNTR_NORMAL,
4929 			access_tx_sbrd_ctl_csr_parity_err_cnt),
4930 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4931 			CNTR_NORMAL,
4932 			access_tx_launch_csr_parity_err_cnt),
4933 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4934 			CNTR_NORMAL,
4935 			access_tx_illegal_vl_err_cnt),
4936 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4937 			"TxSbrdCtlStateMachineParityErr", 0, 0,
4938 			CNTR_NORMAL,
4939 			access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4940 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4941 			CNTR_NORMAL,
4942 			access_egress_reserved_10_err_cnt),
4943 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4944 			CNTR_NORMAL,
4945 			access_egress_reserved_9_err_cnt),
4946 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4947 			0, 0, CNTR_NORMAL,
4948 			access_tx_sdma_launch_intf_parity_err_cnt),
4949 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4950 			CNTR_NORMAL,
4951 			access_tx_pio_launch_intf_parity_err_cnt),
4952 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4953 			CNTR_NORMAL,
4954 			access_egress_reserved_6_err_cnt),
4955 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4956 			CNTR_NORMAL,
4957 			access_tx_incorrect_link_state_err_cnt),
4958 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4959 			CNTR_NORMAL,
4960 			access_tx_linkdown_err_cnt),
4961 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4962 			"EgressFifoUnderrunOrParityErr", 0, 0,
4963 			CNTR_NORMAL,
4964 			access_tx_egress_fifi_underrun_or_parity_err_cnt),
4965 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4966 			CNTR_NORMAL,
4967 			access_egress_reserved_2_err_cnt),
4968 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4969 			CNTR_NORMAL,
4970 			access_tx_pkt_integrity_mem_unc_err_cnt),
4971 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4972 			CNTR_NORMAL,
4973 			access_tx_pkt_integrity_mem_cor_err_cnt),
4974 /* SendErrStatus */
4975 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4976 			CNTR_NORMAL,
4977 			access_send_csr_write_bad_addr_err_cnt),
4978 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4979 			CNTR_NORMAL,
4980 			access_send_csr_read_bad_addr_err_cnt),
4981 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4982 			CNTR_NORMAL,
4983 			access_send_csr_parity_cnt),
4984 /* SendCtxtErrStatus */
4985 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4986 			CNTR_NORMAL,
4987 			access_pio_write_out_of_bounds_err_cnt),
4988 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4989 			CNTR_NORMAL,
4990 			access_pio_write_overflow_err_cnt),
4991 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4992 			0, 0, CNTR_NORMAL,
4993 			access_pio_write_crosses_boundary_err_cnt),
4994 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4995 			CNTR_NORMAL,
4996 			access_pio_disallowed_packet_err_cnt),
4997 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4998 			CNTR_NORMAL,
4999 			access_pio_inconsistent_sop_err_cnt),
5000 /* SendDmaEngErrStatus */
5001 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
5002 			0, 0, CNTR_NORMAL,
5003 			access_sdma_header_request_fifo_cor_err_cnt),
5004 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
5005 			CNTR_NORMAL,
5006 			access_sdma_header_storage_cor_err_cnt),
5007 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
5008 			CNTR_NORMAL,
5009 			access_sdma_packet_tracking_cor_err_cnt),
5010 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
5011 			CNTR_NORMAL,
5012 			access_sdma_assembly_cor_err_cnt),
5013 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
5014 			CNTR_NORMAL,
5015 			access_sdma_desc_table_cor_err_cnt),
5016 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5017 			0, 0, CNTR_NORMAL,
5018 			access_sdma_header_request_fifo_unc_err_cnt),
5019 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5020 			CNTR_NORMAL,
5021 			access_sdma_header_storage_unc_err_cnt),
5022 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5023 			CNTR_NORMAL,
5024 			access_sdma_packet_tracking_unc_err_cnt),
5025 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5026 			CNTR_NORMAL,
5027 			access_sdma_assembly_unc_err_cnt),
5028 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5029 			CNTR_NORMAL,
5030 			access_sdma_desc_table_unc_err_cnt),
5031 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5032 			CNTR_NORMAL,
5033 			access_sdma_timeout_err_cnt),
5034 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5035 			CNTR_NORMAL,
5036 			access_sdma_header_length_err_cnt),
5037 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5038 			CNTR_NORMAL,
5039 			access_sdma_header_address_err_cnt),
5040 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5041 			CNTR_NORMAL,
5042 			access_sdma_header_select_err_cnt),
5043 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5044 			CNTR_NORMAL,
5045 			access_sdma_reserved_9_err_cnt),
5046 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5047 			CNTR_NORMAL,
5048 			access_sdma_packet_desc_overflow_err_cnt),
5049 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5050 			CNTR_NORMAL,
5051 			access_sdma_length_mismatch_err_cnt),
5052 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5053 			CNTR_NORMAL,
5054 			access_sdma_halt_err_cnt),
5055 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5056 			CNTR_NORMAL,
5057 			access_sdma_mem_read_err_cnt),
5058 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5059 			CNTR_NORMAL,
5060 			access_sdma_first_desc_err_cnt),
5061 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5062 			CNTR_NORMAL,
5063 			access_sdma_tail_out_of_bounds_err_cnt),
5064 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5065 			CNTR_NORMAL,
5066 			access_sdma_too_long_err_cnt),
5067 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5068 			CNTR_NORMAL,
5069 			access_sdma_gen_mismatch_err_cnt),
5070 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5071 			CNTR_NORMAL,
5072 			access_sdma_wrong_dw_err_cnt),
5073 };
5074 
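/*
 * Per-port counter table, indexed by the C_* port counter enums:
 * TXE/RXE hardware counters, software link and discard counters,
 * the SW_IBP_CNTR() protocol counters, and the numbered OVR_ELM()
 * entries.
 */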
5075 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5076 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5077 			CNTR_NORMAL),
5078 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5079 			CNTR_NORMAL),
5080 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5081 			CNTR_NORMAL),
5082 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5083 			CNTR_NORMAL),
5084 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5085 			CNTR_NORMAL),
5086 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5087 			CNTR_NORMAL),
5088 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5089 			CNTR_NORMAL),
5090 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5091 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5092 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5093 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5094 				      CNTR_SYNTH | CNTR_VL),
5095 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5096 				     CNTR_SYNTH | CNTR_VL),
5097 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5098 				      CNTR_SYNTH | CNTR_VL),
5099 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5100 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5101 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5102 			     access_sw_link_dn_cnt),
5103 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5104 			   access_sw_link_up_cnt),
5105 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5106 				 access_sw_unknown_frame_cnt),
5107 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5108 			     access_sw_xmit_discards),
5109 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5110 				CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5111 				access_sw_xmit_discards),
5112 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5113 				 access_xmit_constraint_errs),
5114 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5115 				access_rcv_constraint_errs),
5116 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5117 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5118 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5119 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5120 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5121 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5122 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5123 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5124 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5125 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5126 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5127 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5128 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5129 			       access_sw_cpu_rc_acks),
5130 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5131 				access_sw_cpu_rc_qacks),
5132 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5133 				       access_sw_cpu_rc_delayed_comp),
5134 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5135 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5136 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5137 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5138 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5139 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5140 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5141 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5142 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5143 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5144 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5145 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5146 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5147 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5148 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5149 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5150 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5151 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5152 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5153 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5154 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5155 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5156 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5157 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5158 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5159 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5160 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5161 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5162 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5163 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5164 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5165 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5166 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5167 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5168 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5169 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5170 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5171 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5172 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5173 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5174 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5175 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5176 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5177 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5178 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5179 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5180 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5181 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5182 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5183 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5184 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5185 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5186 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5187 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5188 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5189 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5190 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5191 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5192 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5193 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5194 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5195 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5196 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5197 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5198 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5199 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5200 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5201 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5202 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5203 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5204 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5205 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5206 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5207 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5208 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5209 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5210 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5211 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5212 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5213 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5214 };
5215 
5216 /* ======================================================================== */
5217 
5218 /* return true if this is chip revision A */
5219 int is_ax(struct hfi1_devdata *dd)
5220 {
5221 	u8 chip_rev_minor =
5222 		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5223 			& CCE_REVISION_CHIP_REV_MINOR_MASK;
5224 	return (chip_rev_minor & 0xf0) == 0;
5225 }
5226 
5227 /* return true if this is chip revision B */
5228 int is_bx(struct hfi1_devdata *dd)
5229 {
5230 	u8 chip_rev_minor =
5231 		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5232 			& CCE_REVISION_CHIP_REV_MINOR_MASK;
5233 	return (chip_rev_minor & 0xF0) == 0x10;
5234 }
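/*
 * Editor's note (assumption, inferred from the masks above): the minor
 * revision field appears to encode the silicon step in its upper nibble,
 * so 0x0n reads as an A-step and 0x1n as a B-step, which is why is_ax()
 * and is_bx() compare only the 0xf0 bits.
 */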
5235 
5236 /*
5237  * Append string s to buffer buf.  Arguments curp and len are the current
5238  * position and remaining length, respectively.
5239  *
5240  * return 0 on success, 1 on out of room
5241  */
5242 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5243 {
5244 	char *p = *curp;
5245 	int len = *lenp;
5246 	int result = 0; /* success */
5247 	char c;
5248 
5249 	/* add a comma, if this is not the first string in the buffer */
5250 	if (p != buf) {
5251 		if (len == 0) {
5252 			result = 1; /* out of room */
5253 			goto done;
5254 		}
5255 		*p++ = ',';
5256 		len--;
5257 	}
5258 
5259 	/* copy the string */
5260 	while ((c = *s++) != 0) {
5261 		if (len == 0) {
5262 			result = 1; /* out of room */
5263 			goto done;
5264 		}
5265 		*p++ = c;
5266 		len--;
5267 	}
5268 
5269 done:
5270 	/* write return values */
5271 	*curp = p;
5272 	*lenp = len;
5273 
5274 	return result;
5275 }
5276 
5277 /*
5278  * Using the given flag table, print a comma separated string into
5279  * the buffer.  End in '*' if the buffer is too short.
5280  */
5281 static char *flag_string(char *buf, int buf_len, u64 flags,
5282 			 struct flag_table *table, int table_size)
5283 {
5284 	char extra[32];
5285 	char *p = buf;
5286 	int len = buf_len;
5287 	int no_room = 0;
5288 	int i;
5289 
5290 	/* make sure there are at least 2 bytes so we can form "*" */
5291 	if (len < 2)
5292 		return "";
5293 
5294 	len--;	/* leave room for a nul */
5295 	for (i = 0; i < table_size; i++) {
5296 		if (flags & table[i].flag) {
5297 			no_room = append_str(buf, &p, &len, table[i].str);
5298 			if (no_room)
5299 				break;
5300 			flags &= ~table[i].flag;
5301 		}
5302 	}
5303 
5304 	/* any undocumented bits left? */
5305 	if (!no_room && flags) {
5306 		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5307 		no_room = append_str(buf, &p, &len, extra);
5308 	}
5309 
5310 	/* add * if ran out of room */
5311 	if (no_room) {
5312 		/* may need to back up to add space for a '*' */
5313 		if (len == 0)
5314 			--p;
5315 		*p++ = '*';
5316 	}
5317 
5318 	/* add final nul - space already allocated above */
5319 	*p = 0;
5320 	return buf;
5321 }
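/*
 * Illustrative example (not from the original source): with a two-entry
 * table of { 0x1, "FlagA" } and { 0x2, "FlagB" }, a flags value of 0x7
 * renders as "FlagA,FlagB,bits 0x4"; if the buffer runs out of room the
 * output is truncated and ends in '*'.
 */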
5322 
5323 /* first 8 CCE error interrupt source names */
5324 static const char * const cce_misc_names[] = {
5325 	"CceErrInt",		/* 0 */
5326 	"RxeErrInt",		/* 1 */
5327 	"MiscErrInt",		/* 2 */
5328 	"Reserved3",		/* 3 */
5329 	"PioErrInt",		/* 4 */
5330 	"SDmaErrInt",		/* 5 */
5331 	"EgressErrInt",		/* 6 */
5332 	"TxeErrInt"		/* 7 */
5333 };
5334 
5335 /*
5336  * Return the miscellaneous error interrupt name.
5337  */
5338 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5339 {
5340 	if (source < ARRAY_SIZE(cce_misc_names))
5341 		strncpy(buf, cce_misc_names[source], bsize);
5342 	else
5343 		snprintf(buf, bsize, "Reserved%u",
5344 			 source + IS_GENERAL_ERR_START);
5345 
5346 	return buf;
5347 }
5348 
5349 /*
5350  * Return the SDMA engine error interrupt name.
5351  */
5352 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5353 {
5354 	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5355 	return buf;
5356 }
5357 
5358 /*
5359  * Return the send context error interrupt name.
5360  */
5361 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5362 {
5363 	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5364 	return buf;
5365 }
5366 
5367 static const char * const various_names[] = {
5368 	"PbcInt",
5369 	"GpioAssertInt",
5370 	"Qsfp1Int",
5371 	"Qsfp2Int",
5372 	"TCritInt"
5373 };
5374 
5375 /*
5376  * Return the various interrupt name.
5377  */
5378 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5379 {
5380 	if (source < ARRAY_SIZE(various_names))
5381 		strncpy(buf, various_names[source], bsize);
5382 	else
5383 		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5384 	return buf;
5385 }
5386 
5387 /*
5388  * Return the DC interrupt name.
5389  */
5390 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5391 {
5392 	static const char * const dc_int_names[] = {
5393 		"common",
5394 		"lcb",
5395 		"8051",
5396 		"lbm"	/* local block merge */
5397 	};
5398 
5399 	if (source < ARRAY_SIZE(dc_int_names))
5400 		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5401 	else
5402 		snprintf(buf, bsize, "DCInt%u", source);
5403 	return buf;
5404 }
5405 
5406 static const char * const sdma_int_names[] = {
5407 	"SDmaInt",
5408 	"SdmaIdleInt",
5409 	"SdmaProgressInt",
5410 };
5411 
5412 /*
5413  * Return the SDMA engine interrupt name.
5414  */
5415 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5416 {
5417 	/* what interrupt */
5418 	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5419 	/* which engine */
5420 	unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5421 
5422 	if (likely(what < 3))
5423 		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5424 	else
5425 		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5426 	return buf;
5427 }
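/*
 * Worked example of the decoding above (assuming TXE_NUM_SDMA_ENGINES is
 * 16): source 0 yields "SDmaInt0", source 16 yields "SdmaIdleInt0", and
 * source 33 yields "SdmaProgressInt1".
 */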
5428 
5429 /*
5430  * Return the receive available interrupt name.
5431  */
5432 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5433 {
5434 	snprintf(buf, bsize, "RcvAvailInt%u", source);
5435 	return buf;
5436 }
5437 
5438 /*
5439  * Return the receive urgent interrupt name.
5440  */
5441 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5442 {
5443 	snprintf(buf, bsize, "RcvUrgentInt%u", source);
5444 	return buf;
5445 }
5446 
5447 /*
5448  * Return the send credit interrupt name.
5449  */
5450 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5451 {
5452 	snprintf(buf, bsize, "SendCreditInt%u", source);
5453 	return buf;
5454 }
5455 
5456 /*
5457  * Return the reserved interrupt name.
5458  */
5459 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5460 {
5461 	snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5462 	return buf;
5463 }
5464 
5465 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5466 {
5467 	return flag_string(buf, buf_len, flags,
5468 			   cce_err_status_flags,
5469 			   ARRAY_SIZE(cce_err_status_flags));
5470 }
5471 
5472 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5473 {
5474 	return flag_string(buf, buf_len, flags,
5475 			   rxe_err_status_flags,
5476 			   ARRAY_SIZE(rxe_err_status_flags));
5477 }
5478 
5479 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5480 {
5481 	return flag_string(buf, buf_len, flags, misc_err_status_flags,
5482 			   ARRAY_SIZE(misc_err_status_flags));
5483 }
5484 
5485 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5486 {
5487 	return flag_string(buf, buf_len, flags,
5488 			   pio_err_status_flags,
5489 			   ARRAY_SIZE(pio_err_status_flags));
5490 }
5491 
5492 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5493 {
5494 	return flag_string(buf, buf_len, flags,
5495 			   sdma_err_status_flags,
5496 			   ARRAY_SIZE(sdma_err_status_flags));
5497 }
5498 
5499 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5500 {
5501 	return flag_string(buf, buf_len, flags,
5502 			   egress_err_status_flags,
5503 			   ARRAY_SIZE(egress_err_status_flags));
5504 }
5505 
5506 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5507 {
5508 	return flag_string(buf, buf_len, flags,
5509 			   egress_err_info_flags,
5510 			   ARRAY_SIZE(egress_err_info_flags));
5511 }
5512 
5513 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5514 {
5515 	return flag_string(buf, buf_len, flags,
5516 			   send_err_status_flags,
5517 			   ARRAY_SIZE(send_err_status_flags));
5518 }
5519 
5520 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5521 {
5522 	char buf[96];
5523 	int i = 0;
5524 
5525 	/*
5526 	 * For most of these errors, there is nothing that can be done except
5527 	 * report or record it.
5528 	 */
5529 	dd_dev_info(dd, "CCE Error: %s\n",
5530 		    cce_err_status_string(buf, sizeof(buf), reg));
5531 
5532 	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5533 	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5534 		/* this error requires a manual drop into SPC freeze mode */
5535 		/* then a fix up */
5536 		start_freeze_handling(dd->pport, FREEZE_SELF);
5537 	}
5538 
5539 	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5540 		if (reg & (1ull << i)) {
5541 			incr_cntr64(&dd->cce_err_status_cnt[i]);
5542 			/* maintain a counter over all cce_err_status errors */
5543 			incr_cntr64(&dd->sw_cce_err_status_aggregate);
5544 		}
5545 	}
5546 }
5547 
5548 /*
5549  * Check counters for receive errors that do not have an interrupt
5550  * associated with them.
5551  */
5552 #define RCVERR_CHECK_TIME 10
5553 static void update_rcverr_timer(unsigned long opaque)
5554 {
5555 	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5556 	struct hfi1_pportdata *ppd = dd->pport;
5557 	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5558 
5559 	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5560 	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5561 		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5562 		set_link_down_reason(
5563 		ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5564 		OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5565 		queue_work(ppd->link_wq, &ppd->link_bounce_work);
5566 	}
5567 	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5568 
5569 	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5570 }
5571 
5572 static int init_rcverr(struct hfi1_devdata *dd)
5573 {
5574 	setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5575 	/* Assume the hardware counter has been reset */
5576 	dd->rcv_ovfl_cnt = 0;
5577 	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5578 }
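/*
 * Editor's note: with RCVERR_CHECK_TIME of 10, the timer above re-arms
 * itself and samples the receive overflow counter roughly every 10 seconds
 * (jiffies + HZ * RCVERR_CHECK_TIME).
 */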
5579 
5580 static void free_rcverr(struct hfi1_devdata *dd)
5581 {
5582 	if (dd->rcverr_timer.data)
5583 		del_timer_sync(&dd->rcverr_timer);
5584 	dd->rcverr_timer.data = 0;
5585 }
5586 
5587 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5588 {
5589 	char buf[96];
5590 	int i = 0;
5591 
5592 	dd_dev_info(dd, "Receive Error: %s\n",
5593 		    rxe_err_status_string(buf, sizeof(buf), reg));
5594 
5595 	if (reg & ALL_RXE_FREEZE_ERR) {
5596 		int flags = 0;
5597 
5598 		/*
5599 		 * Freeze mode recovery is disabled for the errors
5600 		 * in RXE_FREEZE_ABORT_MASK
5601 		 */
5602 		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5603 			flags = FREEZE_ABORT;
5604 
5605 		start_freeze_handling(dd->pport, flags);
5606 	}
5607 
5608 	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5609 		if (reg & (1ull << i))
5610 			incr_cntr64(&dd->rcv_err_status_cnt[i]);
5611 	}
5612 }
5613 
5614 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5615 {
5616 	char buf[96];
5617 	int i = 0;
5618 
5619 	dd_dev_info(dd, "Misc Error: %s",
5620 		    misc_err_status_string(buf, sizeof(buf), reg));
5621 	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5622 		if (reg & (1ull << i))
5623 			incr_cntr64(&dd->misc_err_status_cnt[i]);
5624 	}
5625 }
5626 
5627 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5628 {
5629 	char buf[96];
5630 	int i = 0;
5631 
5632 	dd_dev_info(dd, "PIO Error: %s\n",
5633 		    pio_err_status_string(buf, sizeof(buf), reg));
5634 
5635 	if (reg & ALL_PIO_FREEZE_ERR)
5636 		start_freeze_handling(dd->pport, 0);
5637 
5638 	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5639 		if (reg & (1ull << i))
5640 			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5641 	}
5642 }
5643 
5644 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5645 {
5646 	char buf[96];
5647 	int i = 0;
5648 
5649 	dd_dev_info(dd, "SDMA Error: %s\n",
5650 		    sdma_err_status_string(buf, sizeof(buf), reg));
5651 
5652 	if (reg & ALL_SDMA_FREEZE_ERR)
5653 		start_freeze_handling(dd->pport, 0);
5654 
5655 	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5656 		if (reg & (1ull << i))
5657 			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5658 	}
5659 }
5660 
5661 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5662 {
5663 	incr_cntr64(&ppd->port_xmit_discards);
5664 }
5665 
5666 static void count_port_inactive(struct hfi1_devdata *dd)
5667 {
5668 	__count_port_discards(dd->pport);
5669 }
5670 
5671 /*
5672  * We have had a "disallowed packet" error during egress. Determine the
5673  * integrity check which failed, and update relevant error counter, etc.
5674  *
5675  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5676  * bit of state per integrity check, and so we can miss the reason for an
5677  * egress error if more than one packet fails the same integrity check
5678  * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5679  */
5680 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5681 					int vl)
5682 {
5683 	struct hfi1_pportdata *ppd = dd->pport;
5684 	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5685 	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5686 	char buf[96];
5687 
5688 	/* clear down all observed info as quickly as possible after read */
5689 	write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5690 
5691 	dd_dev_info(dd,
5692 		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5693 		    info, egress_err_info_string(buf, sizeof(buf), info), src);
5694 
5695 	/* Eventually add other counters for each bit */
5696 	if (info & PORT_DISCARD_EGRESS_ERRS) {
5697 		int weight, i;
5698 
5699 		/*
5700 		 * Count all applicable bits as individual errors and
5701 		 * attribute them to the packet that triggered this handler.
5702 		 * This may not be completely accurate due to limitations
5703 		 * on the available hardware error information.  There is
5704 		 * a single information register and any number of error
5705 		 * packets may have occurred and contributed to it before
5706 		 * this routine is called.  This means that:
5707 		 * a) If multiple packets with the same error occur before
5708 		 *    this routine is called, earlier packets are missed.
5709 		 *    There is only a single bit for each error type.
5710 		 * b) Errors may not be attributed to the correct VL.
5711 		 *    The driver is attributing all bits in the info register
5712 		 *    to the packet that triggered this call, but bits
5713 		 *    could be an accumulation of different packets with
5714 		 *    different VLs.
5715 		 * c) A single error packet may have multiple counts attached
5716 		 *    to it.  There is no way for the driver to know if
5717 		 *    multiple bits set in the info register are due to a
5718 		 *    single packet or multiple packets.  The driver assumes
5719 		 *    multiple packets.
5720 		 */
5721 		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5722 		for (i = 0; i < weight; i++) {
5723 			__count_port_discards(ppd);
5724 			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5725 				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5726 			else if (vl == 15)
5727 				incr_cntr64(&ppd->port_xmit_discards_vl
5728 					    [C_VL_15]);
5729 		}
5730 	}
5731 }
5732 
5733 /*
5734  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5735  * register. Does it represent a 'port inactive' error?
5736  */
5737 static inline int port_inactive_err(u64 posn)
5738 {
5739 	return (posn >= SEES(TX_LINKDOWN) &&
5740 		posn <= SEES(TX_INCORRECT_LINK_STATE));
5741 }
5742 
5743 /*
5744  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5745  * register. Does it represent a 'disallowed packet' error?
5746  */
5747 static inline int disallowed_pkt_err(int posn)
5748 {
5749 	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5750 		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5751 }
5752 
5753 /*
5754  * Input value is a bit position of one of the SDMA engine disallowed
5755  * packet errors.  Return which engine.  Use of this must be guarded by
5756  * disallowed_pkt_err().
5757  */
5758 static inline int disallowed_pkt_engine(int posn)
5759 {
5760 	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5761 }
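/*
 * Worked example (assuming the per-engine bits are contiguous, as the
 * range check above implies): SEES(TX_SDMA0_DISALLOWED_PACKET) maps to
 * engine 0 and SEES(TX_SDMA15_DISALLOWED_PACKET) maps to engine 15.
 */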
5762 
5763 /*
5764  * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5765  * be done.
5766  */
5767 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5768 {
5769 	struct sdma_vl_map *m;
5770 	int vl;
5771 
5772 	/* range check */
5773 	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5774 		return -1;
5775 
5776 	rcu_read_lock();
5777 	m = rcu_dereference(dd->sdma_map);
5778 	vl = m->engine_to_vl[engine];
5779 	rcu_read_unlock();
5780 
5781 	return vl;
5782 }
5783 
5784 /*
5785  * Translate the send context (software index) into a VL.  Return -1 if the
5786  * translation cannot be done.
5787  */
5788 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5789 {
5790 	struct send_context_info *sci;
5791 	struct send_context *sc;
5792 	int i;
5793 
5794 	sci = &dd->send_contexts[sw_index];
5795 
5796 	/* there is no information for user (PSM) and ack contexts */
5797 	if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5798 		return -1;
5799 
5800 	sc = sci->sc;
5801 	if (!sc)
5802 		return -1;
5803 	if (dd->vld[15].sc == sc)
5804 		return 15;
5805 	for (i = 0; i < num_vls; i++)
5806 		if (dd->vld[i].sc == sc)
5807 			return i;
5808 
5809 	return -1;
5810 }
5811 
5812 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5813 {
5814 	u64 reg_copy = reg, handled = 0;
5815 	char buf[96];
5816 	int i = 0;
5817 
5818 	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5819 		start_freeze_handling(dd->pport, 0);
5820 	else if (is_ax(dd) &&
5821 		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5822 		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5823 		start_freeze_handling(dd->pport, 0);
5824 
5825 	while (reg_copy) {
5826 		int posn = fls64(reg_copy);
5827 		/* fls64() returns a 1-based offset, we want it zero based */
5828 		int shift = posn - 1;
5829 		u64 mask = 1ULL << shift;
5830 
5831 		if (port_inactive_err(shift)) {
5832 			count_port_inactive(dd);
5833 			handled |= mask;
5834 		} else if (disallowed_pkt_err(shift)) {
5835 			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5836 
5837 			handle_send_egress_err_info(dd, vl);
5838 			handled |= mask;
5839 		}
5840 		reg_copy &= ~mask;
5841 	}
5842 
5843 	reg &= ~handled;
5844 
5845 	if (reg)
5846 		dd_dev_info(dd, "Egress Error: %s\n",
5847 			    egress_err_status_string(buf, sizeof(buf), reg));
5848 
5849 	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5850 		if (reg & (1ull << i))
5851 			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5852 	}
5853 }
5854 
5855 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5856 {
5857 	char buf[96];
5858 	int i = 0;
5859 
5860 	dd_dev_info(dd, "Send Error: %s\n",
5861 		    send_err_status_string(buf, sizeof(buf), reg));
5862 
5863 	for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5864 		if (reg & (1ull << i))
5865 			incr_cntr64(&dd->send_err_status_cnt[i]);
5866 	}
5867 }
5868 
5869 /*
5870  * The maximum number of times the error clear down will loop before
5871  * blocking a repeating error.  This value is arbitrary.
5872  */
5873 #define MAX_CLEAR_COUNT 20
5874 
5875 /*
5876  * Clear and handle an error register.  All error interrupts are funneled
5877  * through here to have a central location to correctly handle single-
5878  * or multi-shot errors.
5879  *
5880  * For non per-context registers, call this routine with a context value
5881  * of 0 so the per-context offset is zero.
5882  *
5883  * If the handler loops too many times, assume that something is wrong
5884  * and can't be fixed, so mask the error bits.
5885  */
5886 static void interrupt_clear_down(struct hfi1_devdata *dd,
5887 				 u32 context,
5888 				 const struct err_reg_info *eri)
5889 {
5890 	u64 reg;
5891 	u32 count;
5892 
5893 	/* read in a loop until no more errors are seen */
5894 	count = 0;
5895 	while (1) {
5896 		reg = read_kctxt_csr(dd, context, eri->status);
5897 		if (reg == 0)
5898 			break;
5899 		write_kctxt_csr(dd, context, eri->clear, reg);
5900 		if (likely(eri->handler))
5901 			eri->handler(dd, context, reg);
5902 		count++;
5903 		if (count > MAX_CLEAR_COUNT) {
5904 			u64 mask;
5905 
5906 			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5907 				   eri->desc, reg);
5908 			/*
5909 			 * Read-modify-write so any other masked bits
5910 			 * remain masked.
5911 			 */
5912 			mask = read_kctxt_csr(dd, context, eri->mask);
5913 			mask &= ~reg;
5914 			write_kctxt_csr(dd, context, eri->mask, mask);
5915 			break;
5916 		}
5917 	}
5918 }
5919 
5920 /*
5921  * CCE block "misc" interrupt.  Source is < 16.
5922  */
5923 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5924 {
5925 	const struct err_reg_info *eri = &misc_errs[source];
5926 
5927 	if (eri->handler) {
5928 		interrupt_clear_down(dd, 0, eri);
5929 	} else {
5930 		dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5931 			   source);
5932 	}
5933 }
5934 
5935 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5936 {
5937 	return flag_string(buf, buf_len, flags,
5938 			   sc_err_status_flags,
5939 			   ARRAY_SIZE(sc_err_status_flags));
5940 }
5941 
5942 /*
5943  * Send context error interrupt.  Source (hw_context) is < 160.
5944  *
5945  * All send context errors cause the send context to halt.  The normal
5946  * clear-down mechanism cannot be used because we cannot clear the
5947  * error bits until several other long-running items are done first.
5948  * This is OK because with the context halted, nothing else is going
5949  * to happen on it anyway.
5950  */
5951 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5952 				unsigned int hw_context)
5953 {
5954 	struct send_context_info *sci;
5955 	struct send_context *sc;
5956 	char flags[96];
5957 	u64 status;
5958 	u32 sw_index;
5959 	int i = 0;
5960 	unsigned long irq_flags;
5961 
5962 	sw_index = dd->hw_to_sw[hw_context];
5963 	if (sw_index >= dd->num_send_contexts) {
5964 		dd_dev_err(dd,
5965 			   "out of range sw index %u for send context %u\n",
5966 			   sw_index, hw_context);
5967 		return;
5968 	}
5969 	sci = &dd->send_contexts[sw_index];
5970 	spin_lock_irqsave(&dd->sc_lock, irq_flags);
5971 	sc = sci->sc;
5972 	if (!sc) {
5973 		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5974 			   sw_index, hw_context);
5975 		spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5976 		return;
5977 	}
5978 
5979 	/* tell the software that a halt has begun */
5980 	sc_stop(sc, SCF_HALTED);
5981 
5982 	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5983 
5984 	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5985 		    send_context_err_status_string(flags, sizeof(flags),
5986 						   status));
5987 
5988 	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5989 		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5990 
5991 	/*
5992 	 * Automatically restart halted kernel contexts out of interrupt
5993 	 * context.  User contexts must ask the driver to restart the context.
5994 	 */
5995 	if (sc->type != SC_USER)
5996 		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5997 	spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5998 
5999 	/*
6000 	 * Update the counters for the corresponding status bits.
6001 	 * Note that these particular counters are aggregated over all
6002 	 * 160 contexts.
6003 	 */
6004 	for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
6005 		if (status & (1ull << i))
6006 			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6007 	}
6008 }
6009 
6010 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6011 				unsigned int source, u64 status)
6012 {
6013 	struct sdma_engine *sde;
6014 	int i = 0;
6015 
6016 	sde = &dd->per_sdma[source];
6017 #ifdef CONFIG_SDMA_VERBOSITY
6018 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6019 		   slashstrip(__FILE__), __LINE__, __func__);
6020 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6021 		   sde->this_idx, source, (unsigned long long)status);
6022 #endif
6023 	sde->err_cnt++;
6024 	sdma_engine_error(sde, status);
6025 
6026 	/*
6027 	 * Update the counters for the corresponding status bits.
6028 	 * Note that these particular counters are aggregated over
6029 	 * all 16 DMA engines.
6030 	 */
6031 	for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6032 		if (status & (1ull << i))
6033 			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6034 	}
6035 }
6036 
6037 /*
6038  * CCE block SDMA error interrupt.  Source is < 16.
6039  */
6040 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6041 {
6042 #ifdef CONFIG_SDMA_VERBOSITY
6043 	struct sdma_engine *sde = &dd->per_sdma[source];
6044 
6045 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6046 		   slashstrip(__FILE__), __LINE__, __func__);
6047 	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6048 		   source);
6049 	sdma_dumpstate(sde);
6050 #endif
6051 	interrupt_clear_down(dd, source, &sdma_eng_err);
6052 }
6053 
6054 /*
6055  * CCE block "various" interrupt.  Source is < 8.
6056  */
6057 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6058 {
6059 	const struct err_reg_info *eri = &various_err[source];
6060 
6061 	/*
6062 	 * TCritInt cannot go through interrupt_clear_down()
6063 	 * because it is not a second tier interrupt. The handler
6064 	 * should be called directly.
6065 	 */
6066 	if (source == TCRIT_INT_SOURCE)
6067 		handle_temp_err(dd);
6068 	else if (eri->handler)
6069 		interrupt_clear_down(dd, 0, eri);
6070 	else
6071 		dd_dev_info(dd,
6072 			    "%s: Unimplemented/reserved interrupt %d\n",
6073 			    __func__, source);
6074 }
6075 
6076 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6077 {
6078 	/* src_ctx is always zero */
6079 	struct hfi1_pportdata *ppd = dd->pport;
6080 	unsigned long flags;
6081 	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6082 
6083 	if (reg & QSFP_HFI0_MODPRST_N) {
6084 		if (!qsfp_mod_present(ppd)) {
6085 			dd_dev_info(dd, "%s: QSFP module removed\n",
6086 				    __func__);
6087 
6088 			ppd->driver_link_ready = 0;
6089 			/*
6090 			 * Cable removed, reset all our information about the
6091 			 * cache and cable capabilities
6092 			 */
6093 
6094 			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6095 			/*
6096 			 * We don't set cache_refresh_required here as we expect
6097 			 * an interrupt when a cable is inserted
6098 			 */
6099 			ppd->qsfp_info.cache_valid = 0;
6100 			ppd->qsfp_info.reset_needed = 0;
6101 			ppd->qsfp_info.limiting_active = 0;
6102 			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6103 					       flags);
6104 			/* Invert the ModPresent pin now to detect plug-in */
6105 			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6106 				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6107 
6108 			if ((ppd->offline_disabled_reason >
6109 			  HFI1_ODR_MASK(
6110 			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6111 			  (ppd->offline_disabled_reason ==
6112 			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6113 				ppd->offline_disabled_reason =
6114 				HFI1_ODR_MASK(
6115 				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6116 
6117 			if (ppd->host_link_state == HLS_DN_POLL) {
6118 				/*
6119 				 * The link is still in POLL. This means
6120 				 * that the normal link down processing
6121 				 * will not happen. We have to do it here
6122 				 * before turning the DC off.
6123 				 */
6124 				queue_work(ppd->link_wq, &ppd->link_down_work);
6125 			}
6126 		} else {
6127 			dd_dev_info(dd, "%s: QSFP module inserted\n",
6128 				    __func__);
6129 
6130 			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6131 			ppd->qsfp_info.cache_valid = 0;
6132 			ppd->qsfp_info.cache_refresh_required = 1;
6133 			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6134 					       flags);
6135 
6136 			/*
6137 			 * Stop inversion of ModPresent pin to detect
6138 			 * removal of the cable
6139 			 */
6140 			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6141 			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6142 				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6143 
6144 			ppd->offline_disabled_reason =
6145 				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6146 		}
6147 	}
6148 
6149 	if (reg & QSFP_HFI0_INT_N) {
6150 		dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6151 			    __func__);
6152 		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6153 		ppd->qsfp_info.check_interrupt_flags = 1;
6154 		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6155 	}
6156 
6157 	/* Schedule the QSFP work only if there is a cable attached. */
6158 	if (qsfp_mod_present(ppd))
6159 		queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6160 }
6161 
6162 static int request_host_lcb_access(struct hfi1_devdata *dd)
6163 {
6164 	int ret;
6165 
6166 	ret = do_8051_command(dd, HCMD_MISC,
6167 			      (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6168 			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
6169 	if (ret != HCMD_SUCCESS) {
6170 		dd_dev_err(dd, "%s: command failed with error %d\n",
6171 			   __func__, ret);
6172 	}
6173 	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6174 }
6175 
6176 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6177 {
6178 	int ret;
6179 
6180 	ret = do_8051_command(dd, HCMD_MISC,
6181 			      (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6182 			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
6183 	if (ret != HCMD_SUCCESS) {
6184 		dd_dev_err(dd, "%s: command failed with error %d\n",
6185 			   __func__, ret);
6186 	}
6187 	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6188 }
6189 
6190 /*
6191  * Set the LCB selector - allow host access.  The DCC selector always
6192  * points to the host.
6193  */
6194 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6195 {
6196 	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6197 		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6198 		  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6199 }
6200 
6201 /*
6202  * Clear the LCB selector - allow 8051 access.  The DCC selector always
6203  * points to the host.
6204  */
6205 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6206 {
6207 	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6208 		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6209 }
6210 
6211 /*
6212  * Acquire LCB access from the 8051.  If the host already has access,
6213  * just increment a counter.  Otherwise, inform the 8051 that the
6214  * host is taking access.
6215  *
6216  * Returns:
6217  *	0 on success
6218  *	-EBUSY if the 8051 has control and cannot be disturbed
6219  *	-errno if unable to acquire access from the 8051
6220  */
6221 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6222 {
6223 	struct hfi1_pportdata *ppd = dd->pport;
6224 	int ret = 0;
6225 
6226 	/*
6227 	 * Use the host link state lock so the operation of this routine
6228 	 * { link state check, selector change, count increment } can occur
6229 	 * as a unit against a link state change.  Otherwise there is a
6230 	 * race between the state change and the count increment.
6231 	 */
6232 	if (sleep_ok) {
6233 		mutex_lock(&ppd->hls_lock);
6234 	} else {
6235 		while (!mutex_trylock(&ppd->hls_lock))
6236 			udelay(1);
6237 	}
6238 
6239 	/* this access is valid only when the link is up */
6240 	if (ppd->host_link_state & HLS_DOWN) {
6241 		dd_dev_info(dd, "%s: link state %s not up\n",
6242 			    __func__, link_state_name(ppd->host_link_state));
6243 		ret = -EBUSY;
6244 		goto done;
6245 	}
6246 
6247 	if (dd->lcb_access_count == 0) {
6248 		ret = request_host_lcb_access(dd);
6249 		if (ret) {
6250 			dd_dev_err(dd,
6251 				   "%s: unable to acquire LCB access, err %d\n",
6252 				   __func__, ret);
6253 			goto done;
6254 		}
6255 		set_host_lcb_access(dd);
6256 	}
6257 	dd->lcb_access_count++;
6258 done:
6259 	mutex_unlock(&ppd->hls_lock);
6260 	return ret;
6261 }
6262 
6263 /*
6264  * Release LCB access by decrementing the use count.  If the count is moving
6265  * from 1 to 0, inform 8051 that it has control back.
6266  *
6267  * Returns:
6268  *	0 on success
6269  *	-errno if unable to release access to the 8051
6270  */
6271 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6272 {
6273 	int ret = 0;
6274 
6275 	/*
6276 	 * Use the host link state lock because the acquire needed it.
6277 	 * Here, we only need to keep { selector change, count decrement }
6278 	 * as a unit.
6279 	 */
6280 	if (sleep_ok) {
6281 		mutex_lock(&dd->pport->hls_lock);
6282 	} else {
6283 		while (!mutex_trylock(&dd->pport->hls_lock))
6284 			udelay(1);
6285 	}
6286 
6287 	if (dd->lcb_access_count == 0) {
6288 		dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6289 			   __func__);
6290 		goto done;
6291 	}
6292 
6293 	if (dd->lcb_access_count == 1) {
6294 		set_8051_lcb_access(dd);
6295 		ret = request_8051_lcb_access(dd);
6296 		if (ret) {
6297 			dd_dev_err(dd,
6298 				   "%s: unable to release LCB access, err %d\n",
6299 				   __func__, ret);
6300 			/* restore host access if the grant didn't work */
6301 			set_host_lcb_access(dd);
6302 			goto done;
6303 		}
6304 	}
6305 	dd->lcb_access_count--;
6306 done:
6307 	mutex_unlock(&dd->pport->hls_lock);
6308 	return ret;
6309 }
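/*
 * Illustrative usage sketch (not lifted from a specific caller): code that
 * needs direct LCB CSR access while the link is up would typically do
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		... read/write LCB CSRs ...
 *		release_lcb_access(dd, 1);
 *	}
 *
 * Nested host-side acquires only bump lcb_access_count; control is handed
 * back to the 8051 when the last holder releases.
 */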
6310 
6311 /*
6312  * Initialize LCB access variables and state.  Called during driver load,
6313  * after most of the initialization is finished.
6314  *
6315  * The DC default is LCB access on for the host.  The driver defaults to
6316  * leaving access to the 8051.  Assign access now - this constrains the call
6317  * to this routine to be after all LCB set-up is done.  In particular, after
6318  * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6319  */
6320 static void init_lcb_access(struct hfi1_devdata *dd)
6321 {
6322 	dd->lcb_access_count = 0;
6323 }
6324 
6325 /*
6326  * Write a response back to a 8051 request.
6327  */
6328 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6329 {
6330 	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6331 		  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6332 		  (u64)return_code <<
6333 		  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6334 		  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6335 }
6336 
6337 /*
6338  * Handle host requests from the 8051.
6339  */
6340 static void handle_8051_request(struct hfi1_pportdata *ppd)
6341 {
6342 	struct hfi1_devdata *dd = ppd->dd;
6343 	u64 reg;
6344 	u16 data = 0;
6345 	u8 type;
6346 
6347 	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6348 	if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6349 		return;	/* no request */
6350 
6351 	/* zero out COMPLETED so the response is seen */
6352 	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6353 
6354 	/* extract request details */
6355 	type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6356 			& DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6357 	data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6358 			& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6359 
6360 	switch (type) {
6361 	case HREQ_LOAD_CONFIG:
6362 	case HREQ_SAVE_CONFIG:
6363 	case HREQ_READ_CONFIG:
6364 	case HREQ_SET_TX_EQ_ABS:
6365 	case HREQ_SET_TX_EQ_REL:
6366 	case HREQ_ENABLE:
6367 		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6368 			    type);
6369 		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6370 		break;
6371 	case HREQ_CONFIG_DONE:
6372 		hreq_response(dd, HREQ_SUCCESS, 0);
6373 		break;
6374 
6375 	case HREQ_INTERFACE_TEST:
6376 		hreq_response(dd, HREQ_SUCCESS, data);
6377 		break;
6378 	default:
6379 		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6380 		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6381 		break;
6382 	}
6383 }
6384 
6385 /*
6386  * Set up the allocation unit value.
6387  */
6388 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6389 {
6390 	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6391 
6392 	/* do not modify other values in the register */
6393 	reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6394 	reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6395 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6396 }
6397 
6398 /*
6399  * Set up initial VL15 credits of the remote.  Assumes the rest of
6400  * the CM credit registers are zero from a previous global or credit reset.
6401  * Shared limit for VL15 will always be 0.
6402  */
6403 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6404 {
6405 	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6406 
6407 	/* set initial values for total and shared credit limit */
6408 	reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6409 		 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6410 
6411 	/*
6412 	 * Set total limit to be equal to VL15 credits.
6413 	 * Leave shared limit at 0.
6414 	 */
6415 	reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6416 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6417 
6418 	write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6419 		  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6420 }
6421 
6422 /*
6423  * Zero all credit details from the previous connection and
6424  * reset the CM manager's internal counters.
6425  */
6426 void reset_link_credits(struct hfi1_devdata *dd)
6427 {
6428 	int i;
6429 
6430 	/* remove all previous VL credit limits */
6431 	for (i = 0; i < TXE_NUM_DATA_VL; i++)
6432 		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6433 	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6434 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6435 	/* reset the CM block */
6436 	pio_send_control(dd, PSC_CM_RESET);
6437 	/* reset cached value */
6438 	dd->vl15buf_cached = 0;
6439 }
6440 
6441 /* convert a vCU to a CU */
6442 static u32 vcu_to_cu(u8 vcu)
6443 {
6444 	return 1 << vcu;
6445 }
6446 
6447 /* convert a CU to a vCU */
6448 static u8 cu_to_vcu(u32 cu)
6449 {
6450 	return ilog2(cu);
6451 }
6452 
6453 /* convert a vAU to an AU */
6454 static u32 vau_to_au(u8 vau)
6455 {
6456 	return 8 * (1 << vau);
6457 }
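/*
 * Worked examples for the conversions above: vau_to_au(2) = 8 * (1 << 2)
 * = 32 bytes, vcu_to_cu(3) = 1 << 3 = 8, and cu_to_vcu(8) = ilog2(8) = 3.
 */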
6458 
6459 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6460 {
6461 	ppd->sm_trap_qp = 0x0;
6462 	ppd->sa_qp = 0x1;
6463 }
6464 
6465 /*
6466  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6467  */
6468 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6469 {
6470 	u64 reg;
6471 
6472 	/* clear lcb run: LCB_CFG_RUN.EN = 0 */
6473 	write_csr(dd, DC_LCB_CFG_RUN, 0);
6474 	/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6475 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6476 		  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6477 	/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6478 	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6479 	reg = read_csr(dd, DCC_CFG_RESET);
6480 	write_csr(dd, DCC_CFG_RESET, reg |
6481 		  (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6482 		  (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6483 	(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6484 	if (!abort) {
6485 		udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6486 		write_csr(dd, DCC_CFG_RESET, reg);
6487 		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6488 	}
6489 }
6490 
6491 /*
6492  * This routine should be called after the link has been transitioned to
6493  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6494  * reset).
6495  *
6496  * The expectation is that the caller of this routine would have taken
6497  * care of properly transitioning the link into the correct state.
6498  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6499  *       before calling this function.
6500  */
6501 static void _dc_shutdown(struct hfi1_devdata *dd)
6502 {
6503 	lockdep_assert_held(&dd->dc8051_lock);
6504 
6505 	if (dd->dc_shutdown)
6506 		return;
6507 
6508 	dd->dc_shutdown = 1;
6509 	/* Shutdown the LCB */
6510 	lcb_shutdown(dd, 1);
6511 	/*
6512 	 * Going to OFFLINE would have caused the 8051 to put the
6513 	 * SerDes into reset already.  Just need to shut down the 8051
6514 	 * itself.
6515 	 */
6516 	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6517 }
6518 
6519 static void dc_shutdown(struct hfi1_devdata *dd)
6520 {
6521 	mutex_lock(&dd->dc8051_lock);
6522 	_dc_shutdown(dd);
6523 	mutex_unlock(&dd->dc8051_lock);
6524 }
6525 
6526 /*
6527  * Calling this after the DC has been brought out of reset should not
6528  * do any damage.
6529  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6530  *       before calling this function.
6531  */
6532 static void _dc_start(struct hfi1_devdata *dd)
6533 {
6534 	lockdep_assert_held(&dd->dc8051_lock);
6535 
6536 	if (!dd->dc_shutdown)
6537 		return;
6538 
6539 	/* Take the 8051 out of reset */
6540 	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6541 	/* Wait until 8051 is ready */
6542 	if (wait_fm_ready(dd, TIMEOUT_8051_START))
6543 		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6544 			   __func__);
6545 
6546 	/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6547 	write_csr(dd, DCC_CFG_RESET, 0x10);
6548 	/* lcb_shutdown() with abort=1 does not restore these */
6549 	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6550 	dd->dc_shutdown = 0;
6551 }
6552 
6553 static void dc_start(struct hfi1_devdata *dd)
6554 {
6555 	mutex_lock(&dd->dc8051_lock);
6556 	_dc_start(dd);
6557 	mutex_unlock(&dd->dc8051_lock);
6558 }
6559 
6560 /*
6561  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6562  */
6563 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6564 {
6565 	u64 rx_radr, tx_radr;
6566 	u32 version;
6567 
6568 	if (dd->icode != ICODE_FPGA_EMULATION)
6569 		return;
6570 
6571 	/*
6572 	 * These LCB defaults on emulator _s are good, nothing to do here:
6573 	 *	LCB_CFG_TX_FIFOS_RADR
6574 	 *	LCB_CFG_RX_FIFOS_RADR
6575 	 *	LCB_CFG_LN_DCLK
6576 	 *	LCB_CFG_IGNORE_LOST_RCLK
6577 	 */
6578 	if (is_emulator_s(dd))
6579 		return;
6580 	/* else this is _p */
6581 
6582 	version = emulator_rev(dd);
6583 	if (!is_ax(dd))
6584 		version = 0x2d;	/* all B0 use 0x2d or higher settings */
6585 
6586 	if (version <= 0x12) {
6587 		/* release 0x12 and below */
6588 
6589 		/*
6590 		 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6591 		 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6592 		 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6593 		 */
6594 		rx_radr =
6595 		      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6596 		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6597 		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6598 		/*
6599 		 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6600 		 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6601 		 */
6602 		tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6603 	} else if (version <= 0x18) {
6604 		/* release 0x13 up to 0x18 */
6605 		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6606 		rx_radr =
6607 		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6608 		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6609 		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6610 		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6611 	} else if (version == 0x19) {
6612 		/* release 0x19 */
6613 		/* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6614 		rx_radr =
6615 		      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6616 		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6617 		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6618 		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6619 	} else if (version == 0x1a) {
6620 		/* release 0x1a */
6621 		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6622 		rx_radr =
6623 		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6624 		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6625 		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6626 		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6627 		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6628 	} else {
6629 		/* release 0x1b and higher */
6630 		/* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6631 		rx_radr =
6632 		      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6633 		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6634 		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6635 		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6636 	}
6637 
6638 	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6639 	/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6640 	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6641 		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6642 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6643 }
6644 
6645 /*
6646  * Handle a SMA idle message
6647  *
6648  * This is a work-queue function outside of the interrupt.
6649  */
6650 void handle_sma_message(struct work_struct *work)
6651 {
6652 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6653 							sma_message_work);
6654 	struct hfi1_devdata *dd = ppd->dd;
6655 	u64 msg;
6656 	int ret;
6657 
6658 	/*
6659 	 * msg is bytes 1-4 of the 40-bit idle message - the command code
6660 	 * is stripped off
6661 	 */
6662 	ret = read_idle_sma(dd, &msg);
6663 	if (ret)
6664 		return;
6665 	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6666 	/*
6667 	 * React to the SMA message.  Byte[1] (0 for us) is the command.
6668 	 */
6669 	switch (msg & 0xff) {
6670 	case SMA_IDLE_ARM:
6671 		/*
6672 		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6673 		 * State Transitions
6674 		 *
6675 		 * Only expected in INIT or ARMED, discard otherwise.
6676 		 */
6677 		if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6678 			ppd->neighbor_normal = 1;
6679 		break;
6680 	case SMA_IDLE_ACTIVE:
6681 		/*
6682 		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6683 		 * State Transitions
6684 		 *
6685 		 * Can activate the node.  Discard otherwise.
6686 		 */
6687 		if (ppd->host_link_state == HLS_UP_ARMED &&
6688 		    ppd->is_active_optimize_enabled) {
6689 			ppd->neighbor_normal = 1;
6690 			ret = set_link_state(ppd, HLS_UP_ACTIVE);
6691 			if (ret)
6692 				dd_dev_err(
6693 					dd,
6694 					"%s: received Active SMA idle message, couldn't set link to Active\n",
6695 					__func__);
6696 		}
6697 		break;
6698 	default:
6699 		dd_dev_err(dd,
6700 			   "%s: received unexpected SMA idle message 0x%llx\n",
6701 			   __func__, msg);
6702 		break;
6703 	}
6704 }
6705 
6706 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6707 {
6708 	u64 rcvctrl;
6709 	unsigned long flags;
6710 
6711 	spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6712 	rcvctrl = read_csr(dd, RCV_CTRL);
6713 	rcvctrl |= add;
6714 	rcvctrl &= ~clear;
6715 	write_csr(dd, RCV_CTRL, rcvctrl);
6716 	spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6717 }
6718 
6719 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6720 {
6721 	adjust_rcvctrl(dd, add, 0);
6722 }
6723 
6724 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6725 {
6726 	adjust_rcvctrl(dd, 0, clear);
6727 }
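
/*
 * add_rcvctrl() and clear_rcvctrl() are thin wrappers used throughout this
 * file: for example, rxe_freeze() below clears RCV_CTRL_RCV_PORT_ENABLE_SMASK
 * to disable the port and rxe_kernel_unfreeze() sets it again.  The
 * read-modify-write of RCV_CTRL is serialized by rcvctrl_lock in
 * adjust_rcvctrl().
 */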
6728 
6729 /*
6730  * Called from all interrupt handlers to start handling an SPC freeze.
6731  */
6732 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6733 {
6734 	struct hfi1_devdata *dd = ppd->dd;
6735 	struct send_context *sc;
6736 	int i;
6737 	int sc_flags;
6738 
6739 	if (flags & FREEZE_SELF)
6740 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6741 
6742 	/* enter frozen mode */
6743 	dd->flags |= HFI1_FROZEN;
6744 
6745 	/* notify all SDMA engines that they are going into a freeze */
6746 	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6747 
6748 	sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6749 					      SCF_LINK_DOWN : 0);
6750 	/* do halt pre-handling on all enabled send contexts */
6751 	for (i = 0; i < dd->num_send_contexts; i++) {
6752 		sc = dd->send_contexts[i].sc;
6753 		if (sc && (sc->flags & SCF_ENABLED))
6754 			sc_stop(sc, sc_flags);
6755 	}
6756 
6757 	/* Send contexts are frozen. Notify user space */
6758 	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6759 
6760 	if (flags & FREEZE_ABORT) {
6761 		dd_dev_err(dd,
6762 			   "Aborted freeze recovery. Please REBOOT system\n");
6763 		return;
6764 	}
6765 	/* queue non-interrupt handler */
6766 	queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6767 }
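
/*
 * The freeze_work queued above is the non-interrupt half of this sequence:
 * handle_freeze() below waits for the freeze indicators, runs the PIO, SDMA
 * and RXE freeze steps, unfreezes the SPC, and finally clears HFI1_FROZEN.
 */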
6768 
6769 /*
6770  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6771  * depending on the "freeze" parameter.
6772  *
6773  * No need to return an error if it times out; our only option
6774  * is to proceed anyway.
6775  */
6776 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6777 {
6778 	unsigned long timeout;
6779 	u64 reg;
6780 
6781 	timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6782 	while (1) {
6783 		reg = read_csr(dd, CCE_STATUS);
6784 		if (freeze) {
6785 			/* waiting until all indicators are set */
6786 			if ((reg & ALL_FROZE) == ALL_FROZE)
6787 				return;	/* all done */
6788 		} else {
6789 			/* waiting until all indicators are clear */
6790 			if ((reg & ALL_FROZE) == 0)
6791 				return; /* all done */
6792 		}
6793 
6794 		if (time_after(jiffies, timeout)) {
6795 			dd_dev_err(dd,
6796 				   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6797 				   freeze ? "" : "un", reg & ALL_FROZE,
6798 				   freeze ? ALL_FROZE : 0ull);
6799 			return;
6800 		}
6801 		usleep_range(80, 120);
6802 	}
6803 }
6804 
6805 /*
6806  * Do all freeze handling for the RXE block.
6807  */
6808 static void rxe_freeze(struct hfi1_devdata *dd)
6809 {
6810 	int i;
6811 	struct hfi1_ctxtdata *rcd;
6812 
6813 	/* disable port */
6814 	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6815 
6816 	/* disable all receive contexts */
6817 	for (i = 0; i < dd->num_rcv_contexts; i++) {
6818 		rcd = hfi1_rcd_get_by_index(dd, i);
6819 		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6820 		hfi1_rcd_put(rcd);
6821 	}
6822 }
6823 
6824 /*
6825  * Unfreeze handling for the RXE block - kernel contexts only.
6826  * This will also enable the port.  User contexts will do unfreeze
6827  * handling on a per-context basis as they call into the driver.
6828  *
6829  */
6830 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6831 {
6832 	u32 rcvmask;
6833 	u16 i;
6834 	struct hfi1_ctxtdata *rcd;
6835 
6836 	/* enable all kernel contexts */
6837 	for (i = 0; i < dd->num_rcv_contexts; i++) {
6838 		rcd = hfi1_rcd_get_by_index(dd, i);
6839 
6840 		/* Ensure all non-user contexts (including vnic) are enabled */
6841 		if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER)) {
6842 			hfi1_rcd_put(rcd);
6843 			continue;
6844 		}
6845 		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6846 		/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6847 		rcvmask |= rcd->rcvhdrtail_kvaddr ?
6848 			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6849 		hfi1_rcvctrl(dd, rcvmask, rcd);
6850 		hfi1_rcd_put(rcd);
6851 	}
6852 
6853 	/* enable port */
6854 	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6855 }
6856 
6857 /*
6858  * Non-interrupt SPC freeze handling.
6859  *
6860  * This is a work-queue function outside of the triggering interrupt.
6861  */
6862 void handle_freeze(struct work_struct *work)
6863 {
6864 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6865 								freeze_work);
6866 	struct hfi1_devdata *dd = ppd->dd;
6867 
6868 	/* wait for freeze indicators on all affected blocks */
6869 	wait_for_freeze_status(dd, 1);
6870 
6871 	/* SPC is now frozen */
6872 
6873 	/* do send PIO freeze steps */
6874 	pio_freeze(dd);
6875 
6876 	/* do send DMA freeze steps */
6877 	sdma_freeze(dd);
6878 
6879 	/* do send egress freeze steps - nothing to do */
6880 
6881 	/* do receive freeze steps */
6882 	rxe_freeze(dd);
6883 
6884 	/*
6885 	 * Unfreeze the hardware - clear the freeze, wait for each
6886 	 * block's frozen bit to clear, then clear the frozen flag.
6887 	 */
6888 	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6889 	wait_for_freeze_status(dd, 0);
6890 
6891 	if (is_ax(dd)) {
6892 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6893 		wait_for_freeze_status(dd, 1);
6894 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6895 		wait_for_freeze_status(dd, 0);
6896 	}
6897 
6898 	/* do send PIO unfreeze steps for kernel contexts */
6899 	pio_kernel_unfreeze(dd);
6900 
6901 	/* do send DMA unfreeze steps */
6902 	sdma_unfreeze(dd);
6903 
6904 	/* do send egress unfreeze steps - nothing to do */
6905 
6906 	/* do receive unfreeze steps for kernel contexts */
6907 	rxe_kernel_unfreeze(dd);
6908 
6909 	/*
6910 	 * The unfreeze procedure touches global device registers when
6911 	 * it disables and re-enables RXE. Mark the device unfrozen
6912 	 * after all that is done so other parts of the driver waiting
6913 	 * for the device to unfreeze don't do things out of order.
6914 	 *
6915 	 * The above implies that the meaning of HFI1_FROZEN flag is
6916 	 * "Device has gone into freeze mode and freeze mode handling
6917 	 * is still in progress."
6918 	 *
6919 	 * The flag will be removed when freeze mode processing has
6920 	 * completed.
6921 	 */
6922 	dd->flags &= ~HFI1_FROZEN;
6923 	wake_up(&dd->event_queue);
6924 
6925 	/* no longer frozen */
6926 }
6927 
6928 /*
6929  * Handle a link up interrupt from the 8051.
6930  *
6931  * This is a work-queue function outside of the interrupt.
6932  */
6933 void handle_link_up(struct work_struct *work)
6934 {
6935 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6936 						  link_up_work);
6937 	struct hfi1_devdata *dd = ppd->dd;
6938 
6939 	set_link_state(ppd, HLS_UP_INIT);
6940 
6941 	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6942 	read_ltp_rtt(dd);
6943 	/*
6944 	 * OPA specifies that certain counters are cleared on a transition
6945 	 * to link up, so do that.
6946 	 */
6947 	clear_linkup_counters(dd);
6948 	/*
6949 	 * And (re)set link up default values.
6950 	 */
6951 	set_linkup_defaults(ppd);
6952 
6953 	/*
6954 	 * Set VL15 credits. Use cached value from verify cap interrupt.
6955 	 * In case of quick linkup or simulator, vl15 value will be set by
6956 	 * handle_linkup_change. VerifyCap interrupt handler will not be
6957 	 * called in those scenarios.
6958 	 */
6959 	if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6960 		set_up_vl15(dd, dd->vl15buf_cached);
6961 
6962 	/* enforce link speed enabled */
6963 	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6964 		/* oops - current speed is not enabled, bounce */
6965 		dd_dev_err(dd,
6966 			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6967 			   ppd->link_speed_active, ppd->link_speed_enabled);
6968 		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6969 				     OPA_LINKDOWN_REASON_SPEED_POLICY);
6970 		set_link_state(ppd, HLS_DN_OFFLINE);
6971 		start_link(ppd);
6972 	}
6973 }
6974 
6975 /*
6976  * Several pieces of LNI information were cached for SMA in ppd.
6977  * Reset these on link down
6978  */
6979 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6980 {
6981 	ppd->neighbor_guid = 0;
6982 	ppd->neighbor_port_number = 0;
6983 	ppd->neighbor_type = 0;
6984 	ppd->neighbor_fm_security = 0;
6985 }
6986 
6987 static const char * const link_down_reason_strs[] = {
6988 	[OPA_LINKDOWN_REASON_NONE] = "None",
6989 	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6990 	[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6991 	[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6992 	[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6993 	[OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6994 	[OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6995 	[OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6996 	[OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6997 	[OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6998 	[OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6999 	[OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7000 	[OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7001 	[OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7002 	[OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7003 	[OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7004 	[OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7005 	[OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7006 	[OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7007 	[OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7008 	[OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7009 	[OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7010 	[OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7011 	[OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7012 	[OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7013 	[OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7014 	[OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7015 	[OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7016 	[OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7017 	[OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7018 	[OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7019 	[OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7020 	[OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7021 					"Excessive buffer overrun",
7022 	[OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7023 	[OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7024 	[OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7025 	[OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7026 	[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7027 	[OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7028 	[OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7029 	[OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7030 					"Local media not installed",
7031 	[OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7032 	[OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7033 	[OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7034 					"End to end not installed",
7035 	[OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7036 	[OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7037 	[OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7038 	[OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7039 	[OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7040 	[OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7041 };
7042 
7043 /* return the neighbor link down reason string */
7044 static const char *link_down_reason_str(u8 reason)
7045 {
7046 	const char *str = NULL;
7047 
7048 	if (reason < ARRAY_SIZE(link_down_reason_strs))
7049 		str = link_down_reason_strs[reason];
7050 	if (!str)
7051 		str = "(invalid)";
7052 
7053 	return str;
7054 }
7055 
7056 /*
7057  * Handle a link down interrupt from the 8051.
7058  *
7059  * This is a work-queue function outside of the interrupt.
7060  */
7061 void handle_link_down(struct work_struct *work)
7062 {
7063 	u8 lcl_reason, neigh_reason = 0;
7064 	u8 link_down_reason;
7065 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7066 						  link_down_work);
7067 	int was_up;
7068 	static const char ldr_str[] = "Link down reason: ";
7069 
7070 	if ((ppd->host_link_state &
7071 	     (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7072 	     ppd->port_type == PORT_TYPE_FIXED)
7073 		ppd->offline_disabled_reason =
7074 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7075 
7076 	/* Go offline first, then deal with reading/writing through 8051 */
7077 	was_up = !!(ppd->host_link_state & HLS_UP);
7078 	set_link_state(ppd, HLS_DN_OFFLINE);
7079 	xchg(&ppd->is_link_down_queued, 0);
7080 
7081 	if (was_up) {
7082 		lcl_reason = 0;
7083 		/* link down reason is only valid if the link was up */
7084 		read_link_down_reason(ppd->dd, &link_down_reason);
7085 		switch (link_down_reason) {
7086 		case LDR_LINK_TRANSFER_ACTIVE_LOW:
7087 			/* the link went down, no idle message reason */
7088 			dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7089 				    ldr_str);
7090 			break;
7091 		case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7092 			/*
7093 			 * The neighbor reason is only valid if an idle message
7094 			 * was received for it.
7095 			 */
7096 			read_planned_down_reason_code(ppd->dd, &neigh_reason);
7097 			dd_dev_info(ppd->dd,
7098 				    "%sNeighbor link down message %d, %s\n",
7099 				    ldr_str, neigh_reason,
7100 				    link_down_reason_str(neigh_reason));
7101 			break;
7102 		case LDR_RECEIVED_HOST_OFFLINE_REQ:
7103 			dd_dev_info(ppd->dd,
7104 				    "%sHost requested link to go offline\n",
7105 				    ldr_str);
7106 			break;
7107 		default:
7108 			dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7109 				    ldr_str, link_down_reason);
7110 			break;
7111 		}
7112 
7113 		/*
7114 		 * If no reason, assume peer-initiated but missed
7115 		 * LinkGoingDown idle flits.
7116 		 */
7117 		if (neigh_reason == 0)
7118 			lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7119 	} else {
7120 		/* went down while polling or going up */
7121 		lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7122 	}
7123 
7124 	set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7125 
7126 	/* inform the SMA when the link transitions from up to down */
7127 	if (was_up && ppd->local_link_down_reason.sma == 0 &&
7128 	    ppd->neigh_link_down_reason.sma == 0) {
7129 		ppd->local_link_down_reason.sma =
7130 					ppd->local_link_down_reason.latest;
7131 		ppd->neigh_link_down_reason.sma =
7132 					ppd->neigh_link_down_reason.latest;
7133 	}
7134 
7135 	reset_neighbor_info(ppd);
7136 
7137 	/* disable the port */
7138 	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7139 
7140 	/*
7141 	 * If there is no cable attached, turn the DC off. Otherwise,
7142 	 * start the link bring up.
7143 	 */
7144 	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7145 		dc_shutdown(ppd->dd);
7146 	else
7147 		start_link(ppd);
7148 }
7149 
7150 void handle_link_bounce(struct work_struct *work)
7151 {
7152 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7153 							link_bounce_work);
7154 
7155 	/*
7156 	 * Only do something if the link is currently up.
7157 	 */
7158 	if (ppd->host_link_state & HLS_UP) {
7159 		set_link_state(ppd, HLS_DN_OFFLINE);
7160 		start_link(ppd);
7161 	} else {
7162 		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7163 			    __func__, link_state_name(ppd->host_link_state));
7164 	}
7165 }
7166 
7167 /*
7168  * Mask conversion: Capability exchange to Port LTP.  The capability
7169  * exchange has an implicit 16b CRC that is mandatory.
7170  */
7171 static int cap_to_port_ltp(int cap)
7172 {
7173 	int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7174 
7175 	if (cap & CAP_CRC_14B)
7176 		port_ltp |= PORT_LTP_CRC_MODE_14;
7177 	if (cap & CAP_CRC_48B)
7178 		port_ltp |= PORT_LTP_CRC_MODE_48;
7179 	if (cap & CAP_CRC_12B_16B_PER_LANE)
7180 		port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7181 
7182 	return port_ltp;
7183 }
7184 
7185 /*
7186  * Convert an OPA Port LTP mask to capability mask
7187  */
7188 int port_ltp_to_cap(int port_ltp)
7189 {
7190 	int cap_mask = 0;
7191 
7192 	if (port_ltp & PORT_LTP_CRC_MODE_14)
7193 		cap_mask |= CAP_CRC_14B;
7194 	if (port_ltp & PORT_LTP_CRC_MODE_48)
7195 		cap_mask |= CAP_CRC_48B;
7196 	if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7197 		cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7198 
7199 	return cap_mask;
7200 }
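
/*
 * cap_to_port_ltp() always adds the mandatory 16b mode, while
 * port_ltp_to_cap() maps only the optional modes back, so a round trip
 * through both preserves just the three optional CRC capability bits.
 */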
7201 
7202 /*
7203  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7204  */
7205 static int lcb_to_port_ltp(int lcb_crc)
7206 {
7207 	int port_ltp = 0;
7208 
7209 	if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7210 		port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7211 	else if (lcb_crc == LCB_CRC_48B)
7212 		port_ltp = PORT_LTP_CRC_MODE_48;
7213 	else if (lcb_crc == LCB_CRC_14B)
7214 		port_ltp = PORT_LTP_CRC_MODE_14;
7215 	else
7216 		port_ltp = PORT_LTP_CRC_MODE_16;
7217 
7218 	return port_ltp;
7219 }
7220 
7221 /*
7222  * Our neighbor has indicated that we are allowed to act as a fabric
7223  * manager, so place the full management partition key in the second
7224  * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7225  * that we should already have the limited management partition key in
7226  * array element 1, and also that the port is not yet up when
7227  * add_full_mgmt_pkey() is invoked.
7228  */
7229 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7230 {
7231 	struct hfi1_devdata *dd = ppd->dd;
7232 
7233 	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7234 	if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7235 		dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7236 			    __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7237 	ppd->pkeys[2] = FULL_MGMT_P_KEY;
7238 	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7239 	hfi1_event_pkey_change(ppd->dd, ppd->port);
7240 }
7241 
7242 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7243 {
7244 	if (ppd->pkeys[2] != 0) {
7245 		ppd->pkeys[2] = 0;
7246 		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7247 		hfi1_event_pkey_change(ppd->dd, ppd->port);
7248 	}
7249 }
7250 
7251 /*
7252  * Convert the given link width to the OPA link width bitmask.
7253  */
7254 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7255 {
7256 	switch (width) {
7257 	case 0:
7258 		/*
7259 		 * Simulator and quick linkup do not set the width.
7260 		 * Just set it to 4x without complaint.
7261 		 */
7262 		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7263 			return OPA_LINK_WIDTH_4X;
7264 		return 0; /* no lanes up */
7265 	case 1: return OPA_LINK_WIDTH_1X;
7266 	case 2: return OPA_LINK_WIDTH_2X;
7267 	case 3: return OPA_LINK_WIDTH_3X;
7268 	default:
7269 		dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7270 			    __func__, width);
7271 		/* fall through */
7272 	case 4: return OPA_LINK_WIDTH_4X;
7273 	}
7274 }
7275 
7276 /*
7277  * Do a population count on the bottom nibble.
7278  */
7279 static const u8 bit_counts[16] = {
7280 	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7281 };
7282 
7283 static inline u8 nibble_to_count(u8 nibble)
7284 {
7285 	return bit_counts[nibble & 0xf];
7286 }
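
/*
 * Example: an enable_lane mask of 0xb (lanes 0, 1 and 3 active) gives
 * nibble_to_count(0xb) == bit_counts[0xb] == 3.
 */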
7287 
7288 /*
7289  * Read the active lane information from the 8051 registers and return
7290  * their widths.
7291  *
7292  * Active lane information is found in these 8051 registers:
7293  *	enable_lane_tx
7294  *	enable_lane_rx
7295  */
7296 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7297 			    u16 *rx_width)
7298 {
7299 	u16 tx, rx;
7300 	u8 enable_lane_rx;
7301 	u8 enable_lane_tx;
7302 	u8 tx_polarity_inversion;
7303 	u8 rx_polarity_inversion;
7304 	u8 max_rate;
7305 
7306 	/* read the active lanes */
7307 	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7308 			 &rx_polarity_inversion, &max_rate);
7309 	read_local_lni(dd, &enable_lane_rx);
7310 
7311 	/* convert to counts */
7312 	tx = nibble_to_count(enable_lane_tx);
7313 	rx = nibble_to_count(enable_lane_rx);
7314 
7315 	/*
7316 	 * Set link_speed_active here, overriding what was set in
7317 	 * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7318 	 * set the max_rate field in handle_verify_cap until v0.19.
7319 	 */
7320 	if ((dd->icode == ICODE_RTL_SILICON) &&
7321 	    (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7322 		/* max_rate: 0 = 12.5G, 1 = 25G */
7323 		switch (max_rate) {
7324 		case 0:
7325 			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7326 			break;
7327 		default:
7328 			dd_dev_err(dd,
7329 				   "%s: unexpected max rate %d, using 25Gb\n",
7330 				   __func__, (int)max_rate);
7331 			/* fall through */
7332 		case 1:
7333 			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7334 			break;
7335 		}
7336 	}
7337 
7338 	dd_dev_info(dd,
7339 		    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7340 		    enable_lane_tx, tx, enable_lane_rx, rx);
7341 	*tx_width = link_width_to_bits(dd, tx);
7342 	*rx_width = link_width_to_bits(dd, rx);
7343 }
7344 
7345 /*
7346  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7347  * Valid after the end of VerifyCap and during LinkUp.  Does not change
7348  * after link up.  I.e. look elsewhere for downgrade information.
7349  *
7350  * Bits are:
7351  *	+ bits [7:4] contain the number of active transmitters
7352  *	+ bits [3:0] contain the number of active receivers
7353  * These are numbers 1 through 4 and can be different values if the
7354  * link is asymmetric.
7355  *
7356  * verify_cap_local_fm_link_width[0] retains its original value.
7357  */
7358 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7359 			      u16 *rx_width)
7360 {
7361 	u16 widths, tx, rx;
7362 	u8 misc_bits, local_flags;
7363 	u16 active_tx, active_rx;
7364 
7365 	read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7366 	tx = widths >> 12;
7367 	rx = (widths >> 8) & 0xf;
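	/*
	 * Example: a hypothetical widths value of 0x3400 decodes to tx = 3
	 * and rx = 4, i.e. an asymmetric 3X transmit / 4X receive link.
	 */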
7368 
7369 	*tx_width = link_width_to_bits(dd, tx);
7370 	*rx_width = link_width_to_bits(dd, rx);
7371 
7372 	/* print the active widths */
7373 	get_link_widths(dd, &active_tx, &active_rx);
7374 }
7375 
7376 /*
7377  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7378  * hardware information when the link first comes up.
7379  *
7380  * The link width is not available until after VerifyCap.AllFramesReceived
7381  * (the trigger for handle_verify_cap), so this is outside that routine
7382  * and should be called when the 8051 signals linkup.
7383  */
7384 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7385 {
7386 	u16 tx_width, rx_width;
7387 
7388 	/* get end-of-LNI link widths */
7389 	get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7390 
7391 	/* use tx_width as the link is supposed to be symmetric on link up */
7392 	ppd->link_width_active = tx_width;
7393 	/* link width downgrade active (LWD.A) starts out matching LW.A */
7394 	ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7395 	ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7396 	/* per OPA spec, on link up LWD.E resets to LWD.S */
7397 	ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7398 	/* cache the active egress rate (units [10^6 bits/sec]) */
7399 	ppd->current_egress_rate = active_egress_rate(ppd);
7400 }
7401 
7402 /*
7403  * Handle a verify capabilities interrupt from the 8051.
7404  *
7405  * This is a work-queue function outside of the interrupt.
7406  */
7407 void handle_verify_cap(struct work_struct *work)
7408 {
7409 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7410 								link_vc_work);
7411 	struct hfi1_devdata *dd = ppd->dd;
7412 	u64 reg;
7413 	u8 power_management;
7414 	u8 continuous;
7415 	u8 vcu;
7416 	u8 vau;
7417 	u8 z;
7418 	u16 vl15buf;
7419 	u16 link_widths;
7420 	u16 crc_mask;
7421 	u16 crc_val;
7422 	u16 device_id;
7423 	u16 active_tx, active_rx;
7424 	u8 partner_supported_crc;
7425 	u8 remote_tx_rate;
7426 	u8 device_rev;
7427 
7428 	set_link_state(ppd, HLS_VERIFY_CAP);
7429 
7430 	lcb_shutdown(dd, 0);
7431 	adjust_lcb_for_fpga_serdes(dd);
7432 
7433 	read_vc_remote_phy(dd, &power_management, &continuous);
7434 	read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7435 			      &partner_supported_crc);
7436 	read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7437 	read_remote_device_id(dd, &device_id, &device_rev);
7438 	/*
7439 	 * And the 'MgmtAllowed' information, which is exchanged during
7440 	 * LNI, is also available at this point.
7441 	 */
7442 	read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7443 	/* print the active widths */
7444 	get_link_widths(dd, &active_tx, &active_rx);
7445 	dd_dev_info(dd,
7446 		    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7447 		    (int)power_management, (int)continuous);
7448 	dd_dev_info(dd,
7449 		    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7450 		    (int)vau, (int)z, (int)vcu, (int)vl15buf,
7451 		    (int)partner_supported_crc);
7452 	dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7453 		    (u32)remote_tx_rate, (u32)link_widths);
7454 	dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7455 		    (u32)device_id, (u32)device_rev);
7456 	/*
7457 	 * The peer vAU value just read is the peer receiver value.  HFI does
7458 	 * not support a transmit vAU of 0 (AU == 8).  We advertised that
7459 	 * with Z=1 in the fabric capabilities sent to the peer.  The peer
7460 	 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7461 	 * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7462 	 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7463 	 * subject to the Z value exception.
7464 	 */
7465 	if (vau == 0)
7466 		vau = 1;
7467 	set_up_vau(dd, vau);
7468 
7469 	/*
7470 	 * Set VL15 credits to 0 in global credit register. Cache remote VL15
7471 	 * credit value and wait for the link-up interrupt to set it.
7472 	 */
7473 	set_up_vl15(dd, 0);
7474 	dd->vl15buf_cached = vl15buf;
7475 
7476 	/* set up the LCB CRC mode */
7477 	crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7478 
7479 	/* order is important: use the lowest bit in common */
7480 	if (crc_mask & CAP_CRC_14B)
7481 		crc_val = LCB_CRC_14B;
7482 	else if (crc_mask & CAP_CRC_48B)
7483 		crc_val = LCB_CRC_48B;
7484 	else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7485 		crc_val = LCB_CRC_12B_16B_PER_LANE;
7486 	else
7487 		crc_val = LCB_CRC_16B;
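	/*
	 * Example: if both sides enable 14b and 48b CRC, the 14b mode is
	 * chosen; if no optional mode is common, crc_mask is 0 and the
	 * mandatory LCB_CRC_16B is used.
	 */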
7488 
7489 	dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7490 	write_csr(dd, DC_LCB_CFG_CRC_MODE,
7491 		  (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7492 
7493 	/* set (14b only) or clear sideband credit */
7494 	reg = read_csr(dd, SEND_CM_CTRL);
7495 	if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7496 		write_csr(dd, SEND_CM_CTRL,
7497 			  reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7498 	} else {
7499 		write_csr(dd, SEND_CM_CTRL,
7500 			  reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7501 	}
7502 
7503 	ppd->link_speed_active = 0;	/* invalid value */
7504 	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7505 		/* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7506 		switch (remote_tx_rate) {
7507 		case 0:
7508 			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7509 			break;
7510 		case 1:
7511 			ppd->link_speed_active = OPA_LINK_SPEED_25G;
7512 			break;
7513 		}
7514 	} else {
7515 		/* actual rate is highest bit of the ANDed rates */
7516 		u8 rate = remote_tx_rate & ppd->local_tx_rate;
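		/*
		 * e.g. a local rate mask of 0x3 (12.5G and 25G) ANDed with a
		 * remote mask of 0x2 leaves 0x2, selecting 25G below.
		 */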
7517 
7518 		if (rate & 2)
7519 			ppd->link_speed_active = OPA_LINK_SPEED_25G;
7520 		else if (rate & 1)
7521 			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7522 	}
7523 	if (ppd->link_speed_active == 0) {
7524 		dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7525 			   __func__, (int)remote_tx_rate);
7526 		ppd->link_speed_active = OPA_LINK_SPEED_25G;
7527 	}
7528 
7529 	/*
7530 	 * Cache the values of the supported, enabled, and active
7531 	 * LTP CRC modes to return in 'portinfo' queries. But the bit
7532 	 * flags that are returned in the portinfo query differ from
7533 	 * what's in the link_crc_mask, crc_sizes, and crc_val
7534 	 * variables. Convert these here.
7535 	 */
7536 	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7537 		/* supported crc modes */
7538 	ppd->port_ltp_crc_mode |=
7539 		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7540 		/* enabled crc modes */
7541 	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7542 		/* active crc mode */
7543 
7544 	/* set up the remote credit return table */
7545 	assign_remote_cm_au_table(dd, vcu);
7546 
7547 	/*
7548 	 * The LCB is reset on entry to handle_verify_cap(), so this must
7549 	 * be applied on every link up.
7550 	 *
7551 	 * Adjust LCB error kill enable to kill the link if
7552 	 * these RBUF errors are seen:
7553 	 *	REPLAY_BUF_MBE_SMASK
7554 	 *	FLIT_INPUT_BUF_MBE_SMASK
7555 	 */
7556 	if (is_ax(dd)) {			/* fixed in B0 */
7557 		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7558 		reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7559 			| DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7560 		write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7561 	}
7562 
7563 	/* pull LCB fifos out of reset - all fifo clocks must be stable */
7564 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7565 
7566 	/* give 8051 access to the LCB CSRs */
7567 	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7568 	set_8051_lcb_access(dd);
7569 
7570 	if (ppd->mgmt_allowed)
7571 		add_full_mgmt_pkey(ppd);
7572 
7573 	/* tell the 8051 to go to LinkUp */
7574 	set_link_state(ppd, HLS_GOING_UP);
7575 }
7576 
7577 /*
7578  * Apply the link width downgrade enabled policy against the current active
7579  * link widths.
7580  *
7581  * Called when the enabled policy changes or the active link widths change.
7582  */
7583 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7584 {
7585 	int do_bounce = 0;
7586 	int tries;
7587 	u16 lwde;
7588 	u16 tx, rx;
7589 
7590 	/* use the hls lock to avoid a race with actual link up */
7591 	tries = 0;
7592 retry:
7593 	mutex_lock(&ppd->hls_lock);
7594 	/* only apply if the link is up */
7595 	if (ppd->host_link_state & HLS_DOWN) {
7596 		/* still going up; wait and retry */
7597 		if (ppd->host_link_state & HLS_GOING_UP) {
7598 			if (++tries < 1000) {
7599 				mutex_unlock(&ppd->hls_lock);
7600 				usleep_range(100, 120); /* arbitrary */
7601 				goto retry;
7602 			}
7603 			dd_dev_err(ppd->dd,
7604 				   "%s: giving up waiting for link state change\n",
7605 				   __func__);
7606 		}
7607 		goto done;
7608 	}
7609 
7610 	lwde = ppd->link_width_downgrade_enabled;
7611 
7612 	if (refresh_widths) {
7613 		get_link_widths(ppd->dd, &tx, &rx);
7614 		ppd->link_width_downgrade_tx_active = tx;
7615 		ppd->link_width_downgrade_rx_active = rx;
7616 	}
7617 
7618 	if (ppd->link_width_downgrade_tx_active == 0 ||
7619 	    ppd->link_width_downgrade_rx_active == 0) {
7620 		/* the 8051 reported a dead link as a downgrade */
7621 		dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7622 	} else if (lwde == 0) {
7623 		/* downgrade is disabled */
7624 
7625 		/* bounce if not at starting active width */
7626 		if ((ppd->link_width_active !=
7627 		     ppd->link_width_downgrade_tx_active) ||
7628 		    (ppd->link_width_active !=
7629 		     ppd->link_width_downgrade_rx_active)) {
7630 			dd_dev_err(ppd->dd,
7631 				   "Link downgrade is disabled and link has downgraded, downing link\n");
7632 			dd_dev_err(ppd->dd,
7633 				   "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7634 				   ppd->link_width_active,
7635 				   ppd->link_width_downgrade_tx_active,
7636 				   ppd->link_width_downgrade_rx_active);
7637 			do_bounce = 1;
7638 		}
7639 	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7640 		   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7641 		/* Tx or Rx is outside the enabled policy */
7642 		dd_dev_err(ppd->dd,
7643 			   "Link is outside of downgrade allowed, downing link\n");
7644 		dd_dev_err(ppd->dd,
7645 			   "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7646 			   lwde, ppd->link_width_downgrade_tx_active,
7647 			   ppd->link_width_downgrade_rx_active);
7648 		do_bounce = 1;
7649 	}
7650 
7651 done:
7652 	mutex_unlock(&ppd->hls_lock);
7653 
7654 	if (do_bounce) {
7655 		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7656 				     OPA_LINKDOWN_REASON_WIDTH_POLICY);
7657 		set_link_state(ppd, HLS_DN_OFFLINE);
7658 		start_link(ppd);
7659 	}
7660 }
7661 
7662 /*
7663  * Handle a link downgrade interrupt from the 8051.
7664  *
7665  * This is a work-queue function outside of the interrupt.
7666  */
7667 void handle_link_downgrade(struct work_struct *work)
7668 {
7669 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7670 							link_downgrade_work);
7671 
7672 	dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7673 	apply_link_downgrade_policy(ppd, 1);
7674 }
7675 
7676 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7677 {
7678 	return flag_string(buf, buf_len, flags, dcc_err_flags,
7679 		ARRAY_SIZE(dcc_err_flags));
7680 }
7681 
7682 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7683 {
7684 	return flag_string(buf, buf_len, flags, lcb_err_flags,
7685 		ARRAY_SIZE(lcb_err_flags));
7686 }
7687 
7688 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7689 {
7690 	return flag_string(buf, buf_len, flags, dc8051_err_flags,
7691 		ARRAY_SIZE(dc8051_err_flags));
7692 }
7693 
7694 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7695 {
7696 	return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7697 		ARRAY_SIZE(dc8051_info_err_flags));
7698 }
7699 
7700 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7701 {
7702 	return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7703 		ARRAY_SIZE(dc8051_info_host_msg_flags));
7704 }
7705 
7706 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7707 {
7708 	struct hfi1_pportdata *ppd = dd->pport;
7709 	u64 info, err, host_msg;
7710 	int queue_link_down = 0;
7711 	char buf[96];
7712 
7713 	/* look at the flags */
7714 	if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7715 		/* 8051 information set by firmware */
7716 		/* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7717 		info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7718 		err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7719 			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7720 		host_msg = (info >>
7721 			DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7722 			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
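		/*
		 * err and host_msg are decoded flag by flag below; each
		 * recognized flag is cleared so anything left over can be
		 * reported as unexpected at the end.
		 */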
7723 
7724 		/*
7725 		 * Handle error flags.
7726 		 */
7727 		if (err & FAILED_LNI) {
7728 			/*
7729 			 * LNI error indications are cleared by the 8051
7730 			 * only when starting polling.  Only pay attention
7731 			 * to them when in the states that occur during
7732 			 * LNI.
7733 			 */
7734 			if (ppd->host_link_state
7735 			    & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7736 				queue_link_down = 1;
7737 				dd_dev_info(dd, "Link error: %s\n",
7738 					    dc8051_info_err_string(buf,
7739 								   sizeof(buf),
7740 								   err &
7741 								   FAILED_LNI));
7742 			}
7743 			err &= ~(u64)FAILED_LNI;
7744 		}
7745 		/* unknown frames can happen during LNI, just count */
7746 		if (err & UNKNOWN_FRAME) {
7747 			ppd->unknown_frame_count++;
7748 			err &= ~(u64)UNKNOWN_FRAME;
7749 		}
7750 		if (err) {
7751 			/* report remaining errors, but do not do anything */
7752 			dd_dev_err(dd, "8051 info error: %s\n",
7753 				   dc8051_info_err_string(buf, sizeof(buf),
7754 							  err));
7755 		}
7756 
7757 		/*
7758 		 * Handle host message flags.
7759 		 */
7760 		if (host_msg & HOST_REQ_DONE) {
7761 			/*
7762 			 * Presently, the driver does a busy wait for
7763 			 * host requests to complete.  This is only an
7764 			 * informational message.
7765 			 * NOTE: The 8051 clears the host message
7766 			 * information *on the next 8051 command*.
7767 			 * Therefore, when linkup is achieved,
7768 			 * this flag will still be set.
7769 			 */
7770 			host_msg &= ~(u64)HOST_REQ_DONE;
7771 		}
7772 		if (host_msg & BC_SMA_MSG) {
7773 			queue_work(ppd->link_wq, &ppd->sma_message_work);
7774 			host_msg &= ~(u64)BC_SMA_MSG;
7775 		}
7776 		if (host_msg & LINKUP_ACHIEVED) {
7777 			dd_dev_info(dd, "8051: Link up\n");
7778 			queue_work(ppd->link_wq, &ppd->link_up_work);
7779 			host_msg &= ~(u64)LINKUP_ACHIEVED;
7780 		}
7781 		if (host_msg & EXT_DEVICE_CFG_REQ) {
7782 			handle_8051_request(ppd);
7783 			host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7784 		}
7785 		if (host_msg & VERIFY_CAP_FRAME) {
7786 			queue_work(ppd->link_wq, &ppd->link_vc_work);
7787 			host_msg &= ~(u64)VERIFY_CAP_FRAME;
7788 		}
7789 		if (host_msg & LINK_GOING_DOWN) {
7790 			const char *extra = "";
7791 			/* no downgrade action needed if going down */
7792 			if (host_msg & LINK_WIDTH_DOWNGRADED) {
7793 				host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7794 				extra = " (ignoring downgrade)";
7795 			}
7796 			dd_dev_info(dd, "8051: Link down%s\n", extra);
7797 			queue_link_down = 1;
7798 			host_msg &= ~(u64)LINK_GOING_DOWN;
7799 		}
7800 		if (host_msg & LINK_WIDTH_DOWNGRADED) {
7801 			queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7802 			host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7803 		}
7804 		if (host_msg) {
7805 			/* report remaining messages, but do not do anything */
7806 			dd_dev_info(dd, "8051 info host message: %s\n",
7807 				    dc8051_info_host_msg_string(buf,
7808 								sizeof(buf),
7809 								host_msg));
7810 		}
7811 
7812 		reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7813 	}
7814 	if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7815 		/*
7816 		 * Lost the 8051 heartbeat.  If this happens, we
7817 		 * receive constant interrupts about it.  Disable
7818 		 * the interrupt after the first.
7819 		 */
7820 		dd_dev_err(dd, "Lost 8051 heartbeat\n");
7821 		write_csr(dd, DC_DC8051_ERR_EN,
7822 			  read_csr(dd, DC_DC8051_ERR_EN) &
7823 			  ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7824 
7825 		reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7826 	}
7827 	if (reg) {
7828 		/* report the error, but do not do anything */
7829 		dd_dev_err(dd, "8051 error: %s\n",
7830 			   dc8051_err_string(buf, sizeof(buf), reg));
7831 	}
7832 
7833 	if (queue_link_down) {
7834 		/*
7835 		 * if the link is already going down or disabled, do not
7836 		 * queue another. If there's a link down entry already
7837 		 * queued, don't queue another one.
7838 		 */
7839 		if ((ppd->host_link_state &
7840 		    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7841 		    ppd->link_enabled == 0) {
7842 			dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7843 				    __func__, ppd->host_link_state,
7844 				    ppd->link_enabled);
7845 		} else {
7846 			if (xchg(&ppd->is_link_down_queued, 1) == 1)
7847 				dd_dev_info(dd,
7848 					    "%s: link down request already queued\n",
7849 					    __func__);
7850 			else
7851 				queue_work(ppd->link_wq, &ppd->link_down_work);
7852 		}
7853 	}
7854 }
7855 
7856 static const char * const fm_config_txt[] = {
7857 [0] =
7858 	"BadHeadDist: Distance violation between two head flits",
7859 [1] =
7860 	"BadTailDist: Distance violation between two tail flits",
7861 [2] =
7862 	"BadCtrlDist: Distance violation between two credit control flits",
7863 [3] =
7864 	"BadCrdAck: Credits return for unsupported VL",
7865 [4] =
7866 	"UnsupportedVLMarker: Received VL Marker",
7867 [5] =
7868 	"BadPreempt: Exceeded the preemption nesting level",
7869 [6] =
7870 	"BadControlFlit: Received unsupported control flit",
7871 /* no 7 */
7872 [8] =
7873 	"UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7874 };
7875 
7876 static const char * const port_rcv_txt[] = {
7877 [1] =
7878 	"BadPktLen: Illegal PktLen",
7879 [2] =
7880 	"PktLenTooLong: Packet longer than PktLen",
7881 [3] =
7882 	"PktLenTooShort: Packet shorter than PktLen",
7883 [4] =
7884 	"BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7885 [5] =
7886 	"BadDLID: Illegal DLID (0, doesn't match HFI)",
7887 [6] =
7888 	"BadL2: Illegal L2 opcode",
7889 [7] =
7890 	"BadSC: Unsupported SC",
7891 [9] =
7892 	"BadRC: Illegal RC",
7893 [11] =
7894 	"PreemptError: Preempting with same VL",
7895 [12] =
7896 	"PreemptVL15: Preempting a VL15 packet",
7897 };
7898 
7899 #define OPA_LDR_FMCONFIG_OFFSET 16
7900 #define OPA_LDR_PORTRCV_OFFSET 0
7901 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7902 {
7903 	u64 info, hdr0, hdr1;
7904 	const char *extra;
7905 	char buf[96];
7906 	struct hfi1_pportdata *ppd = dd->pport;
7907 	u8 lcl_reason = 0;
7908 	int do_bounce = 0;
7909 
7910 	if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7911 		if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7912 			info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7913 			dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7914 			/* set status bit */
7915 			dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7916 		}
7917 		reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7918 	}
7919 
7920 	if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7921 		struct hfi1_pportdata *ppd = dd->pport;
7922 		/* this counter saturates at (2^32) - 1 */
7923 		if (ppd->link_downed < (u32)UINT_MAX)
7924 			ppd->link_downed++;
7925 		reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7926 	}
7927 
7928 	if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7929 		u8 reason_valid = 1;
7930 
7931 		info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7932 		if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7933 			dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7934 			/* set status bit */
7935 			dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7936 		}
7937 		switch (info) {
7938 		case 0:
7939 		case 1:
7940 		case 2:
7941 		case 3:
7942 		case 4:
7943 		case 5:
7944 		case 6:
7945 			extra = fm_config_txt[info];
7946 			break;
7947 		case 8:
7948 			extra = fm_config_txt[info];
7949 			if (ppd->port_error_action &
7950 			    OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7951 				do_bounce = 1;
7952 				/*
7953 				 * lcl_reason cannot be derived from info
7954 				 * for this error
7955 				 */
7956 				lcl_reason =
7957 				  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7958 			}
7959 			break;
7960 		default:
7961 			reason_valid = 0;
7962 			snprintf(buf, sizeof(buf), "reserved%lld", info);
7963 			extra = buf;
7964 			break;
7965 		}
7966 
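		/*
		 * e.g. with OPA_LDR_FMCONFIG_OFFSET == 16, a BadCtrlDist
		 * error (info == 2) tests bit 18 of port_error_action.
		 */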
7967 		if (reason_valid && !do_bounce) {
7968 			do_bounce = ppd->port_error_action &
7969 					(1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7970 			lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7971 		}
7972 
7973 		/* just report this */
7974 		dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7975 					extra);
7976 		reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7977 	}
7978 
7979 	if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7980 		u8 reason_valid = 1;
7981 
7982 		info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7983 		hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7984 		hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7985 		if (!(dd->err_info_rcvport.status_and_code &
7986 		      OPA_EI_STATUS_SMASK)) {
7987 			dd->err_info_rcvport.status_and_code =
7988 				info & OPA_EI_CODE_SMASK;
7989 			/* set status bit */
7990 			dd->err_info_rcvport.status_and_code |=
7991 				OPA_EI_STATUS_SMASK;
7992 			/*
7993 			 * save first 2 flits in the packet that caused
7994 			 * the error
7995 			 */
7996 			dd->err_info_rcvport.packet_flit1 = hdr0;
7997 			dd->err_info_rcvport.packet_flit2 = hdr1;
7998 		}
7999 		switch (info) {
8000 		case 1:
8001 		case 2:
8002 		case 3:
8003 		case 4:
8004 		case 5:
8005 		case 6:
8006 		case 7:
8007 		case 9:
8008 		case 11:
8009 		case 12:
8010 			extra = port_rcv_txt[info];
8011 			break;
8012 		default:
8013 			reason_valid = 0;
8014 			snprintf(buf, sizeof(buf), "reserved%lld", info);
8015 			extra = buf;
8016 			break;
8017 		}
8018 
8019 		if (reason_valid && !do_bounce) {
8020 			do_bounce = ppd->port_error_action &
8021 					(1 << (OPA_LDR_PORTRCV_OFFSET + info));
8022 			lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8023 		}
8024 
8025 		/* just report this */
8026 		dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8027 					"               hdr0 0x%llx, hdr1 0x%llx\n",
8028 					extra, hdr0, hdr1);
8029 
8030 		reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8031 	}
8032 
8033 	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8034 		/* informative only */
8035 		dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8036 		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8037 	}
8038 	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8039 		/* informative only */
8040 		dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8041 		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8042 	}
8043 
8044 	if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8045 		reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8046 
8047 	/* report any remaining errors */
8048 	if (reg)
8049 		dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8050 					dcc_err_string(buf, sizeof(buf), reg));
8051 
8052 	if (lcl_reason == 0)
8053 		lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8054 
8055 	if (do_bounce) {
8056 		dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8057 					__func__);
8058 		set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8059 		queue_work(ppd->link_wq, &ppd->link_bounce_work);
8060 	}
8061 }
8062 
8063 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8064 {
8065 	char buf[96];
8066 
8067 	dd_dev_info(dd, "LCB Error: %s\n",
8068 		    lcb_err_string(buf, sizeof(buf), reg));
8069 }
8070 
8071 /*
8072  * CCE block DC interrupt.  Source is < 8.
8073  */
8074 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8075 {
8076 	const struct err_reg_info *eri = &dc_errs[source];
8077 
8078 	if (eri->handler) {
8079 		interrupt_clear_down(dd, 0, eri);
8080 	} else if (source == 3 /* dc_lbm_int */) {
8081 		/*
8082 		 * This indicates that a parity error has occurred on the
8083 		 * address/control lines presented to the LBM.  The error
8084 		 * is a single pulse, there is no associated error flag,
8085 		 * and it is non-maskable.  This is because if a parity
8086 		 * error occurs on the request the request is dropped.
8087 		 * This should never occur, but it is nice to know if it
8088 		 * ever does.
8089 		 */
8090 		dd_dev_err(dd, "Parity error in DC LBM block\n");
8091 	} else {
8092 		dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8093 	}
8094 }
8095 
8096 /*
8097  * TX block send credit interrupt.  Source is < 160.
8098  */
8099 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8100 {
8101 	sc_group_release_update(dd, source);
8102 }
8103 
8104 /*
8105  * TX block SDMA interrupt.  Source is < 48.
8106  *
8107  * SDMA interrupts are grouped by type:
8108  *
8109  *	 0 -  N-1 = SDma
8110  *	 N - 2N-1 = SDmaProgress
8111  *	2N - 3N-1 = SDmaIdle
8112  */
8113 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8114 {
8115 	/* what interrupt */
8116 	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
8117 	/* which engine */
8118 	unsigned int which = source % TXE_NUM_SDMA_ENGINES;
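	/*
	 * e.g. assuming TXE_NUM_SDMA_ENGINES is 16, source 20 decodes to
	 * what = 1 (SDmaProgress) and which = engine 4.
	 */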
8119 
8120 #ifdef CONFIG_SDMA_VERBOSITY
8121 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8122 		   slashstrip(__FILE__), __LINE__, __func__);
8123 	sdma_dumpstate(&dd->per_sdma[which]);
8124 #endif
8125 
8126 	if (likely(what < 3 && which < dd->num_sdma)) {
8127 		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8128 	} else {
8129 		/* should not happen */
8130 		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8131 	}
8132 }
8133 
8134 /*
8135  * RX block receive available interrupt.  Source is < 160.
8136  */
8137 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8138 {
8139 	struct hfi1_ctxtdata *rcd;
8140 	char *err_detail;
8141 
8142 	if (likely(source < dd->num_rcv_contexts)) {
8143 		rcd = hfi1_rcd_get_by_index(dd, source);
8144 		if (rcd) {
8145 			/* Check for non-user contexts, including vnic */
8146 			if ((source < dd->first_dyn_alloc_ctxt) ||
8147 			    (rcd->sc && (rcd->sc->type == SC_KERNEL)))
8148 				rcd->do_interrupt(rcd, 0);
8149 			else
8150 				handle_user_interrupt(rcd);
8151 
8152 			hfi1_rcd_put(rcd);
8153 			return;	/* OK */
8154 		}
8155 		/* received an interrupt, but no rcd */
8156 		err_detail = "dataless";
8157 	} else {
8158 		/* received an interrupt, but are not using that context */
8159 		err_detail = "out of range";
8160 	}
8161 	dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8162 		   err_detail, source);
8163 }
8164 
8165 /*
8166  * RX block receive urgent interrupt.  Source is < 160.
8167  */
8168 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8169 {
8170 	struct hfi1_ctxtdata *rcd;
8171 	char *err_detail;
8172 
8173 	if (likely(source < dd->num_rcv_contexts)) {
8174 		rcd = hfi1_rcd_get_by_index(dd, source);
8175 		if (rcd) {
8176 			/* only pay attention to user urgent interrupts */
8177 			if ((source >= dd->first_dyn_alloc_ctxt) &&
8178 			    (!rcd->sc || (rcd->sc->type == SC_USER)))
8179 				handle_user_interrupt(rcd);
8180 
8181 			hfi1_rcd_put(rcd);
8182 			return;	/* OK */
8183 		}
8184 		/* received an interrupt, but no rcd */
8185 		err_detail = "dataless";
8186 	} else {
8187 		/* received an interrupt, but are not using that context */
8188 		err_detail = "out of range";
8189 	}
8190 	dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8191 		   err_detail, source);
8192 }
8193 
8194 /*
8195  * Reserved range interrupt.  Should not be called in normal operation.
8196  */
8197 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8198 {
8199 	char name[64];
8200 
8201 	dd_dev_err(dd, "unexpected %s interrupt\n",
8202 		   is_reserved_name(name, sizeof(name), source));
8203 }
8204 
8205 static const struct is_table is_table[] = {
8206 /*
8207  * start		 end
8208  *				name func		interrupt func
8209  */
8210 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8211 				is_misc_err_name,	is_misc_err_int },
8212 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8213 				is_sdma_eng_err_name,	is_sdma_eng_err_int },
8214 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8215 				is_sendctxt_err_name,	is_sendctxt_err_int },
8216 { IS_SDMA_START,	     IS_SDMA_END,
8217 				is_sdma_eng_name,	is_sdma_eng_int },
8218 { IS_VARIOUS_START,	     IS_VARIOUS_END,
8219 				is_various_name,	is_various_int },
8220 { IS_DC_START,	     IS_DC_END,
8221 				is_dc_name,		is_dc_int },
8222 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8223 				is_rcv_avail_name,	is_rcv_avail_int },
8224 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8225 				is_rcv_urgent_name,	is_rcv_urgent_int },
8226 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8227 				is_send_credit_name,	is_send_credit_int},
8228 { IS_RESERVED_START,     IS_RESERVED_END,
8229 				is_reserved_name,	is_reserved_int},
8230 };
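
/*
 * The entries above must stay sorted by range: is_interrupt() below walks
 * the table in order and dispatches to the first entry whose end is above
 * the source, passing the source relative to the entry's start.
 */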
8231 
8232 /*
8233  * Interrupt source interrupt - called when the given source has an interrupt.
8234  * Source is a bit index into an array of 64-bit integers.
8235  */
8236 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8237 {
8238 	const struct is_table *entry;
8239 
8240 	/* avoids a double compare by walking the table in-order */
8241 	for (entry = &is_table[0]; entry->is_name; entry++) {
8242 		if (source < entry->end) {
8243 			trace_hfi1_interrupt(dd, entry, source);
8244 			entry->is_int(dd, source - entry->start);
8245 			return;
8246 		}
8247 	}
8248 	/* fell off the end */
8249 	dd_dev_err(dd, "invalid interrupt source %u\n", source);
8250 }
8251 
8252 /*
8253  * General interrupt handler.  This is able to correctly handle
8254  * all interrupts in case INTx is used.
8255  */
8256 static irqreturn_t general_interrupt(int irq, void *data)
8257 {
8258 	struct hfi1_devdata *dd = data;
8259 	u64 regs[CCE_NUM_INT_CSRS];
8260 	u32 bit;
8261 	int i;
8262 	irqreturn_t handled = IRQ_NONE;
8263 
8264 	this_cpu_inc(*dd->int_counter);
8265 
8266 	/* phase 1: scan and clear all handled interrupts */
8267 	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8268 		if (dd->gi_mask[i] == 0) {
8269 			regs[i] = 0;	/* used later */
8270 			continue;
8271 		}
8272 		regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8273 				dd->gi_mask[i];
8274 		/* only clear if anything is set */
8275 		if (regs[i])
8276 			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8277 	}
8278 
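	/*
	 * regs[] now holds only the bits that were set and cleared above;
	 * for_each_set_bit() treats it as one bitmap of CCE_NUM_INT_CSRS * 64
	 * sources and yields the global source number is_interrupt() expects.
	 */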
8279 	/* phase 2: call the appropriate handler */
8280 	for_each_set_bit(bit, (unsigned long *)&regs[0],
8281 			 CCE_NUM_INT_CSRS * 64) {
8282 		is_interrupt(dd, bit);
8283 		handled = IRQ_HANDLED;
8284 	}
8285 
8286 	return handled;
8287 }
8288 
8289 static irqreturn_t sdma_interrupt(int irq, void *data)
8290 {
8291 	struct sdma_engine *sde = data;
8292 	struct hfi1_devdata *dd = sde->dd;
8293 	u64 status;
8294 
8295 #ifdef CONFIG_SDMA_VERBOSITY
8296 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8297 		   slashstrip(__FILE__), __LINE__, __func__);
8298 	sdma_dumpstate(sde);
8299 #endif
8300 
8301 	this_cpu_inc(*dd->int_counter);
8302 
8303 	/* This read_csr is really bad in the hot path */
8304 	status = read_csr(dd,
8305 			  CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8306 			  & sde->imask;
8307 	if (likely(status)) {
8308 		/* clear the interrupt(s) */
8309 		write_csr(dd,
8310 			  CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8311 			  status);
8312 
8313 		/* handle the interrupt(s) */
8314 		sdma_engine_interrupt(sde, status);
8315 	} else {
8316 		dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8317 					sde->this_idx);
8318 	}
8319 	return IRQ_HANDLED;
8320 }
8321 
8322 /*
8323  * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8324  * to ensure that the write completed.  This does NOT guarantee that
8325  * queued DMA writes to memory from the chip are pushed.
8326  */
8327 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8328 {
8329 	struct hfi1_devdata *dd = rcd->dd;
8330 	u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8331 
8332 	mmiowb();	/* make sure everything before is written */
8333 	write_csr(dd, addr, rcd->imask);
8334 	/* force the above write on the chip and get a value back */
8335 	(void)read_csr(dd, addr);
8336 }
8337 
8338 /* force the receive interrupt */
8339 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8340 {
8341 	write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8342 }
8343 
8344 /*
8345  * Return non-zero if a packet is present.
8346  *
8347  * This routine is called when rechecking for packets after the RcvAvail
8348  * interrupt has been cleared down.  First, do a quick check of memory for
8349  * a packet present.  If not found, use an expensive CSR read of the context
8350  * tail to determine the actual tail.  The CSR read is necessary because there
8351  * is no method to push pending DMAs to memory other than an interrupt and we
8352  * are trying to determine if we need to force an interrupt.
8353  */
8354 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8355 {
8356 	u32 tail;
8357 	int present;
8358 
8359 	if (!rcd->rcvhdrtail_kvaddr)
8360 		present = (rcd->seq_cnt ==
8361 				rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8362 	else /* is RDMA rtail */
8363 		present = (rcd->head != get_rcvhdrtail(rcd));
8364 
8365 	if (present)
8366 		return 1;
8367 
8368 	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8369 	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8370 	return rcd->head != tail;
8371 }
8372 
8373 /*
8374  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8375  * This routine will try to handle packets immediately (latency), but if
8376  * it finds too many, it will invoke the thread handler (bandwidth).  The
8377  * chip receive interrupt is *not* cleared down until this or the thread (if
8378  * invoked) is finished.  The intent is to avoid extra interrupts while we
8379  * are processing packets anyway.
8380  */
8381 static irqreturn_t receive_context_interrupt(int irq, void *data)
8382 {
8383 	struct hfi1_ctxtdata *rcd = data;
8384 	struct hfi1_devdata *dd = rcd->dd;
8385 	int disposition;
8386 	int present;
8387 
8388 	trace_hfi1_receive_interrupt(dd, rcd);
8389 	this_cpu_inc(*dd->int_counter);
8390 	aspm_ctx_disable(rcd);
8391 
8392 	/* receive interrupt remains blocked while processing packets */
8393 	disposition = rcd->do_interrupt(rcd, 0);
8394 
8395 	/*
8396 	 * Too many packets were seen while processing packets in this
8397 	 * IRQ handler.  Invoke the handler thread.  The receive interrupt
8398 	 * remains blocked.
8399 	 */
8400 	if (disposition == RCV_PKT_LIMIT)
8401 		return IRQ_WAKE_THREAD;
8402 
8403 	/*
8404 	 * The packet processor detected no more packets.  Clear the receive
8405 	 * interrupt and recheck for a packet that may have arrived
8406 	 * after the previous check and interrupt clear.  If a packet arrived,
8407 	 * force another interrupt.
8408 	 */
8409 	clear_recv_intr(rcd);
8410 	present = check_packet_present(rcd);
8411 	if (present)
8412 		force_recv_intr(rcd);
8413 
8414 	return IRQ_HANDLED;
8415 }
8416 
8417 /*
8418  * Receive packet thread handler.  This expects to be invoked with the
8419  * receive interrupt still blocked.
8420  */
8421 static irqreturn_t receive_context_thread(int irq, void *data)
8422 {
8423 	struct hfi1_ctxtdata *rcd = data;
8424 	int present;
8425 
8426 	/* receive interrupt is still blocked from the IRQ handler */
8427 	(void)rcd->do_interrupt(rcd, 1);
8428 
8429 	/*
8430 	 * The packet processor will only return if it detected no more
8431 	 * packets.  Hold IRQs here so we can safely clear the interrupt and
8432 	 * recheck for a packet that may have arrived after the previous
8433 	 * check and the interrupt clear.  If a packet arrived, force another
8434 	 * interrupt.
8435 	 */
8436 	local_irq_disable();
8437 	clear_recv_intr(rcd);
8438 	present = check_packet_present(rcd);
8439 	if (present)
8440 		force_recv_intr(rcd);
8441 	local_irq_enable();
8442 
8443 	return IRQ_HANDLED;
8444 }
8445 
8446 /* ========================================================================= */
8447 
8448 u32 read_physical_state(struct hfi1_devdata *dd)
8449 {
8450 	u64 reg;
8451 
8452 	reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8453 	return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8454 				& DC_DC8051_STS_CUR_STATE_PORT_MASK;
8455 }
8456 
8457 u32 read_logical_state(struct hfi1_devdata *dd)
8458 {
8459 	u64 reg;
8460 
8461 	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8462 	return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8463 				& DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8464 }
8465 
8466 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8467 {
8468 	u64 reg;
8469 
8470 	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8471 	/* clear current state, set new state */
8472 	reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8473 	reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8474 	write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8475 }
8476 
8477 /*
8478  * Use the 8051 to read a LCB CSR.
8479  */
8480 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8481 {
8482 	u32 regno;
8483 	int ret;
8484 
8485 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8486 		if (acquire_lcb_access(dd, 0) == 0) {
8487 			*data = read_csr(dd, addr);
8488 			release_lcb_access(dd, 0);
8489 			return 0;
8490 		}
8491 		return -EBUSY;
8492 	}
8493 
8494 	/* register is an index of LCB registers: (offset - base) / 8 */
8495 	regno = (addr - DC_LCB_CFG_RUN) >> 3;
8496 	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8497 	if (ret != HCMD_SUCCESS)
8498 		return -EBUSY;
8499 	return 0;
8500 }
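
/*
 * Added sketch of the regno math above (not in the original source):
 * LCB CSRs are 8 bytes apart, so the 8051 addresses them by index
 * relative to DC_LCB_CFG_RUN.  For instance, an LCB CSR located 0x28
 * bytes above DC_LCB_CFG_RUN is register index (0x28 >> 3) == 5.  The
 * same conversion is used by write_lcb_via_8051() below.
 */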
8501 
8502 /*
8503  * Provide a cache for some of the LCB registers in case the LCB is
8504  * unavailable.
8505  * (The LCB is unavailable in certain link states, for example.)
8506  */
8507 struct lcb_datum {
8508 	u32 off;
8509 	u64 val;
8510 };
8511 
8512 static struct lcb_datum lcb_cache[] = {
8513 	{ DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8514 	{ DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8515 	{ DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8516 };
8517 
8518 static void update_lcb_cache(struct hfi1_devdata *dd)
8519 {
8520 	int i;
8521 	int ret;
8522 	u64 val;
8523 
8524 	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8525 		ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8526 
8527 		/* Update if we get good data */
8528 		if (likely(ret != -EBUSY))
8529 			lcb_cache[i].val = val;
8530 	}
8531 }
8532 
8533 static int read_lcb_cache(u32 off, u64 *val)
8534 {
8535 	int i;
8536 
8537 	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8538 		if (lcb_cache[i].off == off) {
8539 			*val = lcb_cache[i].val;
8540 			return 0;
8541 		}
8542 	}
8543 
8544 	pr_warn("%s bad offset 0x%x\n", __func__, off);
8545 	return -1;
8546 }
8547 
8548 /*
8549  * Read an LCB CSR.  Access may not be in host control, so check.
8550  * Return 0 on success, -EBUSY on failure.
8551  */
8552 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8553 {
8554 	struct hfi1_pportdata *ppd = dd->pport;
8555 
8556 	/* if up, go through the 8051 for the value */
8557 	if (ppd->host_link_state & HLS_UP)
8558 		return read_lcb_via_8051(dd, addr, data);
8559 	/* if going up or down, check the cache, otherwise, no access */
8560 	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8561 		if (read_lcb_cache(addr, data))
8562 			return -EBUSY;
8563 		return 0;
8564 	}
8565 
8566 	/* otherwise, host has access */
8567 	*data = read_csr(dd, addr);
8568 	return 0;
8569 }
8570 
8571 /*
8572  * Use the 8051 to write a LCB CSR.
8573  */
8574 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8575 {
8576 	u32 regno;
8577 	int ret;
8578 
8579 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8580 	    (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8581 		if (acquire_lcb_access(dd, 0) == 0) {
8582 			write_csr(dd, addr, data);
8583 			release_lcb_access(dd, 0);
8584 			return 0;
8585 		}
8586 		return -EBUSY;
8587 	}
8588 
8589 	/* register is an index of LCB registers: (offset - base) / 8 */
8590 	regno = (addr - DC_LCB_CFG_RUN) >> 3;
8591 	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8592 	if (ret != HCMD_SUCCESS)
8593 		return -EBUSY;
8594 	return 0;
8595 }
8596 
8597 /*
8598  * Write an LCB CSR.  Access may not be in host control, so check.
8599  * Return 0 on success, -EBUSY on failure.
8600  */
8601 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8602 {
8603 	struct hfi1_pportdata *ppd = dd->pport;
8604 
8605 	/* if up, go through the 8051 for the value */
8606 	if (ppd->host_link_state & HLS_UP)
8607 		return write_lcb_via_8051(dd, addr, data);
8608 	/* if going up or down, no access */
8609 	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8610 		return -EBUSY;
8611 	/* otherwise, host has access */
8612 	write_csr(dd, addr, data);
8613 	return 0;
8614 }
8615 
8616 /*
8617  * Returns:
8618  *	< 0 = Linux error, not able to get access
8619  *	> 0 = 8051 command RETURN_CODE
8620  */
8621 static int do_8051_command(
8622 	struct hfi1_devdata *dd,
8623 	u32 type,
8624 	u64 in_data,
8625 	u64 *out_data)
8626 {
8627 	u64 reg, completed;
8628 	int return_code;
8629 	unsigned long timeout;
8630 
8631 	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8632 
8633 	mutex_lock(&dd->dc8051_lock);
8634 
8635 	/* We can't send any commands to the 8051 if it's in reset */
8636 	if (dd->dc_shutdown) {
8637 		return_code = -ENODEV;
8638 		goto fail;
8639 	}
8640 
8641 	/*
8642 	 * If an 8051 host command timed out previously, then the 8051 is
8643 	 * stuck.
8644 	 *
8645 	 * On first timeout, attempt to reset and restart the entire DC
8646 	 * block (including 8051). (Is this too big of a hammer?)
8647 	 *
8648 	 * If the 8051 times out a second time, the reset did not bring it
8649 	 * back to healthy life. In that case, fail any subsequent commands.
8650 	 */
8651 	if (dd->dc8051_timed_out) {
8652 		if (dd->dc8051_timed_out > 1) {
8653 			dd_dev_err(dd,
8654 				   "Previous 8051 host command timed out, skipping command %u\n",
8655 				   type);
8656 			return_code = -ENXIO;
8657 			goto fail;
8658 		}
8659 		_dc_shutdown(dd);
8660 		_dc_start(dd);
8661 	}
8662 
8663 	/*
8664 	 * If there is no timeout, then the 8051 command interface is
8665 	 * waiting for a command.
8666 	 */
8667 
8668 	/*
8669 	 * When writing a LCB CSR, out_data contains the full value to be
8670 	 * written, while in_data contains the relative LCB address in 7:0.
8671 	 * Do the work here, rather than in the caller, of distributing the
8672 	 * write data to where it needs to go:
8673 	 *
8674 	 * Write data
8675 	 *   39:00 -> in_data[47:8]
8676 	 *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8677 	 *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8678 	 */
8679 	if (type == HCMD_WRITE_LCB_CSR) {
8680 		in_data |= ((*out_data) & 0xffffffffffull) << 8;
8681 		/* must preserve COMPLETED - it is tied to hardware */
8682 		reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8683 		reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8684 		reg |= ((((*out_data) >> 40) & 0xff) <<
8685 				DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8686 		      | ((((*out_data) >> 48) & 0xffff) <<
8687 				DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8688 		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8689 	}
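
	/*
	 * Added worked example of the split above (hypothetical value):
	 * for a 64-bit LCB write value of 0x1122334455667788,
	 *   bits 39:0  (0x4455667788) land in in_data[47:8],
	 *   bits 47:40 (0x33)         land in RETURN_CODE, and
	 *   bits 63:48 (0x1122)       land in RSP_DATA.
	 */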
8690 
8691 	/*
8692 	 * Do two writes: the first to stabilize the type and req_data, the
8693 	 * second to activate.
8694 	 */
8695 	reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8696 			<< DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8697 		| (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8698 			<< DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8699 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8700 	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8701 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8702 
8703 	/* wait for completion, alternate: interrupt */
8704 	timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8705 	while (1) {
8706 		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8707 		completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8708 		if (completed)
8709 			break;
8710 		if (time_after(jiffies, timeout)) {
8711 			dd->dc8051_timed_out++;
8712 			dd_dev_err(dd, "8051 host command %u timeout\n", type);
8713 			if (out_data)
8714 				*out_data = 0;
8715 			return_code = -ETIMEDOUT;
8716 			goto fail;
8717 		}
8718 		udelay(2);
8719 	}
8720 
8721 	if (out_data) {
8722 		*out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8723 				& DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8724 		if (type == HCMD_READ_LCB_CSR) {
8725 			/* top 16 bits are in a different register */
8726 			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8727 				& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8728 				<< (48
8729 				    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8730 		}
8731 	}
8732 	return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8733 				& DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8734 	dd->dc8051_timed_out = 0;
8735 	/*
8736 	 * Clear command for next user.
8737 	 */
8738 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8739 
8740 fail:
8741 	mutex_unlock(&dd->dc8051_lock);
8742 	return return_code;
8743 }
8744 
8745 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8746 {
8747 	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8748 }
8749 
8750 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8751 		     u8 lane_id, u32 config_data)
8752 {
8753 	u64 data;
8754 	int ret;
8755 
8756 	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8757 		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8758 		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
8759 	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8760 	if (ret != HCMD_SUCCESS) {
8761 		dd_dev_err(dd,
8762 			   "load 8051 config: field id %d, lane %d, err %d\n",
8763 			   (int)field_id, (int)lane_id, ret);
8764 	}
8765 	return ret;
8766 }
8767 
8768 /*
8769  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8770  * set the result, even on error.
8771  * Return 0 on success, -errno on failure
8772  */
8773 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8774 		     u32 *result)
8775 {
8776 	u64 big_data;
8777 	u32 addr;
8778 	int ret;
8779 
8780 	/* address start depends on the lane_id */
8781 	if (lane_id < 4)
8782 		addr = (4 * NUM_GENERAL_FIELDS)
8783 			+ (lane_id * 4 * NUM_LANE_FIELDS);
8784 	else
8785 		addr = 0;
8786 	addr += field_id * 4;
8787 
8788 	/* read is in 8-byte chunks, hardware will truncate the address down */
8789 	ret = read_8051_data(dd, addr, 8, &big_data);
8790 
8791 	if (ret == 0) {
8792 		/* extract the 4 bytes we want */
8793 		if (addr & 0x4)
8794 			*result = (u32)(big_data >> 32);
8795 		else
8796 			*result = (u32)big_data;
8797 	} else {
8798 		*result = 0;
8799 		dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8800 			   __func__, lane_id, field_id);
8801 	}
8802 
8803 	return ret;
8804 }
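
/*
 * Added note on the addressing above (not from the original source):
 * each field is 4 bytes; general fields (lane_id >= 4) start at offset
 * 0 and the per-lane fields follow the general block.  Because
 * read_8051_data() works in 8-byte chunks, a computed addr of e.g.
 * 0x14 reads the chunk at 0x10 and, since (addr & 0x4) is set, keeps
 * the upper 4 bytes.
 */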
8805 
8806 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8807 			      u8 continuous)
8808 {
8809 	u32 frame;
8810 
8811 	frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8812 		| power_management << POWER_MANAGEMENT_SHIFT;
8813 	return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8814 				GENERAL_CONFIG, frame);
8815 }
8816 
8817 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8818 				 u16 vl15buf, u8 crc_sizes)
8819 {
8820 	u32 frame;
8821 
8822 	frame = (u32)vau << VAU_SHIFT
8823 		| (u32)z << Z_SHIFT
8824 		| (u32)vcu << VCU_SHIFT
8825 		| (u32)vl15buf << VL15BUF_SHIFT
8826 		| (u32)crc_sizes << CRC_SIZES_SHIFT;
8827 	return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8828 				GENERAL_CONFIG, frame);
8829 }
8830 
8831 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8832 				     u8 *flag_bits, u16 *link_widths)
8833 {
8834 	u32 frame;
8835 
8836 	read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8837 			 &frame);
8838 	*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8839 	*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8840 	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8841 }
8842 
8843 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8844 				     u8 misc_bits,
8845 				     u8 flag_bits,
8846 				     u16 link_widths)
8847 {
8848 	u32 frame;
8849 
8850 	frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8851 		| (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8852 		| (u32)link_widths << LINK_WIDTH_SHIFT;
8853 	return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8854 		     frame);
8855 }
8856 
8857 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8858 				 u8 device_rev)
8859 {
8860 	u32 frame;
8861 
8862 	frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8863 		| ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8864 	return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8865 }
8866 
8867 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8868 				  u8 *device_rev)
8869 {
8870 	u32 frame;
8871 
8872 	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8873 	*device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8874 	*device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8875 			& REMOTE_DEVICE_REV_MASK;
8876 }
8877 
8878 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8879 {
8880 	u32 frame;
8881 	u32 mask;
8882 
8883 	mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8884 	read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8885 	/* Clear, then set field */
8886 	frame &= ~mask;
8887 	frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8888 	return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8889 				frame);
8890 }
8891 
8892 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8893 		      u8 *ver_patch)
8894 {
8895 	u32 frame;
8896 
8897 	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8898 	*ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8899 		STS_FM_VERSION_MAJOR_MASK;
8900 	*ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8901 		STS_FM_VERSION_MINOR_MASK;
8902 
8903 	read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8904 	*ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8905 		STS_FM_VERSION_PATCH_MASK;
8906 }
8907 
8908 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8909 			       u8 *continuous)
8910 {
8911 	u32 frame;
8912 
8913 	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8914 	*power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8915 					& POWER_MANAGEMENT_MASK;
8916 	*continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8917 					& CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8918 }
8919 
8920 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8921 				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8922 {
8923 	u32 frame;
8924 
8925 	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8926 	*vau = (frame >> VAU_SHIFT) & VAU_MASK;
8927 	*z = (frame >> Z_SHIFT) & Z_MASK;
8928 	*vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8929 	*vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8930 	*crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8931 }
8932 
8933 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8934 				      u8 *remote_tx_rate,
8935 				      u16 *link_widths)
8936 {
8937 	u32 frame;
8938 
8939 	read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8940 			 &frame);
8941 	*remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8942 				& REMOTE_TX_RATE_MASK;
8943 	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8944 }
8945 
8946 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8947 {
8948 	u32 frame;
8949 
8950 	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8951 	*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8952 }
8953 
8954 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8955 {
8956 	u32 frame;
8957 
8958 	read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8959 	*mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8960 }
8961 
8962 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8963 {
8964 	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8965 }
8966 
8967 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8968 {
8969 	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8970 }
8971 
8972 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8973 {
8974 	u32 frame;
8975 	int ret;
8976 
8977 	*link_quality = 0;
8978 	if (dd->pport->host_link_state & HLS_UP) {
8979 		ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8980 				       &frame);
8981 		if (ret == 0)
8982 			*link_quality = (frame >> LINK_QUALITY_SHIFT)
8983 						& LINK_QUALITY_MASK;
8984 	}
8985 }
8986 
8987 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8988 {
8989 	u32 frame;
8990 
8991 	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8992 	*pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8993 }
8994 
8995 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8996 {
8997 	u32 frame;
8998 
8999 	read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9000 	*ldr = (frame & 0xff);
9001 }
9002 
9003 static int read_tx_settings(struct hfi1_devdata *dd,
9004 			    u8 *enable_lane_tx,
9005 			    u8 *tx_polarity_inversion,
9006 			    u8 *rx_polarity_inversion,
9007 			    u8 *max_rate)
9008 {
9009 	u32 frame;
9010 	int ret;
9011 
9012 	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9013 	*enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9014 				& ENABLE_LANE_TX_MASK;
9015 	*tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9016 				& TX_POLARITY_INVERSION_MASK;
9017 	*rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9018 				& RX_POLARITY_INVERSION_MASK;
9019 	*max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9020 	return ret;
9021 }
9022 
9023 static int write_tx_settings(struct hfi1_devdata *dd,
9024 			     u8 enable_lane_tx,
9025 			     u8 tx_polarity_inversion,
9026 			     u8 rx_polarity_inversion,
9027 			     u8 max_rate)
9028 {
9029 	u32 frame;
9030 
9031 	/* no need to mask, all variable sizes match field widths */
9032 	frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9033 		| tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9034 		| rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9035 		| max_rate << MAX_RATE_SHIFT;
9036 	return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9037 }
9038 
9039 /*
9040  * Read an idle LCB message.
9041  *
9042  * Returns 0 on success, -EINVAL on error
9043  */
9044 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9045 {
9046 	int ret;
9047 
9048 	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9049 	if (ret != HCMD_SUCCESS) {
9050 		dd_dev_err(dd, "read idle message: type %d, err %d\n",
9051 			   (u32)type, ret);
9052 		return -EINVAL;
9053 	}
9054 	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9055 	/* return only the payload as we already know the type */
9056 	*data_out >>= IDLE_PAYLOAD_SHIFT;
9057 	return 0;
9058 }
9059 
9060 /*
9061  * Read an idle SMA message.  To be done in response to a notification from
9062  * the 8051.
9063  *
9064  * Returns 0 on success, -EINVAL on error
9065  */
9066 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9067 {
9068 	return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9069 				 data);
9070 }
9071 
9072 /*
9073  * Send an idle LCB message.
9074  *
9075  * Returns 0 on success, -EINVAL on error
9076  */
9077 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9078 {
9079 	int ret;
9080 
9081 	dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9082 	ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9083 	if (ret != HCMD_SUCCESS) {
9084 		dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9085 			   data, ret);
9086 		return -EINVAL;
9087 	}
9088 	return 0;
9089 }
9090 
9091 /*
9092  * Send an idle SMA message.
9093  *
9094  * Returns 0 on success, -EINVAL on error
9095  */
9096 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9097 {
9098 	u64 data;
9099 
9100 	data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9101 		((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9102 	return send_idle_message(dd, data);
9103 }
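
/*
 * Added note (not from the original source): send_idle_sma() and
 * read_idle_sma() are symmetric.  The sender packs
 *	data = (masked payload << IDLE_PAYLOAD_SHIFT) |
 *	       ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT)
 * while read_idle_message() strips the type by shifting the received
 * value right by IDLE_PAYLOAD_SHIFT before returning it.
 */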
9104 
9105 /*
9106  * Initialize the LCB then do a quick link up.  This may or may not be
9107  * in loopback.
9108  *
9109  * return 0 on success, -errno on error
9110  */
9111 static int do_quick_linkup(struct hfi1_devdata *dd)
9112 {
9113 	int ret;
9114 
9115 	lcb_shutdown(dd, 0);
9116 
9117 	if (loopback) {
9118 		/* LCB_CFG_LOOPBACK.VAL = 2 */
9119 		/* LCB_CFG_LANE_WIDTH.VAL = 0 */
9120 		write_csr(dd, DC_LCB_CFG_LOOPBACK,
9121 			  IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9122 		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9123 	}
9124 
9125 	/* start the LCBs */
9126 	/* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9127 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9128 
9129 	/* simulator only loopback steps */
9130 	if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9131 		/* LCB_CFG_RUN.EN = 1 */
9132 		write_csr(dd, DC_LCB_CFG_RUN,
9133 			  1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9134 
9135 		ret = wait_link_transfer_active(dd, 10);
9136 		if (ret)
9137 			return ret;
9138 
9139 		write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9140 			  1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9141 	}
9142 
9143 	if (!loopback) {
9144 		/*
9145 		 * When doing quick linkup and not in loopback, both
9146 		 * sides must be done with LCB set-up before either
9147 		 * starts the quick linkup.  Put a delay here so that
9148 		 * both sides can be started and have a chance to be
9149 		 * done with LCB set up before resuming.
9150 		 */
9151 		dd_dev_err(dd,
9152 			   "Pausing for peer to be finished with LCB set up\n");
9153 		msleep(5000);
9154 		dd_dev_err(dd, "Continuing with quick linkup\n");
9155 	}
9156 
9157 	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9158 	set_8051_lcb_access(dd);
9159 
9160 	/*
9161 	 * State "quick" LinkUp request sets the physical link state to
9162 	 * LinkUp without a verify capability sequence.
9163 	 * This state is in simulator v37 and later.
9164 	 */
9165 	ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9166 	if (ret != HCMD_SUCCESS) {
9167 		dd_dev_err(dd,
9168 			   "%s: set physical link state to quick LinkUp failed with return %d\n",
9169 			   __func__, ret);
9170 
9171 		set_host_lcb_access(dd);
9172 		write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9173 
9174 		if (ret >= 0)
9175 			ret = -EINVAL;
9176 		return ret;
9177 	}
9178 
9179 	return 0; /* success */
9180 }
9181 
9182 /*
9183  * Set the SerDes to internal loopback mode.
9184  * Returns 0 on success, -errno on error.
9185  */
9186 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9187 {
9188 	int ret;
9189 
9190 	ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9191 	if (ret == HCMD_SUCCESS)
9192 		return 0;
9193 	dd_dev_err(dd,
9194 		   "Set physical link state to SerDes Loopback failed with return %d\n",
9195 		   ret);
9196 	if (ret >= 0)
9197 		ret = -EINVAL;
9198 	return ret;
9199 }
9200 
9201 /*
9202  * Do all special steps to set up loopback.
9203  */
9204 static int init_loopback(struct hfi1_devdata *dd)
9205 {
9206 	dd_dev_info(dd, "Entering loopback mode\n");
9207 
9208 	/* all loopbacks should disable self GUID check */
9209 	write_csr(dd, DC_DC8051_CFG_MODE,
9210 		  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9211 
9212 	/*
9213 	 * The simulator has only one loopback option - LCB.  Switch
9214 	 * to that option, which includes quick link up.
9215 	 *
9216 	 * Accept all valid loopback values.
9217 	 */
9218 	if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9219 	    (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9220 	     loopback == LOOPBACK_CABLE)) {
9221 		loopback = LOOPBACK_LCB;
9222 		quick_linkup = 1;
9223 		return 0;
9224 	}
9225 
9226 	/* handle serdes loopback */
9227 	if (loopback == LOOPBACK_SERDES) {
9228 		/* internal serdes loopback needs quick linkup on RTL */
9229 		if (dd->icode == ICODE_RTL_SILICON)
9230 			quick_linkup = 1;
9231 		return set_serdes_loopback_mode(dd);
9232 	}
9233 
9234 	/* LCB loopback - handled at poll time */
9235 	if (loopback == LOOPBACK_LCB) {
9236 		quick_linkup = 1; /* LCB is always quick linkup */
9237 
9238 		/* not supported in emulation due to emulation RTL changes */
9239 		if (dd->icode == ICODE_FPGA_EMULATION) {
9240 			dd_dev_err(dd,
9241 				   "LCB loopback not supported in emulation\n");
9242 			return -EINVAL;
9243 		}
9244 		return 0;
9245 	}
9246 
9247 	/* external cable loopback requires no extra steps */
9248 	if (loopback == LOOPBACK_CABLE)
9249 		return 0;
9250 
9251 	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9252 	return -EINVAL;
9253 }
9254 
9255 /*
9256  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9257  * used in the Verify Capability link width attribute.
9258  */
9259 static u16 opa_to_vc_link_widths(u16 opa_widths)
9260 {
9261 	int i;
9262 	u16 result = 0;
9263 
9264 	static const struct link_bits {
9265 		u16 from;
9266 		u16 to;
9267 	} opa_link_xlate[] = {
9268 		{ OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9269 		{ OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9270 		{ OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9271 		{ OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9272 	};
9273 
9274 	for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9275 		if (opa_widths & opa_link_xlate[i].from)
9276 			result |= opa_link_xlate[i].to;
9277 	}
9278 	return result;
9279 }
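
/*
 * Added worked example for the translation above: if the FM enables
 * OPA_LINK_WIDTH_1X and OPA_LINK_WIDTH_4X, the table walk sets bits 0
 * and 3, so the Verify Capability width field becomes 0x9.
 */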
9280 
9281 /*
9282  * Set link attributes before moving to polling.
9283  */
9284 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9285 {
9286 	struct hfi1_devdata *dd = ppd->dd;
9287 	u8 enable_lane_tx;
9288 	u8 tx_polarity_inversion;
9289 	u8 rx_polarity_inversion;
9290 	int ret;
9291 
9292 	/* reset our fabric serdes to clear any lingering problems */
9293 	fabric_serdes_reset(dd);
9294 
9295 	/* set the local tx rate - need to read-modify-write */
9296 	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9297 			       &rx_polarity_inversion, &ppd->local_tx_rate);
9298 	if (ret)
9299 		goto set_local_link_attributes_fail;
9300 
9301 	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9302 		/* set the tx rate to the fastest enabled */
9303 		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9304 			ppd->local_tx_rate = 1;
9305 		else
9306 			ppd->local_tx_rate = 0;
9307 	} else {
9308 		/* set the tx rate to all enabled */
9309 		ppd->local_tx_rate = 0;
9310 		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9311 			ppd->local_tx_rate |= 2;
9312 		if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9313 			ppd->local_tx_rate |= 1;
9314 	}
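
	/*
	 * Added example (not from the original source): on 8051 firmware
	 * 0.20 or newer with both 25G and 12.5G enabled, local_tx_rate
	 * ends up as 0x3 (bit 1 = 25G, bit 0 = 12.5G); older firmware
	 * only carries a single fastest-rate selector.
	 */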
9315 
9316 	enable_lane_tx = 0xF; /* enable all four lanes */
9317 	ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9318 				rx_polarity_inversion, ppd->local_tx_rate);
9319 	if (ret != HCMD_SUCCESS)
9320 		goto set_local_link_attributes_fail;
9321 
9322 	/*
9323 	 * DC supports continuous updates.
9324 	 */
9325 	ret = write_vc_local_phy(dd,
9326 				 0 /* no power management */,
9327 				 1 /* continuous updates */);
9328 	if (ret != HCMD_SUCCESS)
9329 		goto set_local_link_attributes_fail;
9330 
9331 	/* z=1 in the next call: AU of 0 is not supported by the hardware */
9332 	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9333 				    ppd->port_crc_mode_enabled);
9334 	if (ret != HCMD_SUCCESS)
9335 		goto set_local_link_attributes_fail;
9336 
9337 	ret = write_vc_local_link_width(dd, 0, 0,
9338 					opa_to_vc_link_widths(
9339 						ppd->link_width_enabled));
9340 	if (ret != HCMD_SUCCESS)
9341 		goto set_local_link_attributes_fail;
9342 
9343 	/* let peer know who we are */
9344 	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9345 	if (ret == HCMD_SUCCESS)
9346 		return 0;
9347 
9348 set_local_link_attributes_fail:
9349 	dd_dev_err(dd,
9350 		   "Failed to set local link attributes, return 0x%x\n",
9351 		   ret);
9352 	return ret;
9353 }
9354 
9355 /*
9356  * Call this to start the link.
9357  * Do not do anything if the link is disabled.
9358  * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9359  */
9360 int start_link(struct hfi1_pportdata *ppd)
9361 {
9362 	/*
9363 	 * Tune the SerDes to a ballpark setting for optimal signal and bit
9364 	 * error rate.  Needs to be done before starting the link.
9365 	 */
9366 	tune_serdes(ppd);
9367 
9368 	if (!ppd->driver_link_ready) {
9369 		dd_dev_info(ppd->dd,
9370 			    "%s: stopping link start because driver is not ready\n",
9371 			    __func__);
9372 		return 0;
9373 	}
9374 
9375 	/*
9376 	 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9377 	 * pkey table can be configured properly if the HFI unit is connected
9378 	 * to a switch port with MgmtAllowed=NO
9379 	 */
9380 	clear_full_mgmt_pkey(ppd);
9381 
9382 	return set_link_state(ppd, HLS_DN_POLL);
9383 }
9384 
9385 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9386 {
9387 	struct hfi1_devdata *dd = ppd->dd;
9388 	u64 mask;
9389 	unsigned long timeout;
9390 
9391 	/*
9392 	 * Some QSFP cables have a quirk that asserts the IntN line as a side
9393 	 * effect of power up on plug-in. We ignore this false positive
9394 	 * interrupt until the module has finished powering up by waiting for
9395 	 * a minimum timeout of the module inrush initialization time of
9396 	 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9397 	 * module have stabilized.
9398 	 */
9399 	msleep(500);
9400 
9401 	/*
9402 	 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9403 	 */
9404 	timeout = jiffies + msecs_to_jiffies(2000);
9405 	while (1) {
9406 		mask = read_csr(dd, dd->hfi1_id ?
9407 				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9408 		if (!(mask & QSFP_HFI0_INT_N))
9409 			break;
9410 		if (time_after(jiffies, timeout)) {
9411 			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9412 				    __func__);
9413 			break;
9414 		}
9415 		udelay(2);
9416 	}
9417 }
9418 
9419 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9420 {
9421 	struct hfi1_devdata *dd = ppd->dd;
9422 	u64 mask;
9423 
9424 	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9425 	if (enable) {
9426 		/*
9427 		 * Clear the status register to avoid an immediate interrupt
9428 		 * when we re-enable the IntN pin
9429 		 */
9430 		write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9431 			  QSFP_HFI0_INT_N);
9432 		mask |= (u64)QSFP_HFI0_INT_N;
9433 	} else {
9434 		mask &= ~(u64)QSFP_HFI0_INT_N;
9435 	}
9436 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9437 }
9438 
9439 int reset_qsfp(struct hfi1_pportdata *ppd)
9440 {
9441 	struct hfi1_devdata *dd = ppd->dd;
9442 	u64 mask, qsfp_mask;
9443 
9444 	/* Disable INT_N from triggering QSFP interrupts */
9445 	set_qsfp_int_n(ppd, 0);
9446 
9447 	/* Reset the QSFP */
9448 	mask = (u64)QSFP_HFI0_RESET_N;
9449 
9450 	qsfp_mask = read_csr(dd,
9451 			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9452 	qsfp_mask &= ~mask;
9453 	write_csr(dd,
9454 		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9455 
9456 	udelay(10);
9457 
9458 	qsfp_mask |= mask;
9459 	write_csr(dd,
9460 		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9461 
9462 	wait_for_qsfp_init(ppd);
9463 
9464 	/*
9465 	 * Allow INT_N to trigger the QSFP interrupt to watch
9466 	 * for alarms and warnings
9467 	 */
9468 	set_qsfp_int_n(ppd, 1);
9469 
9470 	/*
9471 	 * After the reset, AOC transmitters are enabled by default. They need
9472 	 * to be turned off to complete the QSFP setup before they can be
9473 	 * enabled again.
9474 	 */
9475 	return set_qsfp_tx(ppd, 0);
9476 }
9477 
9478 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9479 					u8 *qsfp_interrupt_status)
9480 {
9481 	struct hfi1_devdata *dd = ppd->dd;
9482 
9483 	if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9484 	    (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9485 		dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9486 			   __func__);
9487 
9488 	if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9489 	    (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9490 		dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9491 			   __func__);
9492 
9493 	/*
9494 	 * The remaining alarms/warnings don't matter if the link is down.
9495 	 */
9496 	if (ppd->host_link_state & HLS_DOWN)
9497 		return 0;
9498 
9499 	if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9500 	    (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9501 		dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9502 			   __func__);
9503 
9504 	if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9505 	    (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9506 		dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9507 			   __func__);
9508 
9509 	/* Byte 2 is vendor specific */
9510 
9511 	if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9512 	    (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9513 		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9514 			   __func__);
9515 
9516 	if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9517 	    (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9518 		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9519 			   __func__);
9520 
9521 	if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9522 	    (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9523 		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9524 			   __func__);
9525 
9526 	if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9527 	    (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9528 		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9529 			   __func__);
9530 
9531 	if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9532 	    (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9533 		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9534 			   __func__);
9535 
9536 	if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9537 	    (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9538 		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9539 			   __func__);
9540 
9541 	if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9542 	    (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9543 		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9544 			   __func__);
9545 
9546 	if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9547 	    (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9548 		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9549 			   __func__);
9550 
9551 	if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9552 	    (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9553 		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9554 			   __func__);
9555 
9556 	if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9557 	    (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9558 		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9559 			   __func__);
9560 
9561 	if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9562 	    (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9563 		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9564 			   __func__);
9565 
9566 	if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9567 	    (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9568 		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9569 			   __func__);
9570 
9571 	/* Bytes 9-10 and 11-12 are reserved */
9572 	/* Bytes 13-15 are vendor specific */
9573 
9574 	return 0;
9575 }
9576 
9577 /* This routine will only be scheduled if the QSFP module present is asserted */
9578 void qsfp_event(struct work_struct *work)
9579 {
9580 	struct qsfp_data *qd;
9581 	struct hfi1_pportdata *ppd;
9582 	struct hfi1_devdata *dd;
9583 
9584 	qd = container_of(work, struct qsfp_data, qsfp_work);
9585 	ppd = qd->ppd;
9586 	dd = ppd->dd;
9587 
9588 	/* Sanity check */
9589 	if (!qsfp_mod_present(ppd))
9590 		return;
9591 
9592 	if (ppd->host_link_state == HLS_DN_DISABLE) {
9593 		dd_dev_info(ppd->dd,
9594 			    "%s: stopping link start because link is disabled\n",
9595 			    __func__);
9596 		return;
9597 	}
9598 
9599 	/*
9600 	 * Turn DC back on after cable has been re-inserted. Up until
9601 	 * now, the DC has been in reset to save power.
9602 	 */
9603 	dc_start(dd);
9604 
9605 	if (qd->cache_refresh_required) {
9606 		set_qsfp_int_n(ppd, 0);
9607 
9608 		wait_for_qsfp_init(ppd);
9609 
9610 		/*
9611 		 * Allow INT_N to trigger the QSFP interrupt to watch
9612 		 * for alarms and warnings
9613 		 */
9614 		set_qsfp_int_n(ppd, 1);
9615 
9616 		start_link(ppd);
9617 	}
9618 
9619 	if (qd->check_interrupt_flags) {
9620 		u8 qsfp_interrupt_status[16] = {0,};
9621 
9622 		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9623 				  &qsfp_interrupt_status[0], 16) != 16) {
9624 			dd_dev_info(dd,
9625 				    "%s: Failed to read status of QSFP module\n",
9626 				    __func__);
9627 		} else {
9628 			unsigned long flags;
9629 
9630 			handle_qsfp_error_conditions(
9631 					ppd, qsfp_interrupt_status);
9632 			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9633 			ppd->qsfp_info.check_interrupt_flags = 0;
9634 			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9635 					       flags);
9636 		}
9637 	}
9638 }
9639 
9640 static void init_qsfp_int(struct hfi1_devdata *dd)
9641 {
9642 	struct hfi1_pportdata *ppd = dd->pport;
9643 	u64 qsfp_mask, cce_int_mask;
9644 	const int qsfp1_int_smask = QSFP1_INT % 64;
9645 	const int qsfp2_int_smask = QSFP2_INT % 64;
9646 
9647 	/*
9648 	 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9649 	 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9650 	 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9651 	 * the index of the appropriate CSR in the CCEIntMask CSR array
9652 	 */
9653 	cce_int_mask = read_csr(dd, CCE_INT_MASK +
9654 				(8 * (QSFP1_INT / 64)));
9655 	if (dd->hfi1_id) {
9656 		cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9657 		write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9658 			  cce_int_mask);
9659 	} else {
9660 		cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9661 		write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9662 			  cce_int_mask);
9663 	}
9664 
9665 	qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9666 	/* Clear current status to avoid spurious interrupts */
9667 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9668 		  qsfp_mask);
9669 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9670 		  qsfp_mask);
9671 
9672 	set_qsfp_int_n(ppd, 0);
9673 
9674 	/* Handle active low nature of INT_N and MODPRST_N pins */
9675 	if (qsfp_mod_present(ppd))
9676 		qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9677 	write_csr(dd,
9678 		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9679 		  qsfp_mask);
9680 }
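
/*
 * Added note on the mask arithmetic above (illustrative numbers only):
 * the CCEIntMask CSR index is QSFP1_INT / 64 and the bit within that
 * CSR is QSFP?_INT % 64; e.g. a hypothetical QSFP1_INT of 200 would
 * sit in mask CSR 3, bit 8.  Each HFI keeps only its own QSFP source
 * enabled and masks the other.
 */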
9681 
9682 /*
9683  * Do a one-time initialize of the LCB block.
9684  */
9685 static void init_lcb(struct hfi1_devdata *dd)
9686 {
9687 	/* simulator does not correctly handle LCB cclk loopback, skip */
9688 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9689 		return;
9690 
9691 	/* the DC has been reset earlier in the driver load */
9692 
9693 	/* set LCB for cclk loopback on the port */
9694 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9695 	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9696 	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9697 	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9698 	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9699 	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9700 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9701 }
9702 
9703 /*
9704  * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9705  * on error.
9706  */
9707 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9708 {
9709 	int ret;
9710 	u8 status;
9711 
9712 	/*
9713 	 * Report success if this is not a QSFP port or if it is a QSFP but
9714 	 * the cable is not present
9715 	 */
9716 	if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9717 		return 0;
9718 
9719 	/* read byte 2, the status byte */
9720 	ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9721 	if (ret < 0)
9722 		return ret;
9723 	if (ret != 1)
9724 		return -EIO;
9725 
9726 	return 0; /* success */
9727 }
9728 
9729 /*
9730  * Values for QSFP retry.
9731  *
9732  * Give up after 10s (20 x 500ms).  The overall timeout was empirically
9733  * arrived at from experience on a large cluster.
9734  */
9735 #define MAX_QSFP_RETRIES 20
9736 #define QSFP_RETRY_WAIT 500 /* msec */
9737 
9738 /*
9739  * Try a QSFP read.  If it fails, schedule a retry for later.
9740  * Called on first link activation after driver load.
9741  */
9742 static void try_start_link(struct hfi1_pportdata *ppd)
9743 {
9744 	if (test_qsfp_read(ppd)) {
9745 		/* read failed */
9746 		if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9747 			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9748 			return;
9749 		}
9750 		dd_dev_info(ppd->dd,
9751 			    "QSFP not responding, waiting and retrying %d\n",
9752 			    (int)ppd->qsfp_retry_count);
9753 		ppd->qsfp_retry_count++;
9754 		queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9755 				   msecs_to_jiffies(QSFP_RETRY_WAIT));
9756 		return;
9757 	}
9758 	ppd->qsfp_retry_count = 0;
9759 
9760 	start_link(ppd);
9761 }
9762 
9763 /*
9764  * Workqueue function to start the link after a delay.
9765  */
9766 void handle_start_link(struct work_struct *work)
9767 {
9768 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9769 						  start_link_work.work);
9770 	try_start_link(ppd);
9771 }
9772 
9773 int bringup_serdes(struct hfi1_pportdata *ppd)
9774 {
9775 	struct hfi1_devdata *dd = ppd->dd;
9776 	u64 guid;
9777 	int ret;
9778 
9779 	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9780 		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9781 
9782 	guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9783 	if (!guid) {
9784 		if (dd->base_guid)
9785 			guid = dd->base_guid + ppd->port - 1;
9786 		ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9787 	}
9788 
9789 	/* Set linkinit_reason on power up per OPA spec */
9790 	ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9791 
9792 	/* one-time init of the LCB */
9793 	init_lcb(dd);
9794 
9795 	if (loopback) {
9796 		ret = init_loopback(dd);
9797 		if (ret < 0)
9798 			return ret;
9799 	}
9800 
9801 	get_port_type(ppd);
9802 	if (ppd->port_type == PORT_TYPE_QSFP) {
9803 		set_qsfp_int_n(ppd, 0);
9804 		wait_for_qsfp_init(ppd);
9805 		set_qsfp_int_n(ppd, 1);
9806 	}
9807 
9808 	try_start_link(ppd);
9809 	return 0;
9810 }
9811 
9812 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9813 {
9814 	struct hfi1_devdata *dd = ppd->dd;
9815 
9816 	/*
9817 	 * Shut down the link and keep it down.  First clear the flag that
9818 	 * says the driver wants to allow the link to be up (driver_link_ready).
9819 	 * Then make sure the link is not automatically restarted
9820 	 * (link_enabled).  Cancel any pending restart.  And finally
9821 	 * go offline.
9822 	 */
9823 	ppd->driver_link_ready = 0;
9824 	ppd->link_enabled = 0;
9825 
9826 	ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9827 	flush_delayed_work(&ppd->start_link_work);
9828 	cancel_delayed_work_sync(&ppd->start_link_work);
9829 
9830 	ppd->offline_disabled_reason =
9831 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9832 	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9833 			     OPA_LINKDOWN_REASON_SMA_DISABLED);
9834 	set_link_state(ppd, HLS_DN_OFFLINE);
9835 
9836 	/* disable the port */
9837 	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9838 	cancel_work_sync(&ppd->freeze_work);
9839 }
9840 
9841 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9842 {
9843 	struct hfi1_pportdata *ppd;
9844 	int i;
9845 
9846 	ppd = (struct hfi1_pportdata *)(dd + 1);
9847 	for (i = 0; i < dd->num_pports; i++, ppd++) {
9848 		ppd->ibport_data.rvp.rc_acks = NULL;
9849 		ppd->ibport_data.rvp.rc_qacks = NULL;
9850 		ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9851 		ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9852 		ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9853 		if (!ppd->ibport_data.rvp.rc_acks ||
9854 		    !ppd->ibport_data.rvp.rc_delayed_comp ||
9855 		    !ppd->ibport_data.rvp.rc_qacks)
9856 			return -ENOMEM;
9857 	}
9858 
9859 	return 0;
9860 }
9861 
9862 /*
9863  * index is the index into the receive array
9864  */
9865 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9866 		  u32 type, unsigned long pa, u16 order)
9867 {
9868 	u64 reg;
9869 
9870 	if (!(dd->flags & HFI1_PRESENT))
9871 		goto done;
9872 
9873 	if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9874 		pa = 0;
9875 		order = 0;
9876 	} else if (type > PT_INVALID) {
9877 		dd_dev_err(dd,
9878 			   "unexpected receive array type %u for index %u, not handled\n",
9879 			   type, index);
9880 		goto done;
9881 	}
9882 	trace_hfi1_put_tid(dd, index, type, pa, order);
9883 
9884 #define RT_ADDR_SHIFT 12	/* 4KB kernel address boundary */
9885 	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9886 		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9887 		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9888 					<< RCV_ARRAY_RT_ADDR_SHIFT;
9889 	trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9890 	writeq(reg, dd->rcvarray_wc + (index * 8));
9891 
9892 	if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9893 		/*
9894 		 * Eager entries are written and flushed
9895 		 *
9896 		 * Expected entries are flushed every 4 writes
9897 		 */
9898 		flush_wc();
9899 done:
9900 	return;
9901 }
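
/*
 * Added worked example of the encoding above (hypothetical address):
 * a 4 KB-aligned buffer at physical address 0x12345000 with order 0
 * yields (pa >> RT_ADDR_SHIFT) == 0x12345 in the RT_ADDR field, with
 * WRITE_ENABLE set and BUF_SIZE 0.
 */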
9902 
9903 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9904 {
9905 	struct hfi1_devdata *dd = rcd->dd;
9906 	u32 i;
9907 
9908 	/* this could be optimized */
9909 	for (i = rcd->eager_base; i < rcd->eager_base +
9910 		     rcd->egrbufs.alloced; i++)
9911 		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9912 
9913 	for (i = rcd->expected_base;
9914 			i < rcd->expected_base + rcd->expected_count; i++)
9915 		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9916 }
9917 
9918 static const char * const ib_cfg_name_strings[] = {
9919 	"HFI1_IB_CFG_LIDLMC",
9920 	"HFI1_IB_CFG_LWID_DG_ENB",
9921 	"HFI1_IB_CFG_LWID_ENB",
9922 	"HFI1_IB_CFG_LWID",
9923 	"HFI1_IB_CFG_SPD_ENB",
9924 	"HFI1_IB_CFG_SPD",
9925 	"HFI1_IB_CFG_RXPOL_ENB",
9926 	"HFI1_IB_CFG_LREV_ENB",
9927 	"HFI1_IB_CFG_LINKLATENCY",
9928 	"HFI1_IB_CFG_HRTBT",
9929 	"HFI1_IB_CFG_OP_VLS",
9930 	"HFI1_IB_CFG_VL_HIGH_CAP",
9931 	"HFI1_IB_CFG_VL_LOW_CAP",
9932 	"HFI1_IB_CFG_OVERRUN_THRESH",
9933 	"HFI1_IB_CFG_PHYERR_THRESH",
9934 	"HFI1_IB_CFG_LINKDEFAULT",
9935 	"HFI1_IB_CFG_PKEYS",
9936 	"HFI1_IB_CFG_MTU",
9937 	"HFI1_IB_CFG_LSTATE",
9938 	"HFI1_IB_CFG_VL_HIGH_LIMIT",
9939 	"HFI1_IB_CFG_PMA_TICKS",
9940 	"HFI1_IB_CFG_PORT"
9941 };
9942 
9943 static const char *ib_cfg_name(int which)
9944 {
9945 	if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9946 		return "invalid";
9947 	return ib_cfg_name_strings[which];
9948 }
9949 
9950 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9951 {
9952 	struct hfi1_devdata *dd = ppd->dd;
9953 	int val = 0;
9954 
9955 	switch (which) {
9956 	case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9957 		val = ppd->link_width_enabled;
9958 		break;
9959 	case HFI1_IB_CFG_LWID: /* currently active Link-width */
9960 		val = ppd->link_width_active;
9961 		break;
9962 	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9963 		val = ppd->link_speed_enabled;
9964 		break;
9965 	case HFI1_IB_CFG_SPD: /* current Link speed */
9966 		val = ppd->link_speed_active;
9967 		break;
9968 
9969 	case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9970 	case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9971 	case HFI1_IB_CFG_LINKLATENCY:
9972 		goto unimplemented;
9973 
9974 	case HFI1_IB_CFG_OP_VLS:
9975 		val = ppd->actual_vls_operational;
9976 		break;
9977 	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9978 		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9979 		break;
9980 	case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9981 		val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9982 		break;
9983 	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9984 		val = ppd->overrun_threshold;
9985 		break;
9986 	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9987 		val = ppd->phy_error_threshold;
9988 		break;
9989 	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9990 		val = dd->link_default;
9991 		break;
9992 
9993 	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9994 	case HFI1_IB_CFG_PMA_TICKS:
9995 	default:
9996 unimplemented:
9997 		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9998 			dd_dev_info(
9999 				dd,
10000 				"%s: which %s: not implemented\n",
10001 				__func__,
10002 				ib_cfg_name(which));
10003 		break;
10004 	}
10005 
10006 	return val;
10007 }
10008 
10009 /*
10010  * The largest MAD packet size.
10011  */
10012 #define MAX_MAD_PACKET 2048
10013 
10014 /*
10015  * Return the maximum header bytes that can go on the _wire_
10016  * for this device. This count includes the ICRC which is
10017  * not part of the packet held in memory but it is appended
10018  * by the HW.
10019  * This is dependent on the device's receive header entry size.
10020  * HFI allows this to be set per-receive context, but the
10021  * driver presently enforces a global value.
10022  */
10023 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10024 {
10025 	/*
10026 	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
10027 	 * the Receive Header Entry Size minus the PBC (or RHF) size
10028 	 * plus one DW for the ICRC appended by HW.
10029 	 *
10030 	 * dd->rcd[0].rcvhdrqentsize is in DW.
10031 	 * We use rcd[0] as all contexts will have the same value. Also,
10032 	 * the first kernel context would have been allocated by now so
10033 	 * we are guaranteed a valid value.
10034 	 */
10035 	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
10036 }
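/*
 * Illustrative arithmetic (the entry size below is assumed for the example,
 * not read from hardware): with a receive header entry size of 32 DW, the
 * maximum wire header is (32 - 2 + 1) << 2 = 124 bytes.
 */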
10037 
10038 /*
10039  * Set Send Length
10040  * @ppd - per port data
10041  *
10042  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
10043  * registers compare against LRH.PktLen, so use the max bytes included
10044  * in the LRH.
10045  *
10046  * This routine changes all VL values except VL15, which it maintains at
10047  * the same value.
10048  */
10049 static void set_send_length(struct hfi1_pportdata *ppd)
10050 {
10051 	struct hfi1_devdata *dd = ppd->dd;
10052 	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10053 	u32 maxvlmtu = dd->vld[15].mtu;
10054 	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10055 			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10056 		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10057 	int i, j;
10058 	u32 thres;
10059 
10060 	for (i = 0; i < ppd->vls_supported; i++) {
10061 		if (dd->vld[i].mtu > maxvlmtu)
10062 			maxvlmtu = dd->vld[i].mtu;
10063 		if (i <= 3)
10064 			len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10065 				 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10066 				((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10067 		else
10068 			len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10069 				 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10070 				((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10071 	}
10072 	write_csr(dd, SEND_LEN_CHECK0, len1);
10073 	write_csr(dd, SEND_LEN_CHECK1, len2);
10074 	/* adjust kernel credit return thresholds based on new MTUs */
10075 	/* all kernel receive contexts have the same hdrqentsize */
10076 	for (i = 0; i < ppd->vls_supported; i++) {
10077 		thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10078 			    sc_mtu_to_threshold(dd->vld[i].sc,
10079 						dd->vld[i].mtu,
10080 						dd->rcd[0]->rcvhdrqentsize));
10081 		for (j = 0; j < INIT_SC_PER_VL; j++)
10082 			sc_set_cr_threshold(
10083 					pio_select_send_context_vl(dd, j, i),
10084 					    thres);
10085 	}
10086 	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10087 		    sc_mtu_to_threshold(dd->vld[15].sc,
10088 					dd->vld[15].mtu,
10089 					dd->rcd[0]->rcvhdrqentsize));
10090 	sc_set_cr_threshold(dd->vld[15].sc, thres);
10091 
10092 	/* Adjust maximum MTU for the port in DC */
10093 	dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10094 		(ilog2(maxvlmtu >> 8) + 1);
10095 	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10096 	len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10097 	len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10098 		DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10099 	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10100 }
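/*
 * Illustrative arithmetic (MTU and header size are assumed values): for a
 * data VL with an 8192-byte MTU and a 124-byte maximum LRH header, the limit
 * programmed above is (8192 + 124) >> 2 = 2079 DW, which is the value the
 * SendLenCheck* registers compare against LRH.PktLen.
 */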
10101 
10102 static void set_lidlmc(struct hfi1_pportdata *ppd)
10103 {
10104 	int i;
10105 	u64 sreg = 0;
10106 	struct hfi1_devdata *dd = ppd->dd;
10107 	u32 mask = ~((1U << ppd->lmc) - 1);
10108 	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10109 	u32 lid;
10110 
10111 	/*
10112 	 * Program 0 in the CSR if the port LID is extended. This prevents
10113 	 * 9B packets from being sent out for large LIDs.
10114 	 */
10115 	lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10116 	c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10117 		| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10118 	c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10119 			<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10120 	      ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10121 			<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10122 	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10123 
10124 	/*
10125 	 * Iterate over all the send contexts and set their SLID check
10126 	 */
10127 	sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10128 			SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10129 	       (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10130 			SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10131 
10132 	for (i = 0; i < dd->chip_send_contexts; i++) {
10133 		hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10134 			  i, (u32)sreg);
10135 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10136 	}
10137 
10138 	/* Now we have to do the same thing for the sdma engines */
10139 	sdma_update_lmc(dd, mask, lid);
10140 }
10141 
10142 static const char *state_completed_string(u32 completed)
10143 {
10144 	static const char * const state_completed[] = {
10145 		"EstablishComm",
10146 		"OptimizeEQ",
10147 		"VerifyCap"
10148 	};
10149 
10150 	if (completed < ARRAY_SIZE(state_completed))
10151 		return state_completed[completed];
10152 
10153 	return "unknown";
10154 }
10155 
10156 static const char all_lanes_dead_timeout_expired[] =
10157 	"All lanes were inactive – was the interconnect media removed?";
10158 static const char tx_out_of_policy[] =
10159 	"Passing lanes on local port do not meet the local link width policy";
10160 static const char no_state_complete[] =
10161 	"State timeout occurred before link partner completed the state";
10162 static const char * const state_complete_reasons[] = {
10163 	[0x00] = "Reason unknown",
10164 	[0x01] = "Link was halted by driver, refer to LinkDownReason",
10165 	[0x02] = "Link partner reported failure",
10166 	[0x10] = "Unable to achieve frame sync on any lane",
10167 	[0x11] =
10168 	  "Unable to find a common bit rate with the link partner",
10169 	[0x12] =
10170 	  "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10171 	[0x13] =
10172 	  "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10173 	[0x14] = no_state_complete,
10174 	[0x15] =
10175 	  "State timeout occurred before link partner identified equalization presets",
10176 	[0x16] =
10177 	  "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10178 	[0x17] = tx_out_of_policy,
10179 	[0x20] = all_lanes_dead_timeout_expired,
10180 	[0x21] =
10181 	  "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10182 	[0x22] = no_state_complete,
10183 	[0x23] =
10184 	  "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10185 	[0x24] = tx_out_of_policy,
10186 	[0x30] = all_lanes_dead_timeout_expired,
10187 	[0x31] =
10188 	  "State timeout occurred waiting for host to process received frames",
10189 	[0x32] = no_state_complete,
10190 	[0x33] =
10191 	  "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10192 	[0x34] = tx_out_of_policy,
10193 };
10194 
10195 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10196 						     u32 code)
10197 {
10198 	const char *str = NULL;
10199 
10200 	if (code < ARRAY_SIZE(state_complete_reasons))
10201 		str = state_complete_reasons[code];
10202 
10203 	if (str)
10204 		return str;
10205 	return "Reserved";
10206 }
10207 
10208 /* describe the given last state complete frame */
10209 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10210 				  const char *prefix)
10211 {
10212 	struct hfi1_devdata *dd = ppd->dd;
10213 	u32 success;
10214 	u32 state;
10215 	u32 reason;
10216 	u32 lanes;
10217 
10218 	/*
10219 	 * Decode frame:
10220 	 *  [ 0: 0] - success
10221 	 *  [ 3: 1] - state
10222 	 *  [ 7: 4] - next state timeout
10223 	 *  [15: 8] - reason code
10224 	 *  [31:16] - lanes
10225 	 */
10226 	success = frame & 0x1;
10227 	state = (frame >> 1) & 0x7;
10228 	reason = (frame >> 8) & 0xff;
10229 	lanes = (frame >> 16) & 0xffff;
10230 
10231 	dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10232 		   prefix, frame);
10233 	dd_dev_err(dd, "    last reported state state: %s (0x%x)\n",
10234 		   state_completed_string(state), state);
10235 	dd_dev_err(dd, "    state successfully completed: %s\n",
10236 		   success ? "yes" : "no");
10237 	dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10238 		   reason, state_complete_reason_code_string(ppd, reason));
10239 	dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
10240 }
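/*
 * Example decode (the frame value is made up for illustration): frame
 * 0x00ff2205 decodes to success = 1, state = 2 ("VerifyCap"), reason = 0x22
 * ("State timeout occurred before link partner completed the state"), and
 * passing lane mask 0x00ff.
 */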
10241 
10242 /*
10243  * Read the last state complete frames and explain them.  This routine
10244  * expects to be called if the link went down during link negotiation
10245  * and initialization (LNI).  That is, anywhere between polling and link up.
10246  */
10247 static void check_lni_states(struct hfi1_pportdata *ppd)
10248 {
10249 	u32 last_local_state;
10250 	u32 last_remote_state;
10251 
10252 	read_last_local_state(ppd->dd, &last_local_state);
10253 	read_last_remote_state(ppd->dd, &last_remote_state);
10254 
10255 	/*
10256 	 * Don't report anything if there is nothing to report.  A value of
10257 	 * 0 means the link was taken down while polling and there was no
10258 	 * training in-process.
10259 	 */
10260 	if (last_local_state == 0 && last_remote_state == 0)
10261 		return;
10262 
10263 	decode_state_complete(ppd, last_local_state, "transmitted");
10264 	decode_state_complete(ppd, last_remote_state, "received");
10265 }
10266 
10267 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10268 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10269 {
10270 	u64 reg;
10271 	unsigned long timeout;
10272 
10273 	/* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10274 	timeout = jiffies + msecs_to_jiffies(wait_ms);
10275 	while (1) {
10276 		reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10277 		if (reg)
10278 			break;
10279 		if (time_after(jiffies, timeout)) {
10280 			dd_dev_err(dd,
10281 				   "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10282 			return -ETIMEDOUT;
10283 		}
10284 		udelay(2);
10285 	}
10286 	return 0;
10287 }
10288 
10289 /* called when the logical link state is not down as it should be */
10290 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10291 {
10292 	struct hfi1_devdata *dd = ppd->dd;
10293 
10294 	/*
10295 	 * Bring link up in LCB loopback
10296 	 */
10297 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10298 	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10299 		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10300 
10301 	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10302 	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10303 	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10304 	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10305 
10306 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10307 	(void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10308 	udelay(3);
10309 	write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10310 	write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10311 
10312 	wait_link_transfer_active(dd, 100);
10313 
10314 	/*
10315 	 * Bring the link down again.
10316 	 */
10317 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10318 	write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10319 	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10320 
10321 	/* adjust ppd->statusp, if needed */
10322 	update_statusp(ppd, IB_PORT_DOWN);
10323 
10324 	dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10325 }
10326 
10327 /*
10328  * Helper for set_link_state().  Do not call except from that routine.
10329  * Expects ppd->hls_mutex to be held.
10330  *
10331  * @rem_reason value to be sent to the neighbor
10332  *
10333  * LinkDownReasons only set if transition succeeds.
10334  */
10335 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10336 {
10337 	struct hfi1_devdata *dd = ppd->dd;
10338 	u32 previous_state;
10339 	int offline_state_ret;
10340 	int ret;
10341 
10342 	update_lcb_cache(dd);
10343 
10344 	previous_state = ppd->host_link_state;
10345 	ppd->host_link_state = HLS_GOING_OFFLINE;
10346 
10347 	/* start offline transition */
10348 	ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10349 
10350 	if (ret != HCMD_SUCCESS) {
10351 		dd_dev_err(dd,
10352 			   "Failed to transition to Offline link state, return %d\n",
10353 			   ret);
10354 		return -EINVAL;
10355 	}
10356 	if (ppd->offline_disabled_reason ==
10357 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10358 		ppd->offline_disabled_reason =
10359 		HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10360 
10361 	offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10362 	if (offline_state_ret < 0)
10363 		return offline_state_ret;
10364 
10365 	/* Disabling AOC transmitters */
10366 	if (ppd->port_type == PORT_TYPE_QSFP &&
10367 	    ppd->qsfp_info.limiting_active &&
10368 	    qsfp_mod_present(ppd)) {
10369 		int ret;
10370 
10371 		ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10372 		if (ret == 0) {
10373 			set_qsfp_tx(ppd, 0);
10374 			release_chip_resource(dd, qsfp_resource(dd));
10375 		} else {
10376 			/* not fatal, but should warn */
10377 			dd_dev_err(dd,
10378 				   "Unable to acquire lock to turn off QSFP TX\n");
10379 		}
10380 	}
10381 
10382 	/*
10383 	 * Wait for the offline.Quiet transition if it hasn't happened yet. It
10384 	 * can take a while for the link to go down.
10385 	 */
10386 	if (offline_state_ret != PLS_OFFLINE_QUIET) {
10387 		ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10388 		if (ret < 0)
10389 			return ret;
10390 	}
10391 
10392 	/*
10393 	 * Now in charge of LCB - must be after the physical state is
10394 	 * offline.quiet and before host_link_state is changed.
10395 	 */
10396 	set_host_lcb_access(dd);
10397 	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10398 
10399 	/* make sure the logical state is also down */
10400 	ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10401 	if (ret)
10402 		force_logical_link_state_down(ppd);
10403 
10404 	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10405 
10406 	/*
10407 	 * The LNI has a mandatory wait time after the physical state
10408 	 * moves to Offline.Quiet.  The wait time may be different
10409 	 * depending on how the link went down.  The 8051 firmware
10410 	 * will observe the needed wait time and only move to ready
10411 	 * when that is completed.  The largest of the quiet timeouts
10412 	 * is 6s, so wait that long and then at least 0.5s more for
10413 	 * other transitions, and another 0.5s for a buffer.
10414 	 */
10415 	ret = wait_fm_ready(dd, 7000);
10416 	if (ret) {
10417 		dd_dev_err(dd,
10418 			   "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10419 		/* state is really offline, so make it so */
10420 		ppd->host_link_state = HLS_DN_OFFLINE;
10421 		return ret;
10422 	}
10423 
10424 	/*
10425 	 * The state is now offline and the 8051 is ready to accept host
10426 	 * requests.
10427 	 *	- change our state
10428 	 *	- notify others if we were previously in a linkup state
10429 	 */
10430 	ppd->host_link_state = HLS_DN_OFFLINE;
10431 	if (previous_state & HLS_UP) {
10432 		/* went down while link was up */
10433 		handle_linkup_change(dd, 0);
10434 	} else if (previous_state
10435 			& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10436 		/* went down while attempting link up */
10437 		check_lni_states(ppd);
10438 
10439 		/* The QSFP doesn't need to be reset on LNI failure */
10440 		ppd->qsfp_info.reset_needed = 0;
10441 	}
10442 
10443 	/* the active link width (downgrade) is 0 on link down */
10444 	ppd->link_width_active = 0;
10445 	ppd->link_width_downgrade_tx_active = 0;
10446 	ppd->link_width_downgrade_rx_active = 0;
10447 	ppd->current_egress_rate = 0;
10448 	return 0;
10449 }
10450 
10451 /* return the link state name */
10452 static const char *link_state_name(u32 state)
10453 {
10454 	const char *name;
10455 	int n = ilog2(state);
10456 	static const char * const names[] = {
10457 		[__HLS_UP_INIT_BP]	 = "INIT",
10458 		[__HLS_UP_ARMED_BP]	 = "ARMED",
10459 		[__HLS_UP_ACTIVE_BP]	 = "ACTIVE",
10460 		[__HLS_DN_DOWNDEF_BP]	 = "DOWNDEF",
10461 		[__HLS_DN_POLL_BP]	 = "POLL",
10462 		[__HLS_DN_DISABLE_BP]	 = "DISABLE",
10463 		[__HLS_DN_OFFLINE_BP]	 = "OFFLINE",
10464 		[__HLS_VERIFY_CAP_BP]	 = "VERIFY_CAP",
10465 		[__HLS_GOING_UP_BP]	 = "GOING_UP",
10466 		[__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10467 		[__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10468 	};
10469 
10470 	name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10471 	return name ? name : "unknown";
10472 }
10473 
10474 /* return the link state reason name */
10475 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10476 {
10477 	if (state == HLS_UP_INIT) {
10478 		switch (ppd->linkinit_reason) {
10479 		case OPA_LINKINIT_REASON_LINKUP:
10480 			return "(LINKUP)";
10481 		case OPA_LINKINIT_REASON_FLAPPING:
10482 			return "(FLAPPING)";
10483 		case OPA_LINKINIT_OUTSIDE_POLICY:
10484 			return "(OUTSIDE_POLICY)";
10485 		case OPA_LINKINIT_QUARANTINED:
10486 			return "(QUARANTINED)";
10487 		case OPA_LINKINIT_INSUFIC_CAPABILITY:
10488 			return "(INSUFIC_CAPABILITY)";
10489 		default:
10490 			break;
10491 		}
10492 	}
10493 	return "";
10494 }
10495 
10496 /*
10497  * driver_pstate - convert the driver's notion of a port's
10498  * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10499  * Return -1 (converted to a u32) to indicate error.
10500  */
10501 u32 driver_pstate(struct hfi1_pportdata *ppd)
10502 {
10503 	switch (ppd->host_link_state) {
10504 	case HLS_UP_INIT:
10505 	case HLS_UP_ARMED:
10506 	case HLS_UP_ACTIVE:
10507 		return IB_PORTPHYSSTATE_LINKUP;
10508 	case HLS_DN_POLL:
10509 		return IB_PORTPHYSSTATE_POLLING;
10510 	case HLS_DN_DISABLE:
10511 		return IB_PORTPHYSSTATE_DISABLED;
10512 	case HLS_DN_OFFLINE:
10513 		return OPA_PORTPHYSSTATE_OFFLINE;
10514 	case HLS_VERIFY_CAP:
10515 		return IB_PORTPHYSSTATE_POLLING;
10516 	case HLS_GOING_UP:
10517 		return IB_PORTPHYSSTATE_POLLING;
10518 	case HLS_GOING_OFFLINE:
10519 		return OPA_PORTPHYSSTATE_OFFLINE;
10520 	case HLS_LINK_COOLDOWN:
10521 		return OPA_PORTPHYSSTATE_OFFLINE;
10522 	case HLS_DN_DOWNDEF:
10523 	default:
10524 		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10525 			   ppd->host_link_state);
10526 		return  -1;
10527 	}
10528 }
10529 
10530 /*
10531  * driver_lstate - convert the driver's notion of a port's
10532  * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10533  * (converted to a u32) to indicate error.
10534  */
10535 u32 driver_lstate(struct hfi1_pportdata *ppd)
10536 {
10537 	if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10538 		return IB_PORT_DOWN;
10539 
10540 	switch (ppd->host_link_state & HLS_UP) {
10541 	case HLS_UP_INIT:
10542 		return IB_PORT_INIT;
10543 	case HLS_UP_ARMED:
10544 		return IB_PORT_ARMED;
10545 	case HLS_UP_ACTIVE:
10546 		return IB_PORT_ACTIVE;
10547 	default:
10548 		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10549 			   ppd->host_link_state);
10550 		return -1;
10551 	}
10552 }
10553 
10554 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10555 			  u8 neigh_reason, u8 rem_reason)
10556 {
10557 	if (ppd->local_link_down_reason.latest == 0 &&
10558 	    ppd->neigh_link_down_reason.latest == 0) {
10559 		ppd->local_link_down_reason.latest = lcl_reason;
10560 		ppd->neigh_link_down_reason.latest = neigh_reason;
10561 		ppd->remote_link_down_reason = rem_reason;
10562 	}
10563 }
10564 
10565 /**
10566  * data_vls_operational() - Verify if data VL BCT credits and MTU
10567  *			    are both set.
10568  * @ppd: pointer to hfi1_pportdata structure
10569  *
10570  * Return: true - OK, false - otherwise.
10571  */
10572 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10573 {
10574 	int i;
10575 	u64 reg;
10576 
10577 	if (!ppd->actual_vls_operational)
10578 		return false;
10579 
10580 	for (i = 0; i < ppd->vls_supported; i++) {
10581 		reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10582 		if ((reg && !ppd->dd->vld[i].mtu) ||
10583 		    (!reg && ppd->dd->vld[i].mtu))
10584 			return false;
10585 	}
10586 
10587 	return true;
10588 }
10589 
10590 /*
10591  * Change the physical and/or logical link state.
10592  *
10593  * Do not call this routine while inside an interrupt.  It contains
10594  * calls to routines that can take multiple seconds to finish.
10595  *
10596  * Returns 0 on success, -errno on failure.
10597  */
10598 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10599 {
10600 	struct hfi1_devdata *dd = ppd->dd;
10601 	struct ib_event event = {.device = NULL};
10602 	int ret1, ret = 0;
10603 	int orig_new_state, poll_bounce;
10604 
10605 	mutex_lock(&ppd->hls_lock);
10606 
10607 	orig_new_state = state;
10608 	if (state == HLS_DN_DOWNDEF)
10609 		state = dd->link_default;
10610 
10611 	/* interpret poll -> poll as a link bounce */
10612 	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10613 		      state == HLS_DN_POLL;
10614 
10615 	dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10616 		    link_state_name(ppd->host_link_state),
10617 		    link_state_name(orig_new_state),
10618 		    poll_bounce ? "(bounce) " : "",
10619 		    link_state_reason_name(ppd, state));
10620 
10621 	/*
10622 	 * If we're going to a (HLS_*) link state that implies the logical
10623 	 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10624 	 * reset is_sm_config_started to 0.
10625 	 */
10626 	if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10627 		ppd->is_sm_config_started = 0;
10628 
10629 	/*
10630 	 * Do nothing if the states match.  Let a poll to poll link bounce
10631 	 * go through.
10632 	 */
10633 	if (ppd->host_link_state == state && !poll_bounce)
10634 		goto done;
10635 
10636 	switch (state) {
10637 	case HLS_UP_INIT:
10638 		if (ppd->host_link_state == HLS_DN_POLL &&
10639 		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10640 			/*
10641 			 * Quick link up jumps from polling to here.
10642 			 *
10643 			 * Whether in normal or loopback mode, the
10644 			 * simulator jumps from polling to link up.
10645 			 * Accept that here.
10646 			 */
10647 			/* OK */
10648 		} else if (ppd->host_link_state != HLS_GOING_UP) {
10649 			goto unexpected;
10650 		}
10651 
10652 		/*
10653 		 * Wait for Link_Up physical state.
10654 		 * Physical and Logical states should already have
10655 		 * transitioned to LinkUp and LinkInit respectively.
10656 		 */
10657 		ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10658 		if (ret) {
10659 			dd_dev_err(dd,
10660 				   "%s: physical state did not change to LINK-UP\n",
10661 				   __func__);
10662 			break;
10663 		}
10664 
10665 		ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10666 		if (ret) {
10667 			dd_dev_err(dd,
10668 				   "%s: logical state did not change to INIT\n",
10669 				   __func__);
10670 			break;
10671 		}
10672 
10673 		/* clear old transient LINKINIT_REASON code */
10674 		if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10675 			ppd->linkinit_reason =
10676 				OPA_LINKINIT_REASON_LINKUP;
10677 
10678 		/* enable the port */
10679 		add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10680 
10681 		handle_linkup_change(dd, 1);
10682 		pio_kernel_linkup(dd);
10683 
10684 		ppd->host_link_state = HLS_UP_INIT;
10685 		break;
10686 	case HLS_UP_ARMED:
10687 		if (ppd->host_link_state != HLS_UP_INIT)
10688 			goto unexpected;
10689 
10690 		if (!data_vls_operational(ppd)) {
10691 			dd_dev_err(dd,
10692 				   "%s: Invalid data VL credits or mtu\n",
10693 				   __func__);
10694 			ret = -EINVAL;
10695 			break;
10696 		}
10697 
10698 		set_logical_state(dd, LSTATE_ARMED);
10699 		ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10700 		if (ret) {
10701 			dd_dev_err(dd,
10702 				   "%s: logical state did not change to ARMED\n",
10703 				   __func__);
10704 			break;
10705 		}
10706 		ppd->host_link_state = HLS_UP_ARMED;
10707 		/*
10708 		 * The simulator does not currently implement SMA messages,
10709 		 * so neighbor_normal is not set.  Set it here when we first
10710 		 * move to Armed.
10711 		 */
10712 		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10713 			ppd->neighbor_normal = 1;
10714 		break;
10715 	case HLS_UP_ACTIVE:
10716 		if (ppd->host_link_state != HLS_UP_ARMED)
10717 			goto unexpected;
10718 
10719 		set_logical_state(dd, LSTATE_ACTIVE);
10720 		ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10721 		if (ret) {
10722 			dd_dev_err(dd,
10723 				   "%s: logical state did not change to ACTIVE\n",
10724 				   __func__);
10725 		} else {
10726 			/* tell all engines to go running */
10727 			sdma_all_running(dd);
10728 			ppd->host_link_state = HLS_UP_ACTIVE;
10729 
10730 			/* Signal the IB layer that the port has gone active */
10731 			event.device = &dd->verbs_dev.rdi.ibdev;
10732 			event.element.port_num = ppd->port;
10733 			event.event = IB_EVENT_PORT_ACTIVE;
10734 		}
10735 		break;
10736 	case HLS_DN_POLL:
10737 		if ((ppd->host_link_state == HLS_DN_DISABLE ||
10738 		     ppd->host_link_state == HLS_DN_OFFLINE) &&
10739 		    dd->dc_shutdown)
10740 			dc_start(dd);
10741 		/* Hand LED control to the DC */
10742 		write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10743 
10744 		if (ppd->host_link_state != HLS_DN_OFFLINE) {
10745 			u8 tmp = ppd->link_enabled;
10746 
10747 			ret = goto_offline(ppd, ppd->remote_link_down_reason);
10748 			if (ret) {
10749 				ppd->link_enabled = tmp;
10750 				break;
10751 			}
10752 			ppd->remote_link_down_reason = 0;
10753 
10754 			if (ppd->driver_link_ready)
10755 				ppd->link_enabled = 1;
10756 		}
10757 
10758 		set_all_slowpath(ppd->dd);
10759 		ret = set_local_link_attributes(ppd);
10760 		if (ret)
10761 			break;
10762 
10763 		ppd->port_error_action = 0;
10764 
10765 		if (quick_linkup) {
10766 			/* quick linkup does not go into polling */
10767 			ret = do_quick_linkup(dd);
10768 		} else {
10769 			ret1 = set_physical_link_state(dd, PLS_POLLING);
10770 			if (!ret1)
10771 				ret1 = wait_phys_link_out_of_offline(ppd,
10772 								     3000);
10773 			if (ret1 != HCMD_SUCCESS) {
10774 				dd_dev_err(dd,
10775 					   "Failed to transition to Polling link state, return 0x%x\n",
10776 					   ret1);
10777 				ret = -EINVAL;
10778 			}
10779 		}
10780 
10781 		/*
10782 		 * Change the host link state after requesting DC8051 to
10783 		 * change its physical state so that we can ignore any
10784 		 * interrupt with stale LNI(XX) error, which will not be
10785 		 * cleared until DC8051 transitions to Polling state.
10786 		 */
10787 		ppd->host_link_state = HLS_DN_POLL;
10788 		ppd->offline_disabled_reason =
10789 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10790 		/*
10791 		 * If an error occurred above, go back to offline.  The
10792 		 * caller may reschedule another attempt.
10793 		 */
10794 		if (ret)
10795 			goto_offline(ppd, 0);
10796 		else
10797 			log_physical_state(ppd, PLS_POLLING);
10798 		break;
10799 	case HLS_DN_DISABLE:
10800 		/* link is disabled */
10801 		ppd->link_enabled = 0;
10802 
10803 		/* allow any state to transition to disabled */
10804 
10805 		/* must transition to offline first */
10806 		if (ppd->host_link_state != HLS_DN_OFFLINE) {
10807 			ret = goto_offline(ppd, ppd->remote_link_down_reason);
10808 			if (ret)
10809 				break;
10810 			ppd->remote_link_down_reason = 0;
10811 		}
10812 
10813 		if (!dd->dc_shutdown) {
10814 			ret1 = set_physical_link_state(dd, PLS_DISABLED);
10815 			if (ret1 != HCMD_SUCCESS) {
10816 				dd_dev_err(dd,
10817 					   "Failed to transition to Disabled link state, return 0x%x\n",
10818 					   ret1);
10819 				ret = -EINVAL;
10820 				break;
10821 			}
10822 			ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10823 			if (ret) {
10824 				dd_dev_err(dd,
10825 					   "%s: physical state did not change to DISABLED\n",
10826 					   __func__);
10827 				break;
10828 			}
10829 			dc_shutdown(dd);
10830 		}
10831 		ppd->host_link_state = HLS_DN_DISABLE;
10832 		break;
10833 	case HLS_DN_OFFLINE:
10834 		if (ppd->host_link_state == HLS_DN_DISABLE)
10835 			dc_start(dd);
10836 
10837 		/* allow any state to transition to offline */
10838 		ret = goto_offline(ppd, ppd->remote_link_down_reason);
10839 		if (!ret)
10840 			ppd->remote_link_down_reason = 0;
10841 		break;
10842 	case HLS_VERIFY_CAP:
10843 		if (ppd->host_link_state != HLS_DN_POLL)
10844 			goto unexpected;
10845 		ppd->host_link_state = HLS_VERIFY_CAP;
10846 		log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10847 		break;
10848 	case HLS_GOING_UP:
10849 		if (ppd->host_link_state != HLS_VERIFY_CAP)
10850 			goto unexpected;
10851 
10852 		ret1 = set_physical_link_state(dd, PLS_LINKUP);
10853 		if (ret1 != HCMD_SUCCESS) {
10854 			dd_dev_err(dd,
10855 				   "Failed to transition to link up state, return 0x%x\n",
10856 				   ret1);
10857 			ret = -EINVAL;
10858 			break;
10859 		}
10860 		ppd->host_link_state = HLS_GOING_UP;
10861 		break;
10862 
10863 	case HLS_GOING_OFFLINE:		/* transient within goto_offline() */
10864 	case HLS_LINK_COOLDOWN:		/* transient within goto_offline() */
10865 	default:
10866 		dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10867 			    __func__, state);
10868 		ret = -EINVAL;
10869 		break;
10870 	}
10871 
10872 	goto done;
10873 
10874 unexpected:
10875 	dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10876 		   __func__, link_state_name(ppd->host_link_state),
10877 		   link_state_name(state));
10878 	ret = -EINVAL;
10879 
10880 done:
10881 	mutex_unlock(&ppd->hls_lock);
10882 
10883 	if (event.device)
10884 		ib_dispatch_event(&event);
10885 
10886 	return ret;
10887 }
10888 
10889 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10890 {
10891 	u64 reg;
10892 	int ret = 0;
10893 
10894 	switch (which) {
10895 	case HFI1_IB_CFG_LIDLMC:
10896 		set_lidlmc(ppd);
10897 		break;
10898 	case HFI1_IB_CFG_VL_HIGH_LIMIT:
10899 		/*
10900 		 * The VL Arbitrator high limit is sent in units of 4k
10901 		 * bytes, while HFI stores it in units of 64 bytes.
10902 		 */
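		/* e.g. a limit of 2 (8 KB) is stored as 128 units of 64 B */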
10903 		val *= 4096 / 64;
10904 		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10905 			<< SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10906 		write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10907 		break;
10908 	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10909 		/* HFI only supports POLL as the default link down state */
10910 		if (val != HLS_DN_POLL)
10911 			ret = -EINVAL;
10912 		break;
10913 	case HFI1_IB_CFG_OP_VLS:
10914 		if (ppd->vls_operational != val) {
10915 			ppd->vls_operational = val;
10916 			if (!ppd->port)
10917 				ret = -EINVAL;
10918 		}
10919 		break;
10920 	/*
10921 	 * For link width, link width downgrade, and speed enable, always AND
10922 	 * the setting with what is actually supported.  This has two benefits.
10923 	 * First, enabled can't have unsupported values, no matter what the
10924 	 * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10925 	 * "fill in with your supported value" have all the bits in the
10926 	 * field set, so simply ANDing with supported has the desired result.
10927 	 */
10928 	case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10929 		ppd->link_width_enabled = val & ppd->link_width_supported;
10930 		break;
10931 	case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10932 		ppd->link_width_downgrade_enabled =
10933 				val & ppd->link_width_downgrade_supported;
10934 		break;
10935 	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10936 		ppd->link_speed_enabled = val & ppd->link_speed_supported;
10937 		break;
10938 	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10939 		/*
10940 		 * HFI does not follow IB specs, save this value
10941 		 * so we can report it, if asked.
10942 		 */
10943 		ppd->overrun_threshold = val;
10944 		break;
10945 	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10946 		/*
10947 		 * HFI does not follow IB specs, save this value
10948 		 * so we can report it, if asked.
10949 		 */
10950 		ppd->phy_error_threshold = val;
10951 		break;
10952 
10953 	case HFI1_IB_CFG_MTU:
10954 		set_send_length(ppd);
10955 		break;
10956 
10957 	case HFI1_IB_CFG_PKEYS:
10958 		if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10959 			set_partition_keys(ppd);
10960 		break;
10961 
10962 	default:
10963 		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10964 			dd_dev_info(ppd->dd,
10965 				    "%s: which %s, val 0x%x: not implemented\n",
10966 				    __func__, ib_cfg_name(which), val);
10967 		break;
10968 	}
10969 	return ret;
10970 }
10971 
10972 /* begin functions related to vl arbitration table caching */
10973 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10974 {
10975 	int i;
10976 
10977 	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10978 			VL_ARB_LOW_PRIO_TABLE_SIZE);
10979 	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10980 			VL_ARB_HIGH_PRIO_TABLE_SIZE);
10981 
10982 	/*
10983 	 * Note that we always return values directly from the
10984 	 * 'vl_arb_cache' (and do no CSR reads) in response to a
10985 	 * 'Get(VLArbTable)'. This is obviously correct after a
10986 	 * 'Set(VLArbTable)', since the cache will then be up to
10987 	 * date. But it's also correct prior to any 'Set(VLArbTable)'
10988 	 * since then both the cache, and the relevant h/w registers
10989 	 * will be zeroed.
10990 	 */
10991 
10992 	for (i = 0; i < MAX_PRIO_TABLE; i++)
10993 		spin_lock_init(&ppd->vl_arb_cache[i].lock);
10994 }
10995 
10996 /*
10997  * vl_arb_lock_cache
10998  *
10999  * All other vl_arb_* functions should be called only after locking
11000  * the cache.
11001  */
11002 static inline struct vl_arb_cache *
11003 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11004 {
11005 	if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11006 		return NULL;
11007 	spin_lock(&ppd->vl_arb_cache[idx].lock);
11008 	return &ppd->vl_arb_cache[idx];
11009 }
11010 
11011 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11012 {
11013 	spin_unlock(&ppd->vl_arb_cache[idx].lock);
11014 }
11015 
11016 static void vl_arb_get_cache(struct vl_arb_cache *cache,
11017 			     struct ib_vl_weight_elem *vl)
11018 {
11019 	memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11020 }
11021 
11022 static void vl_arb_set_cache(struct vl_arb_cache *cache,
11023 			     struct ib_vl_weight_elem *vl)
11024 {
11025 	memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11026 }
11027 
11028 static int vl_arb_match_cache(struct vl_arb_cache *cache,
11029 			      struct ib_vl_weight_elem *vl)
11030 {
11031 	return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11032 }
11033 
11034 /* end functions related to vl arbitration table caching */
11035 
11036 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11037 			  u32 size, struct ib_vl_weight_elem *vl)
11038 {
11039 	struct hfi1_devdata *dd = ppd->dd;
11040 	u64 reg;
11041 	unsigned int i, is_up = 0;
11042 	int drain, ret = 0;
11043 
11044 	mutex_lock(&ppd->hls_lock);
11045 
11046 	if (ppd->host_link_state & HLS_UP)
11047 		is_up = 1;
11048 
11049 	drain = !is_ax(dd) && is_up;
11050 
11051 	if (drain)
11052 		/*
11053 		 * Before adjusting VL arbitration weights, empty per-VL
11054 		 * FIFOs, otherwise a packet whose VL weight is being
11055 		 * set to 0 could get stuck in a FIFO with no chance to
11056 		 * egress.
11057 		 */
11058 		ret = stop_drain_data_vls(dd);
11059 
11060 	if (ret) {
11061 		dd_dev_err(
11062 			dd,
11063 			"%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11064 			__func__);
11065 		goto err;
11066 	}
11067 
11068 	for (i = 0; i < size; i++, vl++) {
11069 		/*
11070 		 * NOTE: The low priority shift and mask are used here, but
11071 		 * they are the same for both the low and high registers.
11072 		 */
11073 		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11074 				<< SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11075 		      | (((u64)vl->weight
11076 				& SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11077 				<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11078 		write_csr(dd, target + (i * 8), reg);
11079 	}
11080 	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11081 
11082 	if (drain)
11083 		open_fill_data_vls(dd); /* reopen all VLs */
11084 
11085 err:
11086 	mutex_unlock(&ppd->hls_lock);
11087 
11088 	return ret;
11089 }
11090 
11091 /*
11092  * Read one credit merge VL register.
11093  */
11094 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11095 			   struct vl_limit *vll)
11096 {
11097 	u64 reg = read_csr(dd, csr);
11098 
11099 	vll->dedicated = cpu_to_be16(
11100 		(reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11101 		& SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11102 	vll->shared = cpu_to_be16(
11103 		(reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11104 		& SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11105 }
11106 
11107 /*
11108  * Read the current credit merge limits.
11109  */
11110 static int get_buffer_control(struct hfi1_devdata *dd,
11111 			      struct buffer_control *bc, u16 *overall_limit)
11112 {
11113 	u64 reg;
11114 	int i;
11115 
11116 	/* not all entries are filled in */
11117 	memset(bc, 0, sizeof(*bc));
11118 
11119 	/* OPA and HFI have a 1-1 mapping */
11120 	for (i = 0; i < TXE_NUM_DATA_VL; i++)
11121 		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11122 
11123 	/* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11124 	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11125 
11126 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11127 	bc->overall_shared_limit = cpu_to_be16(
11128 		(reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11129 		& SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11130 	if (overall_limit)
11131 		*overall_limit = (reg
11132 			>> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11133 			& SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11134 	return sizeof(struct buffer_control);
11135 }
11136 
11137 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11138 {
11139 	u64 reg;
11140 	int i;
11141 
11142 	/* each register contains 16 SC->VLnt mappings, 4 bits each */
11143 	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11144 	for (i = 0; i < sizeof(u64); i++) {
11145 		u8 byte = *(((u8 *)&reg) + i);
11146 
11147 		dp->vlnt[2 * i] = byte & 0xf;
11148 		dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11149 	}
11150 
11151 	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11152 	for (i = 0; i < sizeof(u64); i++) {
11153 		u8 byte = *(((u8 *)&reg) + i);
11154 
11155 		dp->vlnt[16 + (2 * i)] = byte & 0xf;
11156 		dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11157 	}
11158 	return sizeof(struct sc2vlnt);
11159 }
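/*
 * Illustrative unpacking (register value assumed, and SC0 assumed to sit in
 * the low nibble of the least significant byte): if DCC_CFG_SC_VL_TABLE_15_0
 * reads 0x0000000000003210, the first loop above yields SC0->VL0, SC1->VL1,
 * SC2->VL2, SC3->VL3, and SC4..SC15->VL0.
 */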
11160 
11161 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11162 			      struct ib_vl_weight_elem *vl)
11163 {
11164 	unsigned int i;
11165 
11166 	for (i = 0; i < nelems; i++, vl++) {
11167 		vl->vl = 0xf;
11168 		vl->weight = 0;
11169 	}
11170 }
11171 
11172 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11173 {
11174 	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11175 		  DC_SC_VL_VAL(15_0,
11176 			       0, dp->vlnt[0] & 0xf,
11177 			       1, dp->vlnt[1] & 0xf,
11178 			       2, dp->vlnt[2] & 0xf,
11179 			       3, dp->vlnt[3] & 0xf,
11180 			       4, dp->vlnt[4] & 0xf,
11181 			       5, dp->vlnt[5] & 0xf,
11182 			       6, dp->vlnt[6] & 0xf,
11183 			       7, dp->vlnt[7] & 0xf,
11184 			       8, dp->vlnt[8] & 0xf,
11185 			       9, dp->vlnt[9] & 0xf,
11186 			       10, dp->vlnt[10] & 0xf,
11187 			       11, dp->vlnt[11] & 0xf,
11188 			       12, dp->vlnt[12] & 0xf,
11189 			       13, dp->vlnt[13] & 0xf,
11190 			       14, dp->vlnt[14] & 0xf,
11191 			       15, dp->vlnt[15] & 0xf));
11192 	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11193 		  DC_SC_VL_VAL(31_16,
11194 			       16, dp->vlnt[16] & 0xf,
11195 			       17, dp->vlnt[17] & 0xf,
11196 			       18, dp->vlnt[18] & 0xf,
11197 			       19, dp->vlnt[19] & 0xf,
11198 			       20, dp->vlnt[20] & 0xf,
11199 			       21, dp->vlnt[21] & 0xf,
11200 			       22, dp->vlnt[22] & 0xf,
11201 			       23, dp->vlnt[23] & 0xf,
11202 			       24, dp->vlnt[24] & 0xf,
11203 			       25, dp->vlnt[25] & 0xf,
11204 			       26, dp->vlnt[26] & 0xf,
11205 			       27, dp->vlnt[27] & 0xf,
11206 			       28, dp->vlnt[28] & 0xf,
11207 			       29, dp->vlnt[29] & 0xf,
11208 			       30, dp->vlnt[30] & 0xf,
11209 			       31, dp->vlnt[31] & 0xf));
11210 }
11211 
11212 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11213 			u16 limit)
11214 {
11215 	if (limit != 0)
11216 		dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11217 			    what, (int)limit, idx);
11218 }
11219 
11220 /* change only the shared limit portion of SendCmGlobalCredit */
11221 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11222 {
11223 	u64 reg;
11224 
11225 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11226 	reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11227 	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11228 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11229 }
11230 
11231 /* change only the total credit limit portion of SendCmGlobalCredit */
11232 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11233 {
11234 	u64 reg;
11235 
11236 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11237 	reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11238 	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11239 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11240 }
11241 
11242 /* set the given per-VL shared limit */
11243 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11244 {
11245 	u64 reg;
11246 	u32 addr;
11247 
11248 	if (vl < TXE_NUM_DATA_VL)
11249 		addr = SEND_CM_CREDIT_VL + (8 * vl);
11250 	else
11251 		addr = SEND_CM_CREDIT_VL15;
11252 
11253 	reg = read_csr(dd, addr);
11254 	reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11255 	reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11256 	write_csr(dd, addr, reg);
11257 }
11258 
11259 /* set the given per-VL dedicated limit */
11260 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11261 {
11262 	u64 reg;
11263 	u32 addr;
11264 
11265 	if (vl < TXE_NUM_DATA_VL)
11266 		addr = SEND_CM_CREDIT_VL + (8 * vl);
11267 	else
11268 		addr = SEND_CM_CREDIT_VL15;
11269 
11270 	reg = read_csr(dd, addr);
11271 	reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11272 	reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11273 	write_csr(dd, addr, reg);
11274 }
11275 
11276 /* spin until the given per-VL status mask bits clear */
11277 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11278 				     const char *which)
11279 {
11280 	unsigned long timeout;
11281 	u64 reg;
11282 
11283 	timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11284 	while (1) {
11285 		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11286 
11287 		if (reg == 0)
11288 			return;	/* success */
11289 		if (time_after(jiffies, timeout))
11290 			break;		/* timed out */
11291 		udelay(1);
11292 	}
11293 
11294 	dd_dev_err(dd,
11295 		   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11296 		   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11297 	/*
11298 	 * If this occurs, it is likely there was a credit loss on the link.
11299 	 * The only recovery from that is a link bounce.
11300 	 */
11301 	dd_dev_err(dd,
11302 		   "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
11303 }
11304 
11305 /*
11306  * The number of credits on the VLs may be changed while everything
11307  * is "live", but the following algorithm must be followed due to
11308  * how the hardware is actually implemented.  In particular,
11309  * Return_Credit_Status[] is the only correct status check.
11310  *
11311  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11312  *     set Global_Shared_Credit_Limit = 0
11313  *     use_all_vl = 1
11314  * mask0 = all VLs that are changing either dedicated or shared limits
11315  * set Shared_Limit[mask0] = 0
11316  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11317  * if (changing any dedicated limit)
11318  *     mask1 = all VLs that are lowering dedicated limits
11319  *     lower Dedicated_Limit[mask1]
11320  *     spin until Return_Credit_Status[mask1] == 0
11321  *     raise Dedicated_Limits
11322  * raise Shared_Limits
11323  * raise Global_Shared_Credit_Limit
11324  *
11325  * lower = if the new limit is lower, set the limit to the new value
11326  * raise = if the new limit is higher than the current value (may be changed
11327  *	earlier in the algorithm), set the limit to the new value
11328  */
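/*
 * Worked example of the ordering (the credit values are assumptions for
 * illustration): to lower VL0's dedicated limit from 100 to 50 while raising
 * VL1's from 50 to 100, the routine below zeroes the shared limits of both
 * changing VLs, waits for their Return_Credit_Status bits to clear, writes
 * VL0's new (lower) dedicated limit, waits again, then raises VL1's
 * dedicated limit and finally restores the per-VL shared limits.
 */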
11329 int set_buffer_control(struct hfi1_pportdata *ppd,
11330 		       struct buffer_control *new_bc)
11331 {
11332 	struct hfi1_devdata *dd = ppd->dd;
11333 	u64 changing_mask, ld_mask, stat_mask;
11334 	int change_count;
11335 	int i, use_all_mask;
11336 	int this_shared_changing;
11337 	int vl_count = 0, ret;
11338 	/*
11339 	 * A0: add the variable any_shared_limit_changing below and in the
11340 	 * algorithm above.  If removing A0 support, it can be removed.
11341 	 */
11342 	int any_shared_limit_changing;
11343 	struct buffer_control cur_bc;
11344 	u8 changing[OPA_MAX_VLS];
11345 	u8 lowering_dedicated[OPA_MAX_VLS];
11346 	u16 cur_total;
11347 	u32 new_total = 0;
11348 	const u64 all_mask =
11349 	SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11350 	 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11351 	 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11352 	 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11353 	 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11354 	 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11355 	 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11356 	 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11357 	 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11358 
11359 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11360 #define NUM_USABLE_VLS 16	/* look at VL15 and less */
11361 
11362 	/* find the new total credits, do sanity check on unused VLs */
11363 	for (i = 0; i < OPA_MAX_VLS; i++) {
11364 		if (valid_vl(i)) {
11365 			new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11366 			continue;
11367 		}
11368 		nonzero_msg(dd, i, "dedicated",
11369 			    be16_to_cpu(new_bc->vl[i].dedicated));
11370 		nonzero_msg(dd, i, "shared",
11371 			    be16_to_cpu(new_bc->vl[i].shared));
11372 		new_bc->vl[i].dedicated = 0;
11373 		new_bc->vl[i].shared = 0;
11374 	}
11375 	new_total += be16_to_cpu(new_bc->overall_shared_limit);
11376 
11377 	/* fetch the current values */
11378 	get_buffer_control(dd, &cur_bc, &cur_total);
11379 
11380 	/*
11381 	 * Create the masks we will use.
11382 	 */
11383 	memset(changing, 0, sizeof(changing));
11384 	memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11385 	/*
11386 	 * NOTE: Assumes that the individual VL bits are adjacent and in
11387 	 * increasing order
11388 	 */
11389 	stat_mask =
11390 		SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11391 	changing_mask = 0;
11392 	ld_mask = 0;
11393 	change_count = 0;
11394 	any_shared_limit_changing = 0;
11395 	for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11396 		if (!valid_vl(i))
11397 			continue;
11398 		this_shared_changing = new_bc->vl[i].shared
11399 						!= cur_bc.vl[i].shared;
11400 		if (this_shared_changing)
11401 			any_shared_limit_changing = 1;
11402 		if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11403 		    this_shared_changing) {
11404 			changing[i] = 1;
11405 			changing_mask |= stat_mask;
11406 			change_count++;
11407 		}
11408 		if (be16_to_cpu(new_bc->vl[i].dedicated) <
11409 					be16_to_cpu(cur_bc.vl[i].dedicated)) {
11410 			lowering_dedicated[i] = 1;
11411 			ld_mask |= stat_mask;
11412 		}
11413 	}
11414 
11415 	/* bracket the credit change with a total adjustment */
11416 	if (new_total > cur_total)
11417 		set_global_limit(dd, new_total);
11418 
11419 	/*
11420 	 * Start the credit change algorithm.
11421 	 */
11422 	use_all_mask = 0;
11423 	if ((be16_to_cpu(new_bc->overall_shared_limit) <
11424 	     be16_to_cpu(cur_bc.overall_shared_limit)) ||
11425 	    (is_ax(dd) && any_shared_limit_changing)) {
11426 		set_global_shared(dd, 0);
11427 		cur_bc.overall_shared_limit = 0;
11428 		use_all_mask = 1;
11429 	}
11430 
11431 	for (i = 0; i < NUM_USABLE_VLS; i++) {
11432 		if (!valid_vl(i))
11433 			continue;
11434 
11435 		if (changing[i]) {
11436 			set_vl_shared(dd, i, 0);
11437 			cur_bc.vl[i].shared = 0;
11438 		}
11439 	}
11440 
11441 	wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11442 				 "shared");
11443 
11444 	if (change_count > 0) {
11445 		for (i = 0; i < NUM_USABLE_VLS; i++) {
11446 			if (!valid_vl(i))
11447 				continue;
11448 
11449 			if (lowering_dedicated[i]) {
11450 				set_vl_dedicated(dd, i,
11451 						 be16_to_cpu(new_bc->
11452 							     vl[i].dedicated));
11453 				cur_bc.vl[i].dedicated =
11454 						new_bc->vl[i].dedicated;
11455 			}
11456 		}
11457 
11458 		wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11459 
11460 		/* now raise all dedicated that are going up */
11461 		for (i = 0; i < NUM_USABLE_VLS; i++) {
11462 			if (!valid_vl(i))
11463 				continue;
11464 
11465 			if (be16_to_cpu(new_bc->vl[i].dedicated) >
11466 					be16_to_cpu(cur_bc.vl[i].dedicated))
11467 				set_vl_dedicated(dd, i,
11468 						 be16_to_cpu(new_bc->
11469 							     vl[i].dedicated));
11470 		}
11471 	}
11472 
11473 	/* next raise all shared that are going up */
11474 	for (i = 0; i < NUM_USABLE_VLS; i++) {
11475 		if (!valid_vl(i))
11476 			continue;
11477 
11478 		if (be16_to_cpu(new_bc->vl[i].shared) >
11479 				be16_to_cpu(cur_bc.vl[i].shared))
11480 			set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11481 	}
11482 
11483 	/* finally raise the global shared */
11484 	if (be16_to_cpu(new_bc->overall_shared_limit) >
11485 	    be16_to_cpu(cur_bc.overall_shared_limit))
11486 		set_global_shared(dd,
11487 				  be16_to_cpu(new_bc->overall_shared_limit));
11488 
11489 	/* bracket the credit change with a total adjustment */
11490 	if (new_total < cur_total)
11491 		set_global_limit(dd, new_total);
11492 
11493 	/*
11494 	 * Determine the actual number of operational VLS using the number of
11495 	 * dedicated and shared credits for each VL.
11496 	 */
11497 	if (change_count > 0) {
11498 		for (i = 0; i < TXE_NUM_DATA_VL; i++)
11499 			if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11500 			    be16_to_cpu(new_bc->vl[i].shared) > 0)
11501 				vl_count++;
11502 		ppd->actual_vls_operational = vl_count;
11503 		ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11504 				    ppd->actual_vls_operational :
11505 				    ppd->vls_operational,
11506 				    NULL);
11507 		if (ret == 0)
11508 			ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11509 					   ppd->actual_vls_operational :
11510 					   ppd->vls_operational, NULL);
11511 		if (ret)
11512 			return ret;
11513 	}
11514 	return 0;
11515 }
11516 
11517 /*
11518  * Read the given fabric manager table. Return the size of the
11519  * table (in bytes) on success, and a negative error code on
11520  * failure.
11521  */
11522 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11523 
11524 {
11525 	int size;
11526 	struct vl_arb_cache *vlc;
11527 
11528 	switch (which) {
11529 	case FM_TBL_VL_HIGH_ARB:
11530 		size = 256;
11531 		/*
11532 		 * OPA specifies 128 elements (of 2 bytes each), though
11533 		 * HFI supports only 16 elements in h/w.
11534 		 */
11535 		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11536 		vl_arb_get_cache(vlc, t);
11537 		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11538 		break;
11539 	case FM_TBL_VL_LOW_ARB:
11540 		size = 256;
11541 		/*
11542 		 * OPA specifies 128 elements (of 2 bytes each), though
11543 		 * HFI supports only 16 elements in h/w.
11544 		 */
11545 		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11546 		vl_arb_get_cache(vlc, t);
11547 		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11548 		break;
11549 	case FM_TBL_BUFFER_CONTROL:
11550 		size = get_buffer_control(ppd->dd, t, NULL);
11551 		break;
11552 	case FM_TBL_SC2VLNT:
11553 		size = get_sc2vlnt(ppd->dd, t);
11554 		break;
11555 	case FM_TBL_VL_PREEMPT_ELEMS:
11556 		size = 256;
11557 		/* OPA specifies 128 elements, of 2 bytes each */
11558 		get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11559 		break;
11560 	case FM_TBL_VL_PREEMPT_MATRIX:
11561 		size = 256;
11562 		/*
11563 		 * OPA specifies that this is the same size as the VL
11564 		 * arbitration tables (i.e., 256 bytes).
11565 		 */
11566 		break;
11567 	default:
11568 		return -EINVAL;
11569 	}
11570 	return size;
11571 }
11572 
11573 /*
11574  * Write the given fabric manager table.
11575  */
11576 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11577 {
11578 	int ret = 0;
11579 	struct vl_arb_cache *vlc;
11580 
11581 	switch (which) {
11582 	case FM_TBL_VL_HIGH_ARB:
11583 		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11584 		if (vl_arb_match_cache(vlc, t)) {
11585 			vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11586 			break;
11587 		}
11588 		vl_arb_set_cache(vlc, t);
11589 		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11590 		ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11591 				     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11592 		break;
11593 	case FM_TBL_VL_LOW_ARB:
11594 		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11595 		if (vl_arb_match_cache(vlc, t)) {
11596 			vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11597 			break;
11598 		}
11599 		vl_arb_set_cache(vlc, t);
11600 		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11601 		ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11602 				     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11603 		break;
11604 	case FM_TBL_BUFFER_CONTROL:
11605 		ret = set_buffer_control(ppd, t);
11606 		break;
11607 	case FM_TBL_SC2VLNT:
11608 		set_sc2vlnt(ppd->dd, t);
11609 		break;
11610 	default:
11611 		ret = -EINVAL;
11612 	}
11613 	return ret;
11614 }
11615 
11616 /*
11617  * Disable all data VLs.
11618  *
11619  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11620  */
11621 static int disable_data_vls(struct hfi1_devdata *dd)
11622 {
11623 	if (is_ax(dd))
11624 		return 1;
11625 
11626 	pio_send_control(dd, PSC_DATA_VL_DISABLE);
11627 
11628 	return 0;
11629 }
11630 
11631 /*
11632  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11633  * Just re-enables all data VLs (the "fill" part happens
11634  * automatically - the name was chosen for symmetry with
11635  * stop_drain_data_vls()).
11636  *
11637  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11638  */
11639 int open_fill_data_vls(struct hfi1_devdata *dd)
11640 {
11641 	if (is_ax(dd))
11642 		return 1;
11643 
11644 	pio_send_control(dd, PSC_DATA_VL_ENABLE);
11645 
11646 	return 0;
11647 }
11648 
11649 /*
11650  * drain_data_vls() - assumes that disable_data_vls() has been called;
11651  * waits for the occupancy of the per-VL FIFOs, for all contexts and SDMA
11652  * engines, to drop to 0.
11653  */
11654 static void drain_data_vls(struct hfi1_devdata *dd)
11655 {
11656 	sc_wait(dd);
11657 	sdma_wait(dd);
11658 	pause_for_credit_return(dd);
11659 }
11660 
11661 /*
11662  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11663  *
11664  * Use open_fill_data_vls() to resume using data VLs.  This pair is
11665  * meant to be used like this:
11666  *
11667  * stop_drain_data_vls(dd);
11668  * // do things with per-VL resources
11669  * open_fill_data_vls(dd);
11670  */
11671 int stop_drain_data_vls(struct hfi1_devdata *dd)
11672 {
11673 	int ret;
11674 
11675 	ret = disable_data_vls(dd);
11676 	if (ret == 0)
11677 		drain_data_vls(dd);
11678 
11679 	return ret;
11680 }
11681 
11682 /*
11683  * Convert a nanosecond time to a cclock count.  No matter how slow
11684  * the cclock, a non-zero ns will always have a non-zero result.
11685  */
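/*
 * Worked example (illustrative only; the actual divisor is ASIC_CCLOCK_PS
 * or FPGA_CCLOCK_PS): with a hypothetical 2000 ps cclock period,
 * ns_to_cclock(dd, 5) is (5 * 1000) / 2000 = 2 cclocks, while
 * ns_to_cclock(dd, 1) computes 0 and is then clamped up to 1.
 */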
11686 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11687 {
11688 	u32 cclocks;
11689 
11690 	if (dd->icode == ICODE_FPGA_EMULATION)
11691 		cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11692 	else  /* simulation pretends to be ASIC */
11693 		cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11694 	if (ns && !cclocks)	/* if ns nonzero, must be at least 1 */
11695 		cclocks = 1;
11696 	return cclocks;
11697 }
11698 
11699 /*
11700  * Convert a cclock count to nanoseconds. No matter how slow
11701  * the cclock, a non-zero cclocks will always have a non-zero result.
11702  */
11703 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11704 {
11705 	u32 ns;
11706 
11707 	if (dd->icode == ICODE_FPGA_EMULATION)
11708 		ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11709 	else  /* simulation pretends to be ASIC */
11710 		ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11711 	if (cclocks && !ns)
11712 		ns = 1;
11713 	return ns;
11714 }
11715 
11716 /*
11717  * Dynamically adjust the receive interrupt timeout for a context based on
11718  * incoming packet rate.
11719  *
11720  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11721  */
11722 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11723 {
11724 	struct hfi1_devdata *dd = rcd->dd;
11725 	u32 timeout = rcd->rcvavail_timeout;
11726 
11727 	/*
11728 	 * This algorithm doubles or halves the timeout depending on whether
11729 	 * the number of packets received in this interrupt was less than or
11730 	 * greater than or equal to the interrupt count.
11731 	 *
11732 	 * The calculations below do not allow a steady state to be achieved.
11733 	 * Only at the endpoints is it possible to have an unchanging
11734 	 * timeout.
11735 	 */
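	/*
	 * Illustrative walk-through (numbers are hypothetical): with
	 * rcv_intr_count = 16 and a current timeout of 8, an interrupt
	 * covering only 10 packets halves the timeout to 4, while one
	 * covering 20 packets doubles it to 16, capped at
	 * dd->rcv_intr_timeout_csr.
	 */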
11736 	if (npkts < rcv_intr_count) {
11737 		/*
11738 		 * Not enough packets arrived before the timeout, adjust
11739 		 * timeout downward.
11740 		 */
11741 		if (timeout < 2) /* already at minimum? */
11742 			return;
11743 		timeout >>= 1;
11744 	} else {
11745 		/*
11746 		 * More than enough packets arrived before the timeout, adjust
11747 		 * timeout upward.
11748 		 */
11749 		if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11750 			return;
11751 		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11752 	}
11753 
11754 	rcd->rcvavail_timeout = timeout;
11755 	/*
11756 	 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11757 	 * been verified to be in range
11758 	 */
11759 	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11760 			(u64)timeout <<
11761 			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11762 }
11763 
11764 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11765 		    u32 intr_adjust, u32 npkts)
11766 {
11767 	struct hfi1_devdata *dd = rcd->dd;
11768 	u64 reg;
11769 	u32 ctxt = rcd->ctxt;
11770 
11771 	/*
11772 	 * Need to write timeout register before updating RcvHdrHead to ensure
11773 	 * that a new value is used when the HW decides to restart counting.
11774 	 */
11775 	if (intr_adjust)
11776 		adjust_rcv_timeout(rcd, npkts);
11777 	if (updegr) {
11778 		reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11779 			<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11780 		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11781 	}
11782 	mmiowb();
11783 	reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11784 		(((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11785 			<< RCV_HDR_HEAD_HEAD_SHIFT);
11786 	write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11787 	mmiowb();
11788 }
11789 
11790 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11791 {
11792 	u32 head, tail;
11793 
11794 	head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11795 		& RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11796 
11797 	if (rcd->rcvhdrtail_kvaddr)
11798 		tail = get_rcvhdrtail(rcd);
11799 	else
11800 		tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11801 
11802 	return head == tail;
11803 }
11804 
11805 /*
11806  * Context Control and Receive Array encoding for buffer size:
11807  *	0x0 invalid
11808  *	0x1   4 KB
11809  *	0x2   8 KB
11810  *	0x3  16 KB
11811  *	0x4  32 KB
11812  *	0x5  64 KB
11813  *	0x6 128 KB
11814  *	0x7 256 KB
11815  *	0x8 512 KB (Receive Array only)
11816  *	0x9   1 MB (Receive Array only)
11817  *	0xa   2 MB (Receive Array only)
11818  *
11819  *	0xB-0xF - reserved (Receive Array only)
11820  *
11821  *
11822  * This routine assumes that the value has already been sanity checked.
11823  */
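/*
 * For example, encoded_size(64 * 1024) returns 0x5; any size not in the
 * table above falls back to 0x1 (4 KB).
 */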
11824 static u32 encoded_size(u32 size)
11825 {
11826 	switch (size) {
11827 	case   4 * 1024: return 0x1;
11828 	case   8 * 1024: return 0x2;
11829 	case  16 * 1024: return 0x3;
11830 	case  32 * 1024: return 0x4;
11831 	case  64 * 1024: return 0x5;
11832 	case 128 * 1024: return 0x6;
11833 	case 256 * 1024: return 0x7;
11834 	case 512 * 1024: return 0x8;
11835 	case   1 * 1024 * 1024: return 0x9;
11836 	case   2 * 1024 * 1024: return 0xa;
11837 	}
11838 	return 0x1;	/* if invalid, go with the minimum size */
11839 }
11840 
11841 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11842 		  struct hfi1_ctxtdata *rcd)
11843 {
11844 	u64 rcvctrl, reg;
11845 	int did_enable = 0;
11846 	u16 ctxt;
11847 
11848 	if (!rcd)
11849 		return;
11850 
11851 	ctxt = rcd->ctxt;
11852 
11853 	hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11854 
11855 	rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11856 	/* if the context is already enabled, don't do the extra steps */
11857 	if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11858 	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11859 		/* reset the tail and hdr addresses, and sequence count */
11860 		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11861 				rcd->rcvhdrq_dma);
11862 		if (rcd->rcvhdrtail_kvaddr)
11863 			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11864 					rcd->rcvhdrqtailaddr_dma);
11865 		rcd->seq_cnt = 1;
11866 
11867 		/* reset the cached receive header queue head value */
11868 		rcd->head = 0;
11869 
11870 		/*
11871 		 * Zero the receive header queue so we don't get false
11872 		 * positives when checking the sequence number.  The
11873 		 * sequence numbers could land exactly on the same spot.
11874 		 * E.g. a rcd restart before the receive header wrapped.
11875 		 */
11876 		memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11877 
11878 		/* starting timeout */
11879 		rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11880 
11881 		/* enable the context */
11882 		rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11883 
11884 		/* clean the egr buffer size first */
11885 		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11886 		rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11887 				& RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11888 					<< RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11889 
11890 		/* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11891 		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11892 		did_enable = 1;
11893 
11894 		/* zero RcvEgrIndexHead */
11895 		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11896 
11897 		/* set eager count and base index */
11898 		reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11899 			& RCV_EGR_CTRL_EGR_CNT_MASK)
11900 		       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11901 			(((rcd->eager_base >> RCV_SHIFT)
11902 			  & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11903 			 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11904 		write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11905 
11906 		/*
11907 		 * Set TID (expected) count and base index.
11908 		 * rcd->expected_count is set to individual RcvArray entries,
11909 		 * not pairs, and the CSR takes a pair-count in groups of
11910 		 * four, so divide by 8.
11911 		 */
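		/*
		 * Worked example (hypothetical count, assuming RCV_SHIFT is
		 * the divide-by-8 shift): expected_count = 2048 entries
		 * yields 2048 >> 3 = 256 in the pair-count field.
		 */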
11912 		reg = (((rcd->expected_count >> RCV_SHIFT)
11913 					& RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11914 				<< RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11915 		      (((rcd->expected_base >> RCV_SHIFT)
11916 					& RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11917 				<< RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11918 		write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11919 		if (ctxt == HFI1_CTRL_CTXT)
11920 			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11921 	}
11922 	if (op & HFI1_RCVCTRL_CTXT_DIS) {
11923 		write_csr(dd, RCV_VL15, 0);
11924 		/*
11925 		 * When a receive context is being disabled, turn on tail
11926 		 * update with a dummy tail address and then disable the
11927 		 * receive context.
11928 		 */
11929 		if (dd->rcvhdrtail_dummy_dma) {
11930 			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11931 					dd->rcvhdrtail_dummy_dma);
11932 			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11933 			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11934 		}
11935 
11936 		rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11937 	}
11938 	if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11939 		rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11940 	if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11941 		rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11942 	if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
11943 		rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11944 	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11945 		/* See comment on RcvCtxtCtrl.TailUpd above */
11946 		if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11947 			rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11948 	}
11949 	if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11950 		rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11951 	if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11952 		rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11953 	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11954 		/*
11955 		 * In one-packet-per-eager mode, the size comes from
11956 		 * the RcvArray entry.
11957 		 */
11958 		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11959 		rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11960 	}
11961 	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11962 		rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11963 	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11964 		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11965 	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11966 		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11967 	if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11968 		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11969 	if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11970 		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11971 	rcd->rcvctrl = rcvctrl;
11972 	hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11973 	write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11974 
11975 	/* work around sticky RcvCtxtStatus.BlockedRHQFull */
11976 	if (did_enable &&
11977 	    (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11978 		reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11979 		if (reg != 0) {
11980 			dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11981 				    ctxt, reg);
11982 			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11983 			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11984 			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11985 			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11986 			reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11987 			dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11988 				    ctxt, reg, reg == 0 ? "not" : "still");
11989 		}
11990 	}
11991 
11992 	if (did_enable) {
11993 		/*
11994 		 * The interrupt timeout and count must be set after
11995 		 * the context is enabled to take effect.
11996 		 */
11997 		/* set interrupt timeout */
11998 		write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11999 				(u64)rcd->rcvavail_timeout <<
12000 				RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12001 
12002 		/* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12003 		reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12004 		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12005 	}
12006 
12007 	if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12008 		/*
12009 		 * If the context has been disabled and the Tail Update has
12010 		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy
12011 		 * address so it doesn't contain an invalid address.
12012 		 */
12013 		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12014 				dd->rcvhdrtail_dummy_dma);
12015 }
12016 
12017 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12018 {
12019 	int ret;
12020 	u64 val = 0;
12021 
12022 	if (namep) {
12023 		ret = dd->cntrnameslen;
12024 		*namep = dd->cntrnames;
12025 	} else {
12026 		const struct cntr_entry *entry;
12027 		int i, j;
12028 
12029 		ret = (dd->ndevcntrs) * sizeof(u64);
12030 
12031 		/* Get the start of the block of counters */
12032 		*cntrp = dd->cntrs;
12033 
12034 		/*
12035 		 * Now go and fill in each counter in the block.
12036 		 */
12037 		for (i = 0; i < DEV_CNTR_LAST; i++) {
12038 			entry = &dev_cntrs[i];
12039 			hfi1_cdbg(CNTR, "reading %s", entry->name);
12040 			if (entry->flags & CNTR_DISABLED) {
12041 				/* Nothing */
12042 				hfi1_cdbg(CNTR, "\tDisabled\n");
12043 			} else {
12044 				if (entry->flags & CNTR_VL) {
12045 					hfi1_cdbg(CNTR, "\tPer VL\n");
12046 					for (j = 0; j < C_VL_COUNT; j++) {
12047 						val = entry->rw_cntr(entry,
12048 								  dd, j,
12049 								  CNTR_MODE_R,
12050 								  0);
12051 						hfi1_cdbg(
12052 						   CNTR,
12053 						   "\t\tRead 0x%llx for %d\n",
12054 						   val, j);
12055 						dd->cntrs[entry->offset + j] =
12056 									    val;
12057 					}
12058 				} else if (entry->flags & CNTR_SDMA) {
12059 					hfi1_cdbg(CNTR,
12060 						  "\t Per SDMA Engine\n");
12061 					for (j = 0; j < dd->chip_sdma_engines;
12062 					     j++) {
12063 						val =
12064 						entry->rw_cntr(entry, dd, j,
12065 							       CNTR_MODE_R, 0);
12066 						hfi1_cdbg(CNTR,
12067 							  "\t\tRead 0x%llx for %d\n",
12068 							  val, j);
12069 						dd->cntrs[entry->offset + j] =
12070 									val;
12071 					}
12072 				} else {
12073 					val = entry->rw_cntr(entry, dd,
12074 							CNTR_INVALID_VL,
12075 							CNTR_MODE_R, 0);
12076 					dd->cntrs[entry->offset] = val;
12077 					hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12078 				}
12079 			}
12080 		}
12081 	}
12082 	return ret;
12083 }
12084 
12085 /*
12086  * Used by sysfs to create files for hfi stats to read
12087  */
12088 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12089 {
12090 	int ret;
12091 	u64 val = 0;
12092 
12093 	if (namep) {
12094 		ret = ppd->dd->portcntrnameslen;
12095 		*namep = ppd->dd->portcntrnames;
12096 	} else {
12097 		const struct cntr_entry *entry;
12098 		int i, j;
12099 
12100 		ret = ppd->dd->nportcntrs * sizeof(u64);
12101 		*cntrp = ppd->cntrs;
12102 
12103 		for (i = 0; i < PORT_CNTR_LAST; i++) {
12104 			entry = &port_cntrs[i];
12105 			hfi1_cdbg(CNTR, "reading %s", entry->name);
12106 			if (entry->flags & CNTR_DISABLED) {
12107 				/* Nothing */
12108 				hfi1_cdbg(CNTR, "\tDisabled\n");
12109 				continue;
12110 			}
12111 
12112 			if (entry->flags & CNTR_VL) {
12113 				hfi1_cdbg(CNTR, "\tPer VL");
12114 				for (j = 0; j < C_VL_COUNT; j++) {
12115 					val = entry->rw_cntr(entry, ppd, j,
12116 							       CNTR_MODE_R,
12117 							       0);
12118 					hfi1_cdbg(
12119 					   CNTR,
12120 					   "\t\tRead 0x%llx for %d",
12121 					   val, j);
12122 					ppd->cntrs[entry->offset + j] = val;
12123 				}
12124 			} else {
12125 				val = entry->rw_cntr(entry, ppd,
12126 						       CNTR_INVALID_VL,
12127 						       CNTR_MODE_R,
12128 						       0);
12129 				ppd->cntrs[entry->offset] = val;
12130 				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12131 			}
12132 		}
12133 	}
12134 	return ret;
12135 }
12136 
12137 static void free_cntrs(struct hfi1_devdata *dd)
12138 {
12139 	struct hfi1_pportdata *ppd;
12140 	int i;
12141 
12142 	if (dd->synth_stats_timer.data)
12143 		del_timer_sync(&dd->synth_stats_timer);
12144 	dd->synth_stats_timer.data = 0;
12145 	ppd = (struct hfi1_pportdata *)(dd + 1);
12146 	for (i = 0; i < dd->num_pports; i++, ppd++) {
12147 		kfree(ppd->cntrs);
12148 		kfree(ppd->scntrs);
12149 		free_percpu(ppd->ibport_data.rvp.rc_acks);
12150 		free_percpu(ppd->ibport_data.rvp.rc_qacks);
12151 		free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12152 		ppd->cntrs = NULL;
12153 		ppd->scntrs = NULL;
12154 		ppd->ibport_data.rvp.rc_acks = NULL;
12155 		ppd->ibport_data.rvp.rc_qacks = NULL;
12156 		ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12157 	}
12158 	kfree(dd->portcntrnames);
12159 	dd->portcntrnames = NULL;
12160 	kfree(dd->cntrs);
12161 	dd->cntrs = NULL;
12162 	kfree(dd->scntrs);
12163 	dd->scntrs = NULL;
12164 	kfree(dd->cntrnames);
12165 	dd->cntrnames = NULL;
12166 	if (dd->update_cntr_wq) {
12167 		destroy_workqueue(dd->update_cntr_wq);
12168 		dd->update_cntr_wq = NULL;
12169 	}
12170 }
12171 
12172 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12173 			      u64 *psval, void *context, int vl)
12174 {
12175 	u64 val;
12176 	u64 sval = *psval;
12177 
12178 	if (entry->flags & CNTR_DISABLED) {
12179 		dd_dev_err(dd, "Counter %s not enabled", entry->name);
12180 		return 0;
12181 	}
12182 
12183 	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12184 
12185 	val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12186 
12187 	/* If it's a synthetic counter, there is more work we need to do */
12188 	if (entry->flags & CNTR_SYNTH) {
12189 		if (sval == CNTR_MAX) {
12190 			/* No need to read already saturated */
12191 			return CNTR_MAX;
12192 		}
12193 
12194 		if (entry->flags & CNTR_32BIT) {
12195 			/* 32bit counters can wrap multiple times */
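			/*
			 * Illustrative wrap (hypothetical values): a saved
			 * sval of 0x1FFFFFFF0 gives upper = 1 and
			 * lower = 0xFFFFFFF0; a new h/w read of 0x10 means
			 * lower > val, so upper becomes 2 and the result is
			 * 0x200000010.
			 */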
12196 			u64 upper = sval >> 32;
12197 			u64 lower = (sval << 32) >> 32;
12198 
12199 			if (lower > val) { /* hw wrapped */
12200 				if (upper == CNTR_32BIT_MAX)
12201 					val = CNTR_MAX;
12202 				else
12203 					upper++;
12204 			}
12205 
12206 			if (val != CNTR_MAX)
12207 				val = (upper << 32) | val;
12208 
12209 		} else {
12210 			/* If we rolled we are saturated */
12211 			if ((val < sval) || (val > CNTR_MAX))
12212 				val = CNTR_MAX;
12213 		}
12214 	}
12215 
12216 	*psval = val;
12217 
12218 	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12219 
12220 	return val;
12221 }
12222 
12223 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12224 			       struct cntr_entry *entry,
12225 			       u64 *psval, void *context, int vl, u64 data)
12226 {
12227 	u64 val;
12228 
12229 	if (entry->flags & CNTR_DISABLED) {
12230 		dd_dev_err(dd, "Counter %s not enabled", entry->name);
12231 		return 0;
12232 	}
12233 
12234 	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12235 
12236 	if (entry->flags & CNTR_SYNTH) {
12237 		*psval = data;
12238 		if (entry->flags & CNTR_32BIT) {
12239 			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12240 					     (data << 32) >> 32);
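			/* (data << 32) >> 32 keeps only the low 32 bits for the h/w write */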
12241 			val = data; /* return the full 64bit value */
12242 		} else {
12243 			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12244 					     data);
12245 		}
12246 	} else {
12247 		val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12248 	}
12249 
12250 	*psval = val;
12251 
12252 	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12253 
12254 	return val;
12255 }
12256 
12257 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12258 {
12259 	struct cntr_entry *entry;
12260 	u64 *sval;
12261 
12262 	entry = &dev_cntrs[index];
12263 	sval = dd->scntrs + entry->offset;
12264 
12265 	if (vl != CNTR_INVALID_VL)
12266 		sval += vl;
12267 
12268 	return read_dev_port_cntr(dd, entry, sval, dd, vl);
12269 }
12270 
12271 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12272 {
12273 	struct cntr_entry *entry;
12274 	u64 *sval;
12275 
12276 	entry = &dev_cntrs[index];
12277 	sval = dd->scntrs + entry->offset;
12278 
12279 	if (vl != CNTR_INVALID_VL)
12280 		sval += vl;
12281 
12282 	return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12283 }
12284 
12285 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12286 {
12287 	struct cntr_entry *entry;
12288 	u64 *sval;
12289 
12290 	entry = &port_cntrs[index];
12291 	sval = ppd->scntrs + entry->offset;
12292 
12293 	if (vl != CNTR_INVALID_VL)
12294 		sval += vl;
12295 
12296 	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12297 	    (index <= C_RCV_HDR_OVF_LAST)) {
12298 		/* We do not want to bother for disabled contexts */
12299 		return 0;
12300 	}
12301 
12302 	return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12303 }
12304 
12305 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12306 {
12307 	struct cntr_entry *entry;
12308 	u64 *sval;
12309 
12310 	entry = &port_cntrs[index];
12311 	sval = ppd->scntrs + entry->offset;
12312 
12313 	if (vl != CNTR_INVALID_VL)
12314 		sval += vl;
12315 
12316 	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12317 	    (index <= C_RCV_HDR_OVF_LAST)) {
12318 		/* We do not want to bother for disabled contexts */
12319 		return 0;
12320 	}
12321 
12322 	return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12323 }
12324 
12325 static void do_update_synth_timer(struct work_struct *work)
12326 {
12327 	u64 cur_tx;
12328 	u64 cur_rx;
12329 	u64 total_flits;
12330 	u8 update = 0;
12331 	int i, j, vl;
12332 	struct hfi1_pportdata *ppd;
12333 	struct cntr_entry *entry;
12334 	struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12335 					       update_cntr_work);
12336 
12337 	/*
12338 	 * Rather than keep beating on the CSRs, pick a minimal set that we can
12339 	 * check to watch for a potential rollover. We can do this by looking at
12340 	 * the number of flits sent/received. If the total flits exceeds 32 bits
12341 	 * then we have to iterate over all the counters and update.
12342 	 */
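	/*
	 * Illustration (hypothetical values): if last_tx was 0x1000 and the
	 * new read is 0x180000000, the tx delta alone already exceeds
	 * CNTR_32BIT_MAX and forces a full update; a new read smaller than
	 * last_tx means the register itself rolled, which also forces one.
	 */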
12343 	entry = &dev_cntrs[C_DC_RCV_FLITS];
12344 	cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12345 
12346 	entry = &dev_cntrs[C_DC_XMIT_FLITS];
12347 	cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12348 
12349 	hfi1_cdbg(
12350 	    CNTR,
12351 	    "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12352 	    dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12353 
12354 	if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12355 		/*
12356 		 * May not be strictly necessary to update but it won't hurt and
12357 		 * simplifies the logic here.
12358 		 */
12359 		update = 1;
12360 		hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12361 			  dd->unit);
12362 	} else {
12363 		total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12364 		hfi1_cdbg(CNTR,
12365 			  "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12366 			  total_flits, (u64)CNTR_32BIT_MAX);
12367 		if (total_flits >= CNTR_32BIT_MAX) {
12368 			hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12369 				  dd->unit);
12370 			update = 1;
12371 		}
12372 	}
12373 
12374 	if (update) {
12375 		hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12376 		for (i = 0; i < DEV_CNTR_LAST; i++) {
12377 			entry = &dev_cntrs[i];
12378 			if (entry->flags & CNTR_VL) {
12379 				for (vl = 0; vl < C_VL_COUNT; vl++)
12380 					read_dev_cntr(dd, i, vl);
12381 			} else {
12382 				read_dev_cntr(dd, i, CNTR_INVALID_VL);
12383 			}
12384 		}
12385 		ppd = (struct hfi1_pportdata *)(dd + 1);
12386 		for (i = 0; i < dd->num_pports; i++, ppd++) {
12387 			for (j = 0; j < PORT_CNTR_LAST; j++) {
12388 				entry = &port_cntrs[j];
12389 				if (entry->flags & CNTR_VL) {
12390 					for (vl = 0; vl < C_VL_COUNT; vl++)
12391 						read_port_cntr(ppd, j, vl);
12392 				} else {
12393 					read_port_cntr(ppd, j, CNTR_INVALID_VL);
12394 				}
12395 			}
12396 		}
12397 
12398 		/*
12399 		 * We want the value in the register. The goal is to keep track
12400 		 * of the number of "ticks" not the counter value. In other
12401 		 * words if the register rolls we want to notice it and go ahead
12402 		 * and force an update.
12403 		 */
12404 		entry = &dev_cntrs[C_DC_XMIT_FLITS];
12405 		dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12406 						CNTR_MODE_R, 0);
12407 
12408 		entry = &dev_cntrs[C_DC_RCV_FLITS];
12409 		dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12410 						CNTR_MODE_R, 0);
12411 
12412 		hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12413 			  dd->unit, dd->last_tx, dd->last_rx);
12414 
12415 	} else {
12416 		hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12417 	}
12418 }
12419 
12420 static void update_synth_timer(unsigned long opaque)
12421 {
12422 	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12423 
12424 	queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12425 	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12426 }
12427 
12428 #define C_MAX_NAME 16 /* 15 chars + one for \0 */
12429 static int init_cntrs(struct hfi1_devdata *dd)
12430 {
12431 	int i, rcv_ctxts, j;
12432 	size_t sz;
12433 	char *p;
12434 	char name[C_MAX_NAME];
12435 	struct hfi1_pportdata *ppd;
12436 	const char *bit_type_32 = ",32";
12437 	const int bit_type_32_sz = strlen(bit_type_32);
12438 
12439 	/* set up the stats timer; it is started (mod_timer) at the end */
12440 	setup_timer(&dd->synth_stats_timer, update_synth_timer,
12441 		    (unsigned long)dd);
12442 
12443 	/***********************/
12444 	/* per device counters */
12445 	/***********************/
12446 
12447 	/* size names and determine how many we have */
12448 	dd->ndevcntrs = 0;
12449 	sz = 0;
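	/*
	 * Sizing example (hypothetical counter): a per-VL 32-bit counter
	 * named "RxFoo" adds, for each VL, strlen("RxFoo<vl>") plus
	 * strlen(",32") plus 1 for the trailing newline to sz, and bumps
	 * dd->ndevcntrs once per VL.
	 */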
12450 
12451 	for (i = 0; i < DEV_CNTR_LAST; i++) {
12452 		if (dev_cntrs[i].flags & CNTR_DISABLED) {
12453 			hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12454 			continue;
12455 		}
12456 
12457 		if (dev_cntrs[i].flags & CNTR_VL) {
12458 			dev_cntrs[i].offset = dd->ndevcntrs;
12459 			for (j = 0; j < C_VL_COUNT; j++) {
12460 				snprintf(name, C_MAX_NAME, "%s%d",
12461 					 dev_cntrs[i].name, vl_from_idx(j));
12462 				sz += strlen(name);
12463 				/* Add ",32" for 32-bit counters */
12464 				if (dev_cntrs[i].flags & CNTR_32BIT)
12465 					sz += bit_type_32_sz;
12466 				sz++;
12467 				dd->ndevcntrs++;
12468 			}
12469 		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
12470 			dev_cntrs[i].offset = dd->ndevcntrs;
12471 			for (j = 0; j < dd->chip_sdma_engines; j++) {
12472 				snprintf(name, C_MAX_NAME, "%s%d",
12473 					 dev_cntrs[i].name, j);
12474 				sz += strlen(name);
12475 				/* Add ",32" for 32-bit counters */
12476 				if (dev_cntrs[i].flags & CNTR_32BIT)
12477 					sz += bit_type_32_sz;
12478 				sz++;
12479 				dd->ndevcntrs++;
12480 			}
12481 		} else {
12482 			/* +1 for newline. */
12483 			sz += strlen(dev_cntrs[i].name) + 1;
12484 			/* Add ",32" for 32-bit counters */
12485 			if (dev_cntrs[i].flags & CNTR_32BIT)
12486 				sz += bit_type_32_sz;
12487 			dev_cntrs[i].offset = dd->ndevcntrs;
12488 			dd->ndevcntrs++;
12489 		}
12490 	}
12491 
12492 	/* allocate space for the counter values */
12493 	dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12494 			    GFP_KERNEL);
12495 	if (!dd->cntrs)
12496 		goto bail;
12497 
12498 	dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12499 	if (!dd->scntrs)
12500 		goto bail;
12501 
12502 	/* allocate space for the counter names */
12503 	dd->cntrnameslen = sz;
12504 	dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12505 	if (!dd->cntrnames)
12506 		goto bail;
12507 
12508 	/* fill in the names */
12509 	for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12510 		if (dev_cntrs[i].flags & CNTR_DISABLED) {
12511 			/* Nothing */
12512 		} else if (dev_cntrs[i].flags & CNTR_VL) {
12513 			for (j = 0; j < C_VL_COUNT; j++) {
12514 				snprintf(name, C_MAX_NAME, "%s%d",
12515 					 dev_cntrs[i].name,
12516 					 vl_from_idx(j));
12517 				memcpy(p, name, strlen(name));
12518 				p += strlen(name);
12519 
12520 				/* Counter is 32 bits */
12521 				if (dev_cntrs[i].flags & CNTR_32BIT) {
12522 					memcpy(p, bit_type_32, bit_type_32_sz);
12523 					p += bit_type_32_sz;
12524 				}
12525 
12526 				*p++ = '\n';
12527 			}
12528 		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
12529 			for (j = 0; j < dd->chip_sdma_engines; j++) {
12530 				snprintf(name, C_MAX_NAME, "%s%d",
12531 					 dev_cntrs[i].name, j);
12532 				memcpy(p, name, strlen(name));
12533 				p += strlen(name);
12534 
12535 				/* Counter is 32 bits */
12536 				if (dev_cntrs[i].flags & CNTR_32BIT) {
12537 					memcpy(p, bit_type_32, bit_type_32_sz);
12538 					p += bit_type_32_sz;
12539 				}
12540 
12541 				*p++ = '\n';
12542 			}
12543 		} else {
12544 			memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12545 			p += strlen(dev_cntrs[i].name);
12546 
12547 			/* Counter is 32 bits */
12548 			if (dev_cntrs[i].flags & CNTR_32BIT) {
12549 				memcpy(p, bit_type_32, bit_type_32_sz);
12550 				p += bit_type_32_sz;
12551 			}
12552 
12553 			*p++ = '\n';
12554 		}
12555 	}
12556 
12557 	/*********************/
12558 	/* per port counters */
12559 	/*********************/
12560 
12561 	/*
12562 	 * Go through the counters for the overflows and disable the ones we
12563 	 * don't need. This varies based on platform so we need to do it
12564 	 * dynamically here.
12565 	 */
12566 	rcv_ctxts = dd->num_rcv_contexts;
12567 	for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12568 	     i <= C_RCV_HDR_OVF_LAST; i++) {
12569 		port_cntrs[i].flags |= CNTR_DISABLED;
12570 	}
12571 
12572 	/* size port counter names and determine how many we have */
12573 	sz = 0;
12574 	dd->nportcntrs = 0;
12575 	for (i = 0; i < PORT_CNTR_LAST; i++) {
12576 		if (port_cntrs[i].flags & CNTR_DISABLED) {
12577 			hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12578 			continue;
12579 		}
12580 
12581 		if (port_cntrs[i].flags & CNTR_VL) {
12582 			port_cntrs[i].offset = dd->nportcntrs;
12583 			for (j = 0; j < C_VL_COUNT; j++) {
12584 				snprintf(name, C_MAX_NAME, "%s%d",
12585 					 port_cntrs[i].name, vl_from_idx(j));
12586 				sz += strlen(name);
12587 				/* Add ",32" for 32-bit counters */
12588 				if (port_cntrs[i].flags & CNTR_32BIT)
12589 					sz += bit_type_32_sz;
12590 				sz++;
12591 				dd->nportcntrs++;
12592 			}
12593 		} else {
12594 			/* +1 for newline */
12595 			sz += strlen(port_cntrs[i].name) + 1;
12596 			/* Add ",32" for 32-bit counters */
12597 			if (port_cntrs[i].flags & CNTR_32BIT)
12598 				sz += bit_type_32_sz;
12599 			port_cntrs[i].offset = dd->nportcntrs;
12600 			dd->nportcntrs++;
12601 		}
12602 	}
12603 
12604 	/* allocate space for the counter names */
12605 	dd->portcntrnameslen = sz;
12606 	dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12607 	if (!dd->portcntrnames)
12608 		goto bail;
12609 
12610 	/* fill in port cntr names */
12611 	for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12612 		if (port_cntrs[i].flags & CNTR_DISABLED)
12613 			continue;
12614 
12615 		if (port_cntrs[i].flags & CNTR_VL) {
12616 			for (j = 0; j < C_VL_COUNT; j++) {
12617 				snprintf(name, C_MAX_NAME, "%s%d",
12618 					 port_cntrs[i].name, vl_from_idx(j));
12619 				memcpy(p, name, strlen(name));
12620 				p += strlen(name);
12621 
12622 				/* Counter is 32 bits */
12623 				if (port_cntrs[i].flags & CNTR_32BIT) {
12624 					memcpy(p, bit_type_32, bit_type_32_sz);
12625 					p += bit_type_32_sz;
12626 				}
12627 
12628 				*p++ = '\n';
12629 			}
12630 		} else {
12631 			memcpy(p, port_cntrs[i].name,
12632 			       strlen(port_cntrs[i].name));
12633 			p += strlen(port_cntrs[i].name);
12634 
12635 			/* Counter is 32 bits */
12636 			if (port_cntrs[i].flags & CNTR_32BIT) {
12637 				memcpy(p, bit_type_32, bit_type_32_sz);
12638 				p += bit_type_32_sz;
12639 			}
12640 
12641 			*p++ = '\n';
12642 		}
12643 	}
12644 
12645 	/* allocate per port storage for counter values */
12646 	ppd = (struct hfi1_pportdata *)(dd + 1);
12647 	for (i = 0; i < dd->num_pports; i++, ppd++) {
12648 		ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12649 		if (!ppd->cntrs)
12650 			goto bail;
12651 
12652 		ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12653 		if (!ppd->scntrs)
12654 			goto bail;
12655 	}
12656 
12657 	/* CPU counters need to be allocated and zeroed */
12658 	if (init_cpu_counters(dd))
12659 		goto bail;
12660 
12661 	dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12662 						     WQ_MEM_RECLAIM, dd->unit);
12663 	if (!dd->update_cntr_wq)
12664 		goto bail;
12665 
12666 	INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12667 
12668 	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12669 	return 0;
12670 bail:
12671 	free_cntrs(dd);
12672 	return -ENOMEM;
12673 }
12674 
12675 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12676 {
12677 	switch (chip_lstate) {
12678 	default:
12679 		dd_dev_err(dd,
12680 			   "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12681 			   chip_lstate);
12682 		/* fall through */
12683 	case LSTATE_DOWN:
12684 		return IB_PORT_DOWN;
12685 	case LSTATE_INIT:
12686 		return IB_PORT_INIT;
12687 	case LSTATE_ARMED:
12688 		return IB_PORT_ARMED;
12689 	case LSTATE_ACTIVE:
12690 		return IB_PORT_ACTIVE;
12691 	}
12692 }
12693 
12694 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12695 {
12696 	/* look at the HFI meta-states only */
12697 	switch (chip_pstate & 0xf0) {
12698 	default:
12699 		dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12700 			   chip_pstate);
12701 		/* fall through */
12702 	case PLS_DISABLED:
12703 		return IB_PORTPHYSSTATE_DISABLED;
12704 	case PLS_OFFLINE:
12705 		return OPA_PORTPHYSSTATE_OFFLINE;
12706 	case PLS_POLLING:
12707 		return IB_PORTPHYSSTATE_POLLING;
12708 	case PLS_CONFIGPHY:
12709 		return IB_PORTPHYSSTATE_TRAINING;
12710 	case PLS_LINKUP:
12711 		return IB_PORTPHYSSTATE_LINKUP;
12712 	case PLS_PHYTEST:
12713 		return IB_PORTPHYSSTATE_PHY_TEST;
12714 	}
12715 }
12716 
12717 /* return the OPA port logical state name */
12718 const char *opa_lstate_name(u32 lstate)
12719 {
12720 	static const char * const port_logical_names[] = {
12721 		"PORT_NOP",
12722 		"PORT_DOWN",
12723 		"PORT_INIT",
12724 		"PORT_ARMED",
12725 		"PORT_ACTIVE",
12726 		"PORT_ACTIVE_DEFER",
12727 	};
12728 	if (lstate < ARRAY_SIZE(port_logical_names))
12729 		return port_logical_names[lstate];
12730 	return "unknown";
12731 }
12732 
12733 /* return the OPA port physical state name */
12734 const char *opa_pstate_name(u32 pstate)
12735 {
12736 	static const char * const port_physical_names[] = {
12737 		"PHYS_NOP",
12738 		"reserved1",
12739 		"PHYS_POLL",
12740 		"PHYS_DISABLED",
12741 		"PHYS_TRAINING",
12742 		"PHYS_LINKUP",
12743 		"PHYS_LINK_ERR_RECOVER",
12744 		"PHYS_PHY_TEST",
12745 		"reserved8",
12746 		"PHYS_OFFLINE",
12747 		"PHYS_GANGED",
12748 		"PHYS_TEST",
12749 	};
12750 	if (pstate < ARRAY_SIZE(port_physical_names))
12751 		return port_physical_names[pstate];
12752 	return "unknown";
12753 }
12754 
12755 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12756 {
12757 	/*
12758 	 * Set port status flags in the page mapped into userspace
12759 	 * memory. Do it here to ensure a reliable state - this is
12760 	 * the only function called by all state handling code.
12761 	 * Always set the flags because the cached value
12762 	 * might have been changed explicitly outside of this
12763 	 * function.
12764 	 */
12765 	if (ppd->statusp) {
12766 		switch (state) {
12767 		case IB_PORT_DOWN:
12768 		case IB_PORT_INIT:
12769 			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12770 					   HFI1_STATUS_IB_READY);
12771 			break;
12772 		case IB_PORT_ARMED:
12773 			*ppd->statusp |= HFI1_STATUS_IB_CONF;
12774 			break;
12775 		case IB_PORT_ACTIVE:
12776 			*ppd->statusp |= HFI1_STATUS_IB_READY;
12777 			break;
12778 		}
12779 	}
12780 }
12781 
12782 /*
12783  * wait_logical_linkstate - wait for an IB link state change to occur
12784  * @ppd: port device
12785  * @state: the state to wait for
12786  * @msecs: the number of milliseconds to wait
12787  *
12788  * Wait up to msecs milliseconds for IB link state change to occur.
12789  * For now, take the easy polling route.
12790  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12791  */
12792 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12793 				  int msecs)
12794 {
12795 	unsigned long timeout;
12796 	u32 new_state;
12797 
12798 	timeout = jiffies + msecs_to_jiffies(msecs);
12799 	while (1) {
12800 		new_state = chip_to_opa_lstate(ppd->dd,
12801 					       read_logical_state(ppd->dd));
12802 		if (new_state == state)
12803 			break;
12804 		if (time_after(jiffies, timeout)) {
12805 			dd_dev_err(ppd->dd,
12806 				   "timeout waiting for link state 0x%x\n",
12807 				   state);
12808 			return -ETIMEDOUT;
12809 		}
12810 		msleep(20);
12811 	}
12812 
12813 	update_statusp(ppd, state);
12814 	dd_dev_info(ppd->dd,
12815 		    "logical state changed to %s (0x%x)\n",
12816 		    opa_lstate_name(state),
12817 		    state);
12818 	return 0;
12819 }
12820 
12821 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12822 {
12823 	u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12824 
12825 	dd_dev_info(ppd->dd,
12826 		    "physical state changed to %s (0x%x), phy 0x%x\n",
12827 		    opa_pstate_name(ib_pstate), ib_pstate, state);
12828 }
12829 
12830 /*
12831  * Read the physical hardware link state and check if it matches the
12832  * host driver's anticipated state.
12833  */
12834 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12835 {
12836 	u32 read_state = read_physical_state(ppd->dd);
12837 
12838 	if (read_state == state) {
12839 		log_state_transition(ppd, state);
12840 	} else {
12841 		dd_dev_err(ppd->dd,
12842 			   "anticipated phy link state 0x%x, read 0x%x\n",
12843 			   state, read_state);
12844 	}
12845 }
12846 
12847 /*
12848  * wait_physical_linkstate - wait for a physical link state change to occur
12849  * @ppd: port device
12850  * @state: the state to wait for
12851  * @msecs: the number of milliseconds to wait
12852  *
12853  * Wait up to msecs milliseconds for physical link state change to occur.
12854  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12855  */
12856 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12857 				   int msecs)
12858 {
12859 	u32 read_state;
12860 	unsigned long timeout;
12861 
12862 	timeout = jiffies + msecs_to_jiffies(msecs);
12863 	while (1) {
12864 		read_state = read_physical_state(ppd->dd);
12865 		if (read_state == state)
12866 			break;
12867 		if (time_after(jiffies, timeout)) {
12868 			dd_dev_err(ppd->dd,
12869 				   "timeout waiting for phy link state 0x%x\n",
12870 				   state);
12871 			return -ETIMEDOUT;
12872 		}
12873 		usleep_range(1950, 2050); /* sleep 2ms-ish */
12874 	}
12875 
12876 	log_state_transition(ppd, state);
12877 	return 0;
12878 }
12879 
12880 /*
12881  * wait_phys_link_offline_substates - wait for any offline substate
12882  * @ppd: port device
12883  * @msecs: the number of milliseconds to wait
12884  *
12885  * Wait up to msecs milliseconds for any offline physical link
12886  * state change to occur.
12887  * Returns the read physical state on success, otherwise -ETIMEDOUT.
12888  */
12889 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12890 					    int msecs)
12891 {
12892 	u32 read_state;
12893 	unsigned long timeout;
12894 
12895 	timeout = jiffies + msecs_to_jiffies(msecs);
12896 	while (1) {
12897 		read_state = read_physical_state(ppd->dd);
12898 		if ((read_state & 0xF0) == PLS_OFFLINE)
12899 			break;
12900 		if (time_after(jiffies, timeout)) {
12901 			dd_dev_err(ppd->dd,
12902 				   "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12903 				   read_state, msecs);
12904 			return -ETIMEDOUT;
12905 		}
12906 		usleep_range(1950, 2050); /* sleep 2ms-ish */
12907 	}
12908 
12909 	log_state_transition(ppd, read_state);
12910 	return read_state;
12911 }
12912 
12913 /*
12914  * wait_phys_link_out_of_offline - wait for any out of offline state
12915  * @ppd: port device
12916  * @msecs: the number of milliseconds to wait
12917  *
12918  * Wait up to msecs milliseconds for any out of offline physical link
12919  * state change to occur.
12920  * Returns the read physical state on success, otherwise -ETIMEDOUT.
12921  */
12922 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
12923 					 int msecs)
12924 {
12925 	u32 read_state;
12926 	unsigned long timeout;
12927 
12928 	timeout = jiffies + msecs_to_jiffies(msecs);
12929 	while (1) {
12930 		read_state = read_physical_state(ppd->dd);
12931 		if ((read_state & 0xF0) != PLS_OFFLINE)
12932 			break;
12933 		if (time_after(jiffies, timeout)) {
12934 			dd_dev_err(ppd->dd,
12935 				   "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
12936 				   read_state, msecs);
12937 			return -ETIMEDOUT;
12938 		}
12939 		usleep_range(1950, 2050); /* sleep 2ms-ish */
12940 	}
12941 
12942 	log_state_transition(ppd, read_state);
12943 	return read_state;
12944 }
12945 
12946 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12947 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12948 
12949 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12950 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12951 
12952 void hfi1_init_ctxt(struct send_context *sc)
12953 {
12954 	if (sc) {
12955 		struct hfi1_devdata *dd = sc->dd;
12956 		u64 reg;
12957 		u8 set = (sc->type == SC_USER ?
12958 			  HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12959 			  HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12960 		reg = read_kctxt_csr(dd, sc->hw_context,
12961 				     SEND_CTXT_CHECK_ENABLE);
12962 		if (set)
12963 			CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12964 		else
12965 			SET_STATIC_RATE_CONTROL_SMASK(reg);
12966 		write_kctxt_csr(dd, sc->hw_context,
12967 				SEND_CTXT_CHECK_ENABLE, reg);
12968 	}
12969 }
12970 
12971 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12972 {
12973 	int ret = 0;
12974 	u64 reg;
12975 
12976 	if (dd->icode != ICODE_RTL_SILICON) {
12977 		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12978 			dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12979 				    __func__);
12980 		return -EINVAL;
12981 	}
12982 	reg = read_csr(dd, ASIC_STS_THERM);
12983 	temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12984 		      ASIC_STS_THERM_CURR_TEMP_MASK);
12985 	temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12986 			ASIC_STS_THERM_LO_TEMP_MASK);
12987 	temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12988 			ASIC_STS_THERM_HI_TEMP_MASK);
12989 	temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12990 			  ASIC_STS_THERM_CRIT_TEMP_MASK);
12991 	/* triggers is a 3-bit value - 1 bit per trigger. */
12992 	temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12993 
12994 	return ret;
12995 }
12996 
12997 /* ========================================================================= */
12998 
12999 /*
13000  * Enable/disable chip from delivering interrupts.
13001  */
13002 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
13003 {
13004 	int i;
13005 
13006 	/*
13007 	 * In HFI, the mask needs to be 1 to allow interrupts.
13008 	 */
13009 	if (enable) {
13010 		/* enable all interrupts */
13011 		for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13012 			write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
13013 
13014 		init_qsfp_int(dd);
13015 	} else {
13016 		for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13017 			write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13018 	}
13019 }
13020 
13021 /*
13022  * Clear all interrupt sources on the chip.
13023  */
13024 static void clear_all_interrupts(struct hfi1_devdata *dd)
13025 {
13026 	int i;
13027 
13028 	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13029 		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13030 
13031 	write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13032 	write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13033 	write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13034 	write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13035 	write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13036 	write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13037 	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13038 	for (i = 0; i < dd->chip_send_contexts; i++)
13039 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13040 	for (i = 0; i < dd->chip_sdma_engines; i++)
13041 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13042 
13043 	write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13044 	write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13045 	write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13046 }
13047 
13048 /* Move to pcie.c? */
13049 static void disable_intx(struct pci_dev *pdev)
13050 {
13051 	pci_intx(pdev, 0);
13052 }
13053 
13054 /**
13055  * hfi1_clean_up_interrupts() - Free all IRQ resources
13056  * @dd: valid device data structure
13057  *
13058  * Free the MSI-X or INTx IRQs and associated PCI resources,
13059  * if they have been allocated.
13060  */
13061 void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
13062 {
13063 	int i;
13064 
13065 	/* remove irqs - must happen before disabling/turning off */
13066 	if (dd->num_msix_entries) {
13067 		/* MSI-X */
13068 		struct hfi1_msix_entry *me = dd->msix_entries;
13069 
13070 		for (i = 0; i < dd->num_msix_entries; i++, me++) {
13071 			if (!me->arg) /* => no irq, no affinity */
13072 				continue;
13073 			hfi1_put_irq_affinity(dd, me);
13074 			free_irq(me->irq, me->arg);
13075 		}
13076 
13077 		/* clean structures */
13078 		kfree(dd->msix_entries);
13079 		dd->msix_entries = NULL;
13080 		dd->num_msix_entries = 0;
13081 	} else {
13082 		/* INTx */
13083 		if (dd->requested_intx_irq) {
13084 			free_irq(dd->pcidev->irq, dd);
13085 			dd->requested_intx_irq = 0;
13086 		}
13087 		disable_intx(dd->pcidev);
13088 	}
13089 
13090 	pci_free_irq_vectors(dd->pcidev);
13091 }
13092 
13093 /*
13094  * Remap the interrupt source from the general handler to the given MSI-X
13095  * interrupt.
13096  */
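/*
 * Illustrative mapping (hypothetical source number): for isrc = 70, bit 6
 * of dd->gi_mask[1] is cleared (70 / 64 = 1, 70 % 64 = 6) and byte 6 of
 * the CCE_INT_MAP CSR at index 8 (70 / 8 = 8, 70 % 8 = 6) is written with
 * the MSI-X vector number.
 */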
13097 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13098 {
13099 	u64 reg;
13100 	int m, n;
13101 
13102 	/* clear from the handled mask of the general interrupt */
13103 	m = isrc / 64;
13104 	n = isrc % 64;
13105 	if (likely(m < CCE_NUM_INT_CSRS)) {
13106 		dd->gi_mask[m] &= ~((u64)1 << n);
13107 	} else {
13108 		dd_dev_err(dd, "remap interrupt err\n");
13109 		return;
13110 	}
13111 
13112 	/* direct the chip source to the given MSI-X interrupt */
13113 	m = isrc / 8;
13114 	n = isrc % 8;
13115 	reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13116 	reg &= ~((u64)0xff << (8 * n));
13117 	reg |= ((u64)msix_intr & 0xff) << (8 * n);
13118 	write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13119 }
13120 
13121 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
13122 				  int engine, int msix_intr)
13123 {
13124 	/*
13125 	 * SDMA engine interrupt sources are grouped by type, rather than by
13126 	 * engine.  Per-engine interrupts are as follows:
13127 	 *	SDMA
13128 	 *	SDMAProgress
13129 	 *	SDMAIdle
13130 	 */
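	/*
	 * For example, assuming TXE_NUM_SDMA_ENGINES is 16, engine 3 remaps
	 * sources IS_SDMA_START + 3, + 19 and + 35 (SDMA, SDMAProgress and
	 * SDMAIdle) onto the same MSI-X vector.
	 */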
13131 	remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
13132 		   msix_intr);
13133 	remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
13134 		   msix_intr);
13135 	remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
13136 		   msix_intr);
13137 }
13138 
13139 static int request_intx_irq(struct hfi1_devdata *dd)
13140 {
13141 	int ret;
13142 
13143 	snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
13144 		 dd->unit);
13145 	ret = request_irq(dd->pcidev->irq, general_interrupt,
13146 			  IRQF_SHARED, dd->intx_name, dd);
13147 	if (ret)
13148 		dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
13149 			   ret);
13150 	else
13151 		dd->requested_intx_irq = 1;
13152 	return ret;
13153 }
13154 
13155 static int request_msix_irqs(struct hfi1_devdata *dd)
13156 {
13157 	int first_general, last_general;
13158 	int first_sdma, last_sdma;
13159 	int first_rx, last_rx;
13160 	int i, ret = 0;
13161 
13162 	/* calculate the ranges we are going to use */
13163 	first_general = 0;
13164 	last_general = first_general + 1;
13165 	first_sdma = last_general;
13166 	last_sdma = first_sdma + dd->num_sdma;
13167 	first_rx = last_sdma;
13168 	last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
13169 
13170 	/* VNIC MSIx interrupts get mapped when VNIC contexts are created */
13171 	dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
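	/*
	 * Example layout (hypothetical counts): with num_sdma = 16,
	 * n_krcv_queues = 8 and num_vnic_contexts = 2, vector 0 is the
	 * general interrupt, vectors 1-16 are SDMA, vectors 17-24 are kernel
	 * receive contexts, and vectors 25-26 are reserved for VNIC contexts.
	 */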
13172 
13173 	/*
13174 	 * Sanity check - the code expects all SDMA chip source
13175 	 * interrupts to be in the same CSR, starting at bit 0.  Verify
13176 	 * that this is true by checking the bit location of the start.
13177 	 */
13178 	BUILD_BUG_ON(IS_SDMA_START % 64);
13179 
13180 	for (i = 0; i < dd->num_msix_entries; i++) {
13181 		struct hfi1_msix_entry *me = &dd->msix_entries[i];
13182 		const char *err_info;
13183 		irq_handler_t handler;
13184 		irq_handler_t thread = NULL;
13185 		void *arg = NULL;
13186 		int idx;
13187 		struct hfi1_ctxtdata *rcd = NULL;
13188 		struct sdma_engine *sde = NULL;
13189 
13190 		/* obtain the arguments to request_irq */
13191 		if (first_general <= i && i < last_general) {
13192 			idx = i - first_general;
13193 			handler = general_interrupt;
13194 			arg = dd;
13195 			snprintf(me->name, sizeof(me->name),
13196 				 DRIVER_NAME "_%d", dd->unit);
13197 			err_info = "general";
13198 			me->type = IRQ_GENERAL;
13199 		} else if (first_sdma <= i && i < last_sdma) {
13200 			idx = i - first_sdma;
13201 			sde = &dd->per_sdma[idx];
13202 			handler = sdma_interrupt;
13203 			arg = sde;
13204 			snprintf(me->name, sizeof(me->name),
13205 				 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
13206 			err_info = "sdma";
13207 			remap_sdma_interrupts(dd, idx, i);
13208 			me->type = IRQ_SDMA;
13209 		} else if (first_rx <= i && i < last_rx) {
13210 			idx = i - first_rx;
13211 			rcd = hfi1_rcd_get_by_index(dd, idx);
13212 			if (rcd) {
13213 				/*
13214 				 * Set the interrupt register and mask for this
13215 				 * context's interrupt.
13216 				 */
13217 				rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13218 				rcd->imask = ((u64)1) <<
13219 					  ((IS_RCVAVAIL_START + idx) % 64);
13220 				handler = receive_context_interrupt;
13221 				thread = receive_context_thread;
13222 				arg = rcd;
13223 				snprintf(me->name, sizeof(me->name),
13224 					 DRIVER_NAME "_%d kctxt%d",
13225 					 dd->unit, idx);
13226 				err_info = "receive context";
13227 				remap_intr(dd, IS_RCVAVAIL_START + idx, i);
13228 				me->type = IRQ_RCVCTXT;
13229 				rcd->msix_intr = i;
13230 				hfi1_rcd_put(rcd);
13231 			}
13232 		} else {
13233 			/* not in our expected range - complain, then
13234 			 * ignore it
13235 			 */
13236 			dd_dev_err(dd,
13237 				   "Unexpected extra MSI-X interrupt %d\n", i);
13238 			continue;
13239 		}
13240 		/* no argument, no interrupt */
13241 		if (!arg)
13242 			continue;
13243 		/* make sure the name is terminated */
13244 		me->name[sizeof(me->name) - 1] = 0;
13245 		me->irq = pci_irq_vector(dd->pcidev, i);
13246 		/*
13247 		 * On err return me->irq.  Don't need to clear this
13248 		 * because 'arg' has not been set, and cleanup will
13249 		 * do the right thing.
13250 		 */
13251 		if (me->irq < 0)
13252 			return me->irq;
13253 
13254 		ret = request_threaded_irq(me->irq, handler, thread, 0,
13255 					   me->name, arg);
13256 		if (ret) {
13257 			dd_dev_err(dd,
13258 				   "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13259 				   err_info, me->irq, idx, ret);
13260 			return ret;
13261 		}
13262 		/*
13263 		 * assign arg after request_irq call, so it will be
13264 		 * cleaned up
13265 		 */
13266 		me->arg = arg;
13267 
13268 		ret = hfi1_get_irq_affinity(dd, me);
13269 		if (ret)
13270 			dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
13271 	}
13272 
13273 	return ret;
13274 }
13275 
13276 void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13277 {
13278 	int i;
13279 
13280 	if (!dd->num_msix_entries) {
13281 		synchronize_irq(dd->pcidev->irq);
13282 		return;
13283 	}
13284 
13285 	for (i = 0; i < dd->vnic.num_ctxt; i++) {
13286 		struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13287 		struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13288 
13289 		synchronize_irq(me->irq);
13290 	}
13291 }
13292 
13293 void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13294 {
13295 	struct hfi1_devdata *dd = rcd->dd;
13296 	struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13297 
13298 	if (!me->arg) /* => no irq, no affinity */
13299 		return;
13300 
13301 	hfi1_put_irq_affinity(dd, me);
13302 	free_irq(me->irq, me->arg);
13303 
13304 	me->arg = NULL;
13305 }
13306 
13307 void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13308 {
13309 	struct hfi1_devdata *dd = rcd->dd;
13310 	struct hfi1_msix_entry *me;
13311 	int idx = rcd->ctxt;
13312 	void *arg = rcd;
13313 	int ret;
13314 
13315 	rcd->msix_intr = dd->vnic.msix_idx++;
13316 	me = &dd->msix_entries[rcd->msix_intr];
13317 
13318 	/*
13319 	 * Set the interrupt register and mask for this
13320 	 * context's interrupt.
13321 	 */
13322 	rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13323 	rcd->imask = ((u64)1) <<
13324 		  ((IS_RCVAVAIL_START + idx) % 64);
13325 
13326 	snprintf(me->name, sizeof(me->name),
13327 		 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13328 	me->name[sizeof(me->name) - 1] = 0;
13329 	me->type = IRQ_RCVCTXT;
13330 	me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13331 	if (me->irq < 0) {
13332 		dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
13333 			   idx, me->irq);
13334 		return;
13335 	}
13336 	remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13337 
13338 	ret = request_threaded_irq(me->irq, receive_context_interrupt,
13339 				   receive_context_thread, 0, me->name, arg);
13340 	if (ret) {
13341 		dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13342 			   me->irq, idx, ret);
13343 		return;
13344 	}
13345 	/*
13346 	 * assign arg after request_irq call, so it will be
13347 	 * cleaned up
13348 	 */
13349 	me->arg = arg;
13350 
13351 	ret = hfi1_get_irq_affinity(dd, me);
13352 	if (ret) {
13353 		dd_dev_err(dd,
13354 			   "unable to pin IRQ %d\n", ret);
13355 		free_irq(me->irq, me->arg);
13356 	}
13357 }
13358 
13359 /*
13360  * Set the general handler to accept all interrupts, remap all
13361  * chip interrupts back to MSI-X 0.
13362  */
13363 static void reset_interrupts(struct hfi1_devdata *dd)
13364 {
13365 	int i;
13366 
13367 	/* all interrupts handled by the general handler */
13368 	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13369 		dd->gi_mask[i] = ~(u64)0;
13370 
13371 	/* all chip interrupts map to MSI-X 0 */
13372 	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13373 		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13374 }
13375 
13376 static int set_up_interrupts(struct hfi1_devdata *dd)
13377 {
13378 	u32 total;
13379 	int ret, request;
13380 	int single_interrupt = 0; /* we expect to have all the interrupts */
13381 
13382 	/*
13383 	 * Interrupt count:
13384 	 *	1 general, "slow path" interrupt (includes the SDMA engines
13385 	 *		slow source, SDMACleanupDone)
13386 	 *	N interrupts - one per used SDMA engine
13387 	 *	M interrupt - one per kernel receive context
13388 	 *	V interrupt - one for each VNIC context
13389 	 */
13390 	total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
13391 
13392 	/* ask for MSI-X interrupts */
13393 	request = request_msix(dd, total);
13394 	if (request < 0) {
13395 		ret = request;
13396 		goto fail;
13397 	} else if (request == 0) {
13398 		/* using INTx */
13399 		/* dd->num_msix_entries already zero */
13400 		single_interrupt = 1;
13401 		dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
13402 	} else if (request < total) {
13403 		/* using MSI-X, with reduced interrupts */
13404 		dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
13405 			   total, request);
13406 		ret = -EINVAL;
13407 		goto fail;
13408 	} else {
13409 		dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13410 					   GFP_KERNEL);
13411 		if (!dd->msix_entries) {
13412 			ret = -ENOMEM;
13413 			goto fail;
13414 		}
13415 		/* using MSI-X */
13416 		dd->num_msix_entries = total;
13417 		dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13418 	}
13419 
13420 	/* mask all interrupts */
13421 	set_intr_state(dd, 0);
13422 	/* clear all pending interrupts */
13423 	clear_all_interrupts(dd);
13424 
13425 	/* reset general handler mask, chip MSI-X mappings */
13426 	reset_interrupts(dd);
13427 
13428 	if (single_interrupt)
13429 		ret = request_intx_irq(dd);
13430 	else
13431 		ret = request_msix_irqs(dd);
13432 	if (ret)
13433 		goto fail;
13434 
13435 	return 0;
13436 
13437 fail:
13438 	hfi1_clean_up_interrupts(dd);
13439 	return ret;
13440 }
13441 
13442 /*
13443  * Set up context values in dd.  Sets:
13444  *
13445  *	num_rcv_contexts - number of contexts being used
13446  *	n_krcv_queues - number of kernel contexts
13447  *	first_dyn_alloc_ctxt - first dynamically allocated context
13448  *                             in array of contexts
13449  *	freectxts  - number of free user contexts
13450  *	num_send_contexts - number of PIO send contexts being used
13451  *	num_vnic_contexts - number of contexts reserved for VNIC
13452  */
13453 static int set_up_context_variables(struct hfi1_devdata *dd)
13454 {
13455 	unsigned long num_kernel_contexts;
13456 	u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13457 	int total_contexts;
13458 	int ret;
13459 	unsigned ngroups;
13460 	int qos_rmt_count;
13461 	int user_rmt_reduced;
13462 
13463 	/*
13464 	 * Kernel receive contexts:
13465 	 * - Context 0 - control context (VL15/multicast/error)
13466 	 * - Context 1 - first kernel context
13467 	 * - Context 2 - second kernel context
13468 	 * ...
13469 	 */
13470 	if (n_krcvqs)
13471 		/*
13472 		 * n_krcvqs is the sum of module parameter kernel receive
13473 		 * contexts, krcvqs[].  It does not include the control
13474 		 * context, so add that.
13475 		 */
13476 		num_kernel_contexts = n_krcvqs + 1;
13477 	else
13478 		num_kernel_contexts = DEFAULT_KRCVQS + 1;
13479 	/*
13480 	 * Every kernel receive context needs an ACK send context.
13481 	 * One send context is allocated for each VL{0-7} and VL15.
13482 	 */
13483 	if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
13484 		dd_dev_err(dd,
13485 			   "Reducing # kernel rcv contexts to: %d, from %lu\n",
13486 			   (int)(dd->chip_send_contexts - num_vls - 1),
13487 			   num_kernel_contexts);
13488 		num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
13489 	}
13490 
13491 	/* Accommodate VNIC contexts if possible */
13492 	if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
13493 		dd_dev_err(dd, "No receive contexts available for VNIC\n");
13494 		num_vnic_contexts = 0;
13495 	}
13496 	total_contexts = num_kernel_contexts + num_vnic_contexts;
13497 
13498 	/*
13499 	 * User contexts:
13500 	 *	- default to 1 user context per real (non-HT) CPU core if
13501 	 *	  num_user_contexts is negative
13502 	 */
13503 	if (num_user_contexts < 0)
13504 		num_user_contexts =
13505 			cpumask_weight(&node_affinity.real_cpu_mask);
13506 
13507 	/*
13508 	 * Adjust the counts given a global max.
13509 	 */
13510 	if (total_contexts + num_user_contexts > dd->chip_rcv_contexts) {
13511 		dd_dev_err(dd,
13512 			   "Reducing # user receive contexts to: %d, from %d\n",
13513 			   (int)(dd->chip_rcv_contexts - total_contexts),
13514 			   (int)num_user_contexts);
13515 		/* recalculate */
13516 		num_user_contexts = dd->chip_rcv_contexts - total_contexts;
13517 	}
13518 
13519 	/* each user context requires an entry in the RMT */
13520 	qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13521 	if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
13522 		user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13523 		dd_dev_err(dd,
13524 			   "RMT size is reducing the number of user receive contexts from %d to %d\n",
13525 			   (int)num_user_contexts,
13526 			   user_rmt_reduced);
13527 		/* recalculate */
13528 		num_user_contexts = user_rmt_reduced;
13529 	}
13530 
13531 	total_contexts += num_user_contexts;
13532 
13533 	/* the first N are kernel contexts, the rest are user/vnic contexts */
13534 	dd->num_rcv_contexts = total_contexts;
13535 	dd->n_krcv_queues = num_kernel_contexts;
13536 	dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13537 	dd->num_vnic_contexts = num_vnic_contexts;
13538 	dd->num_user_contexts = num_user_contexts;
13539 	dd->freectxts = num_user_contexts;
13540 	dd_dev_info(dd,
13541 		    "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13542 		    (int)dd->chip_rcv_contexts,
13543 		    (int)dd->num_rcv_contexts,
13544 		    (int)dd->n_krcv_queues,
13545 		    dd->num_vnic_contexts,
13546 		    dd->num_user_contexts);
13547 
13548 	/*
13549 	 * Receive array allocation:
13550 	 *   All RcvArray entries are divided into groups of 8. This
13551 	 *   is required by the hardware and will speed up writes to
13552 	 *   consecutive entries by using write-combining of the entire
13553 	 *   cacheline.
13554 	 *
13555 	 *   The number of groups is evenly divided among all contexts;
13556 	 *   any leftover groups are given to the first N user
13557 	 *   contexts.
13558 	 */
13559 	dd->rcv_entries.group_size = RCV_INCREMENT;
13560 	ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13561 	dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13562 	dd->rcv_entries.nctxt_extra = ngroups -
13563 		(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
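	/*
	 * Worked example (hypothetical sizes): a chip_rcv_array_count of
	 * 32768 with a group size of 8 yields 4096 groups; with 40 receive
	 * contexts each context gets 102 groups and nctxt_extra is
	 * 4096 - (40 * 102) = 16 leftover groups for the first user contexts.
	 */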
13564 	dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13565 		    dd->rcv_entries.ngroups,
13566 		    dd->rcv_entries.nctxt_extra);
13567 	if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13568 	    MAX_EAGER_ENTRIES * 2) {
13569 		dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13570 			dd->rcv_entries.group_size;
13571 		dd_dev_info(dd,
13572 			    "RcvArray group count too high, change to %u\n",
13573 			    dd->rcv_entries.ngroups);
13574 		dd->rcv_entries.nctxt_extra = 0;
13575 	}
13576 	/*
13577 	 * PIO send contexts
13578 	 */
13579 	ret = init_sc_pools_and_sizes(dd);
13580 	if (ret >= 0) {	/* success */
13581 		dd->num_send_contexts = ret;
13582 		dd_dev_info(
13583 			dd,
13584 			"send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13585 			dd->chip_send_contexts,
13586 			dd->num_send_contexts,
13587 			dd->sc_sizes[SC_KERNEL].count,
13588 			dd->sc_sizes[SC_ACK].count,
13589 			dd->sc_sizes[SC_USER].count,
13590 			dd->sc_sizes[SC_VL15].count);
13591 		ret = 0;	/* success */
13592 	}
13593 
13594 	return ret;
13595 }
13596 
13597 /*
13598  * Set the device/port partition key table. The MAD code
13599  * will ensure that, at least, the partial management
13600  * partition key is present in the table.
13601  */
13602 static void set_partition_keys(struct hfi1_pportdata *ppd)
13603 {
13604 	struct hfi1_devdata *dd = ppd->dd;
13605 	u64 reg = 0;
13606 	int i;
13607 
13608 	dd_dev_info(dd, "Setting partition keys\n");
13609 	for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13610 		reg |= (ppd->pkeys[i] &
13611 			RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13612 			((i % 4) *
13613 			 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13614 		/* Each register holds 4 PKey values. */
13615 		if ((i % 4) == 3) {
13616 			write_csr(dd, RCV_PARTITION_KEY +
13617 				  ((i - 3) * 2), reg);
13618 			reg = 0;
13619 		}
13620 	}
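	/*
	 * Note: each RcvPartitionKey CSR packs four 16-bit pkeys, so
	 * pkeys[0..3] land in the first 64-bit register, pkeys[4..7] in the
	 * next, and so on; ((i - 3) * 2) above is the byte offset of the
	 * register being completed (e.g. i = 7 gives offset 8).
	 */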
13621 
13622 	/* Always enable HW pkeys check when pkeys table is set */
13623 	add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13624 }
13625 
13626 /*
13627  * These CSRs and memories are uninitialized on reset and must be
13628  * written before reading to set the ECC/parity bits.
13629  *
13630  * NOTE: All user context CSRs that are not mmapped write-only
13631  * (e.g. the TID flows) must be initialized even if the driver never
13632  * reads them.
13633  */
13634 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13635 {
13636 	int i, j;
13637 
13638 	/* CceIntMap */
13639 	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13640 		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13641 
13642 	/* SendCtxtCreditReturnAddr */
13643 	for (i = 0; i < dd->chip_send_contexts; i++)
13644 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13645 
13646 	/* PIO Send buffers */
13647 	/* SDMA Send buffers */
13648 	/*
13649 	 * These are not normally read, and (presently) have no method
13650 	 * to be read, so are not pre-initialized
13651 	 */
13652 
13653 	/* RcvHdrAddr */
13654 	/* RcvHdrTailAddr */
13655 	/* RcvTidFlowTable */
13656 	for (i = 0; i < dd->chip_rcv_contexts; i++) {
13657 		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13658 		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13659 		for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13660 			write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13661 	}
13662 
13663 	/* RcvArray */
13664 	for (i = 0; i < dd->chip_rcv_array_count; i++)
13665 		hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13666 
13667 	/* RcvQPMapTable */
13668 	for (i = 0; i < 32; i++)
13669 		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13670 }
13671 
13672 /*
13673  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13674  */
13675 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13676 			     u64 ctrl_bits)
13677 {
13678 	unsigned long timeout;
13679 	u64 reg;
13680 
13681 	/* is the condition present? */
13682 	reg = read_csr(dd, CCE_STATUS);
13683 	if ((reg & status_bits) == 0)
13684 		return;
13685 
13686 	/* clear the condition */
13687 	write_csr(dd, CCE_CTRL, ctrl_bits);
13688 
13689 	/* wait for the condition to clear */
13690 	timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13691 	while (1) {
13692 		reg = read_csr(dd, CCE_STATUS);
13693 		if ((reg & status_bits) == 0)
13694 			return;
13695 		if (time_after(jiffies, timeout)) {
13696 			dd_dev_err(dd,
13697 				   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13698 				   status_bits, reg & status_bits);
13699 			return;
13700 		}
13701 		udelay(1);
13702 	}
13703 }
13704 
13705 /* set CCE CSRs to chip reset defaults */
13706 static void reset_cce_csrs(struct hfi1_devdata *dd)
13707 {
13708 	int i;
13709 
13710 	/* CCE_REVISION read-only */
13711 	/* CCE_REVISION2 read-only */
13712 	/* CCE_CTRL - bits clear automatically */
13713 	/* CCE_STATUS read-only, use CceCtrl to clear */
13714 	clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13715 	clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13716 	clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13717 	for (i = 0; i < CCE_NUM_SCRATCH; i++)
13718 		write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13719 	/* CCE_ERR_STATUS read-only */
13720 	write_csr(dd, CCE_ERR_MASK, 0);
13721 	write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13722 	/* CCE_ERR_FORCE leave alone */
13723 	for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13724 		write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13725 	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13726 	/* CCE_PCIE_CTRL leave alone */
13727 	for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13728 		write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13729 		write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13730 			  CCE_MSIX_TABLE_UPPER_RESETCSR);
13731 	}
13732 	for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13733 		/* CCE_MSIX_PBA read-only */
13734 		write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13735 		write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13736 	}
13737 	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13738 		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13739 	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13740 		/* CCE_INT_STATUS read-only */
13741 		write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13742 		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13743 		/* CCE_INT_FORCE leave alone */
13744 		/* CCE_INT_BLOCKED read-only */
13745 	}
13746 	for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13747 		write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13748 }
13749 
13750 /* set MISC CSRs to chip reset defaults */
13751 static void reset_misc_csrs(struct hfi1_devdata *dd)
13752 {
13753 	int i;
13754 
13755 	for (i = 0; i < 32; i++) {
13756 		write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13757 		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13758 		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13759 	}
13760 	/*
13761 	 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13762 	 * only be written 128-byte chunks
13763 	 */
13764 	/* init RSA engine to clear lingering errors */
13765 	write_csr(dd, MISC_CFG_RSA_CMD, 1);
13766 	write_csr(dd, MISC_CFG_RSA_MU, 0);
13767 	write_csr(dd, MISC_CFG_FW_CTRL, 0);
13768 	/* MISC_STS_8051_DIGEST read-only */
13769 	/* MISC_STS_SBM_DIGEST read-only */
13770 	/* MISC_STS_PCIE_DIGEST read-only */
13771 	/* MISC_STS_FAB_DIGEST read-only */
13772 	/* MISC_ERR_STATUS read-only */
13773 	write_csr(dd, MISC_ERR_MASK, 0);
13774 	write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13775 	/* MISC_ERR_FORCE leave alone */
13776 }
13777 
13778 /* set TXE CSRs to chip reset defaults */
13779 static void reset_txe_csrs(struct hfi1_devdata *dd)
13780 {
13781 	int i;
13782 
13783 	/*
13784 	 * TXE Kernel CSRs
13785 	 */
13786 	write_csr(dd, SEND_CTRL, 0);
13787 	__cm_reset(dd, 0);	/* reset CM internal state */
13788 	/* SEND_CONTEXTS read-only */
13789 	/* SEND_DMA_ENGINES read-only */
13790 	/* SEND_PIO_MEM_SIZE read-only */
13791 	/* SEND_DMA_MEM_SIZE read-only */
13792 	write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13793 	pio_reset_all(dd);	/* SEND_PIO_INIT_CTXT */
13794 	/* SEND_PIO_ERR_STATUS read-only */
13795 	write_csr(dd, SEND_PIO_ERR_MASK, 0);
13796 	write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13797 	/* SEND_PIO_ERR_FORCE leave alone */
13798 	/* SEND_DMA_ERR_STATUS read-only */
13799 	write_csr(dd, SEND_DMA_ERR_MASK, 0);
13800 	write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13801 	/* SEND_DMA_ERR_FORCE leave alone */
13802 	/* SEND_EGRESS_ERR_STATUS read-only */
13803 	write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13804 	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13805 	/* SEND_EGRESS_ERR_FORCE leave alone */
13806 	write_csr(dd, SEND_BTH_QP, 0);
13807 	write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13808 	write_csr(dd, SEND_SC2VLT0, 0);
13809 	write_csr(dd, SEND_SC2VLT1, 0);
13810 	write_csr(dd, SEND_SC2VLT2, 0);
13811 	write_csr(dd, SEND_SC2VLT3, 0);
13812 	write_csr(dd, SEND_LEN_CHECK0, 0);
13813 	write_csr(dd, SEND_LEN_CHECK1, 0);
13814 	/* SEND_ERR_STATUS read-only */
13815 	write_csr(dd, SEND_ERR_MASK, 0);
13816 	write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13817 	/* SEND_ERR_FORCE read-only */
13818 	for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13819 		write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13820 	for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13821 		write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13822 	for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13823 		write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13824 	for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13825 		write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13826 	for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13827 		write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13828 	write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13829 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13830 	/* SEND_CM_CREDIT_USED_STATUS read-only */
13831 	write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13832 	write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13833 	write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13834 	write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13835 	write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13836 	for (i = 0; i < TXE_NUM_DATA_VL; i++)
13837 		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13838 	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13839 	/* SEND_CM_CREDIT_USED_VL read-only */
13840 	/* SEND_CM_CREDIT_USED_VL15 read-only */
13841 	/* SEND_EGRESS_CTXT_STATUS read-only */
13842 	/* SEND_EGRESS_SEND_DMA_STATUS read-only */
13843 	write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13844 	/* SEND_EGRESS_ERR_INFO read-only */
13845 	/* SEND_EGRESS_ERR_SOURCE read-only */
13846 
13847 	/*
13848 	 * TXE Per-Context CSRs
13849 	 */
13850 	for (i = 0; i < dd->chip_send_contexts; i++) {
13851 		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13852 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13853 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13854 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13855 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13856 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13857 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13858 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13859 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13860 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13861 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13862 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13863 	}
13864 
13865 	/*
13866 	 * TXE Per-SDMA CSRs
13867 	 */
13868 	for (i = 0; i < dd->chip_sdma_engines; i++) {
13869 		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13870 		/* SEND_DMA_STATUS read-only */
13871 		write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13872 		write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13873 		write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13874 		/* SEND_DMA_HEAD read-only */
13875 		write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13876 		write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13877 		/* SEND_DMA_IDLE_CNT read-only */
13878 		write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13879 		write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13880 		/* SEND_DMA_DESC_FETCHED_CNT read-only */
13881 		/* SEND_DMA_ENG_ERR_STATUS read-only */
13882 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13883 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13884 		/* SEND_DMA_ENG_ERR_FORCE leave alone */
13885 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13886 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13887 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13888 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13889 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13890 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13891 		write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13892 	}
13893 }
13894 
13895 /*
13896  * Expect on entry:
13897  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13898  */
13899 static void init_rbufs(struct hfi1_devdata *dd)
13900 {
13901 	u64 reg;
13902 	int count;
13903 
13904 	/*
13905 	 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13906 	 * clear.
13907 	 */
13908 	count = 0;
13909 	while (1) {
13910 		reg = read_csr(dd, RCV_STATUS);
13911 		if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13912 			    | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13913 			break;
13914 		/*
13915 		 * Give up after 1ms - maximum wait time.
13916 		 *
13917 		 * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13918 		 * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13919 		 *	136 KB / (66% * 250MB/s) = 844us
13920 		 */
13921 		if (count++ > 500) {
13922 			dd_dev_err(dd,
13923 				   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13924 				   __func__, reg);
13925 			break;
13926 		}
13927 		udelay(2); /* do not busy-wait the CSR */
13928 	}
13929 
13930 	/* start the init - expect RcvCtrl to be 0 */
13931 	write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13932 
13933 	/*
13934 	 * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13935 	 * period after the write before RcvStatus.RxRbufInitDone is valid.
13936 	 * The delay in the first run through the loop below is sufficient and
13937 	 * required before the first read of RcvStatus.RxRbufInitDone.
13938 	 */
13939 	read_csr(dd, RCV_CTRL);
13940 
13941 	/* wait for the init to finish */
13942 	count = 0;
13943 	while (1) {
13944 		/* delay is required first time through - see above */
13945 		udelay(2); /* do not busy-wait the CSR */
13946 		reg = read_csr(dd, RCV_STATUS);
13947 		if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13948 			break;
13949 
13950 		/* give up after 100us - slowest possible at 33MHz is 73us */
13951 		if (count++ > 50) {
13952 			dd_dev_err(dd,
13953 				   "%s: RcvStatus.RxRbufInit not set, continuing\n",
13954 				   __func__);
13955 			break;
13956 		}
13957 	}
13958 }
13959 
13960 /* set RXE CSRs to chip reset defaults */
13961 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13962 {
13963 	int i, j;
13964 
13965 	/*
13966 	 * RXE Kernel CSRs
13967 	 */
13968 	write_csr(dd, RCV_CTRL, 0);
13969 	init_rbufs(dd);
13970 	/* RCV_STATUS read-only */
13971 	/* RCV_CONTEXTS read-only */
13972 	/* RCV_ARRAY_CNT read-only */
13973 	/* RCV_BUF_SIZE read-only */
13974 	write_csr(dd, RCV_BTH_QP, 0);
13975 	write_csr(dd, RCV_MULTICAST, 0);
13976 	write_csr(dd, RCV_BYPASS, 0);
13977 	write_csr(dd, RCV_VL15, 0);
13978 	/* this is a clear-down */
13979 	write_csr(dd, RCV_ERR_INFO,
13980 		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13981 	/* RCV_ERR_STATUS read-only */
13982 	write_csr(dd, RCV_ERR_MASK, 0);
13983 	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13984 	/* RCV_ERR_FORCE leave alone */
13985 	for (i = 0; i < 32; i++)
13986 		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13987 	for (i = 0; i < 4; i++)
13988 		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13989 	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13990 		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13991 	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13992 		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13993 	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13994 		clear_rsm_rule(dd, i);
13995 	for (i = 0; i < 32; i++)
13996 		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13997 
13998 	/*
13999 	 * RXE Kernel and User Per-Context CSRs
14000 	 */
14001 	for (i = 0; i < dd->chip_rcv_contexts; i++) {
14002 		/* kernel */
14003 		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
14004 		/* RCV_CTXT_STATUS read-only */
14005 		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
14006 		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
14007 		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
14008 		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
14009 		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
14010 		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
14011 		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
14012 		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
14013 		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
14014 		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
14015 
14016 		/* user */
14017 		/* RCV_HDR_TAIL read-only */
14018 		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
14019 		/* RCV_EGR_INDEX_TAIL read-only */
14020 		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
14021 		/* RCV_EGR_OFFSET_TAIL read-only */
14022 		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
14023 			write_uctxt_csr(dd, i,
14024 					RCV_TID_FLOW_TABLE + (8 * j), 0);
14025 		}
14026 	}
14027 }
14028 
14029 /*
14030  * Set sc2vl tables.
14031  *
14032  * They power on to zeros, so to avoid send context errors
14033  * they need to be set:
14034  *
14035  * SC 0-7 -> VL 0-7 (respectively)
14036  * SC 15  -> VL 15
14037  * otherwise
14038  *        -> VL 0
14039  */
14040 static void init_sc2vl_tables(struct hfi1_devdata *dd)
14041 {
14042 	int i;
14043 	/* init per architecture spec, constrained by hardware capability */
14044 
14045 	/* HFI maps sent packets */
14046 	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
14047 		0,
14048 		0, 0, 1, 1,
14049 		2, 2, 3, 3,
14050 		4, 4, 5, 5,
14051 		6, 6, 7, 7));
14052 	write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
14053 		1,
14054 		8, 0, 9, 0,
14055 		10, 0, 11, 0,
14056 		12, 0, 13, 0,
14057 		14, 0, 15, 15));
14058 	write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
14059 		2,
14060 		16, 0, 17, 0,
14061 		18, 0, 19, 0,
14062 		20, 0, 21, 0,
14063 		22, 0, 23, 0));
14064 	write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
14065 		3,
14066 		24, 0, 25, 0,
14067 		26, 0, 27, 0,
14068 		28, 0, 29, 0,
14069 		30, 0, 31, 0));
14070 
14071 	/* DC maps received packets */
14072 	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
14073 		15_0,
14074 		0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
14075 		8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
14076 	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
14077 		31_16,
14078 		16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
14079 		24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
14080 
14081 	/* initialize the cached sc2vl values consistently with h/w */
14082 	for (i = 0; i < 32; i++) {
14083 		if (i < 8 || i == 15)
14084 			*((u8 *)(dd->sc2vl) + i) = (u8)i;
14085 		else
14086 			*((u8 *)(dd->sc2vl) + i) = 0;
14087 	}
14088 }
14089 
14090 /*
14091  * Read chip sizes and then reset parts to sane, disabled values.  We cannot
14092  * depend on the chip going through a power-on reset - a driver may be loaded
14093  * and unloaded many times.
14094  *
14095  * Do not write any CSR values to the chip in this routine - there may be
14096  * a reset following the (possible) FLR in this routine.
14097  *
14098  */
14099 static int init_chip(struct hfi1_devdata *dd)
14100 {
14101 	int i;
14102 	int ret = 0;
14103 
14104 	/*
14105 	 * Put the HFI CSRs in a known state.
14106 	 * Combine this with a DC reset.
14107 	 *
14108 	 * Stop the device from doing anything while we do a
14109 	 * reset.  We know there are no other active users of
14110 	 * the device since we are now in charge.  Turn off
14111 	 * all outbound and inbound traffic and make sure
14112 	 * the device does not generate any interrupts.
14113 	 */
14114 
14115 	/* disable send contexts and SDMA engines */
14116 	write_csr(dd, SEND_CTRL, 0);
14117 	for (i = 0; i < dd->chip_send_contexts; i++)
14118 		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
14119 	for (i = 0; i < dd->chip_sdma_engines; i++)
14120 		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
14121 	/* disable port (turn off RXE inbound traffic) and contexts */
14122 	write_csr(dd, RCV_CTRL, 0);
14123 	for (i = 0; i < dd->chip_rcv_contexts; i++)
14124 		write_csr(dd, RCV_CTXT_CTRL, 0);
14125 	/* mask all interrupt sources */
14126 	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
14127 		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
14128 
14129 	/*
14130 	 * DC Reset: do a full DC reset before the register clear.
14131 	 * A recommended length of time to hold is one CSR read,
14132 	 * so reread the CceDcCtrl.  Then, hold the DC in reset
14133 	 * across the clear.
14134 	 */
14135 	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
14136 	(void)read_csr(dd, CCE_DC_CTRL);
14137 
14138 	if (use_flr) {
14139 		/*
14140 		 * A FLR will reset the SPC core and part of the PCIe.
14141 		 * The parts that need to be restored have already been
14142 		 * saved.
14143 		 */
14144 		dd_dev_info(dd, "Resetting CSRs with FLR\n");
14145 
14146 		/* do the FLR, the DC reset will remain */
14147 		pcie_flr(dd->pcidev);
14148 
14149 		/* restore command and BARs */
14150 		ret = restore_pci_variables(dd);
14151 		if (ret) {
14152 			dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14153 				   __func__);
14154 			return ret;
14155 		}
14156 
14157 		if (is_ax(dd)) {
14158 			dd_dev_info(dd, "Resetting CSRs with FLR\n");
14159 			pcie_flr(dd->pcidev);
14160 			ret = restore_pci_variables(dd);
14161 			if (ret) {
14162 				dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14163 					   __func__);
14164 				return ret;
14165 			}
14166 		}
14167 	} else {
14168 		dd_dev_info(dd, "Resetting CSRs with writes\n");
14169 		reset_cce_csrs(dd);
14170 		reset_txe_csrs(dd);
14171 		reset_rxe_csrs(dd);
14172 		reset_misc_csrs(dd);
14173 	}
14174 	/* clear the DC reset */
14175 	write_csr(dd, CCE_DC_CTRL, 0);
14176 
14177 	/* Set the LED off */
14178 	setextled(dd, 0);
14179 
14180 	/*
14181 	 * Clear the QSFP reset.
14182 	 * An FLR enforces a 0 on all out pins. The driver does not touch
14183 	 * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
14184 	 * anything plugged in constantly in reset, if it pays attention
14185 	 * to RESET_N.
14186 	 * Prime examples of this are optical cables. Set all pins high.
14187 	 * I2CCLK and I2CDAT will change per direction, and INT_N and
14188 	 * MODPRS_N are input only and their value is ignored.
14189 	 */
14190 	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
14191 	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
14192 	init_chip_resources(dd);
14193 	return ret;
14194 }
14195 
14196 static void init_early_variables(struct hfi1_devdata *dd)
14197 {
14198 	int i;
14199 
14200 	/* assign link credit variables */
14201 	dd->vau = CM_VAU;
14202 	dd->link_credits = CM_GLOBAL_CREDITS;
14203 	if (is_ax(dd))
14204 		dd->link_credits--;
14205 	dd->vcu = cu_to_vcu(hfi1_cu);
14206 	/* enough room for 8 MAD packets plus header - 17K */
14207 	dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14208 	if (dd->vl15_init > dd->link_credits)
14209 		dd->vl15_init = dd->link_credits;
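	/*
	 * Worked example (sketch): 8 * (2048 + 128) = 17408 bytes of VL15
	 * buffering.  Assuming vau_to_au() yields a 64-byte allocation unit
	 * here, vl15_init is 17408 / 64 = 272 credits, clamped to
	 * link_credits when that is smaller.
	 */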
14210 
14211 	write_uninitialized_csrs_and_memories(dd);
14212 
14213 	if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14214 		for (i = 0; i < dd->num_pports; i++) {
14215 			struct hfi1_pportdata *ppd = &dd->pport[i];
14216 
14217 			set_partition_keys(ppd);
14218 		}
14219 	init_sc2vl_tables(dd);
14220 }
14221 
14222 static void init_kdeth_qp(struct hfi1_devdata *dd)
14223 {
14224 	/* user changed the KDETH_QP */
14225 	if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14226 		/* out of range or illegal value */
14227 		dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14228 		kdeth_qp = 0;
14229 	}
14230 	if (kdeth_qp == 0)	/* not set, or failed range check */
14231 		kdeth_qp = DEFAULT_KDETH_QP;
14232 
14233 	write_csr(dd, SEND_BTH_QP,
14234 		  (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14235 		  SEND_BTH_QP_KDETH_QP_SHIFT);
14236 
14237 	write_csr(dd, RCV_BTH_QP,
14238 		  (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14239 		  RCV_BTH_QP_KDETH_QP_SHIFT);
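	/*
	 * Illustration (assumed semantics): only the upper bits of the
	 * 24-bit BTH QPN are compared against this prefix, so a kdeth_qp of
	 * 0x80 would classify QPNs 0x800000 through 0x80ffff as KDETH
	 * (expected receive/SDMA) traffic on both send and receive sides.
	 */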
14240 }
14241 
14242 /**
14243  * init_qpmap_table
14244  * @dd - device data
14245  * @first_ctxt - first context
14246  * @last_ctxt - last context
14247  *
14248  * This routine sets the qpn mapping table that
14249  * is indexed by qpn[8:1].
14250  *
14251  * The routine will round robin the 256 settings
14252  * from first_ctxt to last_ctxt.
14253  *
14254  * The first/last looks ahead to having specialized
14255  * receive contexts for mgmt and bypass.  Normal
14256  * verbs traffic is assumed to be on a range
14257  * of receive contexts.
14258  */
14259 static void init_qpmap_table(struct hfi1_devdata *dd,
14260 			     u32 first_ctxt,
14261 			     u32 last_ctxt)
14262 {
14263 	u64 reg = 0;
14264 	u64 regno = RCV_QP_MAP_TABLE;
14265 	int i;
14266 	u64 ctxt = first_ctxt;
14267 
14268 	for (i = 0; i < 256; i++) {
14269 		reg |= ctxt << (8 * (i % 8));
14270 		ctxt++;
14271 		if (ctxt > last_ctxt)
14272 			ctxt = first_ctxt;
14273 		if (i % 8 == 7) {
14274 			write_csr(dd, regno, reg);
14275 			reg = 0;
14276 			regno += 8;
14277 		}
14278 	}
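	/*
	 * Illustrative example: with first_ctxt = 1 and last_ctxt = 4 the
	 * 256 one-byte entries cycle 1,2,3,4,1,2,... and every eighth entry
	 * completes a 64-bit RcvQPMapTable register, so 32 registers are
	 * written in total.
	 */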
14279 
14280 	add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14281 			| RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14282 }
14283 
14284 struct rsm_map_table {
14285 	u64 map[NUM_MAP_REGS];
14286 	unsigned int used;
14287 };
14288 
14289 struct rsm_rule_data {
14290 	u8 offset;
14291 	u8 pkt_type;
14292 	u32 field1_off;
14293 	u32 field2_off;
14294 	u32 index1_off;
14295 	u32 index1_width;
14296 	u32 index2_off;
14297 	u32 index2_width;
14298 	u32 mask1;
14299 	u32 value1;
14300 	u32 mask2;
14301 	u32 value2;
14302 };
14303 
14304 /*
14305  * Return an initialized RMT map table for users to fill in.  OK if it
14306  * returns NULL, indicating no table.
14307  */
14308 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14309 {
14310 	struct rsm_map_table *rmt;
14311 	u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
14312 
14313 	rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14314 	if (rmt) {
14315 		memset(rmt->map, rxcontext, sizeof(rmt->map));
14316 		rmt->used = 0;
14317 	}
14318 
14319 	return rmt;
14320 }
14321 
14322 /*
14323  * Write the final RMT map table to the chip and free the table.  OK if
14324  * table is NULL.
14325  */
14326 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14327 				   struct rsm_map_table *rmt)
14328 {
14329 	int i;
14330 
14331 	if (rmt) {
14332 		/* write table to chip */
14333 		for (i = 0; i < NUM_MAP_REGS; i++)
14334 			write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14335 
14336 		/* enable RSM */
14337 		add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14338 	}
14339 }
14340 
14341 /*
14342  * Add a receive side mapping rule.
14343  */
14344 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14345 			 struct rsm_rule_data *rrd)
14346 {
14347 	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14348 		  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14349 		  1ull << rule_index | /* enable bit */
14350 		  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14351 	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14352 		  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14353 		  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14354 		  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14355 		  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14356 		  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14357 		  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14358 	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14359 		  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14360 		  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14361 		  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14362 		  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14363 }
14364 
14365 /*
14366  * Clear a receive side mapping rule.
14367  */
14368 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14369 {
14370 	write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14371 	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14372 	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14373 }
14374 
14375 /* return the number of RSM map table entries that will be used for QOS */
14376 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14377 			   unsigned int *np)
14378 {
14379 	int i;
14380 	unsigned int m, n;
14381 	u8 max_by_vl = 0;
14382 
14383 	/* is QOS active at all? */
14384 	if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14385 	    num_vls == 1 ||
14386 	    krcvqsset <= 1)
14387 		goto no_qos;
14388 
14389 	/* determine bits for qpn */
14390 	for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14391 		if (krcvqs[i] > max_by_vl)
14392 			max_by_vl = krcvqs[i];
14393 	if (max_by_vl > 32)
14394 		goto no_qos;
14395 	m = ilog2(__roundup_pow_of_two(max_by_vl));
14396 
14397 	/* determine bits for vl */
14398 	n = ilog2(__roundup_pow_of_two(num_vls));
14399 
14400 	/* reject if too much is used */
14401 	if ((m + n) > 7)
14402 		goto no_qos;
14403 
14404 	if (mp)
14405 		*mp = m;
14406 	if (np)
14407 		*np = n;
14408 
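	/*
	 * Worked example (hypothetical module parameters): krcvqs = {4,4,4,4}
	 * with num_vls = 4 gives max_by_vl = 4, so m = 2 qpn bits and
	 * n = 2 vl bits, and the rule consumes 1 << (2 + 2) = 16 RSM map
	 * table entries.
	 */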
14409 	return 1 << (m + n);
14410 
14411 no_qos:
14412 	if (mp)
14413 		*mp = 0;
14414 	if (np)
14415 		*np = 0;
14416 	return 0;
14417 }
14418 
14419 /**
14420  * init_qos - init RX qos
14421  * @dd - device data
14422  * @rmt - RSM map table
14423  *
14424  * This routine initializes Rule 0 and the RSM map table to implement
14425  * quality of service (qos).
14426  *
14427  * If all of the limit tests succeed, qos is applied based on the array
14428  * interpretation of krcvqs where entry 0 is VL0.
14429  *
14430  * The number of vl bits (n) and the number of qpn bits (m) are computed to
14431  * feed both the RSM map table and the single rule.
14432  */
14433 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14434 {
14435 	struct rsm_rule_data rrd;
14436 	unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14437 	unsigned int rmt_entries;
14438 	u64 reg;
14439 
14440 	if (!rmt)
14441 		goto bail;
14442 	rmt_entries = qos_rmt_entries(dd, &m, &n);
14443 	if (rmt_entries == 0)
14444 		goto bail;
14445 	qpns_per_vl = 1 << m;
14446 
14447 	/* enough room in the map table? */
14448 	rmt_entries = 1 << (m + n);
14449 	if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14450 		goto bail;
14451 
14452 	/* add qos entries to the RSM map table */
14453 	for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14454 		unsigned tctxt;
14455 
14456 		for (qpn = 0, tctxt = ctxt;
14457 		     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14458 			unsigned idx, regoff, regidx;
14459 
14460 			/* generate the index the hardware will produce */
14461 			idx = rmt->used + ((qpn << n) ^ i);
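			/*
			 * Illustration: the hardware index is the qpn bits
			 * shifted above the vl bits, XORed with the vl, and
			 * offset by the entries already used.  With n = 2 and
			 * rmt->used = 0, vl 1 / qpn 3 selects entry
			 * (3 << 2) ^ 1 = 13.
			 */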
14462 			regoff = (idx % 8) * 8;
14463 			regidx = idx / 8;
14464 			/* replace default with context number */
14465 			reg = rmt->map[regidx];
14466 			reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14467 				<< regoff);
14468 			reg |= (u64)(tctxt++) << regoff;
14469 			rmt->map[regidx] = reg;
14470 			if (tctxt == ctxt + krcvqs[i])
14471 				tctxt = ctxt;
14472 		}
14473 		ctxt += krcvqs[i];
14474 	}
14475 
14476 	rrd.offset = rmt->used;
14477 	rrd.pkt_type = 2;
14478 	rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14479 	rrd.field2_off = LRH_SC_MATCH_OFFSET;
14480 	rrd.index1_off = LRH_SC_SELECT_OFFSET;
14481 	rrd.index1_width = n;
14482 	rrd.index2_off = QPN_SELECT_OFFSET;
14483 	rrd.index2_width = m + n;
14484 	rrd.mask1 = LRH_BTH_MASK;
14485 	rrd.value1 = LRH_BTH_VALUE;
14486 	rrd.mask2 = LRH_SC_MASK;
14487 	rrd.value2 = LRH_SC_VALUE;
14488 
14489 	/* add rule 0 */
14490 	add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14491 
14492 	/* mark RSM map entries as used */
14493 	rmt->used += rmt_entries;
14494 	/* map everything else to the mcast/err/vl15 context */
14495 	init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14496 	dd->qos_shift = n + 1;
14497 	return;
14498 bail:
14499 	dd->qos_shift = 1;
14500 	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14501 }
14502 
14503 static void init_user_fecn_handling(struct hfi1_devdata *dd,
14504 				    struct rsm_map_table *rmt)
14505 {
14506 	struct rsm_rule_data rrd;
14507 	u64 reg;
14508 	int i, idx, regoff, regidx;
14509 	u8 offset;
14510 
14511 	/* there needs to be enough room in the map table */
14512 	if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14513 		dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14514 		return;
14515 	}
14516 
14517 	/*
14518 	 * RSM will extract the destination context as an index into the
14519 	 * map table.  The destination contexts are a sequential block
14520 	 * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
14521 	 * Map entries are accessed as offset + extracted value.  Adjust
14522 	 * the added offset so this sequence can be placed anywhere in
14523 	 * the table - as long as the entries themselves do not wrap.
14524 	 * There are only enough bits in offset for the table size, so
14525 	 * start with that to allow for a "negative" offset.
14526 	 */
14527 	offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14528 						(int)dd->first_dyn_alloc_ctxt);
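	/*
	 * Worked example (hypothetical values): with 256 map entries,
	 * rmt->used = 20 and first_dyn_alloc_ctxt = 24, offset is
	 * (256 + 20 - 24) = 252 truncated to a u8, so context 24 selects
	 * entry (252 + 24) mod 256 = 20, the first unused entry.
	 */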
14529 
14530 	for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
14531 				i < dd->num_rcv_contexts; i++, idx++) {
14532 		/* replace with identity mapping */
14533 		regoff = (idx % 8) * 8;
14534 		regidx = idx / 8;
14535 		reg = rmt->map[regidx];
14536 		reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14537 		reg |= (u64)i << regoff;
14538 		rmt->map[regidx] = reg;
14539 	}
14540 
14541 	/*
14542 	 * For RSM intercept of Expected FECN packets:
14543 	 * o packet type 0 - expected
14544 	 * o match on F (bit 95), using select/match 1, and
14545 	 * o match on SH (bit 133), using select/match 2.
14546 	 *
14547 	 * Use index 1 to extract the 8-bit receive context from DestQP
14548 	 * (start at bit 64).  Use that as the RSM map table index.
14549 	 */
14550 	rrd.offset = offset;
14551 	rrd.pkt_type = 0;
14552 	rrd.field1_off = 95;
14553 	rrd.field2_off = 133;
14554 	rrd.index1_off = 64;
14555 	rrd.index1_width = 8;
14556 	rrd.index2_off = 0;
14557 	rrd.index2_width = 0;
14558 	rrd.mask1 = 1;
14559 	rrd.value1 = 1;
14560 	rrd.mask2 = 1;
14561 	rrd.value2 = 1;
14562 
14563 	/* add rule 1 */
14564 	add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14565 
14566 	rmt->used += dd->num_user_contexts;
14567 }
14568 
14569 /* Initialize RSM for VNIC */
14570 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14571 {
14572 	u8 i, j;
14573 	u8 ctx_id = 0;
14574 	u64 reg;
14575 	u32 regoff;
14576 	struct rsm_rule_data rrd;
14577 
14578 	if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14579 		dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14580 			   dd->vnic.rmt_start);
14581 		return;
14582 	}
14583 
14584 	dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14585 		dd->vnic.rmt_start,
14586 		dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14587 
14588 	/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14589 	regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14590 	reg = read_csr(dd, regoff);
14591 	for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14592 		/* Update map register with vnic context */
14593 		j = (dd->vnic.rmt_start + i) % 8;
14594 		reg &= ~(0xffllu << (j * 8));
14595 		reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14596 		/* Wrap up vnic ctx index */
14597 		ctx_id %= dd->vnic.num_ctxt;
14598 		/* Write back map register */
14599 		if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14600 			dev_dbg(&(dd)->pcidev->dev,
14601 				"Vnic rsm map reg[%d] =0x%llx\n",
14602 				regoff - RCV_RSM_MAP_TABLE, reg);
14603 
14604 			write_csr(dd, regoff, reg);
14605 			regoff += 8;
14606 			if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14607 				reg = read_csr(dd, regoff);
14608 		}
14609 	}
14610 
14611 	/* Add rule for vnic */
14612 	rrd.offset = dd->vnic.rmt_start;
14613 	rrd.pkt_type = 4;
14614 	/* Match 16B packets */
14615 	rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14616 	rrd.mask1 = L2_TYPE_MASK;
14617 	rrd.value1 = L2_16B_VALUE;
14618 	/* Match ETH L4 packets */
14619 	rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14620 	rrd.mask2 = L4_16B_TYPE_MASK;
14621 	rrd.value2 = L4_16B_ETH_VALUE;
14622 	/* Calc context from veswid and entropy */
14623 	rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14624 	rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14625 	rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14626 	rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14627 	add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14628 
14629 	/* Enable RSM if not already enabled */
14630 	add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14631 }
14632 
14633 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14634 {
14635 	clear_rsm_rule(dd, RSM_INS_VNIC);
14636 
14637 	/* Disable RSM if used only by vnic */
14638 	if (dd->vnic.rmt_start == 0)
14639 		clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14640 }
14641 
14642 static int init_rxe(struct hfi1_devdata *dd)
14643 {
14644 	struct rsm_map_table *rmt;
14645 	u64 val;
14646 
14647 	/* enable all receive errors */
14648 	write_csr(dd, RCV_ERR_MASK, ~0ull);
14649 
14650 	rmt = alloc_rsm_map_table(dd);
14651 	if (!rmt)
14652 		return -ENOMEM;
14653 
14654 	/* set up QOS, including the QPN map table */
14655 	init_qos(dd, rmt);
14656 	init_user_fecn_handling(dd, rmt);
14657 	complete_rsm_map_table(dd, rmt);
14658 	/* record number of used rsm map entries for vnic */
14659 	dd->vnic.rmt_start = rmt->used;
14660 	kfree(rmt);
14661 
14662 	/*
14663 	 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14664 	 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14665 	 * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14666 	 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14667 	 * Max_PayLoad_Size set to its minimum of 128.
14668 	 * Max_Payload_Size set to its minimum of 128.
14669 	 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14670 	 * (64 bytes).  Max_Payload_Size is possibly modified upward in
14671 	 * tune_pcie_caps() which is called after this routine.
14672 	 */
14673 
14674 	/* Have 16 bytes (4DW) of bypass header available in header queue */
14675 	val = read_csr(dd, RCV_BYPASS);
14676 	val |= (4ull << 16);
14677 	write_csr(dd, RCV_BYPASS, val);
14678 	return 0;
14679 }
14680 
14681 static void init_other(struct hfi1_devdata *dd)
14682 {
14683 	/* enable all CCE errors */
14684 	write_csr(dd, CCE_ERR_MASK, ~0ull);
14685 	/* enable *some* Misc errors */
14686 	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14687 	/* enable all DC errors, except LCB */
14688 	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14689 	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14690 }
14691 
14692 /*
14693  * Fill out the given AU table using the given CU.  A CU is defined in terms
14694  * of AUs.  The table is an encoding: given the index, how many AUs does that
14695  * represent?
14696  *
14697  * NOTE: Assumes that the register layout is the same for the
14698  * local and remote tables.
14699  */
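/*
 * Sketch of the resulting encoding (illustrative): the eight table entries
 * hold 0, 1, 2*CU, 4*CU, 8*CU, 16*CU, 32*CU and 64*CU allocation units, so
 * with cu = 1 the table reads 0, 1, 2, 4, 8, 16, 32, 64 AUs.
 */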
14700 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14701 			       u32 csr0to3, u32 csr4to7)
14702 {
14703 	write_csr(dd, csr0to3,
14704 		  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14705 		  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14706 		  2ull * cu <<
14707 		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14708 		  4ull * cu <<
14709 		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14710 	write_csr(dd, csr4to7,
14711 		  8ull * cu <<
14712 		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14713 		  16ull * cu <<
14714 		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14715 		  32ull * cu <<
14716 		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14717 		  64ull * cu <<
14718 		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14719 }
14720 
14721 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14722 {
14723 	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14724 			   SEND_CM_LOCAL_AU_TABLE4_TO7);
14725 }
14726 
14727 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14728 {
14729 	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14730 			   SEND_CM_REMOTE_AU_TABLE4_TO7);
14731 }
14732 
14733 static void init_txe(struct hfi1_devdata *dd)
14734 {
14735 	int i;
14736 
14737 	/* enable all PIO, SDMA, general, and Egress errors */
14738 	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14739 	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14740 	write_csr(dd, SEND_ERR_MASK, ~0ull);
14741 	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14742 
14743 	/* enable all per-context and per-SDMA engine errors */
14744 	for (i = 0; i < dd->chip_send_contexts; i++)
14745 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14746 	for (i = 0; i < dd->chip_sdma_engines; i++)
14747 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14748 
14749 	/* set the local CU to AU mapping */
14750 	assign_local_cm_au_table(dd, dd->vcu);
14751 
14752 	/*
14753 	 * Set reasonable default for Credit Return Timer
14754 	 * Don't set on Simulator - causes it to choke.
14755 	 */
14756 	if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14757 		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14758 }
14759 
14760 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14761 		       u16 jkey)
14762 {
14763 	u8 hw_ctxt;
14764 	u64 reg;
14765 
14766 	if (!rcd || !rcd->sc)
14767 		return -EINVAL;
14768 
14769 	hw_ctxt = rcd->sc->hw_context;
14770 	reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14771 		((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14772 		 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14773 	/* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14774 	if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14775 		reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14776 	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14777 	/*
14778 	 * Enable send-side J_KEY integrity check, unless this is A0 h/w
14779 	 */
14780 	if (!is_ax(dd)) {
14781 		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14782 		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14783 		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14784 	}
14785 
14786 	/* Enable J_KEY check on receive context. */
14787 	reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14788 		((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14789 		 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14790 	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14791 
14792 	return 0;
14793 }
14794 
14795 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14796 {
14797 	u8 hw_ctxt;
14798 	u64 reg;
14799 
14800 	if (!rcd || !rcd->sc)
14801 		return -EINVAL;
14802 
14803 	hw_ctxt = rcd->sc->hw_context;
14804 	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14805 	/*
14806 	 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14807 	 * This check would not have been enabled for A0 h/w, see
14808 	 * set_ctxt_jkey().
14809 	 */
14810 	if (!is_ax(dd)) {
14811 		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14812 		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14813 		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14814 	}
14815 	/* Turn off the J_KEY on the receive side */
14816 	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14817 
14818 	return 0;
14819 }
14820 
14821 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14822 		       u16 pkey)
14823 {
14824 	u8 hw_ctxt;
14825 	u64 reg;
14826 
14827 	if (!rcd || !rcd->sc)
14828 		return -EINVAL;
14829 
14830 	hw_ctxt = rcd->sc->hw_context;
14831 	reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14832 		SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14833 	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14834 	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14835 	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14836 	reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14837 	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14838 
14839 	return 0;
14840 }
14841 
14842 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14843 {
14844 	u8 hw_ctxt;
14845 	u64 reg;
14846 
14847 	if (!ctxt || !ctxt->sc)
14848 		return -EINVAL;
14849 
14850 	hw_ctxt = ctxt->sc->hw_context;
14851 	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14852 	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14853 	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14854 	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14855 
14856 	return 0;
14857 }
14858 
14859 /*
14860  * Start cleaning up the chip. Our clean up happens in multiple
14861  * stages and this is just the first.
14862  */
14863 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14864 {
14865 	aspm_exit(dd);
14866 	free_cntrs(dd);
14867 	free_rcverr(dd);
14868 	finish_chip_resources(dd);
14869 }
14870 
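/*
 * Mask off the per-HFI index bit (GUID_HFI_INDEX_SHIFT) so that both HFIs
 * on an ASIC report the same base GUID; init_asic_data() below relies on
 * this to find the peer device.
 */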
14871 #define HFI_BASE_GUID(dev) \
14872 	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14873 
14874 /*
14875  * Information can be shared between the two HFIs on the same ASIC
14876  * in the same OS.  This function finds the peer device and sets
14877  * up a shared structure.
14878  */
14879 static int init_asic_data(struct hfi1_devdata *dd)
14880 {
14881 	unsigned long flags;
14882 	struct hfi1_devdata *tmp, *peer = NULL;
14883 	struct hfi1_asic_data *asic_data;
14884 	int ret = 0;
14885 
14886 	/* pre-allocate the asic structure in case we are the first device */
14887 	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14888 	if (!asic_data)
14889 		return -ENOMEM;
14890 
14891 	spin_lock_irqsave(&hfi1_devs_lock, flags);
14892 	/* Find our peer device */
14893 	list_for_each_entry(tmp, &hfi1_dev_list, list) {
14894 		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14895 		    dd->unit != tmp->unit) {
14896 			peer = tmp;
14897 			break;
14898 		}
14899 	}
14900 
14901 	if (peer) {
14902 		/* use already allocated structure */
14903 		dd->asic_data = peer->asic_data;
14904 		kfree(asic_data);
14905 	} else {
14906 		dd->asic_data = asic_data;
14907 		mutex_init(&dd->asic_data->asic_resource_mutex);
14908 	}
14909 	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14910 	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14911 
14912 	/* first one through - set up i2c devices */
14913 	if (!peer)
14914 		ret = set_up_i2c(dd, dd->asic_data);
14915 
14916 	return ret;
14917 }
14918 
14919 /*
14920  * Set dd->boardname.  Use a generic name if a name is not returned from
14921  * EFI variable space.
14922  *
14923  * Return 0 on success, -ENOMEM if space could not be allocated.
14924  */
14925 static int obtain_boardname(struct hfi1_devdata *dd)
14926 {
14927 	/* generic board description */
14928 	const char generic[] =
14929 		"Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14930 	unsigned long size;
14931 	int ret;
14932 
14933 	ret = read_hfi1_efi_var(dd, "description", &size,
14934 				(void **)&dd->boardname);
14935 	if (ret) {
14936 		dd_dev_info(dd, "Board description not found\n");
14937 		/* use generic description */
14938 		dd->boardname = kstrdup(generic, GFP_KERNEL);
14939 		if (!dd->boardname)
14940 			return -ENOMEM;
14941 	}
14942 	return 0;
14943 }
14944 
14945 /*
14946  * Check the interrupt registers to make sure that they are mapped correctly.
14947  * It is intended to help the user identify any mismapping by the VMM when
14948  * the driver is running in a VM. This function should only be called
14949  * before interrupts are set up.
14950  *
14951  * Return 0 on success, -EINVAL on failure.
14952  */
14953 static int check_int_registers(struct hfi1_devdata *dd)
14954 {
14955 	u64 reg;
14956 	u64 all_bits = ~(u64)0;
14957 	u64 mask;
14958 
14959 	/* Clear CceIntMask[0] to avoid raising any interrupts */
14960 	mask = read_csr(dd, CCE_INT_MASK);
14961 	write_csr(dd, CCE_INT_MASK, 0ull);
14962 	reg = read_csr(dd, CCE_INT_MASK);
14963 	if (reg)
14964 		goto err_exit;
14965 
14966 	/* Clear all interrupt status bits */
14967 	write_csr(dd, CCE_INT_CLEAR, all_bits);
14968 	reg = read_csr(dd, CCE_INT_STATUS);
14969 	if (reg)
14970 		goto err_exit;
14971 
14972 	/* Set all interrupt status bits */
14973 	write_csr(dd, CCE_INT_FORCE, all_bits);
14974 	reg = read_csr(dd, CCE_INT_STATUS);
14975 	if (reg != all_bits)
14976 		goto err_exit;
14977 
14978 	/* Restore the interrupt mask */
14979 	write_csr(dd, CCE_INT_CLEAR, all_bits);
14980 	write_csr(dd, CCE_INT_MASK, mask);
14981 
14982 	return 0;
14983 err_exit:
14984 	write_csr(dd, CCE_INT_MASK, mask);
14985 	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14986 	return -EINVAL;
14987 }
14988 
14989 /**
14990  * Allocate and initialize the device structure for the hfi.
14991  * @pdev: the pci_dev for the hfi1_ib device
14992  * @ent: pci_device_id struct for this dev
14993  *
14994  * Also allocates, initializes, and returns the devdata struct for this
14995  * device instance
14996  *
14997  * This is global, and is called directly at init to set up the
14998  * chip-specific function pointers for later use.
14999  */
15000 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
15001 				  const struct pci_device_id *ent)
15002 {
15003 	struct hfi1_devdata *dd;
15004 	struct hfi1_pportdata *ppd;
15005 	u64 reg;
15006 	int i, ret;
15007 	static const char * const inames[] = { /* implementation names */
15008 		"RTL silicon",
15009 		"RTL VCS simulation",
15010 		"RTL FPGA emulation",
15011 		"Functional simulator"
15012 	};
15013 	struct pci_dev *parent = pdev->bus->self;
15014 
15015 	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
15016 				sizeof(struct hfi1_pportdata));
15017 	if (IS_ERR(dd))
15018 		goto bail;
15019 	ppd = dd->pport;
15020 	for (i = 0; i < dd->num_pports; i++, ppd++) {
15021 		int vl;
15022 		/* init common fields */
15023 		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
15024 		/* DC supports 4 link widths */
15025 		ppd->link_width_supported =
15026 			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
15027 			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
15028 		ppd->link_width_downgrade_supported =
15029 			ppd->link_width_supported;
15030 		/* start out enabling only 4X */
15031 		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
15032 		ppd->link_width_downgrade_enabled =
15033 					ppd->link_width_downgrade_supported;
15034 		/* link width active is 0 when link is down */
15035 		/* link width downgrade active is 0 when link is down */
15036 
15037 		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
15038 		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
15039 			hfi1_early_err(&pdev->dev,
15040 				       "Invalid num_vls %u, using %u VLs\n",
15041 				       num_vls, HFI1_MAX_VLS_SUPPORTED);
15042 			num_vls = HFI1_MAX_VLS_SUPPORTED;
15043 		}
15044 		ppd->vls_supported = num_vls;
15045 		ppd->vls_operational = ppd->vls_supported;
15046 		/* Set the default MTU. */
15047 		for (vl = 0; vl < num_vls; vl++)
15048 			dd->vld[vl].mtu = hfi1_max_mtu;
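		/* VL15 carries management (MAD) traffic, so size it for a full MAD */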
15049 		dd->vld[15].mtu = MAX_MAD_PACKET;
15050 		/*
15051 		 * Set the initial values to reasonable default, will be set
15052 		 * for real when link is up.
15053 		 */
15054 		ppd->overrun_threshold = 0x4;
15055 		ppd->phy_error_threshold = 0xf;
15056 		ppd->port_crc_mode_enabled = link_crc_mask;
15057 		/* initialize supported LTP CRC mode */
15058 		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
15059 		/* initialize enabled LTP CRC mode */
15060 		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
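		/*
		 * The supported LTP CRC modes are packed into bits [11:8] and
		 * the enabled modes into bits [7:4] of port_ltp_crc_mode; the
		 * low nibble is left for the active mode, to be filled in
		 * when the link comes up.
		 */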
15061 		/* start in offline */
15062 		ppd->host_link_state = HLS_DN_OFFLINE;
15063 		init_vl_arb_caches(ppd);
15064 	}
15065 
15066 	dd->link_default = HLS_DN_POLL;
15067 
15068 	/*
15069 	 * Do remaining PCIe setup and save PCIe values in dd.
15070 	 * Any error printing is already done by the init code.
15071 	 * On return, we have the chip mapped.
15072 	 */
15073 	ret = hfi1_pcie_ddinit(dd, pdev);
15074 	if (ret < 0)
15075 		goto bail_free;
15076 
15077 	/* Save PCI space registers to rewrite after device reset */
15078 	ret = save_pci_variables(dd);
15079 	if (ret < 0)
15080 		goto bail_cleanup;
15081 
15082 	/* verify that reads actually work, save revision for reset check */
15083 	dd->revision = read_csr(dd, CCE_REVISION);
15084 	if (dd->revision == ~(u64)0) {
15085 		dd_dev_err(dd, "cannot read chip CSRs\n");
15086 		ret = -EINVAL;
15087 		goto bail_cleanup;
15088 	}
15089 	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
15090 			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
15091 	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
15092 			& CCE_REVISION_CHIP_REV_MINOR_MASK;
15093 
15094 	/*
15095 	 * Check interrupt registers mapping if the driver has no access to
15096 	 * the upstream component. In this case, it is likely that the driver
15097 	 * is running in a VM.
15098 	 */
15099 	if (!parent) {
15100 		ret = check_int_registers(dd);
15101 		if (ret)
15102 			goto bail_cleanup;
15103 	}
15104 
15105 	/*
15106 	 * obtain the hardware ID - NOT related to unit, which is a
15107 	 * software enumeration
15108 	 */
15109 	reg = read_csr(dd, CCE_REVISION2);
15110 	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
15111 					& CCE_REVISION2_HFI_ID_MASK;
15112 	/* the variable size will remove unwanted bits */
15113 	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
15114 	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
15115 	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
15116 		    dd->icode < ARRAY_SIZE(inames) ?
15117 		    inames[dd->icode] : "unknown", (int)dd->irev);
15118 
15119 	/* speeds the hardware can support */
15120 	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
15121 	/* speeds allowed to run at */
15122 	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
15123 	/* give a reasonable active value, will be set on link up */
15124 	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
15125 
15126 	dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
15127 	dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
15128 	dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
15129 	dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
15130 	dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
15131 	/* fix up link widths for emulation _p */
15132 	ppd = dd->pport;
15133 	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
15134 		ppd->link_width_supported =
15135 			ppd->link_width_enabled =
15136 			ppd->link_width_downgrade_supported =
15137 			ppd->link_width_downgrade_enabled =
15138 				OPA_LINK_WIDTH_1X;
15139 	}
15140 	/* ensure num_vls isn't larger than the number of sdma engines */
15141 	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
15142 		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
15143 			   num_vls, dd->chip_sdma_engines);
15144 		num_vls = dd->chip_sdma_engines;
15145 		ppd->vls_supported = dd->chip_sdma_engines;
15146 		ppd->vls_operational = ppd->vls_supported;
15147 	}
15148 
15149 	/*
15150 	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
15151 	 * Limit the max if larger than the field holds.  If timeout is
15152 	 * non-zero, then the calculated field will be at least 1.
15153 	 *
15154 	 * Must be after icode is set up - the cclock rate depends
15155 	 * on knowing the hardware being used.
15156 	 */
15157 	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
15158 	if (dd->rcv_intr_timeout_csr >
15159 			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
15160 		dd->rcv_intr_timeout_csr =
15161 			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
15162 	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
15163 		dd->rcv_intr_timeout_csr = 1;
15164 
15165 	/* needs to be done before we look for the peer device */
15166 	read_guid(dd);
15167 
15168 	/* set up shared ASIC data with peer device */
15169 	ret = init_asic_data(dd);
15170 	if (ret)
15171 		goto bail_cleanup;
15172 
15173 	/* obtain chip sizes, reset chip CSRs */
15174 	ret = init_chip(dd);
15175 	if (ret)
15176 		goto bail_cleanup;
15177 
15178 	/* read in the PCIe link speed information */
15179 	ret = pcie_speeds(dd);
15180 	if (ret)
15181 		goto bail_cleanup;
15182 
15183 	/* call before get_platform_config(), after init_chip_resources() */
15184 	ret = eprom_init(dd);
15185 	if (ret)
15186 		goto bail_free_rcverr;
15187 
15188 	/* Needs to be called before hfi1_firmware_init */
15189 	get_platform_config(dd);
15190 
15191 	/* read in firmware */
15192 	ret = hfi1_firmware_init(dd);
15193 	if (ret)
15194 		goto bail_cleanup;
15195 
15196 	/*
15197 	 * In general, the PCIe Gen3 transition must occur after the
15198 	 * chip has been idled (so it won't initiate any PCIe transactions
15199 	 * e.g. an interrupt) and before the driver changes any registers
15200 	 * (the transition will reset the registers).
15201 	 *
15202 	 * In particular, place this call after:
15203 	 * - init_chip()     - the chip will not initiate any PCIe transactions
15204 	 * - pcie_speeds()   - reads the current link speed
15205 	 * - hfi1_firmware_init() - the needed firmware is ready to be
15206 	 *			    downloaded
15207 	 */
15208 	ret = do_pcie_gen3_transition(dd);
15209 	if (ret)
15210 		goto bail_cleanup;
15211 
15212 	/* start setting dd values and adjusting CSRs */
15213 	init_early_variables(dd);
15214 
15215 	parse_platform_config(dd);
15216 
15217 	ret = obtain_boardname(dd);
15218 	if (ret)
15219 		goto bail_cleanup;
15220 
15221 	snprintf(dd->boardversion, BOARD_VERS_MAX,
15222 		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15223 		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15224 		 (u32)dd->majrev,
15225 		 (u32)dd->minrev,
15226 		 (dd->revision >> CCE_REVISION_SW_SHIFT)
15227 		    & CCE_REVISION_SW_MASK);
15228 
15229 	ret = set_up_context_variables(dd);
15230 	if (ret)
15231 		goto bail_cleanup;
15232 
15233 	/* set initial RXE CSRs */
15234 	ret = init_rxe(dd);
15235 	if (ret)
15236 		goto bail_cleanup;
15237 
15238 	/* set initial TXE CSRs */
15239 	init_txe(dd);
15240 	/* set initial non-RXE, non-TXE CSRs */
15241 	init_other(dd);
15242 	/* set up KDETH QP prefix in both RX and TX CSRs */
15243 	init_kdeth_qp(dd);
15244 
15245 	ret = hfi1_dev_affinity_init(dd);
15246 	if (ret)
15247 		goto bail_cleanup;
15248 
15249 	/* send contexts must be set up before receive contexts */
15250 	ret = init_send_contexts(dd);
15251 	if (ret)
15252 		goto bail_cleanup;
15253 
15254 	ret = hfi1_create_kctxts(dd);
15255 	if (ret)
15256 		goto bail_cleanup;
15257 
15258 	/*
15259 	 * Initialize aspm, to be done after gen3 transition and setting up
15260 	 * contexts and before enabling interrupts
15261 	 */
15262 	aspm_init(dd);
15263 
15264 	dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
15265 	/*
15266 	 * rcd[0] is guaranteed to be valid by this point. Also, all
15267 	 * contexts are using the same value, as per the module parameter.
15268 	 */
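	/*
	 * sizeof(u64) / sizeof(u32) == 2: the offset points at the last
	 * two dwords of each rcvhdrq entry, which hold the 8-byte RHF
	 * (receive header flags).
	 */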
15269 	dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
15270 
15271 	ret = init_pervl_scs(dd);
15272 	if (ret)
15273 		goto bail_cleanup;
15274 
15275 	/* sdma init */
15276 	for (i = 0; i < dd->num_pports; ++i) {
15277 		ret = sdma_init(dd, i);
15278 		if (ret)
15279 			goto bail_cleanup;
15280 	}
15281 
15282 	/* use contexts created by hfi1_create_kctxts */
15283 	ret = set_up_interrupts(dd);
15284 	if (ret)
15285 		goto bail_cleanup;
15286 
15287 	/* set up LCB access - must be after set_up_interrupts() */
15288 	init_lcb_access(dd);
15289 
15290 	/*
15291 	 * Serial number is created from the base guid:
15292 	 * [27:24] = base guid [38:35]
15293 	 * [23: 0] = base guid [23: 0]
15294 	 */
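	/*
	 * The >> 11 shift moves base guid bit 35 down to bit 24, so the
	 * 0xF000000 mask selects guid bits [38:35] as serial bits [27:24];
	 * the low 24 bits are copied through unchanged.
	 */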
15295 	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15296 		 (dd->base_guid & 0xFFFFFF) |
15297 		     ((dd->base_guid >> 11) & 0xF000000));
15298 
15299 	dd->oui1 = dd->base_guid >> 56 & 0xFF;
15300 	dd->oui2 = dd->base_guid >> 48 & 0xFF;
15301 	dd->oui3 = dd->base_guid >> 40 & 0xFF;
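	/* The OUI is the top three bytes (bits 63:40) of the EUI-64 base GUID */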
15302 
15303 	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15304 	if (ret)
15305 		goto bail_clear_intr;
15306 
15307 	thermal_init(dd);
15308 
15309 	ret = init_cntrs(dd);
15310 	if (ret)
15311 		goto bail_clear_intr;
15312 
15313 	ret = init_rcverr(dd);
15314 	if (ret)
15315 		goto bail_free_cntrs;
15316 
15317 	init_completion(&dd->user_comp);
15318 
15319 	/* The user refcount starts with one to indicate an active device */
15320 	atomic_set(&dd->user_refcount, 1);
15321 
15322 	goto bail;
15323 
15324 bail_free_rcverr:
15325 	free_rcverr(dd);
15326 bail_free_cntrs:
15327 	free_cntrs(dd);
15328 bail_clear_intr:
15329 	hfi1_clean_up_interrupts(dd);
15330 bail_cleanup:
15331 	hfi1_pcie_ddcleanup(dd);
15332 bail_free:
15333 	hfi1_free_devdata(dd);
15334 	dd = ERR_PTR(ret);
15335 bail:
15336 	return dd;
15337 }
15338 
15339 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15340 			u32 dw_len)
15341 {
15342 	u32 delta_cycles;
15343 	u32 current_egress_rate = ppd->current_egress_rate;
15344 	/* rates here are in units of 10^6 bits/sec */
15345 
15346 	if (desired_egress_rate == -1)
15347 		return 0; /* shouldn't happen */
15348 
15349 	if (desired_egress_rate >= current_egress_rate)
15350 		return 0; /* we can't help it go faster, only slower */
15351 
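	/*
	 * Extra egress cycles needed to pace this packet (dw_len dwords,
	 * i.e. dw_len * 4 bytes) at the slower desired rate instead of
	 * the current link rate.
	 */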
15352 	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15353 			egress_cycles(dw_len * 4, current_egress_rate);
15354 
15355 	return (u16)delta_cycles;
15356 }
15357 
15358 /**
15359  * create_pbc - build a pbc for transmission
15360  * @flags: special case flags or-ed in built pbc
15361  * @srate_mbs: static rate in Mbit/s
15362  * @vl: vl
15363  * @dw_len: dword length (header words + data words + pbc words)
15364  *
15365  * Create a PBC with the given flags, rate, VL, and length.
15366  *
15367  * NOTE: The PBC created will not insert any HCRC - all callers but one are
15368  * for verbs, which does not use this PSM feature.  The lone other caller
15369  * is for the diagnostic interface which calls this if the user does not
15370  * supply their own PBC.
15371  */
15372 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15373 	       u32 dw_len)
15374 {
15375 	u64 pbc, delay = 0;
15376 
15377 	if (unlikely(srate_mbs))
15378 		delay = delay_cycles(ppd, srate_mbs, dw_len);
15379 
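	/*
	 * Pack the PBC: caller-supplied flags, the static rate delay count,
	 * no HCRC insertion (PBC_IHCRC_NONE), the VL, and the total length
	 * in dwords.
	 */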
15380 	pbc = flags
15381 		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15382 		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15383 		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15384 		| (dw_len & PBC_LENGTH_DWS_MASK)
15385 			<< PBC_LENGTH_DWS_SHIFT;
15386 
15387 	return pbc;
15388 }
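/*
 * Illustrative use only (the names below are hypothetical, not from this
 * file): a sender with no static rate pacing might build its PBC as
 *
 *	pbc = create_pbc(ppd, 0, 0, vl, hdrwords + data_dwords + 2);
 *
 * where the trailing 2 covers the PBC itself (8 bytes == 2 dwords),
 * matching the "header words + data words + pbc words" note above.
 */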
15389 
15390 #define SBUS_THERMAL    0x4f
15391 #define SBUS_THERM_MONITOR_MODE 0x1
15392 
15393 #define THERM_FAILURE(dev, ret, reason) \
15394 	dd_dev_err((dev),						\
15395 		   "Thermal sensor initialization failed: %s (%d)\n",	\
15396 		   (reason), (ret))
15397 
15398 /*
15399  * Initialize the thermal sensor.
15400  *
15401  * After initialization, enable polling of thermal sensor through
15402  * SBus interface. For this to work, the SBus Master firmware
15403  * must be loaded, because the HW polling logic uses SBus
15404  * interrupts, which are not supported with the default firmware.
15405  * Otherwise, no data will be returned through
15406  * the ASIC_STS_THERM CSR.
15407  */
15408 static int thermal_init(struct hfi1_devdata *dd)
15409 {
15410 	int ret = 0;
15411 
15412 	if (dd->icode != ICODE_RTL_SILICON ||
15413 	    check_chip_resource(dd, CR_THERM_INIT, NULL))
15414 		return ret;
15415 
15416 	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15417 	if (ret) {
15418 		THERM_FAILURE(dd, ret, "Acquire SBus");
15419 		return ret;
15420 	}
15421 
15422 	dd_dev_info(dd, "Initializing thermal sensor\n");
15423 	/* Disable polling of thermal readings */
15424 	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15425 	msleep(100);
15426 	/* Thermal Sensor Initialization */
15427 	/*    Step 1: Reset the Thermal SBus Receiver */
15428 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15429 				RESET_SBUS_RECEIVER, 0);
15430 	if (ret) {
15431 		THERM_FAILURE(dd, ret, "Bus Reset");
15432 		goto done;
15433 	}
15434 	/*    Step 2: Set Reset bit in Thermal block */
15435 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15436 				WRITE_SBUS_RECEIVER, 0x1);
15437 	if (ret) {
15438 		THERM_FAILURE(dd, ret, "Therm Block Reset");
15439 		goto done;
15440 	}
15441 	/*    Step 3: Write clock divider value (100MHz -> 2MHz) */
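	/*    (0x32 == 50 decimal: the 100MHz reference divided by 50 gives 2MHz) */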
15442 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15443 				WRITE_SBUS_RECEIVER, 0x32);
15444 	if (ret) {
15445 		THERM_FAILURE(dd, ret, "Write Clock Div");
15446 		goto done;
15447 	}
15448 	/*    Step 4: Select temperature mode */
15449 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15450 				WRITE_SBUS_RECEIVER,
15451 				SBUS_THERM_MONITOR_MODE);
15452 	if (ret) {
15453 		THERM_FAILURE(dd, ret, "Write Mode Sel");
15454 		goto done;
15455 	}
15456 	/*    Step 5: De-assert block reset and start conversion */
15457 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15458 				WRITE_SBUS_RECEIVER, 0x2);
15459 	if (ret) {
15460 		THERM_FAILURE(dd, ret, "Write Reset Deassert");
15461 		goto done;
15462 	}
15463 	/*    Step 5.1: Wait for first conversion (21.5ms per spec) */
15464 	msleep(22);
15465 
15466 	/* Enable polling of thermal readings */
15467 	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15468 
15469 	/* Set initialized flag */
15470 	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15471 	if (ret)
15472 		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15473 
15474 done:
15475 	release_chip_resource(dd, CR_SBUS);
15476 	return ret;
15477 }
15478 
15479 static void handle_temp_err(struct hfi1_devdata *dd)
15480 {
15481 	struct hfi1_pportdata *ppd = &dd->pport[0];
15482 	/*
15483 	 * Thermal Critical Interrupt
15484 	 * Put the device into forced freeze mode, take link down to
15485 	 * offline, and put DC into reset.
15486 	 */
15487 	dd_dev_emerg(dd,
15488 		     "Critical temperature reached! Forcing device into freeze mode!\n");
15489 	dd->flags |= HFI1_FORCED_FREEZE;
15490 	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15491 	/*
15492 	 * Shut DC down as much and as quickly as possible.
15493 	 *
15494 	 * Step 1: Take the link down to OFFLINE. This will cause the
15495 	 *         8051 to put the Serdes in reset. However, we don't want to
15496 	 *         go through the entire link state machine since we want to
15497 	 *         shutdown ASAP. Furthermore, this is not a graceful shutdown
15498 	 *         but rather an attempt to save the chip.
15499 	 *         Code below is almost the same as quiet_serdes() but avoids
15500 	 *         all the extra work and the sleeps.
15501 	 */
15502 	ppd->driver_link_ready = 0;
15503 	ppd->link_enabled = 0;
15504 	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15505 				PLS_OFFLINE);
15506 	/*
15507 	 * Step 2: Shutdown LCB and 8051
15508 	 *         After shutdown, do not restore DC_CFG_RESET value.
15509 	 */
15510 	dc_shutdown(dd);
15511 }
15512