// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
#include "nitrox_hal.h"
#include "nitrox_mbx.h"

/*
 * One vector for each type of ring:
 *  - NPS packet ring, AQMQ ring and ZQMQ ring
 */
#define NR_RING_VECTORS 3
#define NR_NON_RING_VECTORS 1
/* base entry for packet ring/port */
#define PKT_RING_MSIX_BASE 0
#define NON_RING_MSIX_BASE 192
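/*
 * Entry 192 is the first entry after the ring vectors:
 * 64 rings * NR_RING_VECTORS = 192 (see the PF MSI-X layout in
 * nitrox_register_interrupts() below).
 */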

/**
 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
 * @irq: irq number
 * @data: pointer to the ring's struct nitrox_q_vector
 */
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
	struct nitrox_q_vector *qvec = data;
	union nps_pkt_slc_cnts slc_cnts;
	struct nitrox_cmdq *cmdq = qvec->cmdq;

	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
	/* New packet on SLC output port */
	if (slc_cnts.s.slc_int)
		tasklet_hi_schedule(&qvec->resp_tasklet);

	return IRQ_HANDLED;
}

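/*
 * The clear_*_err_intr() helpers below rely on the hardware's
 * write-1-to-clear semantics: reading an interrupt status CSR and
 * writing the same value back clears exactly the bits that were set.
 */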
static void clear_nps_core_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	/* Write 1 to clear */
	value = nitrox_read_csr(ndev, NPS_CORE_INT);
	nitrox_write_csr(ndev, NPS_CORE_INT, value);

	dev_err_ratelimited(DEV(ndev), "NPS_CORE_INT 0x%016llx\n", value);
}

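/*
 * Clear NPS packet interrupts. For latched solicit/input ring errors,
 * also clear the per-ring error CSRs and re-enable the affected
 * solicit ports and input rings.
 */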
static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
{
	union nps_pkt_int pkt_int;
	unsigned long value, offset;
	int i;

	pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
	dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT 0x%016llx\n",
			    pkt_int.value);

	if (pkt_int.s.slc_err) {
		offset = NPS_PKT_SLC_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_ERR_TYPE 0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* enable the solicit ports */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_solicit_port(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_LO 0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_HI 0x%016lx\n", value);
	}

	if (pkt_int.s.in_err) {
		offset = NPS_PKT_IN_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_ERR_TYPE 0x%016lx\n", value);
		offset = NPS_PKT_IN_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* enable the input ring */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_input_ring(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_LO 0x%016lx\n", value);

		offset = NPS_PKT_IN_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_HI 0x%016lx\n", value);
	}
}

static void clear_pom_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, POM_INT);
	nitrox_write_csr(ndev, POM_INT, value);
	dev_err_ratelimited(DEV(ndev), "POM_INT 0x%016llx\n", value);
}

static void clear_pem_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, PEM0_INT);
	nitrox_write_csr(ndev, PEM0_INT, value);
	dev_err_ratelimited(DEV(ndev), "PEM(0)_INT 0x%016llx\n", value);
}

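/*
 * Clear LBC interrupts: acknowledge the per-cluster EFL error CSRs on
 * DMA read errors, invalidate the cache on a CAM soft error, and clear
 * the per-VF prefetch/read data-length mismatch CSRs.
 */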
static void clear_lbc_err_intr(struct nitrox_device *ndev)
{
	union lbc_int lbc_int;
	u64 value, offset;
	int i;

	lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
	dev_err_ratelimited(DEV(ndev), "LBC_INT 0x%016llx\n", lbc_int.value);

	if (lbc_int.s.dma_rd_err) {
		for (i = 0; i < NR_CLUSTERS; i++) {
			offset = EFL_CORE_VF_ERR_INT0X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
			offset = EFL_CORE_VF_ERR_INT1X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}

	if (lbc_int.s.cam_soft_err) {
		dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
		invalidate_lbc(ndev);
	}

	if (lbc_int.s.pref_dat_len_mismatch_err) {
		offset = LBC_PLM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_PLM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}

	if (lbc_int.s.rd_dat_len_mismatch_err) {
		offset = LBC_ELM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_ELM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}
	nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
}

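/* Clear per-cluster EFL core interrupts and any latched SE errors. */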
static void clear_efl_err_intr(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		union efl_core_int core_int;
		u64 value, offset;

		offset = EFL_CORE_INTX(i);
		core_int.value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, core_int.value);
		dev_err_ratelimited(DEV(ndev), "EFL_CORE(%d)_INT 0x%016llx\n",
				    i, core_int.value);
		if (core_int.s.se_err) {
			offset = EFL_CORE_SE_ERR_INTX(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}
}

static void clear_bmi_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, BMI_INT);
	nitrox_write_csr(ndev, BMI_INT, value);
	dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
}

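/*
 * Deferred (bottom-half) handler for NPS_CORE_INT_ACTIVE; currently a
 * placeholder for PF queue recovery and PF-to-VF error notification.
 */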
static void nps_core_int_tasklet(unsigned long data)
{
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_device *ndev = qvec->ndev;

	/* if in PF mode, do queue recovery */
	if (ndev->mode == __NDEV_MODE_PF) {
	} else {
		/*
		 * if VF(s) are enabled, communicate the error
		 * information to the VF(s)
		 */
	}
}

/**
 * nps_core_int_isr - interrupt handler for NITROX errors and
 *   mailbox communication
 * @irq: irq number
 * @data: pointer to the non-ring struct nitrox_q_vector
 */
static irqreturn_t nps_core_int_isr(int irq, void *data)
{
	struct nitrox_q_vector *qvec = data;
	struct nitrox_device *ndev = qvec->ndev;
	union nps_core_int_active core_int;

	core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);

	if (core_int.s.nps_core)
		clear_nps_core_err_intr(ndev);

	if (core_int.s.nps_pkt)
		clear_nps_pkt_err_intr(ndev);

	if (core_int.s.pom)
		clear_pom_err_intr(ndev);

	if (core_int.s.pem)
		clear_pem_err_intr(ndev);

	if (core_int.s.lbc)
		clear_lbc_err_intr(ndev);

	if (core_int.s.efl)
		clear_efl_err_intr(ndev);

	if (core_int.s.bmi)
		clear_bmi_err_intr(ndev);

	/* Mailbox interrupt */
	if (core_int.s.mbox)
		nitrox_pf2vf_mbox_handler(ndev);

	/* set the resend bit so the ISR is called back if more work is pending */
	core_int.s.resend = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);

	return IRQ_HANDLED;
}

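/**
 * nitrox_unregister_interrupts - unregister the PF irq handlers and
 *   release the MSI-X vectors
 * @ndev: NITROX device
 */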
void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	int i;

	for (i = 0; i < ndev->num_vecs; i++) {
		struct nitrox_q_vector *qvec;
		int vec;

		qvec = ndev->qvec + i;
		if (!qvec->valid)
			continue;

		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, qvec);

		tasklet_disable(&qvec->resp_tasklet);
		tasklet_kill(&qvec->resp_tasklet);
		qvec->valid = false;
	}
	kfree(ndev->qvec);
	ndev->qvec = NULL;
	pci_free_irq_vectors(pdev);
}

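/**
 * nitrox_register_interrupts - allocate the PF MSI-X vectors and
 *   register the packet ring and NPS core irq handlers
 * @ndev: NITROX device
 *
 * Return: 0 on success, a negative errno otherwise.
 */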
int nitrox_register_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct nitrox_q_vector *qvec;
	int nr_vecs, vec, cpu;
	int ret, i;

	/*
	 * PF MSI-X vectors
	 *
	 * Entry 0: NPS PKT ring 0
	 * Entry 1: AQMQ ring 0
	 * Entry 2: ZQM ring 0
	 * Entry 3: NPS PKT ring 1
	 * Entry 4: AQMQ ring 1
	 * Entry 5: ZQM ring 1
	 * ....
	 * Entry 192: NPS_CORE_INT_ACTIVE
	 */
	nr_vecs = pci_msix_vec_count(pdev);
	if (nr_vecs < 0) {
		dev_err(DEV(ndev), "Error in getting vec count %d\n", nr_vecs);
		return nr_vecs;
	}

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
		return ret;
	}
	ndev->num_vecs = nr_vecs;

	ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
	if (!ndev->qvec) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	/* request irqs for packet rings/ports */
	for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
		qvec = &ndev->qvec[i];

		qvec->ring = i / NR_RING_VECTORS;
		if (qvec->ring >= ndev->nr_queues)
			break;

		qvec->cmdq = &ndev->pkt_inq[qvec->ring];
		snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
		if (ret) {
			dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
				qvec->ring);
			goto irq_fail;
		}
		cpu = qvec->ring % num_online_cpus();
		irq_set_affinity_hint(vec, get_cpu_mask(cpu));

		tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
			     (unsigned long)qvec);
		qvec->valid = true;
	}

	/* request irqs for non ring vectors */
	i = NON_RING_MSIX_BASE;
	qvec = &ndev->qvec[i];
	qvec->ndev = ndev;

	snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
	/* get the vector number */
	vec = pci_irq_vector(pdev, i);
	ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
	if (ret) {
		dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
		goto irq_fail;
	}
	cpu = num_online_cpus();
	irq_set_affinity_hint(vec, get_cpu_mask(cpu));

	tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
		     (unsigned long)qvec);
	qvec->valid = true;

	return 0;

irq_fail:
	nitrox_unregister_interrupts(ndev);
	return ret;
}

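/**
 * nitrox_sriov_unregister_interrupts - unregister the SR-IOV mode irq
 *   handler and disable MSI-X
 * @ndev: NITROX device
 */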
void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	int i;

	for (i = 0; i < ndev->num_vecs; i++) {
		struct nitrox_q_vector *qvec;
		int vec;

		qvec = ndev->qvec + i;
		if (!qvec->valid)
			continue;

		vec = ndev->iov.msix.vector;
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, qvec);

		tasklet_disable(&qvec->resp_tasklet);
		tasklet_kill(&qvec->resp_tasklet);
		qvec->valid = false;
	}
	kfree(ndev->qvec);
	ndev->qvec = NULL;
	pci_disable_msix(pdev);
}

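/**
 * nitrox_sriov_register_interupts - register the single non-ring irq
 *   handler used by the PF in SR-IOV mode
 * @ndev: NITROX device
 *
 * Return: 0 on success, a negative errno otherwise.
 */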
int nitrox_sriov_register_interupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct nitrox_q_vector *qvec;
	int vec, cpu;
	int ret;

	/*
	 * only the non-ring vector, i.e. Entry 192, is available
	 * to the PF in SR-IOV mode.
	 */
	ndev->iov.msix.entry = NON_RING_MSIX_BASE;
	ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS);
	if (ret) {
		dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n",
			NON_RING_MSIX_BASE);
		return ret;
	}

	qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL);
	if (!qvec) {
		pci_disable_msix(pdev);
		return -ENOMEM;
	}
	qvec->ndev = ndev;

	ndev->qvec = qvec;
	ndev->num_vecs = NR_NON_RING_VECTORS;
	snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d",
		 NON_RING_MSIX_BASE);

	vec = ndev->iov.msix.vector;
	ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
	if (ret) {
		dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n",
			NON_RING_MSIX_BASE);
		goto iov_irq_fail;
	}
	cpu = num_online_cpus();
	irq_set_affinity_hint(vec, get_cpu_mask(cpu));

	tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
		     (unsigned long)qvec);
	qvec->valid = true;

	return 0;

iov_irq_fail:
	nitrox_sriov_unregister_interrupts(ndev);
	return ret;
}