// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME "rvu_cptpf"
#define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"

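/*
 * Each RVU_PF_VFPF_MBOX_INT*(x) register covers 64 VFs: index 0 for
 * VFs 0-63 and index 1 for VFs 64-127.  GENMASK_ULL(n, 0) builds the
 * per-register enable mask; e.g. num_vfs == 3 gives ena_bits == 2 and
 * GENMASK_ULL(2, 0) == 0x7, enabling the mailbox interrupt for
 * VFs 0, 1 and 2.
 */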
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
                                        int num_vfs)
{
        int ena_bits;

        /* Clear any pending interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

        /* Enable VF interrupts for VFs from 0 to 63 */
        ena_bits = ((num_vfs - 1) % 64);
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
                         GENMASK_ULL(ena_bits, 0));

        if (num_vfs > 64) {
                /* Enable VF interrupts for VFs from 64 to 127 */
                ena_bits = num_vfs - 64 - 1;
                otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
                                 GENMASK_ULL(ena_bits, 0));
        }
}

static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
                                         int num_vfs)
{
        int vector;

        /* Disable VF-PF interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
        /* Clear any pending interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
        free_irq(vector, cptpf);

        if (num_vfs > 64) {
                otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
                vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
                free_irq(vector, cptpf);
        }
}

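/*
 * FLR (function level reset) and ME (master enable) interrupts use the
 * same two-register split: index 0 for VFs 0-63, index 1 for VFs 64-127.
 * INTR_MASK(n) expands to a bitmask with one bit per VF, so each write
 * below acks or arms the interrupt for exactly that many VFs.
 */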
static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
                                         int num_vfs)
{
        /* Clear FLR interrupt if any */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
                         INTR_MASK(num_vfs));

        /* Enable VF FLR interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
        /* Clear ME interrupt if any */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
                         INTR_MASK(num_vfs));
        /* Enable VF ME interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));

        if (num_vfs <= 64)
                return;

        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
                         INTR_MASK(num_vfs - 64));
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));

        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
                         INTR_MASK(num_vfs - 64));
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}

static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
                                          int num_vfs)
{
        int vector;

        /* Disable VF FLR interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
        free_irq(vector, cptpf);

        /* Disable VF ME interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
        free_irq(vector, cptpf);

        if (num_vfs <= 64)
                return;

        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
        free_irq(vector, cptpf);

        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
        free_irq(vector, cptpf);
}

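/*
 * Deferred FLR handling: send a MBOX_MSG_VF_FLR request to the AF so it
 * can clean up the VF's resources, then clear the VF's transaction
 * pending bit and re-arm the FLR interrupt that the hard IRQ handler
 * masked in cptpf_vf_flr_intr().
 */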
static void cptpf_flr_wq_handler(struct work_struct *work)
{
        struct cptpf_flr_work *flr_work;
        struct otx2_cptpf_dev *pf;
        struct mbox_msghdr *req;
        struct otx2_mbox *mbox;
        int vf, reg = 0;

        flr_work = container_of(work, struct cptpf_flr_work, work);
        pf = flr_work->pf;
        mbox = &pf->afpf_mbox;

        vf = flr_work - pf->flr_work;

        req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
                                      sizeof(struct msg_rsp));
        if (!req)
                return;

        req->sig = OTX2_MBOX_REQ_SIG;
        req->id = MBOX_MSG_VF_FLR;
        req->pcifunc &= RVU_PFVF_FUNC_MASK;
        req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

        otx2_cpt_send_mbox_msg(mbox, pf->pdev);

        if (vf >= 64) {
                reg = 1;
                vf = vf - 64;
        }
        /* Clear transaction pending register */
        otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
        otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}

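/*
 * FLR hard IRQ handler: queue the per-VF FLR work, then ack the
 * interrupt and mask it so it cannot refire until the work item has
 * completed and re-enabled it.
 */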
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
        int reg, dev, vf, start_vf, num_reg = 1;
        struct otx2_cptpf_dev *cptpf = arg;
        u64 intr;

        if (cptpf->max_vfs > 64)
                num_reg = 2;

        for (reg = 0; reg < num_reg; reg++) {
                intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                       RVU_PF_VFFLR_INTX(reg));
                if (!intr)
                        continue;
                start_vf = 64 * reg;
                for (vf = 0; vf < 64; vf++) {
                        if (!(intr & BIT_ULL(vf)))
                                continue;
                        dev = vf + start_vf;
                        queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
                        /* Clear interrupt */
                        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                         RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
                        /* Disable the interrupt */
                        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                         RVU_PF_VFFLR_INT_ENA_W1CX(reg),
                                         BIT_ULL(vf));
                }
        }
        return IRQ_HANDLED;
}

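/*
 * ME (master enable) hard IRQ handler: for each VF flagged in
 * RVU_PF_VFME_INTX, clear its transaction pending bit and ack the
 * interrupt.
 */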
static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
{
        struct otx2_cptpf_dev *cptpf = arg;
        int reg, vf, num_reg = 1;
        u64 intr;

        if (cptpf->max_vfs > 64)
                num_reg = 2;

        for (reg = 0; reg < num_reg; reg++) {
                intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                       RVU_PF_VFME_INTX(reg));
                if (!intr)
                        continue;
                for (vf = 0; vf < 64; vf++) {
                        if (!(intr & BIT_ULL(vf)))
                                continue;
                        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                         RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
                        /* Clear interrupt */
                        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                         RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
                }
        }
        return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
                                       int num_vfs)
{
        cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
        cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}

static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
        struct pci_dev *pdev = cptpf->pdev;
        struct device *dev = &pdev->dev;
        int ret, vector;

        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
        /* Register VF-PF mailbox interrupt handler */
        ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
                          cptpf);
        if (ret) {
                dev_err(dev,
                        "IRQ registration failed for PFVF mbox0 irq\n");
                return ret;
        }
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
        /* Register VF FLR interrupt handler */
        ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
        if (ret) {
                dev_err(dev,
                        "IRQ registration failed for VFFLR0 irq\n");
                goto free_mbox0_irq;
        }
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
        /* Register VF ME interrupt handler */
        ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
        if (ret) {
                dev_err(dev,
                        "IRQ registration failed for VFME0 irq\n");
                goto free_flr0_irq;
        }

        if (num_vfs > 64) {
                vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
                ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
                                  "CPTVFPF Mbox1", cptpf);
                if (ret) {
                        dev_err(dev,
                                "IRQ registration failed for PFVF mbox1 irq\n");
                        goto free_me0_irq;
                }
                vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
                /* Register VF FLR interrupt handler */
                ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
                                  cptpf);
                if (ret) {
                        dev_err(dev,
                                "IRQ registration failed for VFFLR1 irq\n");
                        goto free_mbox1_irq;
                }
                vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
                /* Register VF ME interrupt handler */
                ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
                                  cptpf);
                if (ret) {
                        dev_err(dev,
                                "IRQ registration failed for VFME1 irq\n");
                        goto free_flr1_irq;
                }
        }
        cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
        cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);

        return 0;

free_flr1_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
        free_irq(vector, cptpf);
free_mbox1_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
        free_irq(vector, cptpf);
free_me0_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
        free_irq(vector, cptpf);
free_flr0_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
        free_irq(vector, cptpf);
free_mbox0_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
        free_irq(vector, cptpf);
        return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
        if (!pf->flr_wq)
                return;
        destroy_workqueue(pf->flr_wq);
        pf->flr_wq = NULL;
        kfree(pf->flr_work);
}

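/*
 * A single ordered workqueue serializes all VF FLR requests towards the
 * AF; each VF gets its own pre-initialized work item in cptpf->flr_work[].
 */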
static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
        int vf;

        cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
        if (!cptpf->flr_wq)
                return -ENOMEM;

        cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
                                  GFP_KERNEL);
        if (!cptpf->flr_work)
                goto destroy_wq;

        for (vf = 0; vf < num_vfs; vf++) {
                cptpf->flr_work[vf].pf = cptpf;
                INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
        }
        return 0;

destroy_wq:
        destroy_workqueue(cptpf->flr_wq);
        return -ENOMEM;
}

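/*
 * The VF-PF mailbox base address is advertised by the AF through
 * RVU_PF_VF_MBOX_ADDR on CN10K and RVU_PF_VF_BAR4_ADDR on OcteonTX2;
 * each VF owns an MBOX_SIZE slice of the mapped region.
 */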
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
        struct device *dev = &cptpf->pdev->dev;
        u64 vfpf_mbox_base;
        int err, i;

        cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
                                              WQ_UNBOUND | WQ_HIGHPRI |
                                              WQ_MEM_RECLAIM, 1);
        if (!cptpf->vfpf_mbox_wq)
                return -ENOMEM;

        /* Map VF-PF mailbox memory */
        if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
                vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
        else
                vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);

        if (!vfpf_mbox_base) {
                dev_err(dev, "VF-PF mailbox address not configured\n");
                err = -ENOMEM;
                goto free_wqe;
        }
        cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
                                                MBOX_SIZE * cptpf->max_vfs);
        if (!cptpf->vfpf_mbox_base) {
                dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
                err = -ENOMEM;
                goto free_wqe;
        }
        err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
                             cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
                             num_vfs);
        if (err)
                goto free_wqe;

        for (i = 0; i < num_vfs; i++) {
                cptpf->vf[i].vf_id = i;
                cptpf->vf[i].cptpf = cptpf;
                cptpf->vf[i].intr_idx = i % 64;
                INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
                          otx2_cptpf_vfpf_mbox_handler);
        }
        return 0;

free_wqe:
        destroy_workqueue(cptpf->vfpf_mbox_wq);
        return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
        destroy_workqueue(cptpf->vfpf_mbox_wq);
        otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
        /* Disable AF-PF interrupt */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
                         0x1ULL);
        /* Clear interrupt if any */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}

static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
        struct pci_dev *pdev = cptpf->pdev;
        struct device *dev = &pdev->dev;
        int ret, irq;

        irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
        /* Register AF-PF mailbox interrupt handler */
        ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
                               "CPTAFPF Mbox", cptpf);
        if (ret) {
                dev_err(dev,
                        "IRQ registration failed for PFAF mbox irq\n");
                return ret;
        }
        /* Clear interrupt if any, to avoid spurious interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
        /* Enable AF-PF interrupt */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
                         0x1ULL);

        ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
        if (ret) {
                dev_warn(dev,
                         "AF not responding to mailbox, deferring probe\n");
                cptpf_disable_afpf_mbox_intr(cptpf);
                return -EPROBE_DEFER;
        }
        return 0;
}

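/*
 * The AF-PF mailbox is a single MBOX_SIZE region at the start of the
 * PCI BAR selected by PCI_MBOX_BAR_NUM (BAR4).
 */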
static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
        struct pci_dev *pdev = cptpf->pdev;
        resource_size_t offset;
        int err;

        cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
                                              WQ_UNBOUND | WQ_HIGHPRI |
                                              WQ_MEM_RECLAIM, 1);
        if (!cptpf->afpf_mbox_wq)
                return -ENOMEM;

        offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
        /* Map AF-PF mailbox memory */
        cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
        if (!cptpf->afpf_mbox_base) {
                dev_err(&pdev->dev, "Unable to map BAR4\n");
                err = -ENOMEM;
                goto error;
        }

        err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
                             pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
        if (err)
                goto error;

        INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
        return 0;

error:
        destroy_workqueue(cptpf->afpf_mbox_wq);
        return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
        destroy_workqueue(cptpf->afpf_mbox_wq);
        otx2_mbox_destroy(&cptpf->afpf_mbox);
}

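/*
 * kvf_limits sysfs attribute: sets the kernel VF limit, i.e. how many
 * CPT LFs may be used on behalf of the kernel.  Accepted values are
 * 1..num_online_cpus().
 */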
static ssize_t kvf_limits_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", cptpf->kvf_limits);
}

static ssize_t kvf_limits_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
        int lfs_num;
        int ret;

        ret = kstrtoint(buf, 0, &lfs_num);
        if (ret)
                return ret;
        if (lfs_num < 1 || lfs_num > num_online_cpus()) {
                dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
                        lfs_num, num_online_cpus());
                return -EINVAL;
        }
        cptpf->kvf_limits = lfs_num;

        return count;
}

static DEVICE_ATTR_RW(kvf_limits);
static struct attribute *cptpf_attrs[] = {
        &dev_attr_kvf_limits.attr,
        NULL
};

static const struct attribute_group cptpf_sysfs_group = {
        .attrs = cptpf_attrs,
};

static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
        u64 rev;

        rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                              RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
        rev = (rev >> 12) & 0xFF;
        /*
         * Check if the AF has set up the revision for the RVUM block;
         * otherwise the driver probe must be deferred until the AF
         * driver comes up.
         */
        if (!rev) {
                dev_warn(&cptpf->pdev->dev,
                         "AF is not initialized, deferring probe\n");
                return -EPROBE_DEFER;
        }
        return 0;
}

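/*
 * Reset one CPT block: set CPT_AF_BLK_RST via the AF mailbox, then poll
 * bit 63 of the same register until the hardware clears it, giving up
 * after roughly 10 iterations of 10-20 ms each.
 */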
static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
        int timeout = 10, ret;
        u64 reg = 0;

        ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                                    CPT_AF_BLK_RST, 0x1, blkaddr);
        if (ret)
                return ret;

        do {
                ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                                           CPT_AF_BLK_RST, &reg, blkaddr);
                if (ret)
                        return ret;

                if (!((reg >> 63) & 0x1))
                        break;

                usleep_range(10000, 20000);
                if (timeout-- < 0)
                        return -EBUSY;
        } while (1);

        return ret;
}

static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
{
        int ret = 0;

        if (cptpf->has_cpt1) {
                ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
                if (ret)
                        return ret;
        }
        return cptx_device_reset(cptpf, BLKADDR_CPT0);
}

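/* Bit 11 of RVU_PF_BLOCK_ADDRX_DISC is set when the block is implemented. */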
static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
        u64 cfg;

        cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                              RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
        if (cfg & BIT_ULL(11))
                cptpf->has_cpt1 = true;
}

static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
        union otx2_cptx_af_constants1 af_cnsts1 = {0};
        int ret = 0;

        /* Check if 'implemented' bit is set for block BLKADDR_CPT1 */
        cptpf_check_block_implemented(cptpf);
        /* Reset the CPT PF device */
        ret = cptpf_device_reset(cptpf);
        if (ret)
                return ret;

        /* Get number of SE, IE and AE engines */
        ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                                   CPT_AF_CONSTANTS1, &af_cnsts1.u,
                                   BLKADDR_CPT0);
        if (ret)
                return ret;

        cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
        cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
        cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

        /* Disable all cores */
        ret = otx2_cpt_disable_all_cores(cptpf);

        return ret;
}

static int cptpf_sriov_disable(struct pci_dev *pdev)
{
        struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
        int num_vfs = pci_num_vf(pdev);

        if (!num_vfs)
                return 0;

        pci_disable_sriov(pdev);
        cptpf_unregister_vfpf_intr(cptpf, num_vfs);
        cptpf_flr_wq_destroy(cptpf);
        cptpf_vfpf_mbox_destroy(cptpf);
        module_put(THIS_MODULE);
        cptpf->enabled_vfs = 0;

        return 0;
}

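/*
 * SR-IOV enable path: bring up the VF-PF mailbox and FLR workqueue,
 * register the VF interrupts, discover engine capabilities and create
 * engine groups, and only then let the PCI core create the VFs.  The
 * error labels unwind in reverse order.
 */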
static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
        struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
        int ret;

        /* Initialize VF<=>PF mailbox */
        ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
        if (ret)
                return ret;

        ret = cptpf_flr_wq_init(cptpf, num_vfs);
        if (ret)
                goto destroy_mbox;
        /* Register VF<=>PF mailbox interrupt */
        ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
        if (ret)
                goto destroy_flr;

        /* Get CPT HW capabilities using LOAD_FVC operation. */
        ret = otx2_cpt_discover_eng_capabilities(cptpf);
        if (ret)
                goto disable_intr;

        ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
        if (ret)
                goto disable_intr;

        cptpf->enabled_vfs = num_vfs;
        ret = pci_enable_sriov(pdev, num_vfs);
        if (ret)
                goto disable_intr;

        dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

        try_module_get(THIS_MODULE);
        return num_vfs;

disable_intr:
        cptpf_unregister_vfpf_intr(cptpf, num_vfs);
        cptpf->enabled_vfs = 0;
destroy_flr:
        cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
        cptpf_vfpf_mbox_destroy(cptpf);
        return ret;
}

static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        if (num_vfs > 0)
                return cptpf_sriov_enable(pdev, num_vfs);
        else
                return cptpf_sriov_disable(pdev);
}

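/*
 * PF probe: map the configuration BAR, allocate all RVU_PF_INT_VEC_CNT
 * MSI-X vectors, bring up the AF-PF mailbox (deferring probe while the
 * AF is not ready), reset the CPT block(s), read the engine counts and
 * initialize engine groups, then expose the kvf_limits sysfs knob.
 */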
static int otx2_cptpf_probe(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct otx2_cptpf_dev *cptpf;
        int err;

        cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
        if (!cptpf)
                return -ENOMEM;

        err = pcim_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                goto clear_drvdata;
        }

        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get usable DMA configuration\n");
                goto clear_drvdata;
        }
        /* Map PF's configuration registers */
        err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
                                             OTX2_CPT_DRV_NAME);
        if (err) {
                dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
                goto clear_drvdata;
        }
        pci_set_master(pdev);
        pci_set_drvdata(pdev, cptpf);
        cptpf->pdev = pdev;

        cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

        /* Check if AF driver is up, otherwise defer probe */
        err = cpt_is_pf_usable(cptpf);
        if (err)
                goto clear_drvdata;

        err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
                                    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(dev, "Request for %d msix vectors failed\n",
                        RVU_PF_INT_VEC_CNT);
                goto clear_drvdata;
        }
        otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
        /* Initialize AF-PF mailbox */
        err = cptpf_afpf_mbox_init(cptpf);
        if (err)
                goto clear_drvdata;
        /* Register mailbox interrupt */
        err = cptpf_register_afpf_mbox_intr(cptpf);
        if (err)
                goto destroy_afpf_mbox;

        cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);

        err = cn10k_cptpf_lmtst_init(cptpf);
        if (err)
                goto unregister_intr;

        /* Initialize CPT PF device */
        err = cptpf_device_init(cptpf);
        if (err)
                goto unregister_intr;

        /* Initialize engine groups */
        err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
        if (err)
                goto unregister_intr;

        err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
        if (err)
                goto cleanup_eng_grps;
        return 0;

cleanup_eng_grps:
        otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
        cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
        cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
        struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

        if (!cptpf)
                return;

        cptpf_sriov_disable(pdev);
        /* Delete sysfs entry created for kernel VF limits */
        sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
        /* Cleanup engine groups */
        otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
        /* Disable AF-PF mailbox interrupt */
        cptpf_disable_afpf_mbox_intr(cptpf);
        /* Destroy AF-PF mbox */
        cptpf_afpf_mbox_destroy(cptpf);
        pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
        { 0, } /* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
        .name = OTX2_CPT_DRV_NAME,
        .id_table = otx2_cpt_id_table,
        .probe = otx2_cptpf_probe,
        .remove = otx2_cptpf_remove,
        .sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);