// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_pci_func.c: Definition of PCI functions. */
9
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "aq_main.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl2/hw_atl2.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
#include "aq_macsec.h"
24
/* PCI device IDs this driver binds to. Every entry here must have a
 * matching row in hw_atl_boards[] below, which supplies the hw ops and
 * capability set for the board.
 */
static const struct pci_device_id aq_pci_tbl[] = {
	/* First-generation (Atlantic A0/B0) device IDs */
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },

	/* AQC1xx production parts */
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },

	/* AQC1xxS variants */
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },

	/* Second-generation (Atlantic 2 / AQC11x) parts */
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113DEV), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CS), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC114CS), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },

	{}
};
57
/* Maps (device id, hardware revision) to the hw ops and capability set.
 * aq_pci_probe_get_hw_by_id() takes the FIRST matching row, so entries
 * keyed to a specific revision (AQ_HWREV_1/AQ_HWREV_2) must stay ahead
 * of any AQ_HWREV_ANY wildcard for the same device id.
 */
static const struct aq_board_revision_s hw_atl_boards[] = {
	/* Atlantic rev. A0 */
	{ AQ_DEVICE_ID_0001,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },

	/* Atlantic rev. B0 */
	{ AQ_DEVICE_ID_0001,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },

	/* AQC1xx: B1 ops regardless of revision */
	{ AQ_DEVICE_ID_AQC100,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, },
	{ AQ_DEVICE_ID_AQC107,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC108,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_AQC109,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
	{ AQ_DEVICE_ID_AQC111,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
	{ AQ_DEVICE_ID_AQC112,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },

	{ AQ_DEVICE_ID_AQC100S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
	{ AQ_DEVICE_ID_AQC107S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
	{ AQ_DEVICE_ID_AQC108S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
	{ AQ_DEVICE_ID_AQC109S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
	{ AQ_DEVICE_ID_AQC111S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
	{ AQ_DEVICE_ID_AQC112S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },

	/* Atlantic 2 (hw_atl2) parts */
	{ AQ_DEVICE_ID_AQC113DEV,	AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC113,		AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC113CS,	AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC114CS,	AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC113C,		AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC115C,		AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc115c, },
	{ AQ_DEVICE_ID_AQC113CA,	AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC116C,		AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc116c, },

};
95
96 MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
97
aq_pci_probe_get_hw_by_id(struct pci_dev * pdev,const struct aq_hw_ops ** ops,const struct aq_hw_caps_s ** caps)98 static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
99 const struct aq_hw_ops **ops,
100 const struct aq_hw_caps_s **caps)
101 {
102 int i;
103
104 if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
105 return -EINVAL;
106
107 for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
108 if (hw_atl_boards[i].devid == pdev->device &&
109 (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
110 hw_atl_boards[i].revision == pdev->revision)) {
111 *ops = hw_atl_boards[i].ops;
112 *caps = hw_atl_boards[i].caps;
113 break;
114 }
115 }
116
117 if (i == ARRAY_SIZE(hw_atl_boards))
118 return -EINVAL;
119
120 return 0;
121 }
122
aq_pci_func_init(struct pci_dev * pdev)123 static int aq_pci_func_init(struct pci_dev *pdev)
124 {
125 int err;
126
127 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
128 if (!err)
129 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
130 if (err) {
131 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
132 if (!err)
133 err = pci_set_consistent_dma_mask(pdev,
134 DMA_BIT_MASK(32));
135 }
136 if (err != 0) {
137 err = -ENOSR;
138 goto err_exit;
139 }
140
141 err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
142 if (err < 0)
143 goto err_exit;
144
145 pci_set_master(pdev);
146
147 return 0;
148
149 err_exit:
150 return err;
151 }
152
/* aq_pci_func_alloc_irq() - request the interrupt for vector @i.
 * @self: NIC instance.
 * @i:    vector index (also the bit recorded in msix_entry_mask).
 * @name: IRQ name shown in /proc/interrupts.
 * @irq_handler: handler used in MSI/MSI-X mode.
 * @irq_arg: cookie passed to the handler (and later to free_irq()).
 * @affinity_mask: optional CPU affinity hint, honoured only for MSI-X.
 *
 * In legacy-interrupt mode the shared aq_vec_isr_legacy handler is used
 * instead of @irq_handler. Returns request_irq()'s result.
 */
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
			  char *name, irq_handler_t irq_handler,
			  void *irq_arg, cpumask_t *affinity_mask)
{
	struct pci_dev *pdev = self->pdev;
	int vector = pci_irq_vector(pdev, i);
	int err;

	if (pdev->msix_enabled || pdev->msi_enabled)
		err = request_irq(vector, irq_handler, 0, name, irq_arg);
	else
		err = request_irq(vector, aq_vec_isr_legacy, IRQF_SHARED,
				  name, irq_arg);

	if (err < 0)
		return err;

	/* Remember which vectors we own so free_irqs() can undo this. */
	self->msix_entry_mask |= (1 << i);

	if (pdev->msix_enabled && affinity_mask)
		irq_set_affinity_hint(vector, affinity_mask);

	return err;
}
177
/* Release every IRQ previously taken via aq_pci_func_alloc_irq().
 *
 * Walks msix_entry_mask from bit 31 down to 0 (32 == mask width). For
 * each owned vector the handler cookie passed to free_irq() must match
 * what request_irq() got: the nic itself for the link/service vector,
 * the per-queue aq_vec otherwise.
 */
void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
	struct pci_dev *pdev = self->pdev;
	unsigned int i;
	void *irq_data;

	for (i = 32U; i--;) {
		if (!((1U << i) & self->msix_entry_mask))
			continue;
		if (self->aq_nic_cfg.link_irq_vec &&
		    i == self->aq_nic_cfg.link_irq_vec)
			/* link IRQ: registered with the nic as its cookie */
			irq_data = self;
		else if (i < AQ_CFG_VECS_MAX)
			irq_data = self->aq_vec[i];
		else
			/* set bit with no known owner: leave it alone */
			continue;

		if (pdev->msix_enabled)
			irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
		free_irq(pci_irq_vector(pdev, i), irq_data);
		self->msix_entry_mask &= ~(1U << i);
	}
}
201
aq_pci_func_get_irq_type(struct aq_nic_s * self)202 unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
203 {
204 if (self->pdev->msix_enabled)
205 return AQ_HW_IRQ_MSIX;
206 if (self->pdev->msi_enabled)
207 return AQ_HW_IRQ_MSI;
208
209 return AQ_HW_IRQ_LEGACY;
210 }
211
aq_pci_free_irq_vectors(struct aq_nic_s * self)212 static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
213 {
214 pci_free_irq_vectors(self->pdev);
215 }
216
/* aq_pci_probe() - PCI probe entry point.
 *
 * Brings the adapter up step by step: enable the device, set up DMA and
 * regions, allocate the netdev, resolve hw ops/caps, map the register
 * BAR, allocate interrupt vectors and finally register the net_device.
 * Each err_* label unwinds exactly what was acquired before the failing
 * step, in reverse order.
 */
static int aq_pci_probe(struct pci_dev *pdev,
			const struct pci_device_id *pci_id)
{
	struct net_device *ndev;
	resource_size_t mmio_pa;
	struct aq_nic_s *self;
	u32 numvecs;
	u32 bar;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = aq_pci_func_init(pdev);
	if (err)
		goto err_pci_func;

	ndev = aq_ndev_alloc();
	if (!ndev) {
		err = -ENOMEM;
		goto err_ndev;
	}

	self = netdev_priv(ndev);
	self->pdev = pdev;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	pci_set_drvdata(pdev, self);

	mutex_init(&self->fwreq_mutex);

	/* Pick hw ops/caps from hw_atl_boards[] by device id + revision. */
	err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
					&aq_nic_get_cfg(self)->aq_hw_caps);
	if (err)
		goto err_ioremap;

	self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
	if (!self->aq_hw) {
		err = -ENOMEM;
		goto err_ioremap;
	}
	self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
	/* Some generations carry extra per-hw private state (caps-driven). */
	if (self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len) {
		int len = self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len;

		self->aq_hw->priv = kzalloc(len, GFP_KERNEL);
		if (!self->aq_hw->priv) {
			err = -ENOMEM;
			goto err_free_aq_hw;
		}
	}

	/* Map the first memory BAR large enough to hold the registers. */
	for (bar = 0; bar < 4; ++bar) {
		if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
			resource_size_t reg_sz;

			mmio_pa = pci_resource_start(pdev, bar);
			if (mmio_pa == 0U) {
				err = -EIO;
				goto err_free_aq_hw_priv;
			}

			reg_sz = pci_resource_len(pdev, bar);
			if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
				err = -EIO;
				goto err_free_aq_hw_priv;
			}

			self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
			if (!self->aq_hw->mmio) {
				err = -EIO;
				goto err_free_aq_hw_priv;
			}
			break;
		}
	}

	/* Loop ran to completion: no usable memory BAR was found. */
	if (bar == 4) {
		err = -EIO;
		goto err_free_aq_hw_priv;
	}

	/* Queue vectors: bounded by config default, hw caps and CPUs. */
	numvecs = min((u8)AQ_CFG_VECS_DEF,
		      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
	numvecs = min(numvecs, num_online_cpus());
	/* Request IRQ vector for PTP */
	numvecs += 1;

	numvecs += AQ_HW_SERVICE_IRQS;
	/*enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
	err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
				    PCI_IRQ_MSIX | PCI_IRQ_MSI |
				    PCI_IRQ_LEGACY);

	if (err < 0)
		goto err_hwinit;
	/* We may be granted fewer vectors than requested. */
	numvecs = err;
#endif
	self->irqvecs = numvecs;

	/* net device init */
	aq_nic_cfg_start(self);

	aq_nic_ndev_init(self);

	err = aq_nic_ndev_register(self);
	if (err < 0)
		goto err_register;

	aq_drvinfo_init(ndev);

	return 0;

err_register:
	aq_nic_free_vectors(self);
	aq_pci_free_irq_vectors(self);
err_hwinit:
	iounmap(self->aq_hw->mmio);
err_free_aq_hw_priv:
	kfree(self->aq_hw->priv);
err_free_aq_hw:
	kfree(self->aq_hw);
err_ioremap:
	free_netdev(ndev);
err_ndev:
	pci_release_regions(pdev);
err_pci_func:
	pci_disable_device(pdev);

	return err;
}
349
/* aq_pci_remove() - PCI remove entry point.
 *
 * Undoes aq_pci_probe() in reverse order. The ndev check guards the
 * teardown of per-netdev resources; the reg_state test additionally
 * covers the case where the netdev was allocated but never registered.
 */
static void aq_pci_remove(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	if (self->ndev) {
		/* Drop rx filter rules before the netdev goes away. */
		aq_clear_rxnfc_all_rules(self);
		if (self->ndev->reg_state == NETREG_REGISTERED)
			unregister_netdev(self->ndev);

#if IS_ENABLED(CONFIG_MACSEC)
		aq_macsec_free(self);
#endif
		aq_nic_free_vectors(self);
		aq_pci_free_irq_vectors(self);
		iounmap(self->aq_hw->mmio);
		kfree(self->aq_hw->priv);
		kfree(self->aq_hw);
		pci_release_regions(pdev);
		free_netdev(self->ndev);
	}

	pci_disable_device(pdev);
}
373
aq_pci_shutdown(struct pci_dev * pdev)374 static void aq_pci_shutdown(struct pci_dev *pdev)
375 {
376 struct aq_nic_s *self = pci_get_drvdata(pdev);
377
378 aq_nic_shutdown(self);
379
380 pci_disable_device(pdev);
381
382 if (system_state == SYSTEM_POWER_OFF) {
383 pci_wake_from_d3(pdev, false);
384 pci_set_power_state(pdev, PCI_D3hot);
385 }
386 }
387
/* Shared suspend path for the freeze/suspend/poweroff PM callbacks.
 *
 * Under rtnl: detach and stop the netdev, then deinitialize the NIC.
 * The second argument to aq_nic_deinit() is !wol — presumably it
 * requests dropping the link only when wake-on-lan is not configured,
 * keeping the PHY powered for WoL (NOTE(review): confirm against
 * aq_nic_deinit()'s parameter semantics).
 */
static int aq_suspend_common(struct device *dev)
{
	struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	nic->power_state = AQ_HW_POWER_STATE_D3;
	netif_device_detach(nic->ndev);
	netif_tx_stop_all_queues(nic->ndev);

	if (netif_running(nic->ndev))
		aq_nic_stop(nic);

	aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
	aq_nic_set_power(nic);

	rtnl_unlock();

	return 0;
}
408
/* Shared resume path for the thaw/resume/restore PM callbacks.
 *
 * Under rtnl: return the device to D0 and restore its config space,
 * re-initialize and restart the NIC if the interface was running, then
 * re-attach the netdev. On any failure the NIC is deinitialized again
 * before returning the error.
 */
static int atl_resume_common(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct aq_nic_s *nic;
	int ret = 0;

	nic = pci_get_drvdata(pdev);

	rtnl_lock();

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (netif_running(nic->ndev)) {
		ret = aq_nic_init(nic);
		if (ret)
			goto err_exit;

		ret = aq_nic_start(nic);
		if (ret)
			goto err_exit;
	}

	netif_device_attach(nic->ndev);
	netif_tx_start_all_queues(nic->ndev);

err_exit:
	if (ret < 0)
		aq_nic_deinit(nic, true);

	rtnl_unlock();

	return ret;
}
443
/* PM .freeze callback (hibernation prepare): shares the suspend path. */
static int aq_pm_freeze(struct device *dev)
{
	return aq_suspend_common(dev);
}
448
/* PM .suspend/.poweroff callback: shares the common suspend path. */
static int aq_pm_suspend_poweroff(struct device *dev)
{
	return aq_suspend_common(dev);
}
453
/* PM .thaw callback (after a failed/aborted hibernation): resume path. */
static int aq_pm_thaw(struct device *dev)
{
	return atl_resume_common(dev);
}
458
/* PM .resume/.restore callback: shares the common resume path. */
static int aq_pm_resume_restore(struct device *dev)
{
	return atl_resume_common(dev);
}
463
/* Power-management callbacks: all six events funnel into the two
 * shared helpers aq_suspend_common()/atl_resume_common().
 */
static const struct dev_pm_ops aq_pm_ops = {
	.suspend = aq_pm_suspend_poweroff,
	.poweroff = aq_pm_suspend_poweroff,
	.freeze = aq_pm_freeze,
	.resume = aq_pm_resume_restore,
	.restore = aq_pm_resume_restore,
	.thaw = aq_pm_thaw,
};
472
/* PCI driver descriptor tying the ID table to the probe/remove/
 * shutdown entry points; PM ops are wired in only with CONFIG_PM.
 */
static struct pci_driver aq_pci_ops = {
	.name = AQ_CFG_DRV_NAME,
	.id_table = aq_pci_tbl,
	.probe = aq_pci_probe,
	.remove = aq_pci_remove,
	.shutdown = aq_pci_shutdown,
#ifdef CONFIG_PM
	.driver.pm = &aq_pm_ops,
#endif
};
483
aq_pci_func_register_driver(void)484 int aq_pci_func_register_driver(void)
485 {
486 return pci_register_driver(&aq_pci_ops);
487 }
488
aq_pci_func_unregister_driver(void)489 void aq_pci_func_unregister_driver(void)
490 {
491 pci_unregister_driver(&aq_pci_ops);
492 }
493
494