1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020, Intel Corporation
3 */
4
5 #include <linux/clk-provider.h>
6 #include <linux/pci.h>
7 #include <linux/dmi.h>
8 #include "dwmac-intel.h"
9 #include "dwmac4.h"
10 #include "stmmac.h"
11 #include "stmmac_ptp.h"
12
13 struct intel_priv_data {
14 int mdio_adhoc_addr; /* MDIO address for SerDes, etc. */
15 unsigned long crossts_adj;
16 bool is_pse;
17 };
18
19 /* This struct is used to associate the PCI function of a MAC controller on a
20 * board, discovered via DMI, with the address of the PHY connected to that
21 * MAC. A negative address value means that the MAC controller is not
22 * connected to a PHY.
23 */
24 struct stmmac_pci_func_data {
25 unsigned int func;
26 int phy_addr;
27 };
28
29 struct stmmac_pci_dmi_data {
30 const struct stmmac_pci_func_data *func;
31 size_t nfuncs;
32 };
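/* Illustrative note based on the tables later in this file: on Galileo boards,
 * galileo_stmmac_func_data maps PCI function 6 to PHY address 1, and
 * stmmac_pci_find_phy_addr() below performs that DMI-based lookup.
 */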
33
34 struct stmmac_pci_info {
35 int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
36 };
37
38 static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
39 const struct dmi_system_id *dmi_list)
40 {
41 const struct stmmac_pci_func_data *func_data;
42 const struct stmmac_pci_dmi_data *dmi_data;
43 const struct dmi_system_id *dmi_id;
44 int func = PCI_FUNC(pdev->devfn);
45 size_t n;
46
47 dmi_id = dmi_first_match(dmi_list);
48 if (!dmi_id)
49 return -ENODEV;
50
51 dmi_data = dmi_id->driver_data;
52 func_data = dmi_data->func;
53
54 for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
55 if (func_data->func == func)
56 return func_data->phy_addr;
57
58 return -ENODEV;
59 }
60
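/* Poll a SerDes register over the ad-hoc MDIO address until the masked value
 * matches the expected one. With 10 retries of POLL_DELAY_US each (defined in
 * dwmac-intel.h), the worst-case busy-wait is 10 * POLL_DELAY_US before
 * returning -ETIMEDOUT.
 */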
61 static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
62 int phyreg, u32 mask, u32 val)
63 {
64 unsigned int retries = 10;
65 int val_rd;
66
67 do {
68 val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
69 if ((val_rd & mask) == (val & mask))
70 return 0;
71 udelay(POLL_DELAY_US);
72 } while (--retries);
73
74 return -ETIMEDOUT;
75 }
76
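/* SerDes power-up sequence, as implemented below: program the SerDes rate and
 * PCLK based on plat->max_speed, assert clk_req and wait for clk_ack, assert
 * the lane reset and wait for it to be reflected, then move the lane to power
 * state P0. On PSE instances the SGMII PHY Rx clock is also ungated. The whole
 * sequence is skipped when no ad-hoc MDIO address is set.
 */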
77 static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
78 {
79 struct intel_priv_data *intel_priv = priv_data;
80 struct stmmac_priv *priv = netdev_priv(ndev);
81 int serdes_phy_addr = 0;
82 u32 data = 0;
83
84 if (!intel_priv->mdio_adhoc_addr)
85 return 0;
86
87 serdes_phy_addr = intel_priv->mdio_adhoc_addr;
88
89 /* Set the serdes rate and the PCLK rate */
90 data = mdiobus_read(priv->mii, serdes_phy_addr,
91 SERDES_GCR0);
92
93 data &= ~SERDES_RATE_MASK;
94 data &= ~SERDES_PCLK_MASK;
95
96 if (priv->plat->max_speed == 2500)
97 data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
98 SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
99 else
100 data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
101 SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;
102
103 mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
104
105 /* assert clk_req */
106 data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
107 data |= SERDES_PLL_CLK;
108 mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
109
110 /* check for clk_ack assertion */
111 data = serdes_status_poll(priv, serdes_phy_addr,
112 SERDES_GSR0,
113 SERDES_PLL_CLK,
114 SERDES_PLL_CLK);
115
116 if (data) {
117 dev_err(priv->device, "Serdes PLL clk request timeout\n");
118 return data;
119 }
120
121 /* assert lane reset */
122 data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
123 data |= SERDES_RST;
124 mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
125
126 /* check for assert lane reset reflection */
127 data = serdes_status_poll(priv, serdes_phy_addr,
128 SERDES_GSR0,
129 SERDES_RST,
130 SERDES_RST);
131
132 if (data) {
133 dev_err(priv->device, "Serdes assert lane reset timeout\n");
134 return data;
135 }
136
137 /* move power state to P0 */
138 data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
139
140 data &= ~SERDES_PWR_ST_MASK;
141 data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
142
143 mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
144
145 /* Check for P0 state */
146 data = serdes_status_poll(priv, serdes_phy_addr,
147 SERDES_GSR0,
148 SERDES_PWR_ST_MASK,
149 SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);
150
151 if (data) {
152 dev_err(priv->device, "Serdes power state P0 timeout.\n");
153 return data;
154 }
155
156 /* PSE only - ungate SGMII PHY Rx Clock */
157 if (intel_priv->is_pse)
158 mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
159 0, SERDES_PHY_RX_CLK);
160
161 return 0;
162 }
163
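/* SerDes power-down: the reverse of intel_serdes_powerup(). Gate the PSE SGMII
 * PHY Rx clock, move the lane to power state P3, de-assert clk_req and wait
 * for clk_ack to drop, then de-assert the lane reset.
 */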
164 static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
165 {
166 struct intel_priv_data *intel_priv = intel_data;
167 struct stmmac_priv *priv = netdev_priv(ndev);
168 int serdes_phy_addr = 0;
169 u32 data = 0;
170
171 if (!intel_priv->mdio_adhoc_addr)
172 return;
173
174 serdes_phy_addr = intel_priv->mdio_adhoc_addr;
175
176 /* PSE only - gate SGMII PHY Rx Clock */
177 if (intel_priv->is_pse)
178 mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
179 SERDES_PHY_RX_CLK, 0);
180
181 /* move power state to P3 */
182 data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
183
184 data &= ~SERDES_PWR_ST_MASK;
185 data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
186
187 mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
188
189 /* Check for P3 state */
190 data = serdes_status_poll(priv, serdes_phy_addr,
191 SERDES_GSR0,
192 SERDES_PWR_ST_MASK,
193 SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);
194
195 if (data) {
196 dev_err(priv->device, "Serdes power state P3 timeout\n");
197 return;
198 }
199
200 /* de-assert clk_req */
201 data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
202 data &= ~SERDES_PLL_CLK;
203 mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
204
205 /* check for clk_ack de-assert */
206 data = serdes_status_poll(priv, serdes_phy_addr,
207 SERDES_GSR0,
208 SERDES_PLL_CLK,
209 (u32)~SERDES_PLL_CLK);
210
211 if (data) {
212 dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
213 return;
214 }
215
216 /* de-assert lane reset */
217 data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
218 data &= ~SERDES_RST;
219 mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
220
221 /* check for de-assert lane reset reflection */
222 data = serdes_status_poll(priv, serdes_phy_addr,
223 SERDES_GSR0,
224 SERDES_RST,
225 (u32)~SERDES_RST);
226
227 if (data) {
228 dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
229 return;
230 }
231 }
232
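/* Read the SerDes link speed mode from SERDES_GCR. When the lane reports 2.5G
 * mode, advertise 2500base-X with in-band AN disabled and cap max_speed at
 * 2500; otherwise cap max_speed at 1000.
 */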
233 static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
234 {
235 struct intel_priv_data *intel_priv = intel_data;
236 struct stmmac_priv *priv = netdev_priv(ndev);
237 int serdes_phy_addr = 0;
238 u32 data = 0;
239
240 serdes_phy_addr = intel_priv->mdio_adhoc_addr;
241
242 /* Determine the link speed mode: 2.5Gbps/1Gbps */
243 data = mdiobus_read(priv->mii, serdes_phy_addr,
244 SERDES_GCR);
245
246 if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
247 SERDES_LINK_MODE_2G5) {
248 dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
249 priv->plat->max_speed = 2500;
250 priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
251 priv->plat->mdio_bus_data->default_an_inband = false;
252 } else {
253 priv->plat->max_speed = 1000;
254 }
255 }
256
257 /* Program the PTP clock frequency for the different variants of
258 * Intel mGbE, which have slightly different GPO mappings.
259 */
260 static void intel_mgbe_ptp_clk_freq_config(struct stmmac_priv *priv)
261 {
262 struct intel_priv_data *intel_priv;
263 u32 gpio_value;
264
265 intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;
266
267 gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);
268
269 if (intel_priv->is_pse) {
270 /* For PSE GbE, use 200MHz */
271 gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
272 gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
273 } else {
274 /* For PCH GbE, use 200MHz */
275 gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
276 gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
277 }
278
279 writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
280 }
281
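/* Assemble the 64-bit ART timestamp from the four PMC_ART_VALUE registers read
 * over the ad-hoc MDIO address, PMC_ART_VALUE3 being the most significant
 * chunk; each read contributes GMAC4_ART_TIME_SHIFT bits.
 */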
282 static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
283 u64 *art_time)
284 {
285 u64 ns;
286
287 ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
288 ns <<= GMAC4_ART_TIME_SHIFT;
289 ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
290 ns <<= GMAC4_ART_TIME_SHIFT;
291 ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
292 ns <<= GMAC4_ART_TIME_SHIFT;
293 ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);
294
295 *art_time = ns;
296 }
297
298 static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
299 {
300 return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
301 }
302
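/* PTP-to-ART cross-timestamp flow, as implemented below: enable the selected
 * internal auxiliary snapshot (ATSENx), flush the snapshot FIFO, then toggle
 * GPO1 to latch the PTP time and ART counter. Once the timestamp interrupt
 * fires, read out all queued snapshots keeping the last PTP time, read ART
 * over MDIO via get_arttime(), and scale the ART cycles by crossts_adj.
 */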
303 static int intel_crosststamp(ktime_t *device,
304 struct system_counterval_t *system,
305 void *ctx)
306 {
307 struct intel_priv_data *intel_priv;
308
309 struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
310 void __iomem *ptpaddr = priv->ptpaddr;
311 void __iomem *ioaddr = priv->hw->pcsr;
312 unsigned long flags;
313 u64 art_time = 0;
314 u64 ptp_time = 0;
315 u32 num_snapshot;
316 u32 gpio_value;
317 u32 acr_value;
318 int i;
319
320 if (!boot_cpu_has(X86_FEATURE_ART))
321 return -EOPNOTSUPP;
322
323 intel_priv = priv->plat->bsp_priv;
324
325 /* Internal cross-timestamping and externally triggered event
326 * timestamping cannot run concurrently.
327 */
328 if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
329 return -EBUSY;
330
331 priv->plat->flags |= STMMAC_FLAG_INT_SNAPSHOT_EN;
332
333 mutex_lock(&priv->aux_ts_lock);
334 /* Enable Internal snapshot trigger */
335 acr_value = readl(ptpaddr + PTP_ACR);
336 acr_value &= ~PTP_ACR_MASK;
337 switch (priv->plat->int_snapshot_num) {
338 case AUX_SNAPSHOT0:
339 acr_value |= PTP_ACR_ATSEN0;
340 break;
341 case AUX_SNAPSHOT1:
342 acr_value |= PTP_ACR_ATSEN1;
343 break;
344 case AUX_SNAPSHOT2:
345 acr_value |= PTP_ACR_ATSEN2;
346 break;
347 case AUX_SNAPSHOT3:
348 acr_value |= PTP_ACR_ATSEN3;
349 break;
350 default:
351 mutex_unlock(&priv->aux_ts_lock);
352 priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
353 return -EINVAL;
354 }
355 writel(acr_value, ptpaddr + PTP_ACR);
356
357 /* Clear FIFO */
358 acr_value = readl(ptpaddr + PTP_ACR);
359 acr_value |= PTP_ACR_ATSFC;
360 writel(acr_value, ptpaddr + PTP_ACR);
361 /* Release the mutex */
362 mutex_unlock(&priv->aux_ts_lock);
363
364 /* Trigger the internal snapshot signal.
365 * Create a rising edge by toggling GPO1 low
366 * and then back to high.
367 */
368 gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
369 gpio_value &= ~GMAC_GPO1;
370 writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
371 gpio_value |= GMAC_GPO1;
372 writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
373
374 /* Time sync done Indication - Interrupt method */
375 if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
376 stmmac_cross_ts_isr(priv),
377 HZ / 100)) {
378 priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
379 return -ETIMEDOUT;
380 }
381
382 *system = (struct system_counterval_t) {
383 .cycles = 0,
384 .cs_id = CSID_X86_ART,
385 .use_nsecs = false,
386 };
387
388 num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
389 GMAC_TIMESTAMP_ATSNS_MASK) >>
390 GMAC_TIMESTAMP_ATSNS_SHIFT;
391
392 /* Repeat until the timestamps are from the last segment of the FIFO */
393 for (i = 0; i < num_snapshot; i++) {
394 read_lock_irqsave(&priv->ptp_lock, flags);
395 stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
396 *device = ns_to_ktime(ptp_time);
397 read_unlock_irqrestore(&priv->ptp_lock, flags);
398 get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
399 system->cycles = art_time;
400 }
401
402 system->cycles *= intel_priv->crossts_adj;
403
404 priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
405
406 return 0;
407 }
408
409 static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
410 int base)
411 {
412 if (boot_cpu_has(X86_FEATURE_ART)) {
413 unsigned int art_freq;
414
415 /* On systems that support ART, the ART frequency can be obtained
416 * from the ECX register of CPUID leaf 0x15.
417 */
418 art_freq = cpuid_ecx(ART_CPUID_LEAF);
419 do_div(art_freq, base);
420 intel_priv->crossts_adj = art_freq;
421 }
422 }
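/* The crossts_adj computed above is applied in intel_crosststamp()
 * (system->cycles *= crossts_adj); on non-PSE parts it keeps the default of 1
 * set in intel_eth_pci_probe().
 */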
423
424 static void common_default_data(struct plat_stmmacenet_data *plat)
425 {
426 plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
427 plat->has_gmac = 1;
428 plat->force_sf_dma_mode = 1;
429
430 plat->mdio_bus_data->needs_reset = true;
431
432 /* Set default value for multicast hash bins */
433 plat->multicast_filter_bins = HASH_TABLE_SIZE;
434
435 /* Set default value for unicast filter entries */
436 plat->unicast_filter_entries = 1;
437
438 /* Set the maxmtu to a default of JUMBO_LEN */
439 plat->maxmtu = JUMBO_LEN;
440
441 /* Set default number of RX and TX queues to use */
442 plat->tx_queues_to_use = 1;
443 plat->rx_queues_to_use = 1;
444
445 /* Disable Priority config by default */
446 plat->tx_queues_cfg[0].use_prio = false;
447 plat->rx_queues_cfg[0].use_prio = false;
448
449 /* Disable RX queues routing by default */
450 plat->rx_queues_cfg[0].pkt_route = 0x0;
451 }
452
453 static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
454 phy_interface_t interface)
455 {
456 /* plat->mdio_bus_data->has_xpcs has been set true, so there
457 * should always be an XPCS. The original code would always
458 * return this if present.
459 */
460 return &priv->hw->xpcs->pcs;
461 }
462
463 static int intel_mgbe_common_data(struct pci_dev *pdev,
464 struct plat_stmmacenet_data *plat)
465 {
466 struct fwnode_handle *fwnode;
467 char clk_name[20];
468 int ret;
469 int i;
470
471 plat->pdev = pdev;
472 plat->phy_addr = -1;
473 plat->clk_csr = 5;
474 plat->has_gmac = 0;
475 plat->has_gmac4 = 1;
476 plat->force_sf_dma_mode = 0;
477 plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);
478
479 /* Multiplying factor applied to the clk_eee_i clock time
480 * period to bring it closer to 100 ns. The value should be
481 * programmed such that clk_eee_time_period *
482 * (MULT_FACT_100NS + 1) falls within 80 ns to 120 ns:
483 * clk_eee frequency is 19.2 MHz
484 * clk_eee_time_period is 52 ns
485 * 52 ns * (1 + 1) = 104 ns
486 * MULT_FACT_100NS = 1
487 */
488 plat->mult_fact_100ns = 1;
489
490 plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
491
492 for (i = 0; i < plat->rx_queues_to_use; i++) {
493 plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
494 plat->rx_queues_cfg[i].chan = i;
495
496 /* Disable Priority config by default */
497 plat->rx_queues_cfg[i].use_prio = false;
498
499 /* Disable RX queues routing by default */
500 plat->rx_queues_cfg[i].pkt_route = 0x0;
501 }
502
503 for (i = 0; i < plat->tx_queues_to_use; i++) {
504 plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
505
506 /* Disable Priority config by default */
507 plat->tx_queues_cfg[i].use_prio = false;
508 /* Default TX Q0 to use TSO and rest TXQ for TBS */
509 if (i > 0)
510 plat->tx_queues_cfg[i].tbs_en = 1;
511 }
512
513 /* FIFO size is 4096 bytes for 1 tx/rx queue */
514 plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
515 plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
516
517 plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
518 plat->tx_queues_cfg[0].weight = 0x09;
519 plat->tx_queues_cfg[1].weight = 0x0A;
520 plat->tx_queues_cfg[2].weight = 0x0B;
521 plat->tx_queues_cfg[3].weight = 0x0C;
522 plat->tx_queues_cfg[4].weight = 0x0D;
523 plat->tx_queues_cfg[5].weight = 0x0E;
524 plat->tx_queues_cfg[6].weight = 0x0F;
525 plat->tx_queues_cfg[7].weight = 0x10;
526
527 plat->dma_cfg->pbl = 32;
528 plat->dma_cfg->pblx8 = true;
529 plat->dma_cfg->fixed_burst = 0;
530 plat->dma_cfg->mixed_burst = 0;
531 plat->dma_cfg->aal = 0;
532 plat->dma_cfg->dche = true;
533
534 plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
535 GFP_KERNEL);
536 if (!plat->axi)
537 return -ENOMEM;
538
539 plat->axi->axi_lpi_en = 0;
540 plat->axi->axi_xit_frm = 0;
541 plat->axi->axi_wr_osr_lmt = 1;
542 plat->axi->axi_rd_osr_lmt = 1;
543 plat->axi->axi_blen[0] = 4;
544 plat->axi->axi_blen[1] = 8;
545 plat->axi->axi_blen[2] = 16;
546
547 plat->ptp_max_adj = plat->clk_ptp_rate;
548 plat->eee_usecs_rate = plat->clk_ptp_rate;
549
550 /* Set system clock */
551 sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
552
553 plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
554 clk_name, NULL, 0,
555 plat->clk_ptp_rate);
556
557 if (IS_ERR(plat->stmmac_clk)) {
558 dev_warn(&pdev->dev, "Failed to register stmmac-clk\n");
559 plat->stmmac_clk = NULL;
560 }
561
562 ret = clk_prepare_enable(plat->stmmac_clk);
563 if (ret) {
564 clk_unregister_fixed_rate(plat->stmmac_clk);
565 return ret;
566 }
567
568 plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
569
570 /* Set default value for multicast hash bins */
571 plat->multicast_filter_bins = HASH_TABLE_SIZE;
572
573 /* Set default value for unicast filter entries */
574 plat->unicast_filter_entries = 1;
575
576 /* Set the maxmtu to a default of JUMBO_LEN */
577 plat->maxmtu = JUMBO_LEN;
578
579 plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;
580
581 /* Use the last Rx queue */
582 plat->vlan_fail_q = plat->rx_queues_to_use - 1;
583
584 /* For fixed-link setup, we allow phy-mode setting */
585 fwnode = dev_fwnode(&pdev->dev);
586 if (fwnode) {
587 int phy_mode;
588
589 /* "phy-mode" setting is optional. If it is set,
590 * we allow either sgmii or 1000base-x for now.
591 */
592 phy_mode = fwnode_get_phy_mode(fwnode);
593 if (phy_mode >= 0) {
594 if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
595 phy_mode == PHY_INTERFACE_MODE_1000BASEX)
596 plat->phy_interface = phy_mode;
597 else
598 dev_warn(&pdev->dev, "Invalid phy-mode\n");
599 }
600 }
601
602 /* Intel mGbE SGMII interface uses pcs-xpcs */
603 if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
604 plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
605 plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR);
606 plat->mdio_bus_data->default_an_inband = true;
607 plat->select_pcs = intel_mgbe_select_pcs;
608 }
609
610 /* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
611 plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
612 plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
613
614 plat->int_snapshot_num = AUX_SNAPSHOT1;
615
616 plat->crosststamp = intel_crosststamp;
617 plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
618
619 /* Set up the MSI vector offsets specific to the Intel mGbE controller */
620 plat->msi_mac_vec = 29;
621 plat->msi_lpi_vec = 28;
622 plat->msi_sfty_ce_vec = 27;
623 plat->msi_sfty_ue_vec = 26;
624 plat->msi_rx_base_vec = 0;
625 plat->msi_tx_base_vec = 1;
626
627 return 0;
628 }
629
630 static int ehl_common_data(struct pci_dev *pdev,
631 struct plat_stmmacenet_data *plat)
632 {
633 plat->rx_queues_to_use = 8;
634 plat->tx_queues_to_use = 8;
635 plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
636 plat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
637
638 plat->safety_feat_cfg->tsoee = 1;
639 plat->safety_feat_cfg->mrxpee = 1;
640 plat->safety_feat_cfg->mestee = 1;
641 plat->safety_feat_cfg->mrxee = 1;
642 plat->safety_feat_cfg->mtxee = 1;
643 plat->safety_feat_cfg->epsi = 0;
644 plat->safety_feat_cfg->edpp = 0;
645 plat->safety_feat_cfg->prtyen = 0;
646 plat->safety_feat_cfg->tmouten = 0;
647
648 return intel_mgbe_common_data(pdev, plat);
649 }
650
651 static int ehl_sgmii_data(struct pci_dev *pdev,
652 struct plat_stmmacenet_data *plat)
653 {
654 plat->bus_id = 1;
655 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
656 plat->speed_mode_2500 = intel_speed_mode_2500;
657 plat->serdes_powerup = intel_serdes_powerup;
658 plat->serdes_powerdown = intel_serdes_powerdown;
659
660 plat->clk_ptp_rate = 204800000;
661
662 return ehl_common_data(pdev, plat);
663 }
664
665 static struct stmmac_pci_info ehl_sgmii1g_info = {
666 .setup = ehl_sgmii_data,
667 };
668
669 static int ehl_rgmii_data(struct pci_dev *pdev,
670 struct plat_stmmacenet_data *plat)
671 {
672 plat->bus_id = 1;
673 plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
674
675 plat->clk_ptp_rate = 204800000;
676
677 return ehl_common_data(pdev, plat);
678 }
679
680 static struct stmmac_pci_info ehl_rgmii1g_info = {
681 .setup = ehl_rgmii_data,
682 };
683
684 static int ehl_pse0_common_data(struct pci_dev *pdev,
685 struct plat_stmmacenet_data *plat)
686 {
687 struct intel_priv_data *intel_priv = plat->bsp_priv;
688
689 intel_priv->is_pse = true;
690 plat->bus_id = 2;
691 plat->host_dma_width = 32;
692
693 plat->clk_ptp_rate = 200000000;
694
695 intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
696
697 return ehl_common_data(pdev, plat);
698 }
699
700 static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
701 struct plat_stmmacenet_data *plat)
702 {
703 plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
704 return ehl_pse0_common_data(pdev, plat);
705 }
706
707 static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
708 .setup = ehl_pse0_rgmii1g_data,
709 };
710
711 static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
712 struct plat_stmmacenet_data *plat)
713 {
714 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
715 plat->speed_mode_2500 = intel_speed_mode_2500;
716 plat->serdes_powerup = intel_serdes_powerup;
717 plat->serdes_powerdown = intel_serdes_powerdown;
718 return ehl_pse0_common_data(pdev, plat);
719 }
720
721 static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
722 .setup = ehl_pse0_sgmii1g_data,
723 };
724
725 static int ehl_pse1_common_data(struct pci_dev *pdev,
726 struct plat_stmmacenet_data *plat)
727 {
728 struct intel_priv_data *intel_priv = plat->bsp_priv;
729
730 intel_priv->is_pse = true;
731 plat->bus_id = 3;
732 plat->host_dma_width = 32;
733
734 plat->clk_ptp_rate = 200000000;
735
736 intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
737
738 return ehl_common_data(pdev, plat);
739 }
740
741 static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
742 struct plat_stmmacenet_data *plat)
743 {
744 plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
745 return ehl_pse1_common_data(pdev, plat);
746 }
747
748 static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
749 .setup = ehl_pse1_rgmii1g_data,
750 };
751
752 static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
753 struct plat_stmmacenet_data *plat)
754 {
755 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
756 plat->speed_mode_2500 = intel_speed_mode_2500;
757 plat->serdes_powerup = intel_serdes_powerup;
758 plat->serdes_powerdown = intel_serdes_powerdown;
759 return ehl_pse1_common_data(pdev, plat);
760 }
761
762 static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
763 .setup = ehl_pse1_sgmii1g_data,
764 };
765
766 static int tgl_common_data(struct pci_dev *pdev,
767 struct plat_stmmacenet_data *plat)
768 {
769 plat->rx_queues_to_use = 6;
770 plat->tx_queues_to_use = 4;
771 plat->clk_ptp_rate = 204800000;
772 plat->speed_mode_2500 = intel_speed_mode_2500;
773
774 plat->safety_feat_cfg->tsoee = 1;
775 plat->safety_feat_cfg->mrxpee = 0;
776 plat->safety_feat_cfg->mestee = 1;
777 plat->safety_feat_cfg->mrxee = 1;
778 plat->safety_feat_cfg->mtxee = 1;
779 plat->safety_feat_cfg->epsi = 0;
780 plat->safety_feat_cfg->edpp = 0;
781 plat->safety_feat_cfg->prtyen = 0;
782 plat->safety_feat_cfg->tmouten = 0;
783
784 return intel_mgbe_common_data(pdev, plat);
785 }
786
787 static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
788 struct plat_stmmacenet_data *plat)
789 {
790 plat->bus_id = 1;
791 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
792 plat->serdes_powerup = intel_serdes_powerup;
793 plat->serdes_powerdown = intel_serdes_powerdown;
794 return tgl_common_data(pdev, plat);
795 }
796
797 static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
798 .setup = tgl_sgmii_phy0_data,
799 };
800
801 static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
802 struct plat_stmmacenet_data *plat)
803 {
804 plat->bus_id = 2;
805 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
806 plat->serdes_powerup = intel_serdes_powerup;
807 plat->serdes_powerdown = intel_serdes_powerdown;
808 return tgl_common_data(pdev, plat);
809 }
810
811 static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
812 .setup = tgl_sgmii_phy1_data,
813 };
814
815 static int adls_sgmii_phy0_data(struct pci_dev *pdev,
816 struct plat_stmmacenet_data *plat)
817 {
818 plat->bus_id = 1;
819 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
820
821 /* SerDes power up and power down are done in BIOS for ADL */
822
823 return tgl_common_data(pdev, plat);
824 }
825
826 static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
827 .setup = adls_sgmii_phy0_data,
828 };
829
830 static int adls_sgmii_phy1_data(struct pci_dev *pdev,
831 struct plat_stmmacenet_data *plat)
832 {
833 plat->bus_id = 2;
834 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
835
836 /* SerDes power up and power down are done in BIOS for ADL */
837
838 return tgl_common_data(pdev, plat);
839 }
840
841 static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
842 .setup = adls_sgmii_phy1_data,
843 };
844 static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
845 {
846 .func = 6,
847 .phy_addr = 1,
848 },
849 };
850
851 static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
852 .func = galileo_stmmac_func_data,
853 .nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
854 };
855
856 static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
857 {
858 .func = 6,
859 .phy_addr = 1,
860 },
861 {
862 .func = 7,
863 .phy_addr = 1,
864 },
865 };
866
867 static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
868 .func = iot2040_stmmac_func_data,
869 .nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
870 };
871
872 static const struct dmi_system_id quark_pci_dmi[] = {
873 {
874 .matches = {
875 DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
876 },
877 .driver_data = (void *)&galileo_stmmac_dmi_data,
878 },
879 {
880 .matches = {
881 DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
882 },
883 .driver_data = (void *)&galileo_stmmac_dmi_data,
884 },
885 /* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
886 * The asset tag "6ES7647-0AA00-0YA2" is used only for the IOT2020,
887 * which has a single PCI network device, while the other asset tags
888 * are for the IOT2040, which has two.
889 */
890 {
891 .matches = {
892 DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
893 DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
894 "6ES7647-0AA00-0YA2"),
895 },
896 .driver_data = (void *)&galileo_stmmac_dmi_data,
897 },
898 {
899 .matches = {
900 DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
901 },
902 .driver_data = (void *)&iot2040_stmmac_dmi_data,
903 },
904 {}
905 };
906
907 static int quark_default_data(struct pci_dev *pdev,
908 struct plat_stmmacenet_data *plat)
909 {
910 int ret;
911
912 /* Set common default data first */
913 common_default_data(plat);
914
915 /* Refuse to load the driver and register the net device if the MAC
916 * controller is not connected to any PHY interface.
917 */
918 ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
919 if (ret < 0) {
920 /* Return error to the caller on DMI enabled boards. */
921 if (dmi_get_system_info(DMI_BOARD_NAME))
922 return ret;
923
924 /* Galileo boards with old firmware don't support DMI. We always
925 * use 1 as the PHY address here, so that at least the first MAC
926 * controller found gets probed.
927 */
928 ret = 1;
929 }
930
931 plat->bus_id = pci_dev_id(pdev);
932 plat->phy_addr = ret;
933 plat->phy_interface = PHY_INTERFACE_MODE_RMII;
934
935 plat->dma_cfg->pbl = 16;
936 plat->dma_cfg->pblx8 = true;
937 plat->dma_cfg->fixed_burst = 1;
938 /* AXI (TODO) */
939
940 return 0;
941 }
942
943 static const struct stmmac_pci_info quark_info = {
944 .setup = quark_default_data,
945 };
946
947 static int stmmac_config_single_msi(struct pci_dev *pdev,
948 struct plat_stmmacenet_data *plat,
949 struct stmmac_resources *res)
950 {
951 int ret;
952
953 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
954 if (ret < 0) {
955 dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
956 __func__);
957 return ret;
958 }
959
960 res->irq = pci_irq_vector(pdev, 0);
961 res->wol_irq = res->irq;
962 plat->flags &= ~STMMAC_FLAG_MULTI_MSI_EN;
963 dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
964 __func__);
965
966 return 0;
967 }
968
969 static int stmmac_config_multi_msi(struct pci_dev *pdev,
970 struct plat_stmmacenet_data *plat,
971 struct stmmac_resources *res)
972 {
973 int ret;
974 int i;
975
976 if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
977 plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
978 dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
979 __func__);
980 return -1;
981 }
982
983 ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
984 PCI_IRQ_MSI | PCI_IRQ_MSIX);
985 if (ret < 0) {
986 dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
987 __func__);
988 return ret;
989 }
990
991 /* For RX MSI */
992 for (i = 0; i < plat->rx_queues_to_use; i++) {
993 res->rx_irq[i] = pci_irq_vector(pdev,
994 plat->msi_rx_base_vec + i * 2);
995 }
996
997 /* For TX MSI */
998 for (i = 0; i < plat->tx_queues_to_use; i++) {
999 res->tx_irq[i] = pci_irq_vector(pdev,
1000 plat->msi_tx_base_vec + i * 2);
1001 }
1002
1003 if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
1004 res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
1005 if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
1006 res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
1007 if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
1008 res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
1009 if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
1010 res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
1011 if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
1012 res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
1013
1014 plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
1015 dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
1016
1017 return 0;
1018 }
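/* Worked example of the vector layout set up by intel_mgbe_common_data():
 * msi_rx_base_vec = 0 and msi_tx_base_vec = 1, so RX queue i maps to MSI
 * vector 2 * i and TX queue i maps to vector 2 * i + 1, with the MAC, LPI,
 * safety-CE and safety-UE vectors at 29, 28, 27 and 26 respectively.
 */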
1019
1020 /**
1021 * intel_eth_pci_probe
1022 *
1023 * @pdev: pci device pointer
1024 * @id: pointer to the table of device IDs
1025 *
1026 * Description: This probing function gets called for all PCI devices which
1027 * match the ID table and are not "owned" by another driver yet. This function
1028 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
1029 * matches the device. The probe function returns zero when the driver chooses
1030 * to take "ownership" of the device, or a negative error code otherwise.
1031 */
1032 static int intel_eth_pci_probe(struct pci_dev *pdev,
1033 const struct pci_device_id *id)
1034 {
1035 struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
1036 struct intel_priv_data *intel_priv;
1037 struct plat_stmmacenet_data *plat;
1038 struct stmmac_resources res;
1039 int ret;
1040
1041 intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
1042 if (!intel_priv)
1043 return -ENOMEM;
1044
1045 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
1046 if (!plat)
1047 return -ENOMEM;
1048
1049 plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
1050 sizeof(*plat->mdio_bus_data),
1051 GFP_KERNEL);
1052 if (!plat->mdio_bus_data)
1053 return -ENOMEM;
1054
1055 plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
1056 GFP_KERNEL);
1057 if (!plat->dma_cfg)
1058 return -ENOMEM;
1059
1060 plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
1061 sizeof(*plat->safety_feat_cfg),
1062 GFP_KERNEL);
1063 if (!plat->safety_feat_cfg)
1064 return -ENOMEM;
1065
1066 /* Enable pci device */
1067 ret = pcim_enable_device(pdev);
1068 if (ret) {
1069 dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
1070 __func__);
1071 return ret;
1072 }
1073
1074 ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
1075 if (ret)
1076 return ret;
1077
1078 pci_set_master(pdev);
1079
1080 plat->bsp_priv = intel_priv;
1081 intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
1082 intel_priv->crossts_adj = 1;
1083
1084 /* Initialize all MSI vectors to invalid so that they can be set
1085 * according to the platform data settings below.
1086 * Note: MSI vectors take values from 0 up to 31 (STMMAC_MSI_VEC_MAX).
1087 */
1088 plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
1089 plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
1090 plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
1091 plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
1092 plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
1093 plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
1094 plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;
1095
1096 ret = info->setup(pdev, plat);
1097 if (ret)
1098 return ret;
1099
1100 memset(&res, 0, sizeof(res));
1101 res.addr = pcim_iomap_table(pdev)[0];
1102
1103 if (plat->eee_usecs_rate > 0) {
1104 u32 tx_lpi_usec;
1105
1106 tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
1107 writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
1108 }
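/* Example with values from this file: the SGMII setups use a clk_ptp_rate
 * (and hence eee_usecs_rate) of 204800000, giving tx_lpi_usec = 204 - 1 = 203;
 * the PSE setups use 200000000, giving 199.
 */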
1109
1110 ret = stmmac_config_multi_msi(pdev, plat, &res);
1111 if (ret) {
1112 ret = stmmac_config_single_msi(pdev, plat, &res);
1113 if (ret) {
1114 dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
1115 __func__);
1116 goto err_alloc_irq;
1117 }
1118 }
1119
1120 ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
1121 if (ret) {
1122 goto err_alloc_irq;
1123 }
1124
1125 return 0;
1126
1127 err_alloc_irq:
1128 clk_disable_unprepare(plat->stmmac_clk);
1129 clk_unregister_fixed_rate(plat->stmmac_clk);
1130 return ret;
1131 }
1132
1133 /**
1134 * intel_eth_pci_remove
1135 *
1136 * @pdev: pci device pointer
1137 * Description: this function calls the main driver removal path to free the
1138 * net resources and then releases the PCI resources.
1139 */
1140 static void intel_eth_pci_remove(struct pci_dev *pdev)
1141 {
1142 struct net_device *ndev = dev_get_drvdata(&pdev->dev);
1143 struct stmmac_priv *priv = netdev_priv(ndev);
1144
1145 stmmac_dvr_remove(&pdev->dev);
1146
1147 clk_disable_unprepare(priv->plat->stmmac_clk);
1148 clk_unregister_fixed_rate(priv->plat->stmmac_clk);
1149 }
1150
1151 static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
1152 {
1153 struct pci_dev *pdev = to_pci_dev(dev);
1154 int ret;
1155
1156 ret = stmmac_suspend(dev);
1157 if (ret)
1158 return ret;
1159
1160 ret = pci_save_state(pdev);
1161 if (ret)
1162 return ret;
1163
1164 pci_wake_from_d3(pdev, true);
1165 pci_set_power_state(pdev, PCI_D3hot);
1166 return 0;
1167 }
1168
1169 static int __maybe_unused intel_eth_pci_resume(struct device *dev)
1170 {
1171 struct pci_dev *pdev = to_pci_dev(dev);
1172 int ret;
1173
1174 pci_restore_state(pdev);
1175 pci_set_power_state(pdev, PCI_D0);
1176
1177 ret = pcim_enable_device(pdev);
1178 if (ret)
1179 return ret;
1180
1181 pci_set_master(pdev);
1182
1183 return stmmac_resume(dev);
1184 }
1185
1186 static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
1187 intel_eth_pci_resume);
1188
1189 #define PCI_DEVICE_ID_INTEL_QUARK 0x0937
1190 #define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30
1191 #define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31
1192 #define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5 0x4b32
1193 /* The Intel(R) Programmable Services Engine (Intel(R) PSE) consists of 2 MACs,
1194 * which are named PSE0 and PSE1.
1195 */
1196 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G 0x4ba0
1197 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G 0x4ba1
1198 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5 0x4ba2
1199 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G 0x4bb0
1200 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G 0x4bb1
1201 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5 0x4bb2
1202 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0 0x43ac
1203 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1 0x43a2
1204 #define PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac
1205 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac
1206 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad
1207 #define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G 0x54ac
1208 #define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G 0x51ac
1209
1210 static const struct pci_device_id intel_eth_pci_id_table[] = {
1211 { PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
1212 { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
1213 { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
1214 { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
1215 { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
1216 { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
1217 { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
1218 { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
1219 { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
1220 { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
1221 { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
1222 { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
1223 { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
1224 { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
1225 { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
1226 { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
1227 { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) },
1228 {}
1229 };
1230 MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
1231
1232 static struct pci_driver intel_eth_pci_driver = {
1233 .name = "intel-eth-pci",
1234 .id_table = intel_eth_pci_id_table,
1235 .probe = intel_eth_pci_probe,
1236 .remove = intel_eth_pci_remove,
1237 .driver = {
1238 .pm = &intel_eth_pm_ops,
1239 },
1240 };
1241
1242 module_pci_driver(intel_eth_pci_driver);
1243
1244 MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
1245 MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
1246 MODULE_LICENSE("GPL v2");
1247