1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020, Intel Corporation
3  */
4 
5 #include <linux/clk-provider.h>
6 #include <linux/pci.h>
7 #include <linux/dmi.h>
8 #include "dwmac-intel.h"
9 #include "dwmac4.h"
10 #include "stmmac.h"
11 #include "stmmac_ptp.h"
12 
13 struct intel_priv_data {
14 	int mdio_adhoc_addr;	/* mdio address for serdes & etc */
15 	unsigned long crossts_adj;
16 	bool is_pse;
17 };
18 
19 /* This struct is used to associate the PCI function of a MAC controller on
20  * a board, discovered via DMI, with the address of the PHY connected to the
21  * MAC. A negative address means that the MAC controller is not connected
22  * to a PHY.
23  */
24 struct stmmac_pci_func_data {
25 	unsigned int func;
26 	int phy_addr;
27 };
28 
29 struct stmmac_pci_dmi_data {
30 	const struct stmmac_pci_func_data *func;
31 	size_t nfuncs;
32 };
33 
34 struct stmmac_pci_info {
35 	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
36 };
37 
38 static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
39 				    const struct dmi_system_id *dmi_list)
40 {
41 	const struct stmmac_pci_func_data *func_data;
42 	const struct stmmac_pci_dmi_data *dmi_data;
43 	const struct dmi_system_id *dmi_id;
44 	int func = PCI_FUNC(pdev->devfn);
45 	size_t n;
46 
47 	dmi_id = dmi_first_match(dmi_list);
48 	if (!dmi_id)
49 		return -ENODEV;
50 
51 	dmi_data = dmi_id->driver_data;
52 	func_data = dmi_data->func;
53 
54 	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
55 		if (func_data->func == func)
56 			return func_data->phy_addr;
57 
58 	return -ENODEV;
59 }
60 
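/* Poll a SerDes register over the ad-hoc MDIO address until the masked bits
 * reach the expected value, or give up after a bounded number of retries.
 */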
61 static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
62 			      int phyreg, u32 mask, u32 val)
63 {
64 	unsigned int retries = 10;
65 	int val_rd;
66 
67 	do {
68 		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
69 		if ((val_rd & mask) == (val & mask))
70 			return 0;
71 		udelay(POLL_DELAY_US);
72 	} while (--retries);
73 
74 	return -ETIMEDOUT;
75 }
76 
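/* Bring the SerDes lane up: program the SerDes rate and PCLK, request the
 * PLL clock, assert the lane reset and move the power state to P0, polling
 * the status register for each step to complete.
 */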
77 static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
78 {
79 	struct intel_priv_data *intel_priv = priv_data;
80 	struct stmmac_priv *priv = netdev_priv(ndev);
81 	int serdes_phy_addr = 0;
82 	u32 data = 0;
83 
84 	if (!intel_priv->mdio_adhoc_addr)
85 		return 0;
86 
87 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
88 
89 	/* Set the serdes rate and the PCLK rate */
90 	data = mdiobus_read(priv->mii, serdes_phy_addr,
91 			    SERDES_GCR0);
92 
93 	data &= ~SERDES_RATE_MASK;
94 	data &= ~SERDES_PCLK_MASK;
95 
96 	if (priv->plat->max_speed == 2500)
97 		data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
98 			SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
99 	else
100 		data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
101 			SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;
102 
103 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
104 
105 	/* assert clk_req */
106 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
107 	data |= SERDES_PLL_CLK;
108 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
109 
110 	/* check for clk_ack assertion */
111 	data = serdes_status_poll(priv, serdes_phy_addr,
112 				  SERDES_GSR0,
113 				  SERDES_PLL_CLK,
114 				  SERDES_PLL_CLK);
115 
116 	if (data) {
117 		dev_err(priv->device, "Serdes PLL clk request timeout\n");
118 		return data;
119 	}
120 
121 	/* assert lane reset */
122 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
123 	data |= SERDES_RST;
124 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
125 
126 	/* check for assert lane reset reflection */
127 	data = serdes_status_poll(priv, serdes_phy_addr,
128 				  SERDES_GSR0,
129 				  SERDES_RST,
130 				  SERDES_RST);
131 
132 	if (data) {
133 		dev_err(priv->device, "Serdes assert lane reset timeout\n");
134 		return data;
135 	}
136 
137 	/*  move power state to P0 */
138 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
139 
140 	data &= ~SERDES_PWR_ST_MASK;
141 	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
142 
143 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
144 
145 	/* Check for P0 state */
146 	data = serdes_status_poll(priv, serdes_phy_addr,
147 				  SERDES_GSR0,
148 				  SERDES_PWR_ST_MASK,
149 				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);
150 
151 	if (data) {
152 		dev_err(priv->device, "Serdes power state P0 timeout.\n");
153 		return data;
154 	}
155 
156 	/* PSE only - ungate SGMII PHY Rx Clock */
157 	if (intel_priv->is_pse)
158 		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
159 			       0, SERDES_PHY_RX_CLK);
160 
161 	return 0;
162 }
163 
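/* Power the SerDes lane down by reversing the power-up sequence: move the
 * power state to P3, release the PLL clock request and de-assert the lane
 * reset, polling for each step to be acknowledged.
 */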
164 static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
165 {
166 	struct intel_priv_data *intel_priv = intel_data;
167 	struct stmmac_priv *priv = netdev_priv(ndev);
168 	int serdes_phy_addr = 0;
169 	u32 data = 0;
170 
171 	if (!intel_priv->mdio_adhoc_addr)
172 		return;
173 
174 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
175 
176 	/* PSE only - gate SGMII PHY Rx Clock */
177 	if (intel_priv->is_pse)
178 		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
179 			       SERDES_PHY_RX_CLK, 0);
180 
181 	/*  move power state to P3 */
182 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
183 
184 	data &= ~SERDES_PWR_ST_MASK;
185 	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
186 
187 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
188 
189 	/* Check for P3 state */
190 	data = serdes_status_poll(priv, serdes_phy_addr,
191 				  SERDES_GSR0,
192 				  SERDES_PWR_ST_MASK,
193 				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);
194 
195 	if (data) {
196 		dev_err(priv->device, "Serdes power state P3 timeout\n");
197 		return;
198 	}
199 
200 	/* de-assert clk_req */
201 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
202 	data &= ~SERDES_PLL_CLK;
203 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
204 
205 	/* check for clk_ack de-assert */
206 	data = serdes_status_poll(priv, serdes_phy_addr,
207 				  SERDES_GSR0,
208 				  SERDES_PLL_CLK,
209 				  (u32)~SERDES_PLL_CLK);
210 
211 	if (data) {
212 		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
213 		return;
214 	}
215 
216 	/* de-assert lane reset */
217 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
218 	data &= ~SERDES_RST;
219 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
220 
221 	/* check for de-assert lane reset reflection */
222 	data = serdes_status_poll(priv, serdes_phy_addr,
223 				  SERDES_GSR0,
224 				  SERDES_RST,
225 				  (u32)~SERDES_RST);
226 
227 	if (data) {
228 		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
229 		return;
230 	}
231 }
232 
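/* Read the SerDes link mode and set up the platform data for either
 * 2.5G (2500BASE-X without in-band AN) or 1G SGMII with in-band AN.
 */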
233 static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
234 {
235 	struct intel_priv_data *intel_priv = intel_data;
236 	struct stmmac_priv *priv = netdev_priv(ndev);
237 	int serdes_phy_addr = 0;
238 	u32 data = 0;
239 
240 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
241 
242 	/* Determine the link speed mode: 2.5Gbps/1Gbps */
243 	data = mdiobus_read(priv->mii, serdes_phy_addr,
244 			    SERDES_GCR);
245 
246 	if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
247 	    SERDES_LINK_MODE_2G5) {
248 		dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
249 		priv->plat->max_speed = 2500;
250 		priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
251 		priv->plat->mdio_bus_data->xpcs_an_inband = false;
252 	} else {
253 		priv->plat->max_speed = 1000;
254 		priv->plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
255 		priv->plat->mdio_bus_data->xpcs_an_inband = true;
256 	}
257 }
258 
259 /* Program the PTP clock frequency for the different variants of
260  * Intel mGBE, which have slightly different GPO mappings.
261  */
262 static void intel_mgbe_ptp_clk_freq_config(void *npriv)
263 {
264 	struct stmmac_priv *priv = (struct stmmac_priv *)npriv;
265 	struct intel_priv_data *intel_priv;
266 	u32 gpio_value;
267 
268 	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;
269 
270 	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);
271 
272 	if (intel_priv->is_pse) {
273 		/* For PSE GbE, use 200MHz */
274 		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
275 		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
276 	} else {
277 		/* For PCH GbE, use 200MHz */
278 		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
279 		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
280 	}
281 
282 	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
283 }
284 
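/* Assemble the 64-bit ART timestamp from the four PMC_ART_VALUE registers
 * read over the ad-hoc MDIO address.
 */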
285 static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
286 			u64 *art_time)
287 {
288 	u64 ns;
289 
290 	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
291 	ns <<= GMAC4_ART_TIME_SHIFT;
292 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
293 	ns <<= GMAC4_ART_TIME_SHIFT;
294 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
295 	ns <<= GMAC4_ART_TIME_SHIFT;
296 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);
297 
298 	*art_time = ns;
299 }
300 
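/* Return non-zero once the GMAC timestamp interrupt status bit is set */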
301 static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
302 {
303 	return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
304 }
305 
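/* Capture a correlated device (PTP) / system (ART) timestamp pair by
 * triggering an auxiliary snapshot with GPO1 and reading back the latched
 * PTP time and ART time once the hardware signals completion.
 */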
306 static int intel_crosststamp(ktime_t *device,
307 			     struct system_counterval_t *system,
308 			     void *ctx)
309 {
310 	struct intel_priv_data *intel_priv;
311 
312 	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
313 	void __iomem *ptpaddr = priv->ptpaddr;
314 	void __iomem *ioaddr = priv->hw->pcsr;
315 	unsigned long flags;
316 	u64 art_time = 0;
317 	u64 ptp_time = 0;
318 	u32 num_snapshot;
319 	u32 gpio_value;
320 	u32 acr_value;
321 	int i;
322 
323 	if (!boot_cpu_has(X86_FEATURE_ART))
324 		return -EOPNOTSUPP;
325 
326 	intel_priv = priv->plat->bsp_priv;
327 
328 	/* Internal cross-timestamping and externally triggered event
329 	 * timestamping cannot run concurrently.
330 	 */
331 	if (priv->plat->ext_snapshot_en)
332 		return -EBUSY;
333 
334 	priv->plat->int_snapshot_en = 1;
335 
336 	mutex_lock(&priv->aux_ts_lock);
337 	/* Enable Internal snapshot trigger */
338 	acr_value = readl(ptpaddr + PTP_ACR);
339 	acr_value &= ~PTP_ACR_MASK;
340 	switch (priv->plat->int_snapshot_num) {
341 	case AUX_SNAPSHOT0:
342 		acr_value |= PTP_ACR_ATSEN0;
343 		break;
344 	case AUX_SNAPSHOT1:
345 		acr_value |= PTP_ACR_ATSEN1;
346 		break;
347 	case AUX_SNAPSHOT2:
348 		acr_value |= PTP_ACR_ATSEN2;
349 		break;
350 	case AUX_SNAPSHOT3:
351 		acr_value |= PTP_ACR_ATSEN3;
352 		break;
353 	default:
354 		mutex_unlock(&priv->aux_ts_lock);
355 		priv->plat->int_snapshot_en = 0;
356 		return -EINVAL;
357 	}
358 	writel(acr_value, ptpaddr + PTP_ACR);
359 
360 	/* Clear FIFO */
361 	acr_value = readl(ptpaddr + PTP_ACR);
362 	acr_value |= PTP_ACR_ATSFC;
363 	writel(acr_value, ptpaddr + PTP_ACR);
364 	/* Release the mutex */
365 	mutex_unlock(&priv->aux_ts_lock);
366 
367 	/* Trigger the internal snapshot signal.
368 	 * Create a rising edge by toggling GPO1 low
369 	 * and then back to high.
370 	 */
371 	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
372 	gpio_value &= ~GMAC_GPO1;
373 	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
374 	gpio_value |= GMAC_GPO1;
375 	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
376 
377 	/* Time sync done Indication - Interrupt method */
378 	if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
379 					      stmmac_cross_ts_isr(priv),
380 					      HZ / 100)) {
381 		priv->plat->int_snapshot_en = 0;
382 		return -ETIMEDOUT;
383 	}
384 
385 	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
386 			GMAC_TIMESTAMP_ATSNS_MASK) >>
387 			GMAC_TIMESTAMP_ATSNS_SHIFT;
388 
389 	/* Repeat until the timestamps are from the FIFO last segment */
390 	for (i = 0; i < num_snapshot; i++) {
391 		spin_lock_irqsave(&priv->ptp_lock, flags);
392 		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
393 		*device = ns_to_ktime(ptp_time);
394 		spin_unlock_irqrestore(&priv->ptp_lock, flags);
395 		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
396 		*system = convert_art_to_tsc(art_time);
397 	}
398 
399 	system->cycles *= intel_priv->crossts_adj;
400 	priv->plat->int_snapshot_en = 0;
401 
402 	return 0;
403 }
404 
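/* Compute the PSE cross-timestamp adjustment factor as the ratio of the
 * CPUID-reported ART frequency to the given base rate.
 */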
405 static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
406 				       int base)
407 {
408 	if (boot_cpu_has(X86_FEATURE_ART)) {
409 		unsigned int art_freq;
410 
411 		/* On systems that support ART, ART frequency can be obtained
412 		 * from ECX register of CPUID leaf (0x15).
413 		 */
414 		art_freq = cpuid_ecx(ART_CPUID_LEAF);
415 		do_div(art_freq, base);
416 		intel_priv->crossts_adj = art_freq;
417 	}
418 }
419 
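/* Common platform defaults: CSR clock, MAC filter sizes, MTU and a single
 * RX/TX queue pair.
 */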
420 static void common_default_data(struct plat_stmmacenet_data *plat)
421 {
422 	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
423 	plat->has_gmac = 1;
424 	plat->force_sf_dma_mode = 1;
425 
426 	plat->mdio_bus_data->needs_reset = true;
427 
428 	/* Set default value for multicast hash bins */
429 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
430 
431 	/* Set default value for unicast filter entries */
432 	plat->unicast_filter_entries = 1;
433 
434 	/* Set the maxmtu to a default of JUMBO_LEN */
435 	plat->maxmtu = JUMBO_LEN;
436 
437 	/* Set default number of RX and TX queues to use */
438 	plat->tx_queues_to_use = 1;
439 	plat->rx_queues_to_use = 1;
440 
441 	/* Disable Priority config by default */
442 	plat->tx_queues_cfg[0].use_prio = false;
443 	plat->rx_queues_cfg[0].use_prio = false;
444 
445 	/* Disable RX queues routing by default */
446 	plat->rx_queues_cfg[0].pkt_route = 0x0;
447 }
448 
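/* Platform configuration common to all Intel mGbE instances: queue setup,
 * DMA/AXI parameters, the fixed-rate stmmac clock, PTP/cross-timestamping
 * hooks and the MSI vector layout.
 */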
449 static int intel_mgbe_common_data(struct pci_dev *pdev,
450 				  struct plat_stmmacenet_data *plat)
451 {
452 	char clk_name[20];
453 	int ret;
454 	int i;
455 
456 	plat->pdev = pdev;
457 	plat->phy_addr = -1;
458 	plat->clk_csr = 5;
459 	plat->has_gmac = 0;
460 	plat->has_gmac4 = 1;
461 	plat->force_sf_dma_mode = 0;
462 	plat->tso_en = 1;
463 	plat->sph_disable = 1;
464 
465 	/* Multiplying factor applied to the clk_eee_i clock period
466 	 * to bring it closer to 100 ns. Program it so that
467 	 * clk_eee_time_period * (MULT_FACT_100NS + 1) falls
468 	 * within 80 ns to 120 ns:
469 	 * clk_eee frequency is 19.2 MHz
470 	 * clk_eee_time_period is 52 ns
471 	 * 52 ns * (1 + 1) = 104 ns
472 	 * MULT_FACT_100NS = 1
473 	 */
474 	plat->mult_fact_100ns = 1;
475 
476 	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
477 
478 	for (i = 0; i < plat->rx_queues_to_use; i++) {
479 		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
480 		plat->rx_queues_cfg[i].chan = i;
481 
482 		/* Disable Priority config by default */
483 		plat->rx_queues_cfg[i].use_prio = false;
484 
485 		/* Disable RX queues routing by default */
486 		plat->rx_queues_cfg[i].pkt_route = 0x0;
487 	}
488 
489 	for (i = 0; i < plat->tx_queues_to_use; i++) {
490 		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
491 
492 		/* Disable Priority config by default */
493 		plat->tx_queues_cfg[i].use_prio = false;
494 		/* Default TX Q0 to use TSO and the rest of the TX queues for TBS */
495 		if (i > 0)
496 			plat->tx_queues_cfg[i].tbs_en = 1;
497 	}
498 
499 	/* FIFO size is 4096 bytes per TX/RX queue */
500 	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
501 	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
502 
503 	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
504 	plat->tx_queues_cfg[0].weight = 0x09;
505 	plat->tx_queues_cfg[1].weight = 0x0A;
506 	plat->tx_queues_cfg[2].weight = 0x0B;
507 	plat->tx_queues_cfg[3].weight = 0x0C;
508 	plat->tx_queues_cfg[4].weight = 0x0D;
509 	plat->tx_queues_cfg[5].weight = 0x0E;
510 	plat->tx_queues_cfg[6].weight = 0x0F;
511 	plat->tx_queues_cfg[7].weight = 0x10;
512 
513 	plat->dma_cfg->pbl = 32;
514 	plat->dma_cfg->pblx8 = true;
515 	plat->dma_cfg->fixed_burst = 0;
516 	plat->dma_cfg->mixed_burst = 0;
517 	plat->dma_cfg->aal = 0;
518 	plat->dma_cfg->dche = true;
519 
520 	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
521 				 GFP_KERNEL);
522 	if (!plat->axi)
523 		return -ENOMEM;
524 
525 	plat->axi->axi_lpi_en = 0;
526 	plat->axi->axi_xit_frm = 0;
527 	plat->axi->axi_wr_osr_lmt = 1;
528 	plat->axi->axi_rd_osr_lmt = 1;
529 	plat->axi->axi_blen[0] = 4;
530 	plat->axi->axi_blen[1] = 8;
531 	plat->axi->axi_blen[2] = 16;
532 
533 	plat->ptp_max_adj = plat->clk_ptp_rate;
534 	plat->eee_usecs_rate = plat->clk_ptp_rate;
535 
536 	/* Set system clock */
537 	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
538 
539 	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
540 						   clk_name, NULL, 0,
541 						   plat->clk_ptp_rate);
542 
543 	if (IS_ERR(plat->stmmac_clk)) {
544 		dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
545 		plat->stmmac_clk = NULL;
546 	}
547 
548 	ret = clk_prepare_enable(plat->stmmac_clk);
549 	if (ret) {
550 		clk_unregister_fixed_rate(plat->stmmac_clk);
551 		return ret;
552 	}
553 
554 	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
555 
556 	/* Set default value for multicast hash bins */
557 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
558 
559 	/* Set default value for unicast filter entries */
560 	plat->unicast_filter_entries = 1;
561 
562 	/* Set the maxmtu to a default of JUMBO_LEN */
563 	plat->maxmtu = JUMBO_LEN;
564 
565 	plat->vlan_fail_q_en = true;
566 
567 	/* Use the last Rx queue */
568 	plat->vlan_fail_q = plat->rx_queues_to_use - 1;
569 
570 	/* Intel mGbE SGMII interface uses pcs-xpcs */
571 	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) {
572 		plat->mdio_bus_data->has_xpcs = true;
573 		plat->mdio_bus_data->xpcs_an_inband = true;
574 	}
575 
576 	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
577 	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
578 	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
579 
580 	plat->int_snapshot_num = AUX_SNAPSHOT1;
581 	plat->ext_snapshot_num = AUX_SNAPSHOT0;
582 
583 	plat->has_crossts = true;
584 	plat->crosststamp = intel_crosststamp;
585 	plat->int_snapshot_en = 0;
586 
587 	/* Setup MSI vector offset specific to Intel mGbE controller */
588 	plat->msi_mac_vec = 29;
589 	plat->msi_lpi_vec = 28;
590 	plat->msi_sfty_ce_vec = 27;
591 	plat->msi_sfty_ue_vec = 26;
592 	plat->msi_rx_base_vec = 0;
593 	plat->msi_tx_base_vec = 1;
594 
595 	return 0;
596 }
597 
598 static int ehl_common_data(struct pci_dev *pdev,
599 			   struct plat_stmmacenet_data *plat)
600 {
601 	plat->rx_queues_to_use = 8;
602 	plat->tx_queues_to_use = 8;
603 	plat->use_phy_wol = 1;
604 
605 	plat->safety_feat_cfg->tsoee = 1;
606 	plat->safety_feat_cfg->mrxpee = 1;
607 	plat->safety_feat_cfg->mestee = 1;
608 	plat->safety_feat_cfg->mrxee = 1;
609 	plat->safety_feat_cfg->mtxee = 1;
610 	plat->safety_feat_cfg->epsi = 0;
611 	plat->safety_feat_cfg->edpp = 0;
612 	plat->safety_feat_cfg->prtyen = 0;
613 	plat->safety_feat_cfg->tmouten = 0;
614 
615 	return intel_mgbe_common_data(pdev, plat);
616 }
617 
618 static int ehl_sgmii_data(struct pci_dev *pdev,
619 			  struct plat_stmmacenet_data *plat)
620 {
621 	plat->bus_id = 1;
622 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
623 	plat->speed_mode_2500 = intel_speed_mode_2500;
624 	plat->serdes_powerup = intel_serdes_powerup;
625 	plat->serdes_powerdown = intel_serdes_powerdown;
626 
627 	plat->clk_ptp_rate = 204800000;
628 
629 	return ehl_common_data(pdev, plat);
630 }
631 
632 static struct stmmac_pci_info ehl_sgmii1g_info = {
633 	.setup = ehl_sgmii_data,
634 };
635 
636 static int ehl_rgmii_data(struct pci_dev *pdev,
637 			  struct plat_stmmacenet_data *plat)
638 {
639 	plat->bus_id = 1;
640 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
641 
642 	plat->clk_ptp_rate = 204800000;
643 
644 	return ehl_common_data(pdev, plat);
645 }
646 
647 static struct stmmac_pci_info ehl_rgmii1g_info = {
648 	.setup = ehl_rgmii_data,
649 };
650 
651 static int ehl_pse0_common_data(struct pci_dev *pdev,
652 				struct plat_stmmacenet_data *plat)
653 {
654 	struct intel_priv_data *intel_priv = plat->bsp_priv;
655 
656 	intel_priv->is_pse = true;
657 	plat->bus_id = 2;
658 	plat->addr64 = 32;
659 
660 	plat->clk_ptp_rate = 200000000;
661 
662 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
663 
664 	return ehl_common_data(pdev, plat);
665 }
666 
667 static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
668 				 struct plat_stmmacenet_data *plat)
669 {
670 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
671 	return ehl_pse0_common_data(pdev, plat);
672 }
673 
674 static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
675 	.setup = ehl_pse0_rgmii1g_data,
676 };
677 
678 static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
679 				 struct plat_stmmacenet_data *plat)
680 {
681 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
682 	plat->speed_mode_2500 = intel_speed_mode_2500;
683 	plat->serdes_powerup = intel_serdes_powerup;
684 	plat->serdes_powerdown = intel_serdes_powerdown;
685 	return ehl_pse0_common_data(pdev, plat);
686 }
687 
688 static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
689 	.setup = ehl_pse0_sgmii1g_data,
690 };
691 
692 static int ehl_pse1_common_data(struct pci_dev *pdev,
693 				struct plat_stmmacenet_data *plat)
694 {
695 	struct intel_priv_data *intel_priv = plat->bsp_priv;
696 
697 	intel_priv->is_pse = true;
698 	plat->bus_id = 3;
699 	plat->addr64 = 32;
700 
701 	plat->clk_ptp_rate = 200000000;
702 
703 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
704 
705 	return ehl_common_data(pdev, plat);
706 }
707 
708 static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
709 				 struct plat_stmmacenet_data *plat)
710 {
711 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
712 	return ehl_pse1_common_data(pdev, plat);
713 }
714 
715 static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
716 	.setup = ehl_pse1_rgmii1g_data,
717 };
718 
719 static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
720 				 struct plat_stmmacenet_data *plat)
721 {
722 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
723 	plat->speed_mode_2500 = intel_speed_mode_2500;
724 	plat->serdes_powerup = intel_serdes_powerup;
725 	plat->serdes_powerdown = intel_serdes_powerdown;
726 	return ehl_pse1_common_data(pdev, plat);
727 }
728 
729 static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
730 	.setup = ehl_pse1_sgmii1g_data,
731 };
732 
733 static int tgl_common_data(struct pci_dev *pdev,
734 			   struct plat_stmmacenet_data *plat)
735 {
736 	plat->rx_queues_to_use = 6;
737 	plat->tx_queues_to_use = 4;
738 	plat->clk_ptp_rate = 204800000;
739 	plat->speed_mode_2500 = intel_speed_mode_2500;
740 
741 	plat->safety_feat_cfg->tsoee = 1;
742 	plat->safety_feat_cfg->mrxpee = 0;
743 	plat->safety_feat_cfg->mestee = 1;
744 	plat->safety_feat_cfg->mrxee = 1;
745 	plat->safety_feat_cfg->mtxee = 1;
746 	plat->safety_feat_cfg->epsi = 0;
747 	plat->safety_feat_cfg->edpp = 0;
748 	plat->safety_feat_cfg->prtyen = 0;
749 	plat->safety_feat_cfg->tmouten = 0;
750 
751 	return intel_mgbe_common_data(pdev, plat);
752 }
753 
754 static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
755 			       struct plat_stmmacenet_data *plat)
756 {
757 	plat->bus_id = 1;
758 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
759 	plat->serdes_powerup = intel_serdes_powerup;
760 	plat->serdes_powerdown = intel_serdes_powerdown;
761 	return tgl_common_data(pdev, plat);
762 }
763 
764 static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
765 	.setup = tgl_sgmii_phy0_data,
766 };
767 
768 static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
769 			       struct plat_stmmacenet_data *plat)
770 {
771 	plat->bus_id = 2;
772 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
773 	plat->serdes_powerup = intel_serdes_powerup;
774 	plat->serdes_powerdown = intel_serdes_powerdown;
775 	return tgl_common_data(pdev, plat);
776 }
777 
778 static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
779 	.setup = tgl_sgmii_phy1_data,
780 };
781 
782 static int adls_sgmii_phy0_data(struct pci_dev *pdev,
783 				struct plat_stmmacenet_data *plat)
784 {
785 	plat->bus_id = 1;
786 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
787 
788 	/* SerDes power up and power down are done in BIOS for ADL */
789 
790 	return tgl_common_data(pdev, plat);
791 }
792 
793 static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
794 	.setup = adls_sgmii_phy0_data,
795 };
796 
797 static int adls_sgmii_phy1_data(struct pci_dev *pdev,
798 				struct plat_stmmacenet_data *plat)
799 {
800 	plat->bus_id = 2;
801 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
802 
803 	/* SerDes power up and power down are done in BIOS for ADL */
804 
805 	return tgl_common_data(pdev, plat);
806 }
807 
808 static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
809 	.setup = adls_sgmii_phy1_data,
810 };
811 static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
812 	{
813 		.func = 6,
814 		.phy_addr = 1,
815 	},
816 };
817 
818 static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
819 	.func = galileo_stmmac_func_data,
820 	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
821 };
822 
823 static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
824 	{
825 		.func = 6,
826 		.phy_addr = 1,
827 	},
828 	{
829 		.func = 7,
830 		.phy_addr = 1,
831 	},
832 };
833 
834 static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
835 	.func = iot2040_stmmac_func_data,
836 	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
837 };
838 
839 static const struct dmi_system_id quark_pci_dmi[] = {
840 	{
841 		.matches = {
842 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
843 		},
844 		.driver_data = (void *)&galileo_stmmac_dmi_data,
845 	},
846 	{
847 		.matches = {
848 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
849 		},
850 		.driver_data = (void *)&galileo_stmmac_dmi_data,
851 	},
852 	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
853 	 * The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020, which
854 	 * has only one PCI network device, while other asset tags are
855 	 * for the IOT2040, which has two.
856 	 */
857 	{
858 		.matches = {
859 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
860 			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
861 					"6ES7647-0AA00-0YA2"),
862 		},
863 		.driver_data = (void *)&galileo_stmmac_dmi_data,
864 	},
865 	{
866 		.matches = {
867 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
868 		},
869 		.driver_data = (void *)&iot2040_stmmac_dmi_data,
870 	},
871 	{}
872 };
873 
874 static int quark_default_data(struct pci_dev *pdev,
875 			      struct plat_stmmacenet_data *plat)
876 {
877 	int ret;
878 
879 	/* Set common default data first */
880 	common_default_data(plat);
881 
882 	/* Refuse to load the driver and register the net device if the MAC
883 	 * controller is not connected to any PHY interface.
884 	 */
885 	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
886 	if (ret < 0) {
887 		/* Return error to the caller on DMI enabled boards. */
888 		if (dmi_get_system_info(DMI_BOARD_NAME))
889 			return ret;
890 
891 		/* Galileo boards with old firmware don't support DMI. We always
892 		 * use 1 here as the PHY address, so at least the first MAC
893 		 * controller found will be probed.
894 		 */
895 		ret = 1;
896 	}
897 
898 	plat->bus_id = pci_dev_id(pdev);
899 	plat->phy_addr = ret;
900 	plat->phy_interface = PHY_INTERFACE_MODE_RMII;
901 
902 	plat->dma_cfg->pbl = 16;
903 	plat->dma_cfg->pblx8 = true;
904 	plat->dma_cfg->fixed_burst = 1;
905 	/* AXI (TODO) */
906 
907 	return 0;
908 }
909 
910 static const struct stmmac_pci_info quark_info = {
911 	.setup = quark_default_data,
912 };
913 
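/* Fall back to a single interrupt vector shared by all events */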
914 static int stmmac_config_single_msi(struct pci_dev *pdev,
915 				    struct plat_stmmacenet_data *plat,
916 				    struct stmmac_resources *res)
917 {
918 	int ret;
919 
920 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
921 	if (ret < 0) {
922 		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
923 			 __func__);
924 		return ret;
925 	}
926 
927 	res->irq = pci_irq_vector(pdev, 0);
928 	res->wol_irq = res->irq;
929 	plat->multi_msi_en = 0;
930 	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
931 		 __func__);
932 
933 	return 0;
934 }
935 
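/* Allocate MSI/MSI-X vectors and map each event source (per-queue RX/TX,
 * MAC, WoL, LPI and safety interrupts) to the vector offsets set up in the
 * platform data.
 */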
936 static int stmmac_config_multi_msi(struct pci_dev *pdev,
937 				   struct plat_stmmacenet_data *plat,
938 				   struct stmmac_resources *res)
939 {
940 	int ret;
941 	int i;
942 
943 	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
944 	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
945 		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
946 			 __func__);
947 		return -1;
948 	}
949 
950 	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
951 				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
952 	if (ret < 0) {
953 		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
954 			 __func__);
955 		return ret;
956 	}
957 
958 	/* For RX MSI */
959 	for (i = 0; i < plat->rx_queues_to_use; i++) {
960 		res->rx_irq[i] = pci_irq_vector(pdev,
961 						plat->msi_rx_base_vec + i * 2);
962 	}
963 
964 	/* For TX MSI */
965 	for (i = 0; i < plat->tx_queues_to_use; i++) {
966 		res->tx_irq[i] = pci_irq_vector(pdev,
967 						plat->msi_tx_base_vec + i * 2);
968 	}
969 
970 	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
971 		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
972 	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
973 		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
974 	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
975 		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
976 	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
977 		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
978 	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
979 		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
980 
981 	plat->multi_msi_en = 1;
982 	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
983 
984 	return 0;
985 }
986 
987 /**
988  * intel_eth_pci_probe
989  *
990  * @pdev: pci device pointer
991  * @id: pointer to table of device id/id's.
992  *
993  * Description: This probing function gets called for all PCI devices which
994  * match the ID table and are not "owned" by another driver yet. This function
995  * is passed a "struct pci_dev *" for each device whose entry in the ID table
996  * matches the device. The probe function returns zero when the driver chooses
997  * to take "ownership" of the device, or a negative error code otherwise.
998  */
999 static int intel_eth_pci_probe(struct pci_dev *pdev,
1000 			       const struct pci_device_id *id)
1001 {
1002 	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
1003 	struct intel_priv_data *intel_priv;
1004 	struct plat_stmmacenet_data *plat;
1005 	struct stmmac_resources res;
1006 	int ret;
1007 
1008 	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
1009 	if (!intel_priv)
1010 		return -ENOMEM;
1011 
1012 	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
1013 	if (!plat)
1014 		return -ENOMEM;
1015 
1016 	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
1017 					   sizeof(*plat->mdio_bus_data),
1018 					   GFP_KERNEL);
1019 	if (!plat->mdio_bus_data)
1020 		return -ENOMEM;
1021 
1022 	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
1023 				     GFP_KERNEL);
1024 	if (!plat->dma_cfg)
1025 		return -ENOMEM;
1026 
1027 	plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
1028 					     sizeof(*plat->safety_feat_cfg),
1029 					     GFP_KERNEL);
1030 	if (!plat->safety_feat_cfg)
1031 		return -ENOMEM;
1032 
1033 	/* Enable pci device */
1034 	ret = pcim_enable_device(pdev);
1035 	if (ret) {
1036 		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
1037 			__func__);
1038 		return ret;
1039 	}
1040 
1041 	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
1042 	if (ret)
1043 		return ret;
1044 
1045 	pci_set_master(pdev);
1046 
1047 	plat->bsp_priv = intel_priv;
1048 	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
1049 	intel_priv->crossts_adj = 1;
1050 
1051 	/* Initialize all MSI vectors to invalid so that they can be set
1052 	 * according to the platform data settings below.
1053 	 * Note: MSI vectors take values from 0 up to 31 (STMMAC_MSI_VEC_MAX)
1054 	 */
1055 	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
1056 	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
1057 	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
1058 	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
1059 	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
1060 	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
1061 	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;
1062 
1063 	ret = info->setup(pdev, plat);
1064 	if (ret)
1065 		return ret;
1066 
1067 	memset(&res, 0, sizeof(res));
1068 	res.addr = pcim_iomap_table(pdev)[0];
1069 
1070 	if (plat->eee_usecs_rate > 0) {
1071 		u32 tx_lpi_usec;
1072 
1073 		tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
1074 		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
1075 	}
1076 
1077 	ret = stmmac_config_multi_msi(pdev, plat, &res);
1078 	if (ret) {
1079 		ret = stmmac_config_single_msi(pdev, plat, &res);
1080 		if (ret) {
1081 			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
1082 				__func__);
1083 			goto err_alloc_irq;
1084 		}
1085 	}
1086 
1087 	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
1088 	if (ret) {
1089 		goto err_alloc_irq;
1090 	}
1091 
1092 	return 0;
1093 
1094 err_alloc_irq:
1095 	clk_disable_unprepare(plat->stmmac_clk);
1096 	clk_unregister_fixed_rate(plat->stmmac_clk);
1097 	return ret;
1098 }
1099 
1100 /**
1101  * intel_eth_pci_remove
1102  *
1103  * @pdev: pci device pointer
1104  * Description: this function calls the main driver removal routine to free
1105  * the net resources and then releases the PCI resources.
1106  */
1107 static void intel_eth_pci_remove(struct pci_dev *pdev)
1108 {
1109 	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
1110 	struct stmmac_priv *priv = netdev_priv(ndev);
1111 
1112 	stmmac_dvr_remove(&pdev->dev);
1113 
1114 	clk_disable_unprepare(priv->plat->stmmac_clk);
1115 	clk_unregister_fixed_rate(priv->plat->stmmac_clk);
1116 
1117 	pcim_iounmap_regions(pdev, BIT(0));
1118 }
1119 
1120 static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
1121 {
1122 	struct pci_dev *pdev = to_pci_dev(dev);
1123 	int ret;
1124 
1125 	ret = stmmac_suspend(dev);
1126 	if (ret)
1127 		return ret;
1128 
1129 	ret = pci_save_state(pdev);
1130 	if (ret)
1131 		return ret;
1132 
1133 	pci_wake_from_d3(pdev, true);
1134 	pci_set_power_state(pdev, PCI_D3hot);
1135 	return 0;
1136 }
1137 
1138 static int __maybe_unused intel_eth_pci_resume(struct device *dev)
1139 {
1140 	struct pci_dev *pdev = to_pci_dev(dev);
1141 	int ret;
1142 
1143 	pci_restore_state(pdev);
1144 	pci_set_power_state(pdev, PCI_D0);
1145 
1146 	ret = pcim_enable_device(pdev);
1147 	if (ret)
1148 		return ret;
1149 
1150 	pci_set_master(pdev);
1151 
1152 	return stmmac_resume(dev);
1153 }
1154 
1155 static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
1156 			 intel_eth_pci_resume);
1157 
1158 #define PCI_DEVICE_ID_INTEL_QUARK		0x0937
1159 #define PCI_DEVICE_ID_INTEL_EHL_RGMII1G		0x4b30
1160 #define PCI_DEVICE_ID_INTEL_EHL_SGMII1G		0x4b31
1161 #define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5	0x4b32
1162 /* Intel(R) Programmable Services Engine (Intel(R) PSE) consists of 2 MACs,
1163  * which are named PSE0 and PSE1
1164  */
1165 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G	0x4ba0
1166 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G	0x4ba1
1167 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5	0x4ba2
1168 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G	0x4bb0
1169 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G	0x4bb1
1170 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5	0x4bb2
1171 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0	0x43ac
1172 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1	0x43a2
1173 #define PCI_DEVICE_ID_INTEL_TGL_SGMII1G		0xa0ac
1174 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0	0x7aac
1175 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1	0x7aad
1176 
1177 static const struct pci_device_id intel_eth_pci_id_table[] = {
1178 	{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
1179 	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
1180 	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
1181 	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
1182 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
1183 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
1184 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
1185 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
1186 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
1187 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
1188 	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
1189 	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
1190 	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
1191 	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
1192 	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
1193 	{}
1194 };
1195 MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
1196 
1197 static struct pci_driver intel_eth_pci_driver = {
1198 	.name = "intel-eth-pci",
1199 	.id_table = intel_eth_pci_id_table,
1200 	.probe = intel_eth_pci_probe,
1201 	.remove = intel_eth_pci_remove,
1202 	.driver         = {
1203 		.pm     = &intel_eth_pm_ops,
1204 	},
1205 };
1206 
1207 module_pci_driver(intel_eth_pci_driver);
1208 
1209 MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
1210 MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
1211 MODULE_LICENSE("GPL v2");
1212