// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Combo-PHY driver
 *
 * Copyright (C) 2019-2020 Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <dt-bindings/phy/phy.h>

#define PCIE_PHY_GEN_CTRL	0x00
#define PCIE_PHY_CLK_PAD	BIT(17)

#define PAD_DIS_CFG		0x174

#define PCS_XF_ATE_OVRD_IN_2	0x3008
#define ADAPT_REQ_MSK		GENMASK(5, 4)

#define PCS_XF_RX_ADAPT_ACK	0x3010
#define RX_ADAPT_ACK_BIT	BIT(0)

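/*
 * Lane registers in the core (CR) space are laid out with a 0x100 stride
 * per lane and are word indexed, hence the <<2 in CR_ADDR().
 */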
#define CR_ADDR(addr, lane)	(((addr) + (lane) * 0x100) << 2)
#define REG_COMBO_MODE(x)	((x) * 0x200)
#define REG_CLK_DISABLE(x)	((x) * 0x200 + 0x124)

#define COMBO_PHY_ID(x)		((x)->parent->id)
#define PHY_ID(x)		((x)->id)

#define CLK_100MHZ		100000000
#define CLK_156_25MHZ		156250000

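/*
 * Core clock rate for each PHY mode, indexed by enum intel_phy_mode:
 * 100 MHz for PCIe and SATA, 156.25 MHz for XPCS.
 */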
static const unsigned long intel_iphy_clk_rates[] = {
	CLK_100MHZ, CLK_156_25MHZ, CLK_100MHZ,
};

enum {
	PHY_0,
	PHY_1,
	PHY_MAX_NUM
};

/*
 * Clock Register bit fields to enable clocks
 * for ComboPhy according to the mode.
 */
enum intel_phy_mode {
	PHY_PCIE_MODE = 0,
	PHY_XPCS_MODE,
	PHY_SATA_MODE,
};

/* ComboPhy mode Register values */
enum intel_combo_mode {
	PCIE0_PCIE1_MODE = 0,
	PCIE_DL_MODE,
	RXAUI_MODE,
	XPCS0_XPCS1_MODE,
	SATA0_SATA1_MODE,
};

enum aggregated_mode {
	PHY_SL_MODE,
	PHY_DL_MODE,
};

struct intel_combo_phy;

struct intel_cbphy_iphy {
	struct phy		*phy;
	struct intel_combo_phy	*parent;
	struct reset_control	*app_rst;
	u32			id;
};

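/*
 * State for one ComboPhy block: two internal PHYs (lanes) sharing the
 * core clock, the resets, and the mode/clock-gate registers reached
 * through the syscfg and hsiocfg regmaps.
 */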
struct intel_combo_phy {
	struct device		*dev;
	struct clk		*core_clk;
	unsigned long		clk_rate;
	void __iomem		*app_base;
	void __iomem		*cr_base;
	struct regmap		*syscfg;
	struct regmap		*hsiocfg;
	u32			id;
	u32			bid;
	struct reset_control	*phy_rst;
	struct reset_control	*core_rst;
	struct intel_cbphy_iphy	iphy[PHY_MAX_NUM];
	enum intel_phy_mode	phy_mode;
	enum aggregated_mode	aggr_mode;
	u32			init_cnt;
	struct mutex		lock;
};

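/*
 * Gate or ungate the clock for one internal PHY in the HSIO block; the
 * bit position is derived from the PHY mode and the lane id.
 */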
static int intel_cbphy_iphy_enable(struct intel_cbphy_iphy *iphy, bool set)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
	u32 val;

	/* Register: 0 is enable, 1 is disable */
	val = set ? 0 : mask;

	return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
				  mask, val);
}

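/*
 * Enable or disable the PCIe reference-clock pad for this lane through
 * the syscfg regmap.
 */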
static int intel_cbphy_pcie_refclk_cfg(struct intel_cbphy_iphy *iphy, bool set)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	u32 mask = BIT(cbphy->id * 2 + iphy->id);
	u32 val;

	/* Register: 0 is enable, 1 is disable */
	val = set ? 0 : mask;

	return regmap_update_bits(cbphy->syscfg, PAD_DIS_CFG, mask, val);
}

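/* Read-modify-write helper: clear @mask in @reg, then set @val. */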
static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
					  u32 mask, u32 val)
{
	u32 reg_val;

	reg_val = readl(base + reg);
	reg_val &= ~mask;
	reg_val |= val;
	writel(reg_val, base + reg);
}

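/*
 * Apply @phy_cfg to this PHY and, when the ComboPhy is aggregated into
 * dual lane mode, to the second lane as well.
 */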
static int intel_cbphy_iphy_cfg(struct intel_cbphy_iphy *iphy,
				int (*phy_cfg)(struct intel_cbphy_iphy *))
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	ret = phy_cfg(iphy);
	if (ret)
		return ret;

	if (cbphy->aggr_mode != PHY_DL_MODE)
		return 0;

	return phy_cfg(&cbphy->iphy[PHY_1]);
}

static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	ret = intel_cbphy_pcie_refclk_cfg(iphy, true);
	if (ret) {
		dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
		return ret;
	}

	if (cbphy->init_cnt)
		return 0;

	combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
			       PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0));

	/* Delay for stable clock PLL */
	usleep_range(50, 100);

	return 0;
}

static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	ret = intel_cbphy_pcie_refclk_cfg(iphy, false);
	if (ret) {
		dev_err(cbphy->dev, "Failed to disable PCIe pad refclk\n");
		return ret;
	}

	if (cbphy->init_cnt)
		return 0;

	combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
			       PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1));

	return 0;
}

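/*
 * Program the combo mode register from the (phy_mode, aggr_mode) pair;
 * SATA has no dual lane variant.
 */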
static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
{
	enum intel_combo_mode cb_mode;
	enum aggregated_mode aggr = cbphy->aggr_mode;
	struct device *dev = cbphy->dev;
	enum intel_phy_mode mode;
	int ret;

	mode = cbphy->phy_mode;

	switch (mode) {
	case PHY_PCIE_MODE:
		cb_mode = (aggr == PHY_DL_MODE) ? PCIE_DL_MODE : PCIE0_PCIE1_MODE;
		break;

	case PHY_XPCS_MODE:
		cb_mode = (aggr == PHY_DL_MODE) ? RXAUI_MODE : XPCS0_XPCS1_MODE;
		break;

	case PHY_SATA_MODE:
		if (aggr == PHY_DL_MODE) {
			dev_err(dev, "Mode %u does not support dual lane!\n", mode);
			return -EINVAL;
		}

		cb_mode = SATA0_SATA1_MODE;
		break;
	default:
		return -EINVAL;
	}

	ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
	if (ret)
		dev_err(dev, "Failed to set ComboPhy mode: %d\n", ret);

	return ret;
}

static void intel_cbphy_rst_assert(struct intel_combo_phy *cbphy)
{
	reset_control_assert(cbphy->core_rst);
	reset_control_assert(cbphy->phy_rst);
}

static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
{
	reset_control_deassert(cbphy->core_rst);
	reset_control_deassert(cbphy->phy_rst);
	/* Delay to ensure reset process is done */
	usleep_range(10, 20);
}

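/*
 * Power on one internal PHY. The first caller (init_cnt == 0) also brings
 * up the shared state: core clock, resets and the combo mode register.
 */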
static int intel_cbphy_iphy_power_on(struct intel_cbphy_iphy *iphy)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	if (!cbphy->init_cnt) {
		ret = clk_prepare_enable(cbphy->core_clk);
		if (ret) {
			dev_err(cbphy->dev, "Clock enable failed!\n");
			return ret;
		}

		ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
		if (ret) {
			dev_err(cbphy->dev, "Failed to set clock rate to %lu!\n",
				cbphy->clk_rate);
			goto clk_err;
		}

		intel_cbphy_rst_assert(cbphy);
		intel_cbphy_rst_deassert(cbphy);
		ret = intel_cbphy_set_mode(cbphy);
		if (ret)
			goto clk_err;
	}

	ret = intel_cbphy_iphy_enable(iphy, true);
	if (ret) {
		dev_err(cbphy->dev, "Failed enabling PHY core\n");
		goto clk_err;
	}

	ret = reset_control_deassert(iphy->app_rst);
	if (ret) {
		dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
			COMBO_PHY_ID(iphy), PHY_ID(iphy));
		goto clk_err;
	}

	/* Delay to ensure reset process is done */
	udelay(1);

	return 0;

clk_err:
	clk_disable_unprepare(cbphy->core_clk);

	return ret;
}

static int intel_cbphy_iphy_power_off(struct intel_cbphy_iphy *iphy)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	ret = reset_control_assert(iphy->app_rst);
	if (ret) {
		dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
			COMBO_PHY_ID(iphy), PHY_ID(iphy));
		return ret;
	}

	ret = intel_cbphy_iphy_enable(iphy, false);
	if (ret) {
		dev_err(cbphy->dev, "Failed disabling PHY core\n");
		return ret;
	}

	if (cbphy->init_cnt)
		return 0;

	clk_disable_unprepare(cbphy->core_clk);
	intel_cbphy_rst_assert(cbphy);

	return 0;
}

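/*
 * init/exit reference-count the shared ComboPhy state under cbphy->lock,
 * so the block is fully brought up only once and torn down when the last
 * PHY exits.
 */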
static int intel_cbphy_init(struct phy *phy)
{
	struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	mutex_lock(&cbphy->lock);
	ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_on);
	if (ret)
		goto err;

	if (cbphy->phy_mode == PHY_PCIE_MODE) {
		ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_en_pad_refclk);
		if (ret)
			goto err;
	}

	cbphy->init_cnt++;

err:
	mutex_unlock(&cbphy->lock);

	return ret;
}

static int intel_cbphy_exit(struct phy *phy)
{
	struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	mutex_lock(&cbphy->lock);
	cbphy->init_cnt--;
	if (cbphy->phy_mode == PHY_PCIE_MODE) {
		ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_dis_pad_refclk);
		if (ret)
			goto err;
	}

	ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_off);

err:
	mutex_unlock(&cbphy->lock);

	return ret;
}

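/*
 * RX adaptation is only defined for XPCS mode: request adaptation, poll
 * for the acknowledge bit, then clear the request again.
 */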
static int intel_cbphy_calibrate(struct phy *phy)
{
	struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
	struct intel_combo_phy *cbphy = iphy->parent;
	void __iomem *cr_base = cbphy->cr_base;
	int val, ret, id;

	if (cbphy->phy_mode != PHY_XPCS_MODE)
		return 0;

	id = PHY_ID(iphy);

	/* trigger auto RX adaptation */
	combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
			       ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3));
	/* Wait for RX adaptation to finish */
	ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
				 val, val & RX_ADAPT_ACK_BIT, 10, 5000);
	if (ret)
		dev_err(cbphy->dev, "RX Adaptation failed!\n");
	else
		dev_dbg(cbphy->dev, "RX Adaptation success!\n");

	/* Stop RX adaptation */
	combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
			       ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0));

	return ret;
}

static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
{
	struct device *dev = cbphy->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	struct fwnode_reference_args ref;
	int ret;
	u32 val;

	cbphy->core_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cbphy->core_clk))
		return dev_err_probe(dev, PTR_ERR(cbphy->core_clk),
				     "Get clk failed!\n");

	cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
	if (IS_ERR(cbphy->core_rst))
		return dev_err_probe(dev, PTR_ERR(cbphy->core_rst),
				     "Get core reset control err!\n");

	cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
	if (IS_ERR(cbphy->phy_rst))
		return dev_err_probe(dev, PTR_ERR(cbphy->phy_rst),
				     "Get PHY reset control err!\n");

	cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
	if (IS_ERR(cbphy->iphy[0].app_rst))
		return dev_err_probe(dev, PTR_ERR(cbphy->iphy[0].app_rst),
				     "Get phy0 reset control err!\n");

	cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
	if (IS_ERR(cbphy->iphy[1].app_rst))
		return dev_err_probe(dev, PTR_ERR(cbphy->iphy[1].app_rst),
				     "Get phy1 reset control err!\n");

	cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
	if (IS_ERR(cbphy->app_base))
		return PTR_ERR(cbphy->app_base);

	cbphy->cr_base = devm_platform_ioremap_resource_byname(pdev, "core");
	if (IS_ERR(cbphy->cr_base))
		return PTR_ERR(cbphy->cr_base);

	/*
	 * syscfg and hsiocfg hold regmap handles to the register blocks in
	 * which the ComboPhy subsystem registers are a subset; the regmap
	 * framework is used to access them.
	 */
	ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
						 1, 0, &ref);
	if (ret < 0)
		return ret;

	cbphy->id = ref.args[0];
	cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
	fwnode_handle_put(ref.fwnode);

	ret = fwnode_property_get_reference_args(fwnode, "intel,hsio", NULL, 1,
						 0, &ref);
	if (ret < 0)
		return ret;

	cbphy->bid = ref.args[0];
	cbphy->hsiocfg = device_node_to_regmap(to_of_node(ref.fwnode));
	fwnode_handle_put(ref.fwnode);

	ret = fwnode_property_read_u32_array(fwnode, "intel,phy-mode", &val, 1);
	if (ret)
		return ret;

	switch (val) {
	case PHY_TYPE_PCIE:
		cbphy->phy_mode = PHY_PCIE_MODE;
		break;

	case PHY_TYPE_SATA:
		cbphy->phy_mode = PHY_SATA_MODE;
		break;

	case PHY_TYPE_XPCS:
		cbphy->phy_mode = PHY_XPCS_MODE;
		break;

	default:
		dev_err(dev, "Invalid PHY mode: %u\n", val);
		return -EINVAL;
	}

	cbphy->clk_rate = intel_iphy_clk_rates[cbphy->phy_mode];

	if (fwnode_property_present(fwnode, "intel,aggregation"))
		cbphy->aggr_mode = PHY_DL_MODE;
	else
		cbphy->aggr_mode = PHY_SL_MODE;

	return 0;
}

static const struct phy_ops intel_cbphy_ops = {
	.init		= intel_cbphy_init,
	.exit		= intel_cbphy_exit,
	.calibrate	= intel_cbphy_calibrate,
	.owner		= THIS_MODULE,
};

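/*
 * Translate a consumer's of_phandle_args into a PHY instance: args[0]
 * selects the internal PHY, and PHY_1 is rejected while the ComboPhy is
 * aggregated into dual lane mode. A hypothetical consumer reference (the
 * &combophy0 label is illustrative, assuming #phy-cells = <1> as this
 * xlate implies):
 *
 *	phys = <&combophy0 0>;
 */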
static struct phy *intel_cbphy_xlate(struct device *dev,
				     struct of_phandle_args *args)
{
	struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
	u32 iphy_id;

	if (args->args_count < 1) {
		dev_err(dev, "Invalid number of arguments\n");
		return ERR_PTR(-EINVAL);
	}

	iphy_id = args->args[0];
	if (iphy_id >= PHY_MAX_NUM) {
		dev_err(dev, "Invalid phy instance %u\n", iphy_id);
		return ERR_PTR(-EINVAL);
	}

	if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1) {
		dev_err(dev, "Invalid PHY instance %u: ComboPhy is in dual lane mode\n",
			iphy_id);
		return ERR_PTR(-EINVAL);
	}

	return cbphy->iphy[iphy_id].phy;
}

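/*
 * Create and register the PHY instances and the provider; in dual lane
 * mode only PHY_0 is exposed.
 */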
static int intel_cbphy_create(struct intel_combo_phy *cbphy)
{
	struct phy_provider *phy_provider;
	struct device *dev = cbphy->dev;
	struct intel_cbphy_iphy *iphy;
	int i;

	for (i = 0; i < PHY_MAX_NUM; i++) {
		iphy = &cbphy->iphy[i];
		iphy->parent = cbphy;
		iphy->id = i;

		/* In dual lane mode skip phy creation for the second phy */
		if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
			continue;

		iphy->phy = devm_phy_create(dev, NULL, &intel_cbphy_ops);
		if (IS_ERR(iphy->phy)) {
			dev_err(dev, "PHY[%u:%u]: create PHY instance failed!\n",
				COMBO_PHY_ID(iphy), PHY_ID(iphy));

			return PTR_ERR(iphy->phy);
		}

		phy_set_drvdata(iphy->phy, iphy);
	}

	dev_set_drvdata(dev, cbphy);
	phy_provider = devm_of_phy_provider_register(dev, intel_cbphy_xlate);
	if (IS_ERR(phy_provider))
		dev_err(dev, "Register PHY provider failed!\n");

	return PTR_ERR_OR_ZERO(phy_provider);
}

static int intel_cbphy_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct intel_combo_phy *cbphy;
	int ret;

	cbphy = devm_kzalloc(dev, sizeof(*cbphy), GFP_KERNEL);
	if (!cbphy)
		return -ENOMEM;

	cbphy->dev = dev;
	cbphy->init_cnt = 0;
	mutex_init(&cbphy->lock);
	ret = intel_cbphy_fwnode_parse(cbphy);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, cbphy);

	return intel_cbphy_create(cbphy);
}

static void intel_cbphy_remove(struct platform_device *pdev)
{
	struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);

	intel_cbphy_rst_assert(cbphy);
	clk_disable_unprepare(cbphy->core_clk);
}

static const struct of_device_id of_intel_cbphy_match[] = {
	{ .compatible = "intel,combo-phy" },
	{ .compatible = "intel,combophy-lgm" },
	{}
};

static struct platform_driver intel_cbphy_driver = {
	.probe = intel_cbphy_probe,
	.remove_new = intel_cbphy_remove,
	.driver = {
		.name = "intel-combo-phy",
		.of_match_table = of_intel_cbphy_match,
	}
};

module_platform_driver(intel_cbphy_driver);

MODULE_DESCRIPTION("Intel Combo-phy driver");
MODULE_LICENSE("GPL v2");