1 /*
2 * phy-core.c -- Generic Phy framework.
3 *
4 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14 #include <linux/kernel.h>
15 #include <linux/export.h>
16 #include <linux/module.h>
17 #include <linux/err.h>
18 #include <linux/device.h>
19 #include <linux/slab.h>
20 #include <linux/of.h>
21 #include <linux/phy/phy.h>
22 #include <linux/idr.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/regulator/consumer.h>
25
26 static struct class *phy_class;
27 static DEFINE_MUTEX(phy_provider_mutex);
28 static LIST_HEAD(phy_provider_list);
29 static DEFINE_IDA(phy_ida);
30
/* devres release callback: drop the reference taken by devm_phy_get(). */
static void devm_phy_release(struct device *dev, void *res)
{
	phy_put(*(struct phy **)res);
}
37
/* devres release callback: unregister a devm-registered phy provider. */
static void devm_phy_provider_release(struct device *dev, void *res)
{
	of_phy_provider_unregister(*(struct phy_provider **)res);
}
44
/* devres release callback: destroy a phy created via devm_phy_create(). */
static void devm_phy_consume(struct device *dev, void *res)
{
	phy_destroy(*(struct phy **)res);
}
51
/* devres match callback: true when the stored phy equals @match_data. */
static int devm_phy_match(struct device *dev, void *res, void *match_data)
{
	return *(struct phy **)res == match_data;
}
58
phy_lookup(struct device * device,const char * port)59 static struct phy *phy_lookup(struct device *device, const char *port)
60 {
61 unsigned int count;
62 struct phy *phy;
63 struct device *dev;
64 struct phy_consumer *consumers;
65 struct class_dev_iter iter;
66
67 class_dev_iter_init(&iter, phy_class, NULL, NULL);
68 while ((dev = class_dev_iter_next(&iter))) {
69 phy = to_phy(dev);
70
71 if (!phy->init_data)
72 continue;
73 count = phy->init_data->num_consumers;
74 consumers = phy->init_data->consumers;
75 while (count--) {
76 if (!strcmp(consumers->dev_name, dev_name(device)) &&
77 !strcmp(consumers->port, port)) {
78 class_dev_iter_exit(&iter);
79 return phy;
80 }
81 consumers++;
82 }
83 }
84
85 class_dev_iter_exit(&iter);
86 return ERR_PTR(-ENODEV);
87 }
88
/*
 * of_phy_provider_lookup() - find the registered provider that owns @node.
 * @node: the OF node a consumer's "phys" phandle points at
 *
 * Matches @node against each provider's own of_node, and also against the
 * provider's direct children (for providers that describe each phy in a
 * sub-node).  Callers hold phy_provider_mutex (see _of_phy_get()).
 *
 * Returns the matching provider, or ERR_PTR(-EPROBE_DEFER) so the consumer
 * retries after the provider has probed.
 */
static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
{
	struct phy_provider *phy_provider;
	struct device_node *child;

	list_for_each_entry(phy_provider, &phy_provider_list, list) {
		if (phy_provider->dev->of_node == node)
			return phy_provider;

		/*
		 * NOTE(review): returning from inside for_each_child_of_node()
		 * leaves the iterator's reference on @child held — confirm
		 * whether an of_node_put() is intended before the return.
		 */
		for_each_child_of_node(phy_provider->dev->of_node, child)
			if (child == node)
				return phy_provider;
	}

	return ERR_PTR(-EPROBE_DEFER);
}
105
phy_pm_runtime_get(struct phy * phy)106 int phy_pm_runtime_get(struct phy *phy)
107 {
108 int ret;
109
110 if (!pm_runtime_enabled(&phy->dev))
111 return -ENOTSUPP;
112
113 ret = pm_runtime_get(&phy->dev);
114 if (ret < 0 && ret != -EINPROGRESS)
115 pm_runtime_put_noidle(&phy->dev);
116
117 return ret;
118 }
119 EXPORT_SYMBOL_GPL(phy_pm_runtime_get);
120
phy_pm_runtime_get_sync(struct phy * phy)121 int phy_pm_runtime_get_sync(struct phy *phy)
122 {
123 int ret;
124
125 if (!pm_runtime_enabled(&phy->dev))
126 return -ENOTSUPP;
127
128 ret = pm_runtime_get_sync(&phy->dev);
129 if (ret < 0)
130 pm_runtime_put_sync(&phy->dev);
131
132 return ret;
133 }
134 EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);
135
phy_pm_runtime_put(struct phy * phy)136 int phy_pm_runtime_put(struct phy *phy)
137 {
138 if (!pm_runtime_enabled(&phy->dev))
139 return -ENOTSUPP;
140
141 return pm_runtime_put(&phy->dev);
142 }
143 EXPORT_SYMBOL_GPL(phy_pm_runtime_put);
144
phy_pm_runtime_put_sync(struct phy * phy)145 int phy_pm_runtime_put_sync(struct phy *phy)
146 {
147 if (!pm_runtime_enabled(&phy->dev))
148 return -ENOTSUPP;
149
150 return pm_runtime_put_sync(&phy->dev);
151 }
152 EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);
153
phy_pm_runtime_allow(struct phy * phy)154 void phy_pm_runtime_allow(struct phy *phy)
155 {
156 if (!pm_runtime_enabled(&phy->dev))
157 return;
158
159 pm_runtime_allow(&phy->dev);
160 }
161 EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);
162
phy_pm_runtime_forbid(struct phy * phy)163 void phy_pm_runtime_forbid(struct phy *phy)
164 {
165 if (!pm_runtime_enabled(&phy->dev))
166 return;
167
168 pm_runtime_forbid(&phy->dev);
169 }
170 EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
171
/**
 * phy_init() - initialize the phy
 * @phy: the phy to be initialized (NULL is a valid no-op)
 *
 * Takes a runtime PM reference around the ->init() callback and keeps a
 * per-phy init_count so that only the first of several consumers actually
 * runs ->init().  Returns 0 on success or a negative error code.
 */
int phy_init(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	if (phy->init_count == 0 && phy->ops->init) {
		ret = phy->ops->init(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy init failed --> %d\n", ret);
			/* init_count stays 0 so a retry runs ->init() again */
			goto out;
		}
	}
	++phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_init);
200
/**
 * phy_exit() - de-initialize the phy
 * @phy: the phy to be de-initialized (NULL is a valid no-op)
 *
 * Counterpart of phy_init(): ->exit() only runs when the last initialized
 * consumer (init_count == 1) goes away.  Returns 0 on success or the
 * negative error code from ->exit().
 */
int phy_exit(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	if (phy->init_count == 1 && phy->ops->exit) {
		ret = phy->ops->exit(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
			/* init_count is left untouched on failure */
			goto out;
		}
	}
	--phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_exit);
229
/**
 * phy_power_on() - power on the phy
 * @phy: the phy to be powered on (NULL is a valid no-op)
 *
 * Enables the optional "phy" supply, takes a runtime PM reference and,
 * for the first consumer only (power_count == 0), invokes ->power_on().
 * Each failure path unwinds exactly what was acquired before it.
 * Returns 0 on success or a negative error code.
 */
int phy_power_on(struct phy *phy)
{
	int ret = 0;

	if (!phy)
		goto out;

	if (phy->pwr) {
		ret = regulator_enable(phy->pwr);
		if (ret)
			goto out;
	}

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		goto err_pm_sync;

	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	if (phy->power_count == 0 && phy->ops->power_on) {
		ret = phy->ops->power_on(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
			goto err_pwr_on;
		}
	}
	++phy->power_count;
	mutex_unlock(&phy->mutex);
	return 0;

err_pwr_on:
	/* drop the PM reference and regulator taken above */
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put_sync(phy);
err_pm_sync:
	if (phy->pwr)
		regulator_disable(phy->pwr);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);
271
/**
 * phy_power_off() - power off the phy
 * @phy: the phy to be powered off (NULL is a valid no-op)
 *
 * Counterpart of phy_power_on(): ->power_off() only runs when the last
 * consumer (power_count == 1) goes away; the runtime PM reference and
 * the optional "phy" supply taken in phy_power_on() are then released.
 * Returns 0 on success or the ->power_off() error code.
 */
int phy_power_off(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	mutex_lock(&phy->mutex);
	if (phy->power_count == 1 && phy->ops->power_off) {
		ret =  phy->ops->power_off(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
			/* still powered: keep PM ref and regulator enabled */
			mutex_unlock(&phy->mutex);
			return ret;
		}
	}
	--phy->power_count;
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);

	if (phy->pwr)
		regulator_disable(phy->pwr);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);
298
299 /**
300 * _of_phy_get() - lookup and obtain a reference to a phy by phandle
301 * @np: device_node for which to get the phy
302 * @index: the index of the phy
303 *
304 * Returns the phy associated with the given phandle value,
305 * after getting a refcount to it or -ENODEV if there is no such phy or
306 * -EPROBE_DEFER if there is a phandle to the phy, but the device is
307 * not yet loaded. This function uses of_xlate call back function provided
308 * while registering the phy_provider to find the phy instance.
309 */
_of_phy_get(struct device_node * np,int index)310 static struct phy *_of_phy_get(struct device_node *np, int index)
311 {
312 int ret;
313 struct phy_provider *phy_provider;
314 struct phy *phy = NULL;
315 struct of_phandle_args args;
316
317 ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
318 index, &args);
319 if (ret)
320 return ERR_PTR(-ENODEV);
321
322 /* This phy type handled by the usb-phy subsystem for now */
323 if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
324 return ERR_PTR(-ENODEV);
325
326 mutex_lock(&phy_provider_mutex);
327 phy_provider = of_phy_provider_lookup(args.np);
328 if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
329 phy = ERR_PTR(-EPROBE_DEFER);
330 goto err0;
331 }
332
333 phy = phy_provider->of_xlate(phy_provider->dev, &args);
334 module_put(phy_provider->owner);
335
336 err0:
337 mutex_unlock(&phy_provider_mutex);
338 of_node_put(args.np);
339
340 return phy;
341 }
342
343 /**
344 * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
345 * @np: device_node for which to get the phy
346 * @con_id: name of the phy from device's point of view
347 *
348 * Returns the phy driver, after getting a refcount to it; or
349 * -ENODEV if there is no such phy. The caller is responsible for
350 * calling phy_put() to release that count.
351 */
of_phy_get(struct device_node * np,const char * con_id)352 struct phy *of_phy_get(struct device_node *np, const char *con_id)
353 {
354 struct phy *phy = NULL;
355 int index = 0;
356
357 if (con_id)
358 index = of_property_match_string(np, "phy-names", con_id);
359
360 phy = _of_phy_get(np, index);
361 if (IS_ERR(phy))
362 return phy;
363
364 if (!try_module_get(phy->ops->owner))
365 return ERR_PTR(-EPROBE_DEFER);
366
367 get_device(&phy->dev);
368
369 return phy;
370 }
371 EXPORT_SYMBOL_GPL(of_phy_get);
372
373 /**
374 * phy_put() - release the PHY
375 * @phy: the phy returned by phy_get()
376 *
377 * Releases a refcount the caller received from phy_get().
378 */
phy_put(struct phy * phy)379 void phy_put(struct phy *phy)
380 {
381 if (!phy || IS_ERR(phy))
382 return;
383
384 module_put(phy->ops->owner);
385 put_device(&phy->dev);
386 }
387 EXPORT_SYMBOL_GPL(phy_put);
388
389 /**
390 * devm_phy_put() - release the PHY
391 * @dev: device that wants to release this phy
392 * @phy: the phy returned by devm_phy_get()
393 *
394 * destroys the devres associated with this phy and invokes phy_put
395 * to release the phy.
396 */
devm_phy_put(struct device * dev,struct phy * phy)397 void devm_phy_put(struct device *dev, struct phy *phy)
398 {
399 int r;
400
401 if (!phy)
402 return;
403
404 r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
405 dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
406 }
407 EXPORT_SYMBOL_GPL(devm_phy_put);
408
409 /**
410 * of_phy_simple_xlate() - returns the phy instance from phy provider
411 * @dev: the PHY provider device
412 * @args: of_phandle_args (not used here)
413 *
414 * Intended to be used by phy provider for the common case where #phy-cells is
415 * 0. For other cases where #phy-cells is greater than '0', the phy provider
416 * should provide a custom of_xlate function that reads the *args* and returns
417 * the appropriate phy.
418 */
of_phy_simple_xlate(struct device * dev,struct of_phandle_args * args)419 struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
420 *args)
421 {
422 struct phy *phy;
423 struct class_dev_iter iter;
424 struct device_node *node = dev->of_node;
425 struct device_node *child;
426
427 class_dev_iter_init(&iter, phy_class, NULL, NULL);
428 while ((dev = class_dev_iter_next(&iter))) {
429 phy = to_phy(dev);
430 if (node != phy->dev.of_node) {
431 for_each_child_of_node(node, child) {
432 if (child == phy->dev.of_node)
433 goto phy_found;
434 }
435 continue;
436 }
437
438 phy_found:
439 class_dev_iter_exit(&iter);
440 return phy;
441 }
442
443 class_dev_iter_exit(&iter);
444 return ERR_PTR(-ENODEV);
445 }
446 EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
447
448 /**
449 * phy_get() - lookup and obtain a reference to a phy.
450 * @dev: device that requests this phy
451 * @string: the phy name as given in the dt data or the name of the controller
452 * port for non-dt case
453 *
454 * Returns the phy driver, after getting a refcount to it; or
455 * -ENODEV if there is no such phy. The caller is responsible for
456 * calling phy_put() to release that count.
457 */
phy_get(struct device * dev,const char * string)458 struct phy *phy_get(struct device *dev, const char *string)
459 {
460 int index = 0;
461 struct phy *phy;
462
463 if (string == NULL) {
464 dev_WARN(dev, "missing string\n");
465 return ERR_PTR(-EINVAL);
466 }
467
468 if (dev->of_node) {
469 index = of_property_match_string(dev->of_node, "phy-names",
470 string);
471 phy = _of_phy_get(dev->of_node, index);
472 } else {
473 phy = phy_lookup(dev, string);
474 }
475 if (IS_ERR(phy))
476 return phy;
477
478 if (!try_module_get(phy->ops->owner))
479 return ERR_PTR(-EPROBE_DEFER);
480
481 get_device(&phy->dev);
482
483 return phy;
484 }
485 EXPORT_SYMBOL_GPL(phy_get);
486
487 /**
488 * phy_optional_get() - lookup and obtain a reference to an optional phy.
489 * @dev: device that requests this phy
490 * @string: the phy name as given in the dt data or the name of the controller
491 * port for non-dt case
492 *
493 * Returns the phy driver, after getting a refcount to it; or
494 * NULL if there is no such phy. The caller is responsible for
495 * calling phy_put() to release that count.
496 */
phy_optional_get(struct device * dev,const char * string)497 struct phy *phy_optional_get(struct device *dev, const char *string)
498 {
499 struct phy *phy = phy_get(dev, string);
500
501 if (PTR_ERR(phy) == -ENODEV)
502 phy = NULL;
503
504 return phy;
505 }
506 EXPORT_SYMBOL_GPL(phy_optional_get);
507
508 /**
509 * devm_phy_get() - lookup and obtain a reference to a phy.
510 * @dev: device that requests this phy
511 * @string: the phy name as given in the dt data or phy device name
512 * for non-dt case
513 *
514 * Gets the phy using phy_get(), and associates a device with it using
515 * devres. On driver detach, release function is invoked on the devres data,
516 * then, devres data is freed.
517 */
devm_phy_get(struct device * dev,const char * string)518 struct phy *devm_phy_get(struct device *dev, const char *string)
519 {
520 struct phy **ptr, *phy;
521
522 ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
523 if (!ptr)
524 return ERR_PTR(-ENOMEM);
525
526 phy = phy_get(dev, string);
527 if (!IS_ERR(phy)) {
528 *ptr = phy;
529 devres_add(dev, ptr);
530 } else {
531 devres_free(ptr);
532 }
533
534 return phy;
535 }
536 EXPORT_SYMBOL_GPL(devm_phy_get);
537
538 /**
539 * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
540 * @dev: device that requests this phy
541 * @string: the phy name as given in the dt data or phy device name
542 * for non-dt case
543 *
544 * Gets the phy using phy_get(), and associates a device with it using
545 * devres. On driver detach, release function is invoked on the devres
546 * data, then, devres data is freed. This differs to devm_phy_get() in
547 * that if the phy does not exist, it is not considered an error and
548 * -ENODEV will not be returned. Instead the NULL phy is returned,
549 * which can be passed to all other phy consumer calls.
550 */
devm_phy_optional_get(struct device * dev,const char * string)551 struct phy *devm_phy_optional_get(struct device *dev, const char *string)
552 {
553 struct phy *phy = devm_phy_get(dev, string);
554
555 if (PTR_ERR(phy) == -ENODEV)
556 phy = NULL;
557
558 return phy;
559 }
560 EXPORT_SYMBOL_GPL(devm_phy_optional_get);
561
562 /**
563 * devm_of_phy_get() - lookup and obtain a reference to a phy.
564 * @dev: device that requests this phy
565 * @np: node containing the phy
566 * @con_id: name of the phy from device's point of view
567 *
568 * Gets the phy using of_phy_get(), and associates a device with it using
569 * devres. On driver detach, release function is invoked on the devres data,
570 * then, devres data is freed.
571 */
devm_of_phy_get(struct device * dev,struct device_node * np,const char * con_id)572 struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
573 const char *con_id)
574 {
575 struct phy **ptr, *phy;
576
577 ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
578 if (!ptr)
579 return ERR_PTR(-ENOMEM);
580
581 phy = of_phy_get(np, con_id);
582 if (!IS_ERR(phy)) {
583 *ptr = phy;
584 devres_add(dev, ptr);
585 } else {
586 devres_free(ptr);
587 }
588
589 return phy;
590 }
591 EXPORT_SYMBOL_GPL(devm_of_phy_get);
592
593 /**
594 * phy_create() - create a new phy
595 * @dev: device that is creating the new phy
596 * @node: device node of the phy
597 * @ops: function pointers for performing phy operations
598 * @init_data: contains the list of PHY consumers or NULL
599 *
600 * Called to create a phy using phy framework.
601 */
struct phy *phy_create(struct device *dev, struct device_node *node,
		       const struct phy_ops *ops,
		       struct phy_init_data *init_data)
{
	int ret;
	int id;
	struct phy *phy;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	phy = kzalloc(sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return ERR_PTR(-ENOMEM);

	id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "unable to get id\n");
		ret = id;
		goto free_phy;
	}

	/* phy-supply: optional; only -EPROBE_DEFER is treated as fatal */
	phy->pwr = regulator_get_optional(dev, "phy");
	if (IS_ERR(phy->pwr)) {
		if (PTR_ERR(phy->pwr) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto free_ida;
		}
		phy->pwr = NULL;
	}

	device_initialize(&phy->dev);
	mutex_init(&phy->mutex);

	phy->dev.class = phy_class;
	phy->dev.parent = dev;
	/* fall back to the provider's node when no phy node was given */
	phy->dev.of_node = node ?: dev->of_node;
	phy->id = id;
	phy->ops = ops;
	phy->init_data = init_data;

	ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
	if (ret)
		goto put_dev;

	ret = device_add(&phy->dev);
	if (ret)
		goto put_dev;

	/* runtime PM on the phy mirrors the provider's setting */
	if (pm_runtime_enabled(dev)) {
		pm_runtime_enable(&phy->dev);
		pm_runtime_no_callbacks(&phy->dev);
	}

	return phy;

	/*
	 * Two unwind regimes: once device_initialize() has run, the embedded
	 * struct device owns the resources and put_device() -> phy_release()
	 * does the regulator_put(), ida_simple_remove() and kfree().  Before
	 * that point they must be undone by hand (free_ida/free_phy).
	 */
put_dev:
	put_device(&phy->dev);	/* calls phy_release() which frees resources */
	return ERR_PTR(ret);

free_ida:
	ida_simple_remove(&phy_ida, phy->id);

free_phy:
	kfree(phy);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);
671
672 /**
673 * devm_phy_create() - create a new phy
674 * @dev: device that is creating the new phy
675 * @node: device node of the phy
676 * @ops: function pointers for performing phy operations
677 * @init_data: contains the list of PHY consumers or NULL
678 *
679 * Creates a new PHY device adding it to the PHY class.
680 * While at that, it also associates the device with the phy using devres.
681 * On driver detach, release function is invoked on the devres data,
682 * then, devres data is freed.
683 */
devm_phy_create(struct device * dev,struct device_node * node,const struct phy_ops * ops,struct phy_init_data * init_data)684 struct phy *devm_phy_create(struct device *dev, struct device_node *node,
685 const struct phy_ops *ops,
686 struct phy_init_data *init_data)
687 {
688 struct phy **ptr, *phy;
689
690 ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
691 if (!ptr)
692 return ERR_PTR(-ENOMEM);
693
694 phy = phy_create(dev, node, ops, init_data);
695 if (!IS_ERR(phy)) {
696 *ptr = phy;
697 devres_add(dev, ptr);
698 } else {
699 devres_free(ptr);
700 }
701
702 return phy;
703 }
704 EXPORT_SYMBOL_GPL(devm_phy_create);
705
706 /**
707 * phy_destroy() - destroy the phy
708 * @phy: the phy to be destroyed
709 *
710 * Called to destroy the phy.
711 */
void phy_destroy(struct phy *phy)
{
	pm_runtime_disable(&phy->dev);
	/* the final put ends up in phy_release(), freeing regulator/id/phy */
	device_unregister(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_destroy);
718
719 /**
720 * devm_phy_destroy() - destroy the PHY
721 * @dev: device that wants to release this phy
722 * @phy: the phy returned by devm_phy_get()
723 *
724 * destroys the devres associated with this phy and invokes phy_destroy
725 * to destroy the phy.
726 */
devm_phy_destroy(struct device * dev,struct phy * phy)727 void devm_phy_destroy(struct device *dev, struct phy *phy)
728 {
729 int r;
730
731 r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
732 dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
733 }
734 EXPORT_SYMBOL_GPL(devm_phy_destroy);
735
736 /**
737 * __of_phy_provider_register() - create/register phy provider with the framework
738 * @dev: struct device of the phy provider
739 * @owner: the module owner containing of_xlate
740 * @of_xlate: function pointer to obtain phy instance from phy provider
741 *
742 * Creates struct phy_provider from dev and of_xlate function pointer.
743 * This is used in the case of dt boot for finding the phy instance from
744 * phy provider.
745 */
__of_phy_provider_register(struct device * dev,struct module * owner,struct phy * (* of_xlate)(struct device * dev,struct of_phandle_args * args))746 struct phy_provider *__of_phy_provider_register(struct device *dev,
747 struct module *owner, struct phy * (*of_xlate)(struct device *dev,
748 struct of_phandle_args *args))
749 {
750 struct phy_provider *phy_provider;
751
752 phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
753 if (!phy_provider)
754 return ERR_PTR(-ENOMEM);
755
756 phy_provider->dev = dev;
757 phy_provider->owner = owner;
758 phy_provider->of_xlate = of_xlate;
759
760 mutex_lock(&phy_provider_mutex);
761 list_add_tail(&phy_provider->list, &phy_provider_list);
762 mutex_unlock(&phy_provider_mutex);
763
764 return phy_provider;
765 }
766 EXPORT_SYMBOL_GPL(__of_phy_provider_register);
767
768 /**
769 * __devm_of_phy_provider_register() - create/register phy provider with the
770 * framework
771 * @dev: struct device of the phy provider
772 * @owner: the module owner containing of_xlate
773 * @of_xlate: function pointer to obtain phy instance from phy provider
774 *
775 * Creates struct phy_provider from dev and of_xlate function pointer.
776 * This is used in the case of dt boot for finding the phy instance from
777 * phy provider. While at that, it also associates the device with the
778 * phy provider using devres. On driver detach, release function is invoked
779 * on the devres data, then, devres data is freed.
780 */
__devm_of_phy_provider_register(struct device * dev,struct module * owner,struct phy * (* of_xlate)(struct device * dev,struct of_phandle_args * args))781 struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
782 struct module *owner, struct phy * (*of_xlate)(struct device *dev,
783 struct of_phandle_args *args))
784 {
785 struct phy_provider **ptr, *phy_provider;
786
787 ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
788 if (!ptr)
789 return ERR_PTR(-ENOMEM);
790
791 phy_provider = __of_phy_provider_register(dev, owner, of_xlate);
792 if (!IS_ERR(phy_provider)) {
793 *ptr = phy_provider;
794 devres_add(dev, ptr);
795 } else {
796 devres_free(ptr);
797 }
798
799 return phy_provider;
800 }
801 EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);
802
803 /**
804 * of_phy_provider_unregister() - unregister phy provider from the framework
805 * @phy_provider: phy provider returned by of_phy_provider_register()
806 *
807 * Removes the phy_provider created using of_phy_provider_register().
808 */
of_phy_provider_unregister(struct phy_provider * phy_provider)809 void of_phy_provider_unregister(struct phy_provider *phy_provider)
810 {
811 if (IS_ERR(phy_provider))
812 return;
813
814 mutex_lock(&phy_provider_mutex);
815 list_del(&phy_provider->list);
816 kfree(phy_provider);
817 mutex_unlock(&phy_provider_mutex);
818 }
819 EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
820
821 /**
822 * devm_of_phy_provider_unregister() - remove phy provider from the framework
823 * @dev: struct device of the phy provider
824 *
825 * destroys the devres associated with this phy provider and invokes
826 * of_phy_provider_unregister to unregister the phy provider.
827 */
devm_of_phy_provider_unregister(struct device * dev,struct phy_provider * phy_provider)828 void devm_of_phy_provider_unregister(struct device *dev,
829 struct phy_provider *phy_provider) {
830 int r;
831
832 r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
833 phy_provider);
834 dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
835 }
836 EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);
837
838 /**
839 * phy_release() - release the phy
840 * @dev: the dev member within phy
841 *
842 * When the last reference to the device is removed, it is called
843 * from the embedded kobject as release method.
844 */
static void phy_release(struct device *dev)
{
	struct phy *phy;

	phy = to_phy(dev);
	dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
	/* undo everything phy_create() acquired: regulator, id, memory */
	regulator_put(phy->pwr);
	ida_simple_remove(&phy_ida, phy->id);
	kfree(phy);
}
855
/* Module init: create the "phy" class and hook up its release callback. */
static int __init phy_core_init(void)
{
	phy_class = class_create(THIS_MODULE, "phy");
	if (IS_ERR(phy_class)) {
		pr_err("failed to create phy class --> %ld\n",
			PTR_ERR(phy_class));
		return PTR_ERR(phy_class);
	}

	/* phy_release() frees each phy when its last reference drops */
	phy_class->dev_release = phy_release;

	return 0;
}
module_init(phy_core_init);
870
/* Module exit: tear down the "phy" class created in phy_core_init(). */
static void __exit phy_core_exit(void)
{
	class_destroy(phy_class);
}
module_exit(phy_core_exit);
876
877 MODULE_DESCRIPTION("Generic PHY Framework");
878 MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
879 MODULE_LICENSE("GPL v2");
880