/*
 * phy-core.c  --  Generic Phy framework.
 *
 * Copyright (C) 2013 Texas Instruments
 *
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/idr.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>

static struct class *phy_class;
static DEFINE_MUTEX(phy_provider_mutex);
static LIST_HEAD(phy_provider_list);
static LIST_HEAD(phys);
static DEFINE_IDA(phy_ida);

static void devm_phy_release(struct device *dev, void *res)
{
	struct phy *phy = *(struct phy **)res;

	phy_put(phy);
}

static void devm_phy_provider_release(struct device *dev, void *res)
{
	struct phy_provider *phy_provider = *(struct phy_provider **)res;

	of_phy_provider_unregister(phy_provider);
}

static void devm_phy_consume(struct device *dev, void *res)
{
	struct phy *phy = *(struct phy **)res;

	phy_destroy(phy);
}

static int devm_phy_match(struct device *dev, void *res, void *match_data)
{
	return res == match_data;
}

/**
 * phy_create_lookup() - allocate and register PHY/device association
 * @phy: the phy of the association
 * @con_id: connection ID string on device
 * @dev_id: the device of the association
 *
 * Creates and registers a phy_lookup entry.
 */
int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
	struct phy_lookup *pl;

	if (!phy || !dev_id || !con_id)
		return -EINVAL;

	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	pl->dev_id = dev_id;
	pl->con_id = con_id;
	pl->phy = phy;

	mutex_lock(&phy_provider_mutex);
	list_add_tail(&pl->node, &phys);
	mutex_unlock(&phy_provider_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_create_lookup);

/**
 * phy_remove_lookup() - find and remove PHY/device association
 * @phy: the phy of the association
 * @con_id: connection ID string on device
 * @dev_id: the device of the association
 *
 * Finds and unregisters the phy_lookup entry that was created with
 * phy_create_lookup().
 */
void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
	struct phy_lookup *pl;

	if (!phy || !dev_id || !con_id)
		return;

	mutex_lock(&phy_provider_mutex);
	list_for_each_entry(pl, &phys, node)
		if (pl->phy == phy && !strcmp(pl->dev_id, dev_id) &&
		    !strcmp(pl->con_id, con_id)) {
			list_del(&pl->node);
			kfree(pl);
			break;
		}
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(phy_remove_lookup);
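
/*
 * Usage sketch (not part of this driver): on a non-DT platform, board or
 * PHY-driver code can register an association so that a later
 * phy_get(dev, "usb") from the consumer resolves to this phy. The
 * connection name "usb" and device name "musb-hdrc.0" below are made-up
 * examples; the strings must outlive the entry, since only the pointers
 * are stored in the lookup.
 *
 *	ret = phy_create_lookup(phy, "usb", "musb-hdrc.0");
 *	if (ret)
 *		return ret;
 *	...
 *	phy_remove_lookup(phy, "usb", "musb-hdrc.0");
 */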

static struct phy *phy_find(struct device *dev, const char *con_id)
{
	const char *dev_id = dev_name(dev);
	struct phy_lookup *p, *pl = NULL;

	mutex_lock(&phy_provider_mutex);
	list_for_each_entry(p, &phys, node)
		if (!strcmp(p->dev_id, dev_id) && !strcmp(p->con_id, con_id)) {
			pl = p;
			break;
		}
	mutex_unlock(&phy_provider_mutex);

	return pl ? pl->phy : ERR_PTR(-ENODEV);
}

static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
{
	struct phy_provider *phy_provider;
	struct device_node *child;

	list_for_each_entry(phy_provider, &phy_provider_list, list) {
		if (phy_provider->dev->of_node == node)
			return phy_provider;

		for_each_child_of_node(phy_provider->dev->of_node, child)
			if (child == node)
				return phy_provider;
	}

	return ERR_PTR(-EPROBE_DEFER);
}

int phy_pm_runtime_get(struct phy *phy)
{
	int ret;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	ret = pm_runtime_get(&phy->dev);
	if (ret < 0 && ret != -EINPROGRESS)
		pm_runtime_put_noidle(&phy->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get);

int phy_pm_runtime_get_sync(struct phy *phy)
{
	int ret;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	ret = pm_runtime_get_sync(&phy->dev);
	if (ret < 0)
		pm_runtime_put_sync(&phy->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);

int phy_pm_runtime_put(struct phy *phy)
{
	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	return pm_runtime_put(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put);

int phy_pm_runtime_put_sync(struct phy *phy)
{
	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	return pm_runtime_put_sync(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);

void phy_pm_runtime_allow(struct phy *phy)
{
	if (!pm_runtime_enabled(&phy->dev))
		return;

	pm_runtime_allow(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);

void phy_pm_runtime_forbid(struct phy *phy)
{
	if (!pm_runtime_enabled(&phy->dev))
		return;

	pm_runtime_forbid(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
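
/*
 * Usage sketch (not part of this driver): these helpers act on the phy
 * core device created in phy_create(). Since that device has no runtime-PM
 * callbacks of its own (see pm_runtime_no_callbacks() below), holding a
 * reference effectively keeps the parent provider active. Code holding a
 * struct phy could, for example, do:
 *
 *	ret = phy_pm_runtime_get_sync(phy);
 *	if (ret < 0 && ret != -ENOTSUPP)
 *		return ret;
 *	... touch the hardware ...
 *	phy_pm_runtime_put(phy);
 *
 * -ENOTSUPP means runtime PM was not enabled for the phy and is normally
 * treated as "nothing to do", as phy_init() and friends do.
 */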

int phy_init(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	mutex_lock(&phy->mutex);
	if (phy->init_count == 0 && phy->ops->init) {
		ret = phy->ops->init(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy init failed --> %d\n", ret);
			goto out;
		}
	} else {
		ret = 0;
	}
	++phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_init);

int phy_exit(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	mutex_lock(&phy->mutex);
	if (phy->init_count == 1 && phy->ops->exit) {
		ret = phy->ops->exit(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
			goto out;
		}
	}
	--phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_exit);

int phy_power_on(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	if (phy->pwr) {
		ret = regulator_enable(phy->pwr);
		if (ret)
			return ret;
	}

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	mutex_lock(&phy->mutex);
	if (phy->power_count == 0 && phy->ops->power_on) {
		ret = phy->ops->power_on(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
			goto out;
		}
	} else {
		ret = 0;
	}
	++phy->power_count;
	mutex_unlock(&phy->mutex);
	return 0;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put_sync(phy);
	if (phy->pwr)
		regulator_disable(phy->pwr);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);

int phy_power_off(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	mutex_lock(&phy->mutex);
	if (phy->power_count == 1 && phy->ops->power_off) {
		ret = phy->ops->power_off(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
			mutex_unlock(&phy->mutex);
			return ret;
		}
	}
	--phy->power_count;
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);

	if (phy->pwr)
		regulator_disable(phy->pwr);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);
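
/*
 * Usage sketch (not part of this driver): a typical consumer brackets use
 * of the phy with init/power_on and power_off/exit. All four functions
 * treat a NULL phy as a no-op and return 0, so optional phys need no
 * special casing:
 *
 *	ret = phy_init(phy);
 *	if (ret)
 *		return ret;
 *	ret = phy_power_on(phy);
 *	if (ret) {
 *		phy_exit(phy);
 *		return ret;
 *	}
 *	...
 *	phy_power_off(phy);
 *	phy_exit(phy);
 */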

/**
 * _of_phy_get() - look up and obtain a reference to a phy by phandle
 * @np: device_node for which to get the phy
 * @index: the index of the phy in the "phys" property
 *
 * Returns the phy associated with the given phandle value, -ENODEV if the
 * phandle cannot be parsed, or -EPROBE_DEFER if there is a phandle to the
 * phy but its provider has not registered yet. The phy instance itself is
 * obtained through the of_xlate callback supplied by the phy provider.
 * Called with module and device references still to be taken by the caller.
 */
static struct phy *_of_phy_get(struct device_node *np, int index)
{
	int ret;
	struct phy_provider *phy_provider;
	struct phy *phy = NULL;
	struct of_phandle_args args;

	ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
					 index, &args);
	if (ret)
		return ERR_PTR(-ENODEV);

	mutex_lock(&phy_provider_mutex);
	phy_provider = of_phy_provider_lookup(args.np);
	if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
		phy = ERR_PTR(-EPROBE_DEFER);
		goto err0;
	}

	phy = phy_provider->of_xlate(phy_provider->dev, &args);
	module_put(phy_provider->owner);

err0:
	mutex_unlock(&phy_provider_mutex);
	of_node_put(args.np);

	return phy;
}

/**
 * of_phy_get() - look up and obtain a reference to a phy using a device_node
 * @np: device_node for which to get the phy
 * @con_id: name of the phy from the device's point of view
 *
 * Returns the phy after taking a refcount on it, or an ERR_PTR on failure.
 * The caller is responsible for calling phy_put() to release that count.
 */
struct phy *of_phy_get(struct device_node *np, const char *con_id)
{
	struct phy *phy = NULL;
	int index = 0;

	if (con_id)
		index = of_property_match_string(np, "phy-names", con_id);

	phy = _of_phy_get(np, index);
	if (IS_ERR(phy))
		return phy;

	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&phy->dev);

	return phy;
}
EXPORT_SYMBOL_GPL(of_phy_get);

/**
 * phy_put() - release the PHY
 * @phy: the phy returned by phy_get()
 *
 * Releases a refcount the caller received from phy_get().
 */
void phy_put(struct phy *phy)
{
	if (!phy || IS_ERR(phy))
		return;

	module_put(phy->ops->owner);
	put_device(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_put);
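
/*
 * Usage sketch (not part of this driver): of_phy_get() takes both a module
 * reference and a device reference, so every successful call must be
 * balanced with phy_put(). The node and connection name below are
 * hypothetical:
 *
 *	phy = of_phy_get(child_np, "sata-phy");
 *	if (IS_ERR(phy))
 *		return PTR_ERR(phy);
 *	...
 *	phy_put(phy);
 */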

/**
 * devm_phy_put() - release the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * Destroys the devres associated with this phy and invokes phy_put()
 * to release the phy.
 */
void devm_phy_put(struct device *dev, struct phy *phy)
{
	int r;

	if (!phy)
		return;

	r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_put);

/**
 * of_phy_simple_xlate() - return the phy instance from the phy provider
 * @dev: the PHY provider device
 * @args: of_phandle_args (the phy cells are not used here)
 *
 * Intended to be used by phy providers for the common case where
 * #phy-cells is 0 and the provider has a single phy. Providers with
 * #phy-cells greater than 0 should supply a custom of_xlate function
 * that reads *args* and returns the appropriate phy.
 */
struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
				*args)
{
	struct phy *phy;
	struct class_dev_iter iter;

	class_dev_iter_init(&iter, phy_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		phy = to_phy(dev);
		if (args->np != phy->dev.of_node)
			continue;

		class_dev_iter_exit(&iter);
		return phy;
	}

	class_dev_iter_exit(&iter);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
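
/*
 * Usage sketch (not part of this driver): a provider with several phys
 * selected by one phandle cell would supply its own xlate instead of
 * of_phy_simple_xlate(). The layout below (a driver-private array indexed
 * by args->args[0], with my_priv and MY_NUM_PHYS as assumed names) is only
 * an example:
 *
 *	static struct phy *my_xlate(struct device *dev,
 *				    struct of_phandle_args *args)
 *	{
 *		struct my_priv *priv = dev_get_drvdata(dev);
 *
 *		if (args->args[0] >= MY_NUM_PHYS)
 *			return ERR_PTR(-ENODEV);
 *		return priv->phys[args->args[0]];
 *	}
 */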

/**
 * phy_get() - look up and obtain a reference to a phy
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data, or the name of the
 * controller port for the non-dt case
 *
 * Returns the phy after taking a refcount on it, or an ERR_PTR such as
 * -ENODEV if there is no such phy. The caller is responsible for calling
 * phy_put() to release that count.
 */
struct phy *phy_get(struct device *dev, const char *string)
{
	int index = 0;
	struct phy *phy;

	if (string == NULL) {
		dev_WARN(dev, "missing string\n");
		return ERR_PTR(-EINVAL);
	}

	if (dev->of_node) {
		index = of_property_match_string(dev->of_node, "phy-names",
						 string);
		phy = _of_phy_get(dev->of_node, index);
	} else {
		phy = phy_find(dev, string);
	}
	if (IS_ERR(phy))
		return phy;

	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&phy->dev);

	return phy;
}
EXPORT_SYMBOL_GPL(phy_get);

/**
 * phy_optional_get() - look up and obtain a reference to an optional phy
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data, or the name of the
 * controller port for the non-dt case
 *
 * Returns the phy after taking a refcount on it, or NULL if there is no
 * such phy. The caller is responsible for calling phy_put() to release
 * that count.
 */
struct phy *phy_optional_get(struct device *dev, const char *string)
{
	struct phy *phy = phy_get(dev, string);

	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}
EXPORT_SYMBOL_GPL(phy_optional_get);

/**
 * devm_phy_get() - look up and obtain a reference to a phy
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data, or the name of the
 * controller port for the non-dt case
 *
 * Gets the phy using phy_get() and associates it with the device using
 * devres. On driver detach, the release function is invoked on the devres
 * data, then the devres data is freed.
 */
struct phy *devm_phy_get(struct device *dev, const char *string)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = phy_get(dev, string);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_get);

/**
 * devm_phy_optional_get() - look up and obtain a reference to an optional
 * phy
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data, or the name of the
 * controller port for the non-dt case
 *
 * Gets the phy using phy_get() and associates it with the device using
 * devres. On driver detach, the release function is invoked on the devres
 * data, then the devres data is freed. This differs from devm_phy_get() in
 * that a missing phy is not treated as an error: instead of -ENODEV, a
 * NULL phy is returned, which can be passed to all other phy consumer calls.
 */
struct phy *devm_phy_optional_get(struct device *dev, const char *string)
{
	struct phy *phy = devm_phy_get(dev, string);

	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_optional_get);
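
/*
 * Usage sketch (not part of this driver): in a consumer's probe(), the
 * devm variants tie the reference to the consumer device, so no explicit
 * phy_put() is needed on the error or remove paths. "usb2-phy" is an
 * assumed "phy-names" entry:
 *
 *	phy = devm_phy_optional_get(dev, "usb2-phy");
 *	if (IS_ERR(phy))
 *		return PTR_ERR(phy);
 *
 * The error can be -EPROBE_DEFER, which probe() should simply propagate;
 * a missing phy yields NULL, which phy_init() and friends accept.
 */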

/**
 * devm_of_phy_get() - look up and obtain a reference to a phy
 * @dev: device that requests this phy
 * @np: node for which to get the phy
 * @con_id: name of the phy from the device's point of view
 *
 * Gets the phy using of_phy_get() and associates it with the device using
 * devres. On driver detach, the release function is invoked on the devres
 * data, then the devres data is freed.
 */
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
			    const char *con_id)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = of_phy_get(np, con_id);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get);

/**
 * phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Called to create a phy using the phy framework.
 */
struct phy *phy_create(struct device *dev, struct device_node *node,
		       const struct phy_ops *ops)
{
	int ret;
	int id;
	struct phy *phy;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	phy = kzalloc(sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return ERR_PTR(-ENOMEM);

	id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "unable to get id\n");
		ret = id;
		goto free_phy;
	}

	/* phy-supply */
	phy->pwr = regulator_get_optional(dev, "phy");
	if (IS_ERR(phy->pwr)) {
		if (PTR_ERR(phy->pwr) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto free_ida;
		}
		phy->pwr = NULL;
	}

	device_initialize(&phy->dev);
	mutex_init(&phy->mutex);

	phy->dev.class = phy_class;
	phy->dev.parent = dev;
	phy->dev.of_node = node ?: dev->of_node;
	phy->id = id;
	phy->ops = ops;

	ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
	if (ret)
		goto put_dev;

	ret = device_add(&phy->dev);
	if (ret)
		goto put_dev;

	if (pm_runtime_enabled(dev)) {
		pm_runtime_enable(&phy->dev);
		pm_runtime_no_callbacks(&phy->dev);
	}

	return phy;

put_dev:
	put_device(&phy->dev);
	return ERR_PTR(ret);

free_ida:
	ida_simple_remove(&phy_ida, phy->id);

free_phy:
	kfree(phy);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);

/**
 * devm_phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Creates a new PHY device, adding it to the PHY class. While at that,
 * it also associates the phy with the device using devres. On driver
 * detach, the release function is invoked on the devres data, then the
 * devres data is freed.
 */
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
			    const struct phy_ops *ops)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = phy_create(dev, node, ops);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_create);
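
/*
 * Usage sketch (not part of this driver): a PHY provider's probe()
 * typically creates its phy(s) with the devm variant and stashes its
 * private data on them via phy_set_drvdata() from <linux/phy/phy.h>.
 * my_phy_ops and priv are assumed names:
 *
 *	phy = devm_phy_create(dev, NULL, &my_phy_ops);
 *	if (IS_ERR(phy))
 *		return PTR_ERR(phy);
 *	phy_set_drvdata(phy, priv);
 */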

/**
 * phy_destroy() - destroy the phy
 * @phy: the phy to be destroyed
 *
 * Called to destroy the phy.
 */
void phy_destroy(struct phy *phy)
{
	pm_runtime_disable(&phy->dev);
	device_unregister(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_destroy);

/**
 * devm_phy_destroy() - destroy the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_create()
 *
 * Destroys the devres associated with this phy and invokes phy_destroy()
 * to destroy the phy.
 */
void devm_phy_destroy(struct device *dev, struct phy *phy)
{
	int r;

	r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_destroy);

/**
 * __of_phy_provider_register() - create/register a phy provider with the
 * framework
 * @dev: struct device of the phy provider
 * @owner: the module owning the of_xlate callback
 * @of_xlate: function pointer to obtain a phy instance from the provider
 *
 * Creates a struct phy_provider from dev and the of_xlate function pointer.
 * This is used in the dt boot case for finding the phy instance from the
 * phy provider.
 */
struct phy_provider *__of_phy_provider_register(struct device *dev,
	struct module *owner, struct phy * (*of_xlate)(struct device *dev,
	struct of_phandle_args *args))
{
	struct phy_provider *phy_provider;

	phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
	if (!phy_provider)
		return ERR_PTR(-ENOMEM);

	phy_provider->dev = dev;
	phy_provider->owner = owner;
	phy_provider->of_xlate = of_xlate;

	mutex_lock(&phy_provider_mutex);
	list_add_tail(&phy_provider->list, &phy_provider_list);
	mutex_unlock(&phy_provider_mutex);

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__of_phy_provider_register);

/**
 * __devm_of_phy_provider_register() - create/register a phy provider with
 * the framework
 * @dev: struct device of the phy provider
 * @owner: the module owning the of_xlate callback
 * @of_xlate: function pointer to obtain a phy instance from the provider
 *
 * Creates a struct phy_provider from dev and the of_xlate function pointer.
 * This is used in the dt boot case for finding the phy instance from the
 * phy provider. While at that, it also associates the provider with the
 * device using devres. On driver detach, the release function is invoked
 * on the devres data, then the devres data is freed.
 */
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
	struct module *owner, struct phy * (*of_xlate)(struct device *dev,
	struct of_phandle_args *args))
{
	struct phy_provider **ptr, *phy_provider;

	ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy_provider = __of_phy_provider_register(dev, owner, of_xlate);
	if (!IS_ERR(phy_provider)) {
		*ptr = phy_provider;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);
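
/*
 * Usage sketch (not part of this driver): after creating its phys, a
 * provider registers itself so _of_phy_get() can find it. The
 * devm_of_phy_provider_register() wrapper from <linux/phy/phy.h> fills in
 * THIS_MODULE and calls __devm_of_phy_provider_register():
 *
 *	phy_provider = devm_of_phy_provider_register(dev,
 *						      of_phy_simple_xlate);
 *	if (IS_ERR(phy_provider))
 *		return PTR_ERR(phy_provider);
 */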

/**
 * of_phy_provider_unregister() - unregister a phy provider from the framework
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * Removes the phy_provider created using of_phy_provider_register().
 */
void of_phy_provider_unregister(struct phy_provider *phy_provider)
{
	if (IS_ERR(phy_provider))
		return;

	mutex_lock(&phy_provider_mutex);
	list_del(&phy_provider->list);
	kfree(phy_provider);
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(of_phy_provider_unregister);

/**
 * devm_of_phy_provider_unregister() - remove a phy provider from the framework
 * @dev: struct device of the phy provider
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * Destroys the devres associated with this phy provider and invokes
 * of_phy_provider_unregister() to unregister the phy provider.
 */
void devm_of_phy_provider_unregister(struct device *dev,
				     struct phy_provider *phy_provider)
{
	int r;

	r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
			   phy_provider);
	dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
}
EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);

/*
 * phy_release() - release function for the phy class
 * @dev: the device embedded in the phy being released
 *
 * Frees the resources allocated in phy_create() once the last reference
 * to the phy device is dropped.
 */
static void phy_release(struct device *dev)
{
	struct phy *phy;

	phy = to_phy(dev);
	dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
	regulator_put(phy->pwr);
	ida_simple_remove(&phy_ida, phy->id);
	kfree(phy);
}

static int __init phy_core_init(void)
{
	phy_class = class_create(THIS_MODULE, "phy");
	if (IS_ERR(phy_class)) {
		pr_err("failed to create phy class --> %ld\n",
		       PTR_ERR(phy_class));
		return PTR_ERR(phy_class);
	}

	phy_class->dev_release = phy_release;

	return 0;
}
module_init(phy_core_init);

static void __exit phy_core_exit(void)
{
	class_destroy(phy_class);
}
module_exit(phy_core_exit);

MODULE_DESCRIPTION("Generic PHY Framework");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");