/*
 * Greybus interface code
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/delay.h>

#include "greybus.h"
#include "greybus_trace.h"

#define GB_INTERFACE_MODE_SWITCH_TIMEOUT	2000

#define GB_INTERFACE_DEVICE_ID_BAD	0xff

#define GB_INTERFACE_AUTOSUSPEND_MS	3000

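/* Time required for the interface to enter standby before disabling REFCLK */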
#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS	20

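/* Don't-care selector index */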
#define DME_SELECTOR_INDEX_NULL		0

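/* DME attributes */
/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */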
#define DME_T_TST_SRC_INCREMENT		0x4083

#define DME_DDBL1_MANUFACTURERID	0x5003
#define DME_DDBL1_PRODUCTID		0x5004

#define DME_TOSHIBA_GMP_VID		0x6000
#define DME_TOSHIBA_GMP_PID		0x6001
#define DME_TOSHIBA_GMP_SN0		0x6002
#define DME_TOSHIBA_GMP_SN1		0x6003
#define DME_TOSHIBA_GMP_INIT_STATUS	0x6101

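/* DDBL1 Manufacturer and Product ids */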
#define TOSHIBA_DMID			0x0126
#define TOSHIBA_ES2_BRIDGE_DPID		0x1000
#define TOSHIBA_ES3_APBRIDGE_DPID	0x1001
#define TOSHIBA_ES3_GBPHY_DPID		0x1002

static int gb_interface_hibernate_link(struct gb_interface *intf);
static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);

static int gb_interface_dme_attr_get(struct gb_interface *intf,
				     u16 attr, u32 *val)
{
	return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
				   attr, DME_SELECTOR_INDEX_NULL, val);
}

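/*
 * Fetch the GMP VID/PID and serial number of a Toshiba bridge from its
 * vendor-specific DME attributes.
 */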
static int gb_interface_read_ara_dme(struct gb_interface *intf)
{
	u32 sn0, sn1;
	int ret;

	/*
	 * Unless this is a Toshiba bridge, bail out until standard GMP
	 * attributes have been defined.
	 */
	if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
		dev_err(&intf->dev, "unknown manufacturer %08x\n",
			intf->ddbl1_manufacturer_id);
		return -ENODEV;
	}

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
					&intf->vendor_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
					&intf->product_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
	if (ret)
		return ret;

	intf->serial_number = (u64)sn1 << 32 | sn0;

	return 0;
}

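/*
 * Read the interface DME attributes (DDBL1 ids and, for Toshiba bridges,
 * the GMP ids). The values are cached so they are only queried once.
 */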
static int gb_interface_read_dme(struct gb_interface *intf)
{
	int ret;

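	/* DME attributes have already been read */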
	if (intf->dme_read)
		return 0;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
					&intf->ddbl1_manufacturer_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
					&intf->ddbl1_product_id);
	if (ret)
		return ret;

	if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
	    intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
		intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
		intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
	}

	ret = gb_interface_read_ara_dme(intf);
	if (ret)
		return ret;

	intf->dme_read = true;

	return 0;
}

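/*
 * Allocate a device id for the interface and create a route between the AP
 * and the interface.
 */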
static int gb_interface_route_create(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 intf_id = intf->interface_id;
	u8 device_id;
	int ret;

	/* Allocate an interface device id. */
	ret = ida_simple_get(&svc->device_id_map,
			     GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
			     GFP_KERNEL);
	if (ret < 0) {
		dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
		return ret;
	}
	device_id = ret;

	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to set device id %u: %d\n",
			device_id, ret);
		goto err_ida_remove;
	}

	/* FIXME: Hard-coded AP device id. */
	ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
				  intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to create route: %d\n", ret);
		goto err_svc_id_free;
	}

	intf->device_id = device_id;

	return 0;

err_svc_id_free:
	/*
	 * XXX Should we tell the SVC that this id doesn't belong to the
	 * XXX interface anymore?
	 */
err_ida_remove:
	ida_simple_remove(&svc->device_id_map, device_id);

	return ret;
}

static void gb_interface_route_destroy(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
		return;

	gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
	ida_simple_remove(&svc->device_id_map, intf->device_id);
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
}

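/* Locking: Caller holds the interface mutex. */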
static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
{
	int ret;

	dev_info(&intf->dev, "legacy mode switch detected\n");

	/* Mark as disconnected to prevent I/O during disable. */
	intf->disconnected = true;
	gb_interface_disable(intf);
	intf->disconnected = false;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
		gb_interface_deactivate(intf);
	}

	return ret;
}

void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
				u32 mailbox)
{
	mutex_lock(&intf->mutex);

	if (result) {
		dev_warn(&intf->dev,
			 "mailbox event with UniPro error: 0x%04x\n",
			 result);
		goto err_disable;
	}

	if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
		dev_warn(&intf->dev,
			 "mailbox event with unexpected value: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
		gb_interface_legacy_mode_switch(intf);
		goto out_unlock;
	}

	if (!intf->mode_switch) {
		dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	dev_info(&intf->dev, "mode switch detected\n");

	complete(&intf->mode_switch_completion);

out_unlock:
	mutex_unlock(&intf->mutex);

	return;

err_disable:
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}

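/*
 * Mode-switch worker: tear the interface down, wait for the mailbox event
 * signalling that the module has switched modes, and then re-enumerate it.
 */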
static void gb_interface_mode_switch_work(struct work_struct *work)
{
	struct gb_interface *intf;
	struct gb_control *control;
	unsigned long timeout;
	int ret;

	intf = container_of(work, struct gb_interface, mode_switch_work);

	mutex_lock(&intf->mutex);

	if (!intf->enabled) {
		dev_dbg(&intf->dev, "mode switch aborted\n");
		intf->mode_switch = false;
		mutex_unlock(&intf->mutex);
		goto out_interface_put;
	}

	/*
	 * Take an extra reference to the control device so that it stays
	 * around across gb_interface_disable(), and prepare it for the
	 * mode switch.
	 */
	control = gb_control_get(intf->control);
	gb_control_mode_switch_prepare(control);
	gb_interface_disable(intf);
	mutex_unlock(&intf->mutex);

	timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
	ret = wait_for_completion_interruptible_timeout(
			&intf->mode_switch_completion, timeout);

	/* Finalise the control-connection mode switch. */
	gb_control_mode_switch_complete(control);
	gb_control_put(control);

	if (ret < 0) {
		dev_err(&intf->dev, "mode switch interrupted\n");
		goto err_deactivate;
	} else if (ret == 0) {
		dev_err(&intf->dev, "mode switch timed out\n");
		goto err_deactivate;
	}

	/* Re-enable (re-enumerate) the interface if still active. */
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	if (intf->active) {
		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev, "failed to re-enable interface: %d\n",
				ret);
			gb_interface_deactivate(intf);
		}
	}
	mutex_unlock(&intf->mutex);

out_interface_put:
	gb_interface_put(intf);

	return;

err_deactivate:
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);

	gb_interface_put(intf);
}

int gb_interface_request_mode_switch(struct gb_interface *intf)
{
	int ret = 0;

	mutex_lock(&intf->mutex);
	if (intf->mode_switch) {
		ret = -EBUSY;
		goto out_unlock;
	}

	intf->mode_switch = true;
	reinit_completion(&intf->mode_switch_completion);

	/*
	 * Get a reference to the interface device, which will be put once the
	 * mode switch is complete.
	 */
	get_device(&intf->dev);

	if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
		put_device(&intf->dev);
		ret = -EBUSY;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&intf->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);

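/*
 * On ES2 bridges, T_TstSrcIncrement is used in place of the init-status
 * attribute DME_TOSHIBA_GMP_INIT_STATUS. The AP reads the attribute and
 * clears it again after reading a non-zero value from it.
 */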
static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	unsigned long bootrom_quirks;
	unsigned long s2l_quirks;
	int ret;
	u32 value;
	u16 attr;
	u8 init_status;

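	/*
	 * ES2 bridges do not implement the init-status attribute; use
	 * T_TstSrcIncrement in its place.
	 */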
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		attr = DME_T_TST_SRC_INCREMENT;
	else
		attr = DME_TOSHIBA_GMP_INIT_STATUS;

	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_SELECTOR_INDEX_NULL, &value);
	if (ret)
		return ret;

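	/*
	 * A nonzero init status indicates that the module has finished
	 * initializing.
	 */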
	if (!value) {
		dev_err(&intf->dev, "invalid init status\n");
		return -ENODEV;
	}

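	/*
	 * Extract the init status: with the ES2 quirk the status is in the
	 * bottom 8 bits of the attribute value, otherwise it is reported in
	 * the top 8 bits.
	 */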
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		init_status = value & 0xff;
	else
		init_status = value >> 24;

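	/*
	 * Check if the interface is executing its bootrom stage and set the
	 * bootrom quirks accordingly.
	 */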
	bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
				GB_INTERFACE_QUIRK_FORCED_DISABLE |
				GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
				GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;

	s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;

	switch (init_status) {
	case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
	case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
		intf->quirks |= bootrom_quirks;
		break;
	case GB_INIT_S2_LOADER_BOOT_STARTED:
		/* S2 loader doesn't support runtime PM */
		intf->quirks &= ~bootrom_quirks;
		intf->quirks |= s2l_quirks;
		break;
	default:
		intf->quirks &= ~bootrom_quirks;
		intf->quirks &= ~s2l_quirks;
	}

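	/* Clear the init status. */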
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_SELECTOR_INDEX_NULL, 0);
}

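/* Interface sysfs attributes */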
#define gb_interface_attr(field, type)	\
static ssize_t field##_show(struct device *dev,	\
			    struct device_attribute *attr,	\
			    char *buf)	\
{	\
	struct gb_interface *intf = to_gb_interface(dev);	\
	return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);	\
}	\
static DEVICE_ATTR_RO(field)

gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
gb_interface_attr(ddbl1_product_id, "0x%08x");
gb_interface_attr(interface_id, "%u");
gb_interface_attr(vendor_id, "0x%08x");
gb_interface_attr(product_id, "0x%08x");
gb_interface_attr(serial_number, "0x%016llx");

static ssize_t voltage_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_VOL,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(voltage_now);

static ssize_t current_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_CURR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(current_now);

static ssize_t power_now_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_PWR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(power_now);

static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	if (intf->active)
		return scnprintf(buf, PAGE_SIZE, "on\n");
	else
		return scnprintf(buf, PAGE_SIZE, "off\n");
}

static ssize_t power_state_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t len)
{
	struct gb_interface *intf = to_gb_interface(dev);
	bool activate;
	int ret = 0;

	if (kstrtobool(buf, &activate))
		return -EINVAL;

	mutex_lock(&intf->mutex);

	if (activate == intf->active)
		goto unlock;

	if (activate) {
		ret = gb_interface_activate(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to activate interface: %d\n", ret);
			goto unlock;
		}

		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to enable interface: %d\n", ret);
			gb_interface_deactivate(intf);
			goto unlock;
		}
	} else {
		gb_interface_disable(intf);
		gb_interface_deactivate(intf);
	}

unlock:
	mutex_unlock(&intf->mutex);

	if (ret)
		return ret;

	return len;
}
static DEVICE_ATTR_RW(power_state);

static const char *gb_interface_type_string(struct gb_interface *intf)
{
	static const char * const types[] = {
		[GB_INTERFACE_TYPE_INVALID] = "invalid",
		[GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
		[GB_INTERFACE_TYPE_DUMMY] = "dummy",
		[GB_INTERFACE_TYPE_UNIPRO] = "unipro",
		[GB_INTERFACE_TYPE_GREYBUS] = "greybus",
	};

	return types[intf->type];
}

static ssize_t interface_type_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	return sprintf(buf, "%s\n", gb_interface_type_string(intf));
}
static DEVICE_ATTR_RO(interface_type);

static struct attribute *interface_unipro_attrs[] = {
	&dev_attr_ddbl1_manufacturer_id.attr,
	&dev_attr_ddbl1_product_id.attr,
	NULL
};

static struct attribute *interface_greybus_attrs[] = {
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_serial_number.attr,
	NULL
};

static struct attribute *interface_power_attrs[] = {
	&dev_attr_voltage_now.attr,
	&dev_attr_current_now.attr,
	&dev_attr_power_now.attr,
	&dev_attr_power_state.attr,
	NULL
};

static struct attribute *interface_common_attrs[] = {
	&dev_attr_interface_id.attr,
	&dev_attr_interface_type.attr,
	NULL
};

static umode_t interface_unipro_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_greybus_is_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_power_is_visible(struct kobject *kobj,
					  struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static const struct attribute_group interface_unipro_group = {
	.is_visible = interface_unipro_is_visible,
	.attrs = interface_unipro_attrs,
};

static const struct attribute_group interface_greybus_group = {
	.is_visible = interface_greybus_is_visible,
	.attrs = interface_greybus_attrs,
};

static const struct attribute_group interface_power_group = {
	.is_visible = interface_power_is_visible,
	.attrs = interface_power_attrs,
};

static const struct attribute_group interface_common_group = {
	.attrs = interface_common_attrs,
};

static const struct attribute_group *interface_groups[] = {
	&interface_unipro_group,
	&interface_greybus_group,
	&interface_power_group,
	&interface_common_group,
	NULL
};

static void gb_interface_release(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);

	trace_gb_interface_release(intf);

	kfree(intf);
}

#ifdef CONFIG_PM
static int gb_interface_suspend(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret, timesync_ret;

	ret = gb_control_interface_suspend_prepare(intf->control);
	if (ret)
		return ret;

	gb_timesync_interface_remove(intf);

	ret = gb_control_suspend(intf->control);
	if (ret)
		goto err_hibernate_abort;

	ret = gb_interface_hibernate_link(intf);
	if (ret)
		return ret;

	/* Delay to allow the link to enter standby before disabling REFCLK. */
	msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);

	ret = gb_interface_refclk_set(intf, false);
	if (ret)
		return ret;

	return 0;

err_hibernate_abort:
	gb_control_interface_hibernate_abort(intf->control);

	timesync_ret = gb_timesync_interface_add(intf);
	if (timesync_ret) {
		dev_err(dev, "failed to add to timesync: %d\n", timesync_ret);
		return timesync_ret;
	}

	return ret;
}

static int gb_interface_resume(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		return ret;

	ret = gb_svc_intf_resume(svc, intf->interface_id);
	if (ret)
		return ret;

	ret = gb_control_resume(intf->control);
	if (ret)
		return ret;

	ret = gb_timesync_interface_add(intf);
	if (ret) {
		dev_err(dev, "failed to add to timesync: %d\n", ret);
		return ret;
	}

	ret = gb_timesync_schedule_synchronous(intf);
	if (ret) {
		dev_err(dev, "failed to synchronize FrameTime: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_runtime_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif

static const struct dev_pm_ops gb_interface_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
			   gb_interface_runtime_idle)
};

struct device_type greybus_interface_type = {
	.name = "greybus_interface",
	.release = gb_interface_release,
	.pm = &gb_interface_pm_ops,
};

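/*
 * Create a gb_interface structure to represent a discovered interface.
 * The position of the interface within its module is encoded in the
 * interface_id argument.
 *
 * Returns a pointer to the new interface or a null pointer if memory
 * allocation fails.
 */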
struct gb_interface *gb_interface_create(struct gb_module *module,
					 u8 interface_id)
{
	struct gb_host_device *hd = module->hd;
	struct gb_interface *intf;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return NULL;

	intf->hd = hd;
	intf->module = module;
	intf->interface_id = interface_id;
	INIT_LIST_HEAD(&intf->bundles);
	INIT_LIST_HEAD(&intf->manifest_descs);
	mutex_init(&intf->mutex);
	INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
	init_completion(&intf->mode_switch_completion);

	/* Invalid device id to start with */
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;

	intf->dev.parent = &module->dev;
	intf->dev.bus = &greybus_bus_type;
	intf->dev.type = &greybus_interface_type;
	intf->dev.groups = interface_groups;
	intf->dev.dma_mask = module->dev.dma_mask;
	device_initialize(&intf->dev);
	dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
		     interface_id);

	pm_runtime_set_autosuspend_delay(&intf->dev,
					 GB_INTERFACE_AUTOSUSPEND_MS);

	trace_gb_interface_create(intf);

	return intf;
}

static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_activate_operation(struct gb_interface *intf,
					   enum gb_interface_type *intf_type)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 type;
	int ret;

	dev_dbg(&intf->dev, "%s\n", __func__);

	ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
	if (ret) {
		dev_err(&intf->dev, "failed to activate: %d\n", ret);
		return ret;
	}

	switch (type) {
	case GB_SVC_INTF_TYPE_DUMMY:
		*intf_type = GB_INTERFACE_TYPE_DUMMY;

		return -ENODEV;
	case GB_SVC_INTF_TYPE_UNIPRO:
		*intf_type = GB_INTERFACE_TYPE_UNIPRO;
		dev_err(&intf->dev, "interface type UniPro not supported\n");

		return -ENODEV;
	case GB_SVC_INTF_TYPE_GREYBUS:
		*intf_type = GB_INTERFACE_TYPE_GREYBUS;
		break;
	default:
		dev_err(&intf->dev, "unknown interface type: %u\n", type);
		*intf_type = GB_INTERFACE_TYPE_UNKNOWN;
		return -ENODEV;
	}

	return 0;
}

static int gb_interface_hibernate_link(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
}

static int _gb_interface_activate(struct gb_interface *intf,
				  enum gb_interface_type *type)
{
	int ret;

	*type = GB_INTERFACE_TYPE_UNKNOWN;

	if (intf->ejected || intf->removed)
		return -ENODEV;

	ret = gb_interface_vsys_set(intf, true);
	if (ret)
		return ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		goto err_vsys_disable;

	ret = gb_interface_unipro_set(intf, true);
	if (ret)
		goto err_refclk_disable;

	ret = gb_interface_activate_operation(intf, type);
	if (ret) {
		switch (*type) {
		case GB_INTERFACE_TYPE_UNIPRO:
		case GB_INTERFACE_TYPE_GREYBUS:
			goto err_hibernate_link;
		default:
			goto err_unipro_disable;
		}
	}

	ret = gb_interface_read_dme(intf);
	if (ret)
		goto err_hibernate_link;

	ret = gb_interface_route_create(intf);
	if (ret)
		goto err_hibernate_link;

	intf->active = true;

	trace_gb_interface_activate(intf);

	return 0;

err_hibernate_link:
	gb_interface_hibernate_link(intf);
err_unipro_disable:
	gb_interface_unipro_set(intf, false);
err_refclk_disable:
	gb_interface_refclk_set(intf, false);
err_vsys_disable:
	gb_interface_vsys_set(intf, false);

	return ret;
}

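/*
 * An interface that reports as UniPro-only may in fact be a Greybus
 * interface whose mailbox poke was missed (believed to be caused by an
 * ES3 bootrom issue), so activation is retried a few times in that case.
 */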
static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
					   enum gb_interface_type *type)
{
	int retries = 3;
	int ret;

	while (retries--) {
		ret = _gb_interface_activate(intf, type);
		if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
			continue;

		break;
	}

	return ret;
}

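/*
 * Activate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */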
int gb_interface_activate(struct gb_interface *intf)
{
	enum gb_interface_type type;
	int ret;

	switch (intf->type) {
	case GB_INTERFACE_TYPE_INVALID:
	case GB_INTERFACE_TYPE_GREYBUS:
		ret = _gb_interface_activate_es3_hack(intf, &type);
		break;
	default:
		ret = _gb_interface_activate(intf, &type);
	}

	/* Make sure the type is detected correctly during reactivation. */
	if (intf->type != GB_INTERFACE_TYPE_INVALID) {
		if (type != intf->type) {
			dev_err(&intf->dev, "failed to detect interface type\n");

			if (!ret)
				gb_interface_deactivate(intf);

			return -EIO;
		}
	} else {
		intf->type = type;
	}

	return ret;
}

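/*
 * Deactivate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */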
void gb_interface_deactivate(struct gb_interface *intf)
{
	if (!intf->active)
		return;

	trace_gb_interface_deactivate(intf);

	/* Abort any ongoing mode switch. */
	if (intf->mode_switch)
		complete(&intf->mode_switch_completion);

	gb_interface_route_destroy(intf);
	gb_interface_hibernate_link(intf);
	gb_interface_unipro_set(intf, false);
	gb_interface_refclk_set(intf, false);
	gb_interface_vsys_set(intf, false);

	intf->active = false;
}

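/*
 * Enable an interface by enabling its control connection, fetching the
 * manifest and other information over it, and finally registering its child
 * devices.
 *
 * Locking: Caller holds the interface mutex.
 */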
int gb_interface_enable(struct gb_interface *intf)
{
	struct gb_control *control;
	struct gb_bundle *bundle, *tmp;
	int ret, size;
	void *manifest;

	ret = gb_interface_read_and_clear_init_status(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
		return ret;
	}

	/* Establish control connection */
	control = gb_control_create(intf);
	if (IS_ERR(control)) {
		dev_err(&intf->dev, "failed to create control device: %ld\n",
			PTR_ERR(control));
		return PTR_ERR(control);
	}
	intf->control = control;

	ret = gb_control_enable(intf->control);
	if (ret)
		goto err_put_control;

	/* Get manifest size using control protocol on CPort */
	size = gb_control_get_manifest_size_operation(intf);
	if (size <= 0) {
		dev_err(&intf->dev, "failed to get manifest size: %d\n", size);

		if (size)
			ret = size;
		else
			ret = -EINVAL;

		goto err_disable_control;
	}

	manifest = kmalloc(size, GFP_KERNEL);
	if (!manifest) {
		ret = -ENOMEM;
		goto err_disable_control;
	}

	/* Get manifest using control protocol on CPort */
	ret = gb_control_get_manifest_operation(intf, manifest, size);
	if (ret) {
		dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
		goto err_free_manifest;
	}

	/*
	 * Parse the manifest and build up our data structures representing
	 * what's in it.
	 */
	if (!gb_manifest_parse(intf, manifest, size)) {
		dev_err(&intf->dev, "failed to parse manifest\n");
		ret = -EINVAL;
		goto err_destroy_bundles;
	}

	ret = gb_control_get_bundle_versions(intf->control);
	if (ret)
		goto err_destroy_bundles;

	ret = gb_timesync_interface_add(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to add to timesync: %d\n", ret);
		goto err_destroy_bundles;
	}

	/* Register the control device and any bundles */
	ret = gb_control_add(intf->control);
	if (ret)
		goto err_remove_timesync;

	pm_runtime_use_autosuspend(&intf->dev);
	pm_runtime_get_noresume(&intf->dev);
	pm_runtime_set_active(&intf->dev);
	pm_runtime_enable(&intf->dev);

	list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
		ret = gb_bundle_add(bundle);
		if (ret) {
			gb_bundle_destroy(bundle);
			continue;
		}
	}

	kfree(manifest);

	intf->enabled = true;

	pm_runtime_put(&intf->dev);

	trace_gb_interface_enable(intf);

	return 0;

err_remove_timesync:
	gb_timesync_interface_remove(intf);
err_destroy_bundles:
	list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
		gb_bundle_destroy(bundle);
err_free_manifest:
	kfree(manifest);
err_disable_control:
	gb_control_disable(intf->control);
err_put_control:
	gb_control_put(intf->control);
	intf->control = NULL;

	return ret;
}

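/*
 * Disable an interface and destroy its bundles.
 *
 * Locking: Caller holds the interface mutex.
 */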
void gb_interface_disable(struct gb_interface *intf)
{
	struct gb_bundle *bundle;
	struct gb_bundle *next;

	if (!intf->enabled)
		return;

	trace_gb_interface_disable(intf);

	pm_runtime_get_sync(&intf->dev);

	/* Set disconnected flag to avoid I/O during connection tear down. */
	if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
		intf->disconnected = true;

	list_for_each_entry_safe(bundle, next, &intf->bundles, links)
		gb_bundle_destroy(bundle);

	if (!intf->mode_switch && !intf->disconnected)
		gb_control_interface_deactivate_prepare(intf->control);

	gb_control_del(intf->control);
	gb_timesync_interface_remove(intf);
	gb_control_disable(intf->control);
	gb_control_put(intf->control);
	intf->control = NULL;

	intf->enabled = false;

	pm_runtime_disable(&intf->dev);
	pm_runtime_set_suspended(&intf->dev);
	pm_runtime_dont_use_autosuspend(&intf->dev);
	pm_runtime_put_noidle(&intf->dev);
}

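/* TimeSync wrappers for the interface control connection */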
int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
				 u64 frame_time, u32 strobe_delay, u32 refclk)
{
	return gb_control_timesync_enable(intf->control, count,
					  frame_time, strobe_delay,
					  refclk);
}

int gb_interface_timesync_disable(struct gb_interface *intf)
{
	return gb_control_timesync_disable(intf->control);
}

int gb_interface_timesync_authoritative(struct gb_interface *intf,
					u64 *frame_time)
{
	return gb_control_timesync_authoritative(intf->control,
						 frame_time);
}

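/* Register the interface device and log the interface ids. */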
int gb_interface_add(struct gb_interface *intf)
{
	int ret;

	ret = device_add(&intf->dev);
	if (ret) {
		dev_err(&intf->dev, "failed to register interface: %d\n", ret);
		return ret;
	}

	trace_gb_interface_add(intf);

	dev_info(&intf->dev, "Interface added (%s)\n",
		 gb_interface_type_string(intf));

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
			 intf->vendor_id, intf->product_id);
		/* fall through */
	case GB_INTERFACE_TYPE_UNIPRO:
		dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
			 intf->ddbl1_manufacturer_id,
			 intf->ddbl1_product_id);
		break;
	default:
		break;
	}

	return 0;
}

/* Deregister an interface. */
void gb_interface_del(struct gb_interface *intf)
{
	if (device_is_registered(&intf->dev)) {
		trace_gb_interface_del(intf);

		device_del(&intf->dev);
		dev_info(&intf->dev, "Interface removed\n");
	}
}

void gb_interface_put(struct gb_interface *intf)
{
	put_device(&intf->dev);
}