/*
 * Greybus connection handling
 */

#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);

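/*
 * Check whether an interface cport id is already claimed by an existing
 * connection. Called with gb_connection_mutex held.
 */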
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
		    connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}

static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}

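/*
 * Returns a reference-counted pointer to the connection if found.
 */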
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}

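/*
 * Callback from the host driver to let us know that data has been
 * received on a cport.
 */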
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);

static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}

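/*
 * Build the connection name from the host cport id and the interface
 * id/cport id (both zero for static connections).
 */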
static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

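/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:			host device of the connection
 * @hd_cport_id:	host-device cport id, or -1 for dynamic allocation
 * @intf:		remote interface, or NULL for static connections
 * @bundle:		remote-interface bundle (may be NULL)
 * @cport_id:		remote-interface cport id, or 0 for static connections
 * @handler:		request handler (may be NULL)
 * @flags:		connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */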
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);

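/*
 * Host-device cport helpers: thin wrappers around the optional
 * hd->driver callbacks, treating a missing callback as a no-op.
 */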
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
				       connection->flags);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
			connection->name, ret);
	}
}

static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	if (!hd->driver->cport_quiesce)
		return 0;

	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_clear)
		return 0;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

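/*
 * Request the SVC to create a connection from the AP's cport to the
 * interface's cport.
 */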
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
				       hd->svc->ap_intf_id,
				       connection->hd_cport_id,
				       intf->interface_id,
				       connection->intf_cport_id,
				       cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}

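/*
 * Inform the interface's control connection that a cport has been
 * connected; static and control connections need no notification.
 */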
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}

static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting
				 * for the response.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}

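/*
 * Send a cport shutdown request (phase 1 or 2) to the remote end of
 * the connection.
 */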
static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}

static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}

static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}

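/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */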
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

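/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */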
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

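/*
 * _gb_connection_enable() - enable a connection
 * @connection:		connection to enable
 * @rx:			whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */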
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}

int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	ret = _gb_connection_enable(connection, true);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
		goto out_unlock;

	ret = _gb_connection_enable(connection, false);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

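/*
 * Mode-switch helpers: tearing down the underlying SVC connection and
 * host cport is deferred from disable until the mode switch completes.
 */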
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}

void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* Connection tear down is deferred when mode switching. */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

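/* Disable a connection without communicating with the remote end. */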
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);
out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

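/* Caller must have disabled the connection before destroying it. */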
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

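/*
 * Latency-tag helpers: ask the host driver to (un)tag traffic on this
 * cport for latency measurement, when supported.
 */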
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_enable)
		return;

	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_disable)
		return;

	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);