1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <linux/skbuff.h>
28#include <linux/init.h>
29#include <linux/fs.h>
30#include <linux/seq_file.h>
31#include <linux/slab.h>
32
33#include <asm/byteorder.h>
34#include <asm/unaligned.h>
35
36#include <net/irda/irda.h>
37#include <net/irda/irlap.h>
38#include <net/irda/irlmp.h>
39#include <net/irda/parameters.h>
40#include <net/irda/irttp.h>
41
/* The single IrTTP layer instance, allocated in irttp_init() */
static struct irttp_cb *irttp;

/* Frees one TSAP without touching the hashbin (also used as FREE_FUNC) */
static void __irttp_close_tsap(struct tsap_cb *self);

/* Callbacks registered with IrLMP for every LSAP we open */
static int irttp_data_indication(void *instance, void *sap,
				 struct sk_buff *skb);
static int irttp_udata_indication(void *instance, void *sap,
				  struct sk_buff *skb);
static void irttp_disconnect_indication(void *instance, void *sap,
					LM_REASON reason, struct sk_buff *);
static void irttp_connect_indication(void *instance, void *sap,
				     struct qos_info *qos, __u32 max_sdu_size,
				     __u8 header_size, struct sk_buff *skb);
static void irttp_connect_confirm(void *instance, void *sap,
				  struct qos_info *qos, __u32 max_sdu_size,
				  __u8 header_size, struct sk_buff *skb);
/* Queue engines: push queued tx frames down to LAP / rx frames up to client */
static void irttp_run_tx_queue(struct tsap_cb *self);
static void irttp_run_rx_queue(struct tsap_cb *self);

static void irttp_flush_queues(struct tsap_cb *self);
static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
static void irttp_todo_expired(unsigned long data);
static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
				    int get);

static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow);
static void irttp_status_indication(void *instance,
				    LINK_STATUS link, LOCK_STATUS lock);

/* Parameter negotiation tables: minor PI 0x01 is MaxSduSize (big-endian) */
static pi_minor_info_t pi_minor_call_table[] = {
	{ NULL, 0 },						 /* 0x00 */
	{ irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
};
static pi_major_info_t pi_major_call_table[] = {{ pi_minor_call_table, 2 }};
static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
79
80
81
82
83
84
85
86
87
88int __init irttp_init(void)
89{
90 irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL);
91 if (irttp == NULL)
92 return -ENOMEM;
93
94 irttp->magic = TTP_MAGIC;
95
96 irttp->tsaps = hashbin_new(HB_LOCK);
97 if (!irttp->tsaps) {
98 IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n",
99 __func__);
100 kfree(irttp);
101 return -ENOMEM;
102 }
103
104 return 0;
105}
106
107
108
109
110
111
112
/*
 * Function irttp_cleanup (void)
 *
 *    Called by module destruction/cleanup code: tears down every TSAP
 *    still registered and frees the layer's control block.
 */
void irttp_cleanup(void)
{
	/* Check for main structure */
	IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);

	/* Delete all open TSAPs; __irttp_close_tsap() frees each one */
	hashbin_delete(irttp->tsaps, (FREE_FUNC) __irttp_close_tsap);

	/* Poison the magic so stale users trip the asserts above */
	irttp->magic = 0;

	/* De-allocate main structure */
	kfree(irttp);

	irttp = NULL;
}
130
131
132
133
134
135
136
137
138
139
/*
 * Schedule irttp_todo_expired() to run for this TSAP after 'timeout'
 * jiffies. mod_timer() also re-arms a timer that is already pending.
 */
static inline void irttp_start_todo_timer(struct tsap_cb *self, int timeout)
{
	/* Set new value for timer (also re-arms any pending run) */
	mod_timer(&self->todo_timer, jiffies + timeout);
}
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
/*
 * Function irttp_todo_expired (data)
 *
 *    Todo timer has expired: flush the rx/tx queues and finish any
 *    deferred disconnect/close for this TSAP.
 */
static void irttp_todo_expired(unsigned long data)
{
	struct tsap_cb *self = (struct tsap_cb *) data;

	/* Paranoia: the timer argument may be stale */
	if (!self || self->magic != TTP_TSAP_MAGIC)
		return;

	IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);

	/* Try to make some progress in both directions */
	irttp_run_rx_queue(self);
	irttp_run_tx_queue(self);

	/* A disconnect was deferred until the tx queue drained? */
	if (test_bit(0, &self->disconnect_pend)) {
		/* Check if it's possible to disconnect yet */
		if (skb_queue_empty(&self->tx_queue)) {
			/* Make sure disconnect is not pending anymore */
			clear_bit(0, &self->disconnect_pend);

			/* Ownership of disconnect_skb passes to
			 * irttp_disconnect_request() */
			irttp_disconnect_request(self, self->disconnect_skb,
						 P_NORMAL);
			self->disconnect_skb = NULL;
		} else {
			/* Try again later */
			irttp_start_todo_timer(self, HZ/10);

			/* Tear down connection, not TSAP, so keep it alive
			 * and skip the close handling below */
			return;
		}
	}

	/* A deferred close can now be completed */
	if (self->close_pend)
		/* Note: this frees 'self' */
		irttp_close_tsap(self);
}
199
200
201
202
203
204
205static void irttp_flush_queues(struct tsap_cb *self)
206{
207 struct sk_buff* skb;
208
209 IRDA_DEBUG(4, "%s()\n", __func__);
210
211 IRDA_ASSERT(self != NULL, return;);
212 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
213
214
215 while ((skb = skb_dequeue(&self->tx_queue)) != NULL)
216 dev_kfree_skb(skb);
217
218
219 while ((skb = skb_dequeue(&self->rx_queue)) != NULL)
220 dev_kfree_skb(skb);
221
222
223 while ((skb = skb_dequeue(&self->rx_fragments)) != NULL)
224 dev_kfree_skb(skb);
225}
226
227
228
229
230
231
232
233
/*
 * Function irttp_reassemble_skb (self)
 *
 *    Makes a new (continuous) skb of all the fragments in the fragment
 *    queue. Returns NULL if the allocation fails (in that case the
 *    fragment queue and rx_sdu_size are left untouched).
 */
static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
{
	struct sk_buff *skb, *frag;
	int n = 0;	/* Bytes copied so far */

	IRDA_ASSERT(self != NULL, return NULL;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);

	IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __func__,
		   self->rx_sdu_size);

	skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size);
	if (!skb)
		return NULL;

	/*
	 * Need to reserve space for a TTP header in case this skb needs
	 * to be requeued (irttp_do_data_indication() pushes the header
	 * back when delivery fails).
	 */
	skb_reserve(skb, TTP_HEADER);
	skb_put(skb, self->rx_sdu_size);

	/*
	 *  Copy all fragments to the new buffer, back to back
	 */
	while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) {
		skb_copy_to_linear_data_offset(skb, n, frag->data, frag->len);
		n += frag->len;

		dev_kfree_skb(frag);
	}

	IRDA_DEBUG(2,
		   "%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n",
		   __func__, n, self->rx_sdu_size, self->rx_max_sdu_size);
	/* Note: irttp_run_rx_queue() computes rx_sdu_size by summing the
	 * fragment sizes, so we should have n == rx_sdu_size, except when
	 * oversized fragments were dropped (rx_sdu_size exceeded
	 * rx_max_sdu_size), where n < rx_sdu_size. */
	IRDA_ASSERT(n <= self->rx_sdu_size, n = self->rx_sdu_size;);

	/* Set the new length */
	skb_trim(skb, n);

	self->rx_sdu_size = 0;

	return skb;
}
284
285
286
287
288
289
290
291static inline void irttp_fragment_skb(struct tsap_cb *self,
292 struct sk_buff *skb)
293{
294 struct sk_buff *frag;
295 __u8 *frame;
296
297 IRDA_DEBUG(2, "%s()\n", __func__);
298
299 IRDA_ASSERT(self != NULL, return;);
300 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
301 IRDA_ASSERT(skb != NULL, return;);
302
303
304
305
306 while (skb->len > self->max_seg_size) {
307 IRDA_DEBUG(2, "%s(), fragmenting ...\n", __func__);
308
309
310 frag = alloc_skb(self->max_seg_size+self->max_header_size,
311 GFP_ATOMIC);
312 if (!frag)
313 return;
314
315 skb_reserve(frag, self->max_header_size);
316
317
318 skb_copy_from_linear_data(skb, skb_put(frag, self->max_seg_size),
319 self->max_seg_size);
320
321
322 frame = skb_push(frag, TTP_HEADER);
323 frame[0] = TTP_MORE;
324
325
326 skb_pull(skb, self->max_seg_size);
327
328
329 skb_queue_tail(&self->tx_queue, frag);
330 }
331
332 IRDA_DEBUG(2, "%s(), queuing last segment\n", __func__);
333
334 frame = skb_push(skb, TTP_HEADER);
335 frame[0] = 0x00;
336
337
338 skb_queue_tail(&self->tx_queue, skb);
339}
340
341
342
343
344
345
346
347
348static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
349 int get)
350{
351 struct tsap_cb *self;
352
353 self = (struct tsap_cb *) instance;
354
355 IRDA_ASSERT(self != NULL, return -1;);
356 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
357
358 if (get)
359 param->pv.i = self->tx_max_sdu_size;
360 else
361 self->tx_max_sdu_size = param->pv.i;
362
363 IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __func__, param->pv.i);
364
365 return 0;
366}
367
368
369
370
371
372
373
374
375
/*
 * Initialize the per-TSAP internal objects: state spinlock, deferred-work
 * ("todo") timer, and the three packet queues. Also used for duplicates
 * in irttp_dup(), where it resets the memcpy'ed queue heads.
 */
static void irttp_init_tsap(struct tsap_cb *tsap)
{
	spin_lock_init(&tsap->lock);
	init_timer(&tsap->todo_timer);	/* caller binds .function/.data */

	skb_queue_head_init(&tsap->rx_queue);
	skb_queue_head_init(&tsap->tx_queue);
	skb_queue_head_init(&tsap->rx_fragments);
}
385
386
387
388
389
390
391struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
392{
393 struct tsap_cb *self;
394 struct lsap_cb *lsap;
395 notify_t ttp_notify;
396
397 IRDA_ASSERT(irttp->magic == TTP_MAGIC, return NULL;);
398
399
400
401
402 if((stsap_sel != LSAP_ANY) &&
403 ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
404 IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__);
405 return NULL;
406 }
407
408 self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
409 if (self == NULL) {
410 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __func__);
411 return NULL;
412 }
413
414
415 irttp_init_tsap(self);
416
417
418 self->todo_timer.data = (unsigned long) self;
419 self->todo_timer.function = &irttp_todo_expired;
420
421
422 irda_notify_init(&ttp_notify);
423 ttp_notify.connect_confirm = irttp_connect_confirm;
424 ttp_notify.connect_indication = irttp_connect_indication;
425 ttp_notify.disconnect_indication = irttp_disconnect_indication;
426 ttp_notify.data_indication = irttp_data_indication;
427 ttp_notify.udata_indication = irttp_udata_indication;
428 ttp_notify.flow_indication = irttp_flow_indication;
429 if(notify->status_indication != NULL)
430 ttp_notify.status_indication = irttp_status_indication;
431 ttp_notify.instance = self;
432 strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME);
433
434 self->magic = TTP_TSAP_MAGIC;
435 self->connected = FALSE;
436
437
438
439
440 lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
441 if (lsap == NULL) {
442 IRDA_WARNING("%s: unable to allocate LSAP!!\n", __func__);
443 return NULL;
444 }
445
446
447
448
449
450
451 self->stsap_sel = lsap->slsap_sel;
452 IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __func__, self->stsap_sel);
453
454 self->notify = *notify;
455 self->lsap = lsap;
456
457 hashbin_insert(irttp->tsaps, (irda_queue_t *) self, (long) self, NULL);
458
459 if (credit > TTP_RX_MAX_CREDIT)
460 self->initial_credit = TTP_RX_MAX_CREDIT;
461 else
462 self->initial_credit = credit;
463
464 return self;
465}
466EXPORT_SYMBOL(irttp_open_tsap);
467
468
469
470
471
472
473
474
/*
 * Function __irttp_close_tsap (self)
 *
 *    Deallocate the TSAP's resources and free it. The caller must have
 *    already removed the TSAP from the hashbin (or never inserted it).
 */
static void __irttp_close_tsap(struct tsap_cb *self)
{
	/* Protect against bogus pointers and double frees */
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	irttp_flush_queues(self);

	del_timer(&self->todo_timer);

	/* This one won't be cleaned up if we are disconnect_pend +
	 * close_pend and we receive a disconnect_indication */
	if (self->disconnect_skb)
		dev_kfree_skb(self->disconnect_skb);

	self->connected = FALSE;
	self->magic = ~TTP_TSAP_MAGIC;	/* Poison against stale users */

	kfree(self);
}
495
496
497
498
499
500
501
502
503
504
505
/*
 * Function irttp_close_tsap (self)
 *
 *    Remove TSAP from the list of all TSAPs and then deallocate all
 *    resources associated with this TSAP. If the TSAP is still connected
 *    the close is deferred: a disconnect is requested and the todo timer
 *    completes the close later.
 *
 * Note: because this *frees* the tsap structure, the caller must make
 * sure it is only called once per TSAP.
 */
int irttp_close_tsap(struct tsap_cb *self)
{
	struct tsap_cb *tsap;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	/* Make sure tsap has been disconnected */
	if (self->connected) {
		/* Check if disconnect is not pending already */
		if (!test_bit(0, &self->disconnect_pend)) {
			IRDA_WARNING("%s: TSAP still connected!\n",
				     __func__);
			irttp_disconnect_request(self, NULL, P_NORMAL);
		}
		self->close_pend = TRUE;
		irttp_start_todo_timer(self, HZ/10);

		return 0; /* Will be back! The timer finishes the close */
	}

	tsap = hashbin_remove(irttp->tsaps, (long) self, NULL);

	IRDA_ASSERT(tsap == self, return -1;);

	/* Close corresponding LSAP */
	if (self->lsap) {
		irlmp_close_lsap(self->lsap);
		self->lsap = NULL;
	}

	__irttp_close_tsap(self);

	return 0;
}
EXPORT_SYMBOL(irttp_close_tsap);
544
545
546
547
548
549
550
551int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
552{
553 int ret;
554
555 IRDA_ASSERT(self != NULL, return -1;);
556 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
557 IRDA_ASSERT(skb != NULL, return -1;);
558
559 IRDA_DEBUG(4, "%s()\n", __func__);
560
561
562 if (skb->len == 0) {
563 ret = 0;
564 goto err;
565 }
566
567
568 if (!self->connected) {
569 IRDA_WARNING("%s(), Not connected\n", __func__);
570 ret = -ENOTCONN;
571 goto err;
572 }
573
574 if (skb->len > self->max_seg_size) {
575 IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__);
576 ret = -EMSGSIZE;
577 goto err;
578 }
579
580 irlmp_udata_request(self->lsap, skb);
581 self->stats.tx_packets++;
582
583 return 0;
584
585err:
586 dev_kfree_skb(skb);
587 return ret;
588}
589EXPORT_SYMBOL(irttp_udata_request);
590
591
592
593
594
595
596
597
/*
 * Function irttp_data_request (self, skb)
 *
 *    Queue frame for transmission. If SAR is enabled, frames larger than
 *    the peer's TxMaxSduSize are rejected and frames larger than one
 *    IrLAP segment are fragmented. Takes ownership of 'skb' (freed on
 *    every error path). Returns 0 or a negative errno.
 */
int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
{
	__u8 *frame;
	int ret;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
		   skb_queue_len(&self->tx_queue));

	/* Nothing to send: drop silently */
	if (skb->len == 0) {
		ret = 0;
		goto err;
	}

	/* Refuse to transmit on an unconnected TSAP */
	if (!self->connected) {
		IRDA_WARNING("%s: Not connected\n", __func__);
		ret = -ENOTCONN;
		goto err;
	}

	/*
	 *  Check if SAR is disabled, and the frame is larger than what fits
	 *  inside an IrLAP frame
	 */
	if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
		IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n",
			   __func__);
		ret = -EMSGSIZE;
		goto err;
	}

	/*
	 *  Check if SAR is enabled, and the frame is larger than the
	 *  TxMaxSduSize the peer announced (TTP_SAR_UNBOUND = no limit)
	 */
	if ((self->tx_max_sdu_size != 0) &&
	    (self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
	    (skb->len > self->tx_max_sdu_size))
	{
		IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
			   __func__);
		ret = -EMSGSIZE;
		goto err;
	}

	/*
	 *  Check if transmit queue is full
	 */
	if (skb_queue_len(&self->tx_queue) >= TTP_TX_MAX_QUEUE) {
		/*
		 *  Give it a chance to empty itself
		 */
		irttp_run_tx_queue(self);

		/* Drop the frame and let the caller retry */
		ret = -ENOBUFS;
		goto err;
	}

	/* Queue frame, or queue frame segments */
	if ((self->tx_max_sdu_size == 0) || (skb->len < self->max_seg_size)) {
		/* Queue frame as a single segment */
		IRDA_ASSERT(skb_headroom(skb) >= TTP_HEADER, return -1;);
		frame = skb_push(skb, TTP_HEADER);
		frame[0] = 0x00; /* Clear more bit */

		skb_queue_tail(&self->tx_queue, skb);
	} else {
		/*
		 *  Fragment the frame; irttp_fragment_skb() queues the
		 *  fragments itself (and takes ownership of skb)
		 */
		irttp_fragment_skb(self, skb);
	}

	/* Check if we can accept more data from client */
	if ((!self->tx_sdu_busy) &&
	    (skb_queue_len(&self->tx_queue) > TTP_TX_HIGH_THRESHOLD)) {
		/* Tx queue filling up, so stop client */
		if (self->notify.flow_indication) {
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_STOP);
		}
		/* tx_sdu_busy reflects the client's state. Update it only
		 * after notifying the client, so a racing
		 * irttp_run_tx_queue() sees a consistent picture. */
		self->tx_sdu_busy = TRUE;
	}

	/* Try to make some progress */
	irttp_run_tx_queue(self);

	return 0;

err:
	dev_kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(irttp_data_request);
708
709
710
711
712
713
714
/*
 * Function irttp_run_tx_queue (self)
 *
 *    Transmit queued frames for as long as the peer has given us send
 *    credits and IrLAP can accept more frames. Each transmitted frame
 *    piggy-backs up to 127 of our available receive credits in its TTP
 *    header byte.
 */
static void irttp_run_tx_queue(struct tsap_cb *self)
{
	struct sk_buff *skb;
	unsigned long flags;
	int n;	/* Credits given back to the peer in this frame */

	IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n",
		   __func__,
		   self->send_credit, skb_queue_len(&self->tx_queue));

	/* Get exclusive access to the tx queue, otherwise don't touch it */
	if (irda_lock(&self->tx_queue_lock) == FALSE)
		return;

	/* Send frames while we have credit and while LAP is not full;
	 * when LAP fills up it will call irttp_flow_indication() later */
	while ((self->send_credit > 0) &&
	       (!irlmp_lap_tx_queue_full(self->lsap)) &&
	       (skb = skb_dequeue(&self->tx_queue)))
	{
		/*
		 *  Since we can transmit and receive frames concurrently,
		 *  the credit updates below are a critical region
		 */
		spin_lock_irqsave(&self->lock, flags);

		n = self->avail_credit;
		self->avail_credit = 0;

		/* Only room for 127 credits in the frame */
		if (n > 127) {
			self->avail_credit = n-127;
			n = 127;
		}
		self->remote_credit += n;
		self->send_credit--;

		spin_unlock_irqrestore(&self->lock, flags);

		/*
		 *  OR the credits into the header byte; the more bit was
		 *  already set by data_request()/fragment_skb()
		 */
		skb->data[0] |= (n & 0x7f);

		/* Detach from the socket, if any. This releases the
		 * sender's socket-buffer accounting now rather than when
		 * LAP finally frees the skb. */
		if (skb->sk != NULL) {
			/* IrSOCK application, IrOBEX, ... */
			skb_orphan(skb);
		}

		/* Pass the skb (ownership included) down to IrLMP */
		irlmp_data_request(self->lsap, skb);
		self->stats.tx_packets++;
	}

	/* Check if we can accept more frames from the client; done here
	 * synchronously (rather than via the todo timer) so the client is
	 * unblocked without extra latency */
	if ((self->tx_sdu_busy) &&
	    (skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
	    (!self->close_pend))
	{
		if (self->notify.flow_indication)
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_START);

		/* tx_sdu_busy reflects the client's state; update it
		 * after notifying the client */
		self->tx_sdu_busy = FALSE;
	}

	/* Reset lock */
	self->tx_queue_lock = 0;
}
814
815
816
817
818
819
820
/*
 * Function irttp_give_credit (self)
 *
 *    Send a dataless flowdata TTP-PDU, giving available receive credit
 *    to the peer TSAP.
 */
static inline void irttp_give_credit(struct tsap_cb *self)
{
	struct sk_buff *tx_skb = NULL;
	unsigned long flags;
	int n;	/* Credits to give away (max 127 per frame) */

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n",
		   __func__,
		   self->send_credit, self->avail_credit, self->remote_credit);

	/* Give credit to peer */
	tx_skb = alloc_skb(TTP_MAX_HEADER, GFP_ATOMIC);
	if (!tx_skb)
		return;

	/* Reserve space for LMP and LAP header */
	skb_reserve(tx_skb, LMP_MAX_HEADER);

	/*
	 *  Since we can transmit and receive frames concurrently,
	 *  the credit updates below are a critical region
	 */
	spin_lock_irqsave(&self->lock, flags);

	n = self->avail_credit;
	self->avail_credit = 0;

	/* Only space for 7 bits of credit in the frame */
	if (n > 127) {
		self->avail_credit = n - 127;
		n = 127;
	}
	self->remote_credit += n;

	spin_unlock_irqrestore(&self->lock, flags);

	/* One-byte TTP header: no data, just the credit count */
	skb_put(tx_skb, 1);
	tx_skb->data[0] = (__u8) (n & 0x7f);

	irlmp_data_request(self->lsap, tx_skb);
	self->stats.tx_packets++;
}
867
868
869
870
871
872
873
874static int irttp_udata_indication(void *instance, void *sap,
875 struct sk_buff *skb)
876{
877 struct tsap_cb *self;
878 int err;
879
880 IRDA_DEBUG(4, "%s()\n", __func__);
881
882 self = (struct tsap_cb *) instance;
883
884 IRDA_ASSERT(self != NULL, return -1;);
885 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
886 IRDA_ASSERT(skb != NULL, return -1;);
887
888 self->stats.rx_packets++;
889
890
891 if (self->notify.udata_indication) {
892 err = self->notify.udata_indication(self->notify.instance,
893 self,skb);
894
895 if (!err)
896 return 0;
897 }
898
899 dev_kfree_skb(skb);
900
901 return 0;
902}
903
904
905
906
907
908
909
/*
 * Function irttp_data_indication (instance, sap, skb)
 *
 *    Receive segment from IrLMP. Harvests the credits returned by the
 *    peer, queues payload-carrying frames for delivery, then runs the rx
 *    queue and — if we were credit-starved — the tx queue.
 */
static int irttp_data_indication(void *instance, void *sap,
				 struct sk_buff *skb)
{
	struct tsap_cb *self;
	unsigned long flags;
	int n;	/* Credits returned to us in this frame's header */

	self = (struct tsap_cb *) instance;

	n = skb->data[0] & 0x7f;     /* Extract the credits */

	self->stats.rx_packets++;

	/*
	 *  Deal with inbound credit.
	 *  Since we can transmit and receive frames concurrently,
	 *  the credit updates below are a critical region.
	 */
	spin_lock_irqsave(&self->lock, flags);
	self->send_credit += n;
	if (skb->len > 1)
		/* A data-carrying frame costs the peer one of our credits */
		self->remote_credit--;
	spin_unlock_irqrestore(&self->lock, flags);

	/*
	 *  Data or dataless packet? A dataless "flowdata" frame is only
	 *  the one-byte TTP header.
	 */
	if (skb->len > 1) {
		/*
		 *  We don't remove the TTP header yet, since we must
		 *  preserve the more bit for the reassembly code
		 */
		skb_queue_tail(&self->rx_queue, skb);
	} else {
		/* Dataless flowdata TTP-PDU */
		dev_kfree_skb(skb);
	}

	/* Push data to the higher layer synchronously; deferring to the
	 * todo timer would add too much latency per received packet */
	irttp_run_rx_queue(self);

	/* If send_credit now equals the credits just received, it was
	 * zero before this frame arrived: the peer just unblocked us, so
	 * flush our tx queue right away rather than waiting for the
	 * (high latency) todo timer */
	if (self->send_credit == n) {
		/* Restart pushing stuff to LAP */
		irttp_run_tx_queue(self);
	}

	return 0;
}
985
986
987
988
989
990
991
992static void irttp_status_indication(void *instance,
993 LINK_STATUS link, LOCK_STATUS lock)
994{
995 struct tsap_cb *self;
996
997 IRDA_DEBUG(4, "%s()\n", __func__);
998
999 self = (struct tsap_cb *) instance;
1000
1001 IRDA_ASSERT(self != NULL, return;);
1002 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1003
1004
1005 if (self->close_pend)
1006 return;
1007
1008
1009
1010
1011 if (self->notify.status_indication != NULL)
1012 self->notify.status_indication(self->notify.instance,
1013 link, lock);
1014 else
1015 IRDA_DEBUG(2, "%s(), no handler\n", __func__);
1016}
1017
1018
1019
1020
1021
1022
1023
1024static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
1025{
1026 struct tsap_cb *self;
1027
1028 self = (struct tsap_cb *) instance;
1029
1030 IRDA_ASSERT(self != NULL, return;);
1031 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1032
1033 IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044 irttp_run_tx_queue(self);
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058 if(self->disconnect_pend)
1059 irttp_start_todo_timer(self, 0);
1060}
1061
1062
1063
1064
1065
1066
1067
1068
1069void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
1070{
1071 IRDA_DEBUG(1, "%s()\n", __func__);
1072
1073 IRDA_ASSERT(self != NULL, return;);
1074 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1075
1076 switch (flow) {
1077 case FLOW_STOP:
1078 IRDA_DEBUG(1, "%s(), flow stop\n", __func__);
1079 self->rx_sdu_busy = TRUE;
1080 break;
1081 case FLOW_START:
1082 IRDA_DEBUG(1, "%s(), flow start\n", __func__);
1083 self->rx_sdu_busy = FALSE;
1084
1085
1086
1087 irttp_run_rx_queue(self);
1088
1089 break;
1090 default:
1091 IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __func__);
1092 }
1093}
1094EXPORT_SYMBOL(irttp_flow_request);
1095
1096
1097
1098
1099
1100
1101
/*
 * Function irttp_connect_request (self, dtsap_sel, saddr, daddr, qos,
 *                                 max_sdu_size, userdata)
 *
 *    Request a connection to the remote 'dtsap_sel'. 'max_sdu_size' is
 *    the largest SDU we are willing to receive (> 0 enables SAR and is
 *    advertised via the MaxSduSize parameter). 'userdata', if given, is
 *    client data piggy-backed on the connect frame and must leave at
 *    least TTP_MAX_HEADER bytes of headroom; ownership is taken in all
 *    paths.
 */
int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
			  __u32 saddr, __u32 daddr,
			  struct qos_info *qos, __u32 max_sdu_size,
			  struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	__u8 *frame;
	__u8 n;	/* Initial credit advertised in the connect frame */

	IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __func__, max_sdu_size);

	IRDA_ASSERT(self != NULL, return -EBADR;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);

	if (self->connected) {
		if(userdata)
			dev_kfree_skb(userdata);
		return -EISCONN;
	}

	/* Any userdata supplied? */
	if (userdata == NULL) {
		tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
				   GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX_CONTROL and LAP header */
		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
	} else {
		tx_skb = userdata;
		/*
		 *  Check that the client has reserved enough space for
		 *  headers
		 */
		IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
			{ dev_kfree_skb(userdata); return -1; } );
	}

	/* Initialize connection parameters */
	self->connected = FALSE;
	self->avail_credit = 0;
	self->rx_max_sdu_size = max_sdu_size;
	self->rx_sdu_size = 0;
	self->rx_sdu_busy = FALSE;
	self->dtsap_sel = dtsap_sel;

	n = self->initial_credit;

	self->remote_credit = 0;
	self->send_credit = 0;

	/*
	 *  Give away max 127 credits for now
	 */
	if (n > 127) {
		self->avail_credit=n-127;
		n = 127;
	}

	self->remote_credit = n;

	/* SAR enabled? */
	if (max_sdu_size > 0) {
		IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
			{ dev_kfree_skb(tx_skb); return -1; } );

		/* Insert SAR parameters */
		frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);

		frame[0] = TTP_PARAMETERS | n;	/* credits + parameter flag */
		frame[1] = 0x04;	/* Length of parameter list */
		frame[2] = 0x01;	/* MaxSduSize parameter identifier */
		frame[3] = 0x02;	/* Parameter value length (2 bytes) */

		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
			      (__be16 *)(frame+4));
	} else {
		/* Insert plain TTP header */
		frame = skb_push(tx_skb, TTP_HEADER);

		/* Insert initial credit in frame */
		frame[0] = n & 0x7f;
	}

	/* Connect with IrLMP; tx_skb ownership passes down */
	return irlmp_connect_request(self->lsap, dtsap_sel, saddr, daddr, qos,
				     tx_skb);
}
EXPORT_SYMBOL(irttp_connect_request);
1192
1193
1194
1195
1196
1197
1198
1199static void irttp_connect_confirm(void *instance, void *sap,
1200 struct qos_info *qos, __u32 max_seg_size,
1201 __u8 max_header_size, struct sk_buff *skb)
1202{
1203 struct tsap_cb *self;
1204 int parameters;
1205 int ret;
1206 __u8 plen;
1207 __u8 n;
1208
1209 IRDA_DEBUG(4, "%s()\n", __func__);
1210
1211 self = (struct tsap_cb *) instance;
1212
1213 IRDA_ASSERT(self != NULL, return;);
1214 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1215 IRDA_ASSERT(skb != NULL, return;);
1216
1217 self->max_seg_size = max_seg_size - TTP_HEADER;
1218 self->max_header_size = max_header_size + TTP_HEADER;
1219
1220
1221
1222
1223
1224 if (qos) {
1225 IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %02x\n",
1226 qos->baud_rate.bits);
1227 IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %d bps.\n",
1228 qos->baud_rate.value);
1229 }
1230
1231 n = skb->data[0] & 0x7f;
1232
1233 IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __func__, n);
1234
1235 self->send_credit = n;
1236 self->tx_max_sdu_size = 0;
1237 self->connected = TRUE;
1238
1239 parameters = skb->data[0] & 0x80;
1240
1241 IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
1242 skb_pull(skb, TTP_HEADER);
1243
1244 if (parameters) {
1245 plen = skb->data[0];
1246
1247 ret = irda_param_extract_all(self, skb->data+1,
1248 IRDA_MIN(skb->len-1, plen),
1249 ¶m_info);
1250
1251
1252 if (ret < 0) {
1253 IRDA_WARNING("%s: error extracting parameters\n",
1254 __func__);
1255 dev_kfree_skb(skb);
1256
1257
1258 return;
1259 }
1260
1261 skb_pull(skb, IRDA_MIN(skb->len, plen+1));
1262 }
1263
1264 IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __func__,
1265 self->send_credit, self->avail_credit, self->remote_credit);
1266
1267 IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __func__,
1268 self->tx_max_sdu_size);
1269
1270 if (self->notify.connect_confirm) {
1271 self->notify.connect_confirm(self->notify.instance, self, qos,
1272 self->tx_max_sdu_size,
1273 self->max_header_size, skb);
1274 } else
1275 dev_kfree_skb(skb);
1276}
1277
1278
1279
1280
1281
1282
1283
1284static void irttp_connect_indication(void *instance, void *sap,
1285 struct qos_info *qos, __u32 max_seg_size, __u8 max_header_size,
1286 struct sk_buff *skb)
1287{
1288 struct tsap_cb *self;
1289 struct lsap_cb *lsap;
1290 int parameters;
1291 int ret;
1292 __u8 plen;
1293 __u8 n;
1294
1295 self = (struct tsap_cb *) instance;
1296
1297 IRDA_ASSERT(self != NULL, return;);
1298 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1299 IRDA_ASSERT(skb != NULL, return;);
1300
1301 lsap = (struct lsap_cb *) sap;
1302
1303 self->max_seg_size = max_seg_size - TTP_HEADER;
1304 self->max_header_size = max_header_size+TTP_HEADER;
1305
1306 IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);
1307
1308
1309 self->dtsap_sel = lsap->dlsap_sel;
1310
1311 n = skb->data[0] & 0x7f;
1312
1313 self->send_credit = n;
1314 self->tx_max_sdu_size = 0;
1315
1316 parameters = skb->data[0] & 0x80;
1317
1318 IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
1319 skb_pull(skb, TTP_HEADER);
1320
1321 if (parameters) {
1322 plen = skb->data[0];
1323
1324 ret = irda_param_extract_all(self, skb->data+1,
1325 IRDA_MIN(skb->len-1, plen),
1326 ¶m_info);
1327
1328
1329 if (ret < 0) {
1330 IRDA_WARNING("%s: error extracting parameters\n",
1331 __func__);
1332 dev_kfree_skb(skb);
1333
1334
1335 return;
1336 }
1337
1338
1339 skb_pull(skb, IRDA_MIN(skb->len, plen+1));
1340 }
1341
1342 if (self->notify.connect_indication) {
1343 self->notify.connect_indication(self->notify.instance, self,
1344 qos, self->tx_max_sdu_size,
1345 self->max_header_size, skb);
1346 } else
1347 dev_kfree_skb(skb);
1348}
1349
1350
1351
1352
1353
1354
1355
1356
/*
 * Function irttp_connect_response (self, max_sdu_size, userdata)
 *
 *    Service user is accepting the connection: build the response frame
 *    (initial credit plus, when max_sdu_size > 0, the MaxSduSize SAR
 *    parameter) and pass it down to IrLMP. Ownership of 'userdata' is
 *    taken; if supplied it must leave TTP_MAX_HEADER bytes of headroom.
 */
int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
			   struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	__u8 *frame;
	int ret;
	__u8 n;	/* Initial credit advertised in the response frame */

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __func__,
		   self->stsap_sel);

	/* Any userdata supplied? */
	if (userdata == NULL) {
		tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
				   GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX_CONTROL and LAP header */
		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
	} else {
		tx_skb = userdata;
		/*
		 *  Check that the client has reserved enough space for
		 *  headers
		 */
		IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
			{ dev_kfree_skb(userdata); return -1; } );
	}

	/* Initialize the receive side of the connection */
	self->avail_credit = 0;
	self->remote_credit = 0;
	self->rx_max_sdu_size = max_sdu_size;
	self->rx_sdu_size = 0;
	self->rx_sdu_busy = FALSE;

	n = self->initial_credit;

	/* Frame has only room for max 127 credits (7 bits) */
	if (n > 127) {
		self->avail_credit = n - 127;
		n = 127;
	}

	self->remote_credit = n;
	self->connected = TRUE;

	/* SAR enabled? */
	if (max_sdu_size > 0) {
		IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
			{ dev_kfree_skb(tx_skb); return -1; } );

		/* Insert TTP header with SAR parameters */
		frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);

		frame[0] = TTP_PARAMETERS | n;	/* credits + parameter flag */
		frame[1] = 0x04;	/* Length of parameter list */

		/*
		 *  MaxSduSize parameter: identifier 0x01, two-byte
		 *  big-endian value
		 */
		frame[2] = 0x01; /* MaxSduSize */
		frame[3] = 0x02; /* Value length */

		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
			      (__be16 *)(frame+4));
	} else {
		/* Insert TTP header */
		frame = skb_push(tx_skb, TTP_HEADER);

		frame[0] = n & 0x7f;
	}

	ret = irlmp_connect_response(self->lsap, tx_skb);

	return ret;
}
EXPORT_SYMBOL(irttp_connect_response);
1438
1439
1440
1441
1442
1443
1444
1445struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
1446{
1447 struct tsap_cb *new;
1448 unsigned long flags;
1449
1450 IRDA_DEBUG(1, "%s()\n", __func__);
1451
1452
1453 spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);
1454
1455
1456 if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) {
1457 IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __func__);
1458 spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
1459 return NULL;
1460 }
1461
1462
1463 new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
1464 if (!new) {
1465 IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
1466 spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
1467 return NULL;
1468 }
1469
1470 memcpy(new, orig, sizeof(struct tsap_cb));
1471 spin_lock_init(&new->lock);
1472
1473
1474 spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
1475
1476
1477 new->lsap = irlmp_dup(orig->lsap, new);
1478 if (!new->lsap) {
1479 IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
1480 kfree(new);
1481 return NULL;
1482 }
1483
1484
1485 new->notify.instance = instance;
1486
1487
1488 irttp_init_tsap(new);
1489
1490
1491 hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL);
1492
1493 return new;
1494}
1495EXPORT_SYMBOL(irttp_dup);
1496
1497
1498
1499
1500
1501
1502
1503
/*
 * Function irttp_disconnect_request (self, userdata, priority)
 *
 *    Close this connection. With P_HIGH priority any queued data frames
 *    are dropped first; with P_NORMAL the disconnect is deferred (via the
 *    todo timer) until the tx queue has drained. Takes ownership of
 *    'userdata' in all paths.
 */
int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
			     int priority)
{
	int ret;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	/* Already disconnected? */
	if (!self->connected) {
		IRDA_DEBUG(4, "%s(), already disconnected!\n", __func__);
		if (userdata)
			dev_kfree_skb(userdata);
		return -1;
	}

	/* Disconnect already pending?
	 * Use an atomic test-and-set to prevent reentry: this function can
	 * be called from several contexts (user, timer, net_bh). */
	if(test_and_set_bit(0, &self->disconnect_pend)) {
		IRDA_DEBUG(0, "%s(), disconnect already pending\n",
			   __func__);
		if (userdata)
			dev_kfree_skb(userdata);

		/* Try to make some progress on the tx queue anyway */
		irttp_run_tx_queue(self);
		return -1;
	}

	/*
	 *  Check if there are still data segments in the transmit queue
	 */
	if (!skb_queue_empty(&self->tx_queue)) {
		if (priority == P_HIGH) {
			/*
			 *  No need to send the queued data: we are
			 *  disconnecting right now, so it would not be
			 *  delivered anyway
			 */
			IRDA_DEBUG(1, "%s(): High priority!!()\n", __func__);
			irttp_flush_queues(self);
		} else if (priority == P_NORMAL) {
			/*
			 *  Must delay the disconnect until all queued data
			 *  segments have been sent; the todo timer will
			 *  call us back with this skb later
			 */
			self->disconnect_skb = userdata;  /* May be NULL */

			irttp_run_tx_queue(self);

			irttp_start_todo_timer(self, HZ/10);
			return -1;
		}
	}
	/* Note: the disconnect_pend bit is still set here, which blocks
	 * reentry until we clear it below */

	IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __func__);
	self->connected = FALSE;

	if (!userdata) {
		struct sk_buff *tx_skb;
		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/*
		 *  Reserve space for MUX and LAP header
		 */
		skb_reserve(tx_skb, LMP_MAX_HEADER);

		userdata = tx_skb;
	}
	ret = irlmp_disconnect_request(self->lsap, userdata);

	/* The disconnect is no longer pending */
	clear_bit(0, &self->disconnect_pend);	/* FALSE */

	return ret;
}
EXPORT_SYMBOL(irttp_disconnect_request);
1591
1592
1593
1594
1595
1596
1597
1598static void irttp_disconnect_indication(void *instance, void *sap,
1599 LM_REASON reason, struct sk_buff *skb)
1600{
1601 struct tsap_cb *self;
1602
1603 IRDA_DEBUG(4, "%s()\n", __func__);
1604
1605 self = (struct tsap_cb *) instance;
1606
1607 IRDA_ASSERT(self != NULL, return;);
1608 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1609
1610
1611 self->connected = FALSE;
1612
1613
1614 if (self->close_pend) {
1615
1616
1617 if (skb)
1618 dev_kfree_skb(skb);
1619 irttp_close_tsap(self);
1620 return;
1621 }
1622
1623
1624
1625
1626
1627
1628
1629
1630 if(self->notify.disconnect_indication)
1631 self->notify.disconnect_indication(self->notify.instance, self,
1632 reason, skb);
1633 else
1634 if (skb)
1635 dev_kfree_skb(skb);
1636}
1637
1638
1639
1640
1641
1642
1643
1644
/*
 * Function irttp_do_data_indication (self, skb)
 *
 *    Try to deliver a reassembled skb to the layer above; requeue it if
 *    delivery fails, marking the rx side busy to apply back-pressure.
 */
static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
{
	int err;

	/* Don't deliver while closing down: just drop the frame */
	if (self->close_pend) {
		dev_kfree_skb(skb);
		return;
	}

	err = self->notify.data_indication(self->notify.instance, self, skb);

	/* Usually the layer above returns an error when its input queue is
	 * full; in that case we must stop the peer from sending more data
	 * and keep the frame for a later retry */
	if (err) {
		IRDA_DEBUG(0, "%s() requeueing skb!\n", __func__);

		/* Make sure we take a break */
		self->rx_sdu_busy = TRUE;

		/* Need to push the header back in */
		skb_push(skb, TTP_HEADER);
		skb->data[0] = 0x00; /* Make sure MORE bit is cleared */

		/* Put skb back at the head of the queue */
		skb_queue_head(&self->rx_queue, skb);
	}
}
1676
1677
1678
1679
1680
1681
1682
/*
 * Function irttp_run_rx_queue (self)
 *
 *    Process the receive queue: reassemble SDU fragments, deliver
 *    complete SDUs to the client, then recompute and hand back receive
 *    credit to the peer if it is running low.
 */
static void irttp_run_rx_queue(struct tsap_cb *self)
{
	struct sk_buff *skb;
	int more = 0;	/* "More" bit of the current frame */

	IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __func__,
		   self->send_credit, self->avail_credit, self->remote_credit);

	/* Get exclusive access to the rx queue, otherwise don't touch it */
	if (irda_lock(&self->rx_queue_lock) == FALSE)
		return;

	/*
	 *  Reassemble all frames in receive queue and deliver them
	 */
	while (!self->rx_sdu_busy && (skb = skb_dequeue(&self->rx_queue))) {
		/* This bit tells us if this is the last fragment or not */
		more = skb->data[0] & 0x80;

		/* Remove TTP header */
		skb_pull(skb, TTP_HEADER);

		/* Add the length of the remaining data */
		self->rx_sdu_size += skb->len;

		/*
		 * If SAR is disabled, deliver this skb immediately,
		 * unfragmented
		 */
		if (self->rx_max_sdu_size == TTP_SAR_DISABLE) {
			irttp_do_data_indication(self, skb);
			self->rx_sdu_size = 0;

			continue;
		}

		/* Check if this is a fragment, and not the last fragment */
		if (more) {
			/*
			 *  Queue the fragment if we still are within the
			 *  limits of the maximum size of the rx_sdu
			 */
			if (self->rx_sdu_size <= self->rx_max_sdu_size) {
				IRDA_DEBUG(4, "%s(), queueing frag\n",
					   __func__);
				skb_queue_tail(&self->rx_fragments, skb);
			} else {
				/* Free the part of the SDU that is too big */
				dev_kfree_skb(skb);
			}
			continue;
		}
		/*
		 *  This is the last fragment, so time to reassemble!
		 */
		if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
		    (self->rx_max_sdu_size == TTP_SAR_UNBOUND))
		{
			/*
			 * A little optimizing: only go through reassembly
			 * if there actually are earlier fragments queued;
			 * a single-fragment SDU can be delivered as-is
			 */
			if (!skb_queue_empty(&self->rx_fragments)) {
				skb_queue_tail(&self->rx_fragments,
					       skb);

				skb = irttp_reassemble_skb(self);
			}

			/* Now we can deliver the reassembled skb */
			irttp_do_data_indication(self, skb);
		} else {
			IRDA_DEBUG(1, "%s(), Truncated frame\n", __func__);

			/* Free the part of the SDU that is too big */
			dev_kfree_skb(skb);

			/* Deliver what was received of the truncated SDU */
			skb = irttp_reassemble_skb(self);

			irttp_do_data_indication(self, skb);
		}
		self->rx_sdu_size = 0;
	}

	/*
	 * It's not trivial to keep track of the available credit by
	 * incrementing at each packet, because delivery may fail (the
	 * frame gets requeued above) and frames may sit in the queues
	 * unprocessed. So recompute from first principles: whatever part
	 * of our initial credit is neither outstanding at the peer
	 * (remote_credit) nor tied up in queued frames.
	 */
	self->avail_credit = (self->initial_credit -
			      (self->remote_credit +
			       skb_queue_len(&self->rx_queue) +
			       skb_queue_len(&self->rx_fragments)));

	/* Check if the peer is about to run out of credit */
	if ((self->remote_credit <= TTP_RX_MIN_CREDIT) &&
	    (self->avail_credit > 0)) {
		/* Send an explicit dataless credit frame */
		irttp_give_credit(self);
	}

	/* Reset lock */
	self->rx_queue_lock = 0;
}
1812
1813#ifdef CONFIG_PROC_FS
/* Cursor state for the /proc seq_file walk: 'id' is the ordinal
 * position of the current TSAP within the irttp->tsaps hashbin. */
struct irttp_iter_state {
	int id;
};
1817
1818static void *irttp_seq_start(struct seq_file *seq, loff_t *pos)
1819{
1820 struct irttp_iter_state *iter = seq->private;
1821 struct tsap_cb *self;
1822
1823
1824 spin_lock_irq(&irttp->tsaps->hb_spinlock);
1825 iter->id = 0;
1826
1827 for (self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
1828 self != NULL;
1829 self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps)) {
1830 if (iter->id == *pos)
1831 break;
1832 ++iter->id;
1833 }
1834
1835 return self;
1836}
1837
1838static void *irttp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1839{
1840 struct irttp_iter_state *iter = seq->private;
1841
1842 ++*pos;
1843 ++iter->id;
1844 return (void *) hashbin_get_next(irttp->tsaps);
1845}
1846
/* End a /proc traversal: release the hashbin lock taken in
 * irttp_seq_start(). */
static void irttp_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_irq(&irttp->tsaps->hb_spinlock);
}
1851
1852static int irttp_seq_show(struct seq_file *seq, void *v)
1853{
1854 const struct irttp_iter_state *iter = seq->private;
1855 const struct tsap_cb *self = v;
1856
1857 seq_printf(seq, "TSAP %d, ", iter->id);
1858 seq_printf(seq, "stsap_sel: %02x, ",
1859 self->stsap_sel);
1860 seq_printf(seq, "dtsap_sel: %02x\n",
1861 self->dtsap_sel);
1862 seq_printf(seq, " connected: %s, ",
1863 self->connected? "TRUE":"FALSE");
1864 seq_printf(seq, "avail credit: %d, ",
1865 self->avail_credit);
1866 seq_printf(seq, "remote credit: %d, ",
1867 self->remote_credit);
1868 seq_printf(seq, "send credit: %d\n",
1869 self->send_credit);
1870 seq_printf(seq, " tx packets: %lu, ",
1871 self->stats.tx_packets);
1872 seq_printf(seq, "rx packets: %lu, ",
1873 self->stats.rx_packets);
1874 seq_printf(seq, "tx_queue len: %u ",
1875 skb_queue_len(&self->tx_queue));
1876 seq_printf(seq, "rx_queue len: %u\n",
1877 skb_queue_len(&self->rx_queue));
1878 seq_printf(seq, " tx_sdu_busy: %s, ",
1879 self->tx_sdu_busy? "TRUE":"FALSE");
1880 seq_printf(seq, "rx_sdu_busy: %s\n",
1881 self->rx_sdu_busy? "TRUE":"FALSE");
1882 seq_printf(seq, " max_seg_size: %u, ",
1883 self->max_seg_size);
1884 seq_printf(seq, "tx_max_sdu_size: %u, ",
1885 self->tx_max_sdu_size);
1886 seq_printf(seq, "rx_max_sdu_size: %u\n",
1887 self->rx_max_sdu_size);
1888
1889 seq_printf(seq, " Used by (%s)\n\n",
1890 self->notify.name);
1891 return 0;
1892}
1893
/* seq_file iterator callbacks for the irttp /proc entry */
static const struct seq_operations irttp_seq_ops = {
	.start  = irttp_seq_start,
	.next   = irttp_seq_next,
	.stop   = irttp_seq_stop,
	.show   = irttp_seq_show,
};
1900
/* Open the /proc entry, allocating per-open iterator state that
 * seq_release_private will free. Returns 0 or a negative errno. */
static int irttp_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &irttp_seq_ops,
			sizeof(struct irttp_iter_state));
}
1906
/* file_operations wired to the seq_file machinery; exported (non-static)
 * so the procfs registration code elsewhere can reference it. */
const struct file_operations irttp_seq_fops = {
	.owner		= THIS_MODULE,
	.open           = irttp_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release_private,
};
1914
1915#endif
1916