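/*
 * IrDA socket layer (AF_IRDA / PF_IRDA)
 *
 * Exposes the Linux-IrDA stack through the BSD socket API: SOCK_STREAM
 * and SOCK_SEQPACKET run over IrTTP, SOCK_DGRAM carries either TTP unit
 * data (IRDAPROTO_UNITDATA) or, with CONFIG_IRDA_ULTRA, connectionless
 * Ultra frames over IrLMP (IRDAPROTO_ULTRA).
 */
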
45#include <linux/capability.h>
46#include <linux/module.h>
47#include <linux/types.h>
48#include <linux/socket.h>
49#include <linux/sockios.h>
50#include <linux/slab.h>
51#include <linux/init.h>
52#include <linux/net.h>
53#include <linux/irda.h>
54#include <linux/poll.h>
55
56#include <asm/ioctls.h>
57#include <asm/uaccess.h>
58
59#include <net/sock.h>
60#include <net/tcp_states.h>
61
62#include <net/irda/af_irda.h>
63
64static int irda_create(struct net *net, struct socket *sock, int protocol, int kern);
65
66static const struct proto_ops irda_stream_ops;
67static const struct proto_ops irda_seqpacket_ops;
68static const struct proto_ops irda_dgram_ops;
69
70#ifdef CONFIG_IRDA_ULTRA
71static const struct proto_ops irda_ultra_ops;
72#define ULTRA_MAX_DATA 382
73#endif
74
75#define IRDA_MAX_HEADER (TTP_MAX_HEADER)

/*
 * Function irda_data_indication (instance, sap, skb)
 *
 *    Data delivered by IrTTP (also used as udata_indication); queue it
 *    on the socket receive queue, or stop the RX flow if that fails.
 */
83static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb)
84{
85 struct irda_sock *self;
86 struct sock *sk;
87 int err;
88
89 IRDA_DEBUG(3, "%s()\n", __func__);
90
91 self = instance;
92 sk = instance;
93
94 err = sock_queue_rcv_skb(sk, skb);
95 if (err) {
96 IRDA_DEBUG(1, "%s(), error: no more mem!\n", __func__);
97 self->rx_flow = FLOW_STOP;
98
99
100 return err;
101 }
102
103 return 0;
104}

/*
 * Function irda_disconnect_indication (instance, sap, reason, skb)
 *
 *    The connection has been closed underneath us; mark the socket as
 *    shut down, wake up any sleepers and close our TSAP.
 */
112static void irda_disconnect_indication(void *instance, void *sap,
113 LM_REASON reason, struct sk_buff *skb)
114{
115 struct irda_sock *self;
116 struct sock *sk;
117
118 self = instance;
119
120 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
121
122
123 if(skb)
124 dev_kfree_skb(skb);
125
126 sk = instance;
127 if (sk == NULL) {
128 IRDA_DEBUG(0, "%s(%p) : BUG : sk is NULL\n",
129 __func__, self);
130 return;
131 }
132
133
134 bh_lock_sock(sk);
135 if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) {
136 sk->sk_state = TCP_CLOSE;
137 sk->sk_shutdown |= SEND_SHUTDOWN;
138
139 sk->sk_state_change(sk);
        /* Close our TSAP.
         * If we left it open, IrLMP would put it back on the list of
         * unconnected LSAPs and incoming connection requests could be
         * matched to this dead socket instead of a listening socket
         * bound to the same TSAP.
         */
154 if (self->tsap) {
155 irttp_close_tsap(self->tsap);
156 self->tsap = NULL;
157 }
158 }
159 bh_unlock_sock(sk);
167}

/*
 * Function irda_connect_confirm (instance, sap, qos, max_sdu_size,
 *                                max_header_size, skb)
 *
 *    Our connect request was accepted; record the negotiated QoS and
 *    sizes, move the socket to TCP_ESTABLISHED and wake the caller.
 */
175static void irda_connect_confirm(void *instance, void *sap,
176 struct qos_info *qos,
177 __u32 max_sdu_size, __u8 max_header_size,
178 struct sk_buff *skb)
179{
180 struct irda_sock *self;
181 struct sock *sk;
182
183 self = instance;
184
185 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
186
187 sk = instance;
188 if (sk == NULL) {
189 dev_kfree_skb(skb);
190 return;
191 }
192
193 dev_kfree_skb(skb);
194
195
196
197 self->max_header_size = max_header_size;
198
199
200 self->max_sdu_size_tx = max_sdu_size;
201
202
203 switch (sk->sk_type) {
204 case SOCK_STREAM:
205 if (max_sdu_size != 0) {
206 IRDA_ERROR("%s: max_sdu_size must be 0\n",
207 __func__);
208 return;
209 }
210 self->max_data_size = irttp_get_max_seg_size(self->tsap);
211 break;
212 case SOCK_SEQPACKET:
213 if (max_sdu_size == 0) {
214 IRDA_ERROR("%s: max_sdu_size cannot be 0\n",
215 __func__);
216 return;
217 }
218 self->max_data_size = max_sdu_size;
219 break;
220 default:
221 self->max_data_size = irttp_get_max_seg_size(self->tsap);
222 }
223
224 IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__,
225 self->max_data_size);
226
227 memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
228
229
230 sk->sk_state = TCP_ESTABLISHED;
231 sk->sk_state_change(sk);
232}

/*
 * Function irda_connect_indication (instance, sap, qos, max_sdu_size,
 *                                   max_header_size, skb)
 *
 *    Incoming connection on a listening socket; record the negotiated
 *    parameters and queue the request for irda_accept() to pick up.
 */
240static void irda_connect_indication(void *instance, void *sap,
241 struct qos_info *qos, __u32 max_sdu_size,
242 __u8 max_header_size, struct sk_buff *skb)
243{
244 struct irda_sock *self;
245 struct sock *sk;
246
247 self = instance;
248
249 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
250
251 sk = instance;
252 if (sk == NULL) {
253 dev_kfree_skb(skb);
254 return;
255 }
256
257
258 self->max_header_size = max_header_size;
259
260
261 self->max_sdu_size_tx = max_sdu_size;
262
263
264 switch (sk->sk_type) {
265 case SOCK_STREAM:
266 if (max_sdu_size != 0) {
267 IRDA_ERROR("%s: max_sdu_size must be 0\n",
268 __func__);
269 kfree_skb(skb);
270 return;
271 }
272 self->max_data_size = irttp_get_max_seg_size(self->tsap);
273 break;
274 case SOCK_SEQPACKET:
275 if (max_sdu_size == 0) {
276 IRDA_ERROR("%s: max_sdu_size cannot be 0\n",
277 __func__);
278 kfree_skb(skb);
279 return;
280 }
281 self->max_data_size = max_sdu_size;
282 break;
283 default:
284 self->max_data_size = irttp_get_max_seg_size(self->tsap);
285 }
286
287 IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__,
288 self->max_data_size);
289
290 memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
291
292 skb_queue_tail(&sk->sk_receive_queue, skb);
293 sk->sk_state_change(sk);
294}

/*
 * Function irda_connect_response (self)
 *
 *    Accept an incoming connection by sending a TTP connect response.
 */
302static void irda_connect_response(struct irda_sock *self)
303{
304 struct sk_buff *skb;
305
306 IRDA_DEBUG(2, "%s()\n", __func__);
307
308 skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, GFP_KERNEL);
309 if (skb == NULL) {
310 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n",
311 __func__);
312 return;
313 }
314
315
316 skb_reserve(skb, IRDA_MAX_HEADER);
317
318 irttp_connect_response(self->tsap, self->max_sdu_size_rx, skb);
319}

/*
 * Function irda_flow_indication (instance, sap, flow)
 *
 *    IrTTP flow control: remember the new TX flow state and wake up
 *    writers when transmission may start again.
 */
327static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
328{
329 struct irda_sock *self;
330 struct sock *sk;
331
332 IRDA_DEBUG(2, "%s()\n", __func__);
333
334 self = instance;
335 sk = instance;
336 BUG_ON(sk == NULL);
337
338 switch (flow) {
339 case FLOW_STOP:
340 IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n",
341 __func__);
342 self->tx_flow = flow;
343 break;
344 case FLOW_START:
345 self->tx_flow = flow;
346 IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n",
347 __func__);
348 wake_up_interruptible(sk_sleep(sk));
349 break;
350 default:
351 IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__);
352
353 self->tx_flow = flow;
354 break;
355 }
356}

/*
 * Function irda_getvalue_confirm (result, obj_id, value, priv)
 *
 *    IAS query callback; store the result (or error) and wake up the
 *    process sleeping on query_wait.
 */
366static void irda_getvalue_confirm(int result, __u16 obj_id,
367 struct ias_value *value, void *priv)
368{
369 struct irda_sock *self;
370
371 self = priv;
372 if (!self) {
373 IRDA_WARNING("%s: lost myself!\n", __func__);
374 return;
375 }
376
377 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
378
379
380 iriap_close(self->iriap);
381 self->iriap = NULL;
382
383
384 if (result != IAS_SUCCESS) {
385 IRDA_DEBUG(1, "%s(), IAS query failed! (%d)\n", __func__,
386 result);
387
388 self->errno = result;
389
390
391 wake_up_interruptible(&self->query_wait);
392
393 return;
394 }
395
396
397 self->ias_result = value;
398 self->errno = 0;
399
400
401 wake_up_interruptible(&self->query_wait);
402}

/*
 * Function irda_selective_discovery_indication (discovery, mode, priv)
 *
 *    A device matching our hint mask has been discovered (used by
 *    IRLMP_WAITDEVICE); cache its address and wake up the waiter.
 */
412static void irda_selective_discovery_indication(discinfo_t *discovery,
413 DISCOVERY_MODE mode,
414 void *priv)
415{
416 struct irda_sock *self;
417
418 IRDA_DEBUG(2, "%s()\n", __func__);
419
420 self = priv;
421 if (!self) {
422 IRDA_WARNING("%s: lost myself!\n", __func__);
423 return;
424 }
425
426
427 self->cachedaddr = discovery->daddr;
428
429
430 wake_up_interruptible(&self->query_wait);
431}

/*
 * Function irda_discovery_timeout (priv)
 *
 *    Watchdog for IRLMP_WAITDEVICE; flag the timeout and wake up the
 *    process waiting for a discovery.
 */
441static void irda_discovery_timeout(u_long priv)
442{
443 struct irda_sock *self;
444
445 IRDA_DEBUG(2, "%s()\n", __func__);
446
447 self = (struct irda_sock *) priv;
448 BUG_ON(self == NULL);
449
450
451 self->cachelog = NULL;
452 self->cachedaddr = 0;
453 self->errno = -ETIME;
454
455
456 wake_up_interruptible(&self->query_wait);
457}

/*
 * Function irda_open_tsap (self, tsap_sel, name)
 *
 *    Open a local TSAP with our notification callbacks and remember the
 *    source TSAP selector that was assigned to us.
 */
465static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name)
466{
467 notify_t notify;
468
469 if (self->tsap) {
470 IRDA_DEBUG(0, "%s: busy!\n", __func__);
471 return -EBUSY;
472 }
473
474
        irda_notify_init(&notify);
476 notify.connect_confirm = irda_connect_confirm;
477 notify.connect_indication = irda_connect_indication;
478 notify.disconnect_indication = irda_disconnect_indication;
479 notify.data_indication = irda_data_indication;
480 notify.udata_indication = irda_data_indication;
481 notify.flow_indication = irda_flow_indication;
482 notify.instance = self;
483 strncpy(notify.name, name, NOTIFY_MAX_NAME);
484
        self->tsap = irttp_open_tsap(tsap_sel, DEFAULT_INITIAL_CREDIT,
                                     &notify);
487 if (self->tsap == NULL) {
488 IRDA_DEBUG(0, "%s(), Unable to allocate TSAP!\n",
489 __func__);
490 return -ENOMEM;
491 }
492
493 self->stsap_sel = self->tsap->stsap_sel;
494
495 return 0;
496}

/*
 * Function irda_open_lsap (self, pid)
 *
 *    Open a connectionless LSAP bound to the given PID; used by Ultra
 *    sockets only.
 */
504#ifdef CONFIG_IRDA_ULTRA
505static int irda_open_lsap(struct irda_sock *self, int pid)
506{
507 notify_t notify;
508
509 if (self->lsap) {
510 IRDA_WARNING("%s(), busy!\n", __func__);
511 return -EBUSY;
512 }
513
514
        irda_notify_init(&notify);
516 notify.udata_indication = irda_data_indication;
517 notify.instance = self;
518 strncpy(notify.name, "Ultra", NOTIFY_MAX_NAME);
519
        self->lsap = irlmp_open_lsap(LSAP_CONNLESS, &notify, pid);
521 if (self->lsap == NULL) {
522 IRDA_DEBUG( 0, "%s(), Unable to allocate LSAP!\n", __func__);
523 return -ENOMEM;
524 }
525
526 return 0;
527}
528#endif

/*
 * Function irda_find_lsap_sel (self, name)
 *
 *    Query the remote IAS for the "IrDA:TinyTP:LsapSel" attribute of the
 *    given service name and fill in self->dtsap_sel. Sleeps until the
 *    query completes, so it must be called from user context.
 */
541static int irda_find_lsap_sel(struct irda_sock *self, char *name)
542{
543 IRDA_DEBUG(2, "%s(%p, %s)\n", __func__, self, name);
544
545 if (self->iriap) {
546 IRDA_WARNING("%s(): busy with a previous query\n",
547 __func__);
548 return -EBUSY;
549 }
550
551 self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
552 irda_getvalue_confirm);
553 if(self->iriap == NULL)
554 return -ENOMEM;
555
556
557 self->errno = -EHOSTUNREACH;
558
559
560 iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr,
561 name, "IrDA:TinyTP:LsapSel");
562
563
564 if (wait_event_interruptible(self->query_wait, (self->iriap==NULL)))
565
566 return -EHOSTUNREACH;
567
568
569 if (self->errno)
570 {
571
572 if((self->errno == IAS_CLASS_UNKNOWN) ||
573 (self->errno == IAS_ATTRIB_UNKNOWN))
574 return -EADDRNOTAVAIL;
575 else
576 return -EHOSTUNREACH;
577 }
578
579
580 switch (self->ias_result->type) {
581 case IAS_INTEGER:
582 IRDA_DEBUG(4, "%s() int=%d\n",
583 __func__, self->ias_result->t.integer);
584
585 if (self->ias_result->t.integer != -1)
586 self->dtsap_sel = self->ias_result->t.integer;
587 else
588 self->dtsap_sel = 0;
589 break;
590 default:
591 self->dtsap_sel = 0;
592 IRDA_DEBUG(0, "%s(), bad type!\n", __func__);
593 break;
594 }
595 if (self->ias_result)
596 irias_delete_value(self->ias_result);
597
598 if (self->dtsap_sel)
599 return 0;
600
601 return -EADDRNOTAVAIL;
602}

/*
 * Function irda_discover_daddr_and_lsap_sel (self, name)
 *
 *    Walk the discovery log and query each device for the requested
 *    service name. Succeeds only if exactly one device offers it, in
 *    which case self->daddr and self->dtsap_sel are filled in.
 */
621static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
622{
623 discinfo_t *discoveries;
624 int number;
625 int i;
626 int err = -ENETUNREACH;
627 __u32 daddr = DEV_ADDR_ANY;
628 __u8 dtsap_sel = 0x0;
629
630 IRDA_DEBUG(2, "%s(), name=%s\n", __func__, name);
631
632
633
634
635
636 discoveries = irlmp_get_discoveries(&number, self->mask.word,
637 self->nslots);
638
639 if (discoveries == NULL)
640 return -ENETUNREACH;
641
642
643
644
645
646
647 for(i = 0; i < number; i++) {
648
649 self->daddr = discoveries[i].daddr;
650 self->saddr = 0x0;
651 IRDA_DEBUG(1, "%s(), trying daddr = %08x\n",
652 __func__, self->daddr);
653
654
655 err = irda_find_lsap_sel(self, name);
656 switch (err) {
657 case 0:
658
659 if(daddr != DEV_ADDR_ANY) {
660 IRDA_DEBUG(1, "%s(), discovered service ''%s'' in two different devices !!!\n",
661 __func__, name);
662 self->daddr = DEV_ADDR_ANY;
663 kfree(discoveries);
664 return -ENOTUNIQ;
665 }
666
667 daddr = self->daddr;
668 dtsap_sel = self->dtsap_sel;
669 break;
670 case -EADDRNOTAVAIL:
671
672 break;
673 default:
674
675 IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__);
676 self->daddr = DEV_ADDR_ANY;
677 kfree(discoveries);
678 return -EHOSTUNREACH;
679 break;
680 }
681 }
682
683 kfree(discoveries);
684
685
686 if(daddr == DEV_ADDR_ANY) {
687 IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n",
688 __func__, name);
689 self->daddr = DEV_ADDR_ANY;
690 return -EADDRNOTAVAIL;
691 }
692
693
694 self->daddr = daddr;
695 self->saddr = 0x0;
696 self->dtsap_sel = dtsap_sel;
697
698 IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at address %08x\n",
699 __func__, name, self->daddr);
700
701 return 0;
702}

/*
 * Function irda_getname (sock, uaddr, uaddr_len, peer)
 *
 *    Return the local or peer address for getsockname()/getpeername().
 */
710static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
711 int *uaddr_len, int peer)
712{
713 struct sockaddr_irda saddr;
714 struct sock *sk = sock->sk;
715 struct irda_sock *self = irda_sk(sk);
716
717 memset(&saddr, 0, sizeof(saddr));
718 if (peer) {
719 if (sk->sk_state != TCP_ESTABLISHED)
720 return -ENOTCONN;
721
722 saddr.sir_family = AF_IRDA;
723 saddr.sir_lsap_sel = self->dtsap_sel;
724 saddr.sir_addr = self->daddr;
725 } else {
726 saddr.sir_family = AF_IRDA;
727 saddr.sir_lsap_sel = self->stsap_sel;
728 saddr.sir_addr = self->saddr;
729 }
730
731 IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel);
732 IRDA_DEBUG(1, "%s(), addr = %08x\n", __func__, saddr.sir_addr);
733
734
735 *uaddr_len = sizeof (struct sockaddr_irda);
736 memcpy(uaddr, &saddr, *uaddr_len);
737
738 return 0;
739}

/*
 * Function irda_listen (sock, backlog)
 *
 *    Move the socket into the listening state.
 */
747static int irda_listen(struct socket *sock, int backlog)
748{
749 struct sock *sk = sock->sk;
750 int err = -EOPNOTSUPP;
751
752 IRDA_DEBUG(2, "%s()\n", __func__);
753
754 lock_sock(sk);
755
756 if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
757 (sk->sk_type != SOCK_DGRAM))
758 goto out;
759
760 if (sk->sk_state != TCP_LISTEN) {
761 sk->sk_max_ack_backlog = backlog;
762 sk->sk_state = TCP_LISTEN;
763
764 err = 0;
765 }
766out:
767 release_sock(sk);
768
769 return err;
770}

/*
 * Function irda_bind (sock, uaddr, addr_len)
 *
 *    Bind to a local service name / TSAP selector and advertise it
 *    through an "IrDA:TinyTP:LsapSel" IAS attribute. Ultra sockets are
 *    instead bound to the PID passed in sir_lsap_sel.
 */
778static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
779{
780 struct sock *sk = sock->sk;
781 struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr;
782 struct irda_sock *self = irda_sk(sk);
783 int err;
784
785 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
786
787 if (addr_len != sizeof(struct sockaddr_irda))
788 return -EINVAL;
789
790 lock_sock(sk);
791#ifdef CONFIG_IRDA_ULTRA
792
793 if ((sk->sk_type == SOCK_DGRAM) &&
794 (sk->sk_protocol == IRDAPROTO_ULTRA)) {
795 self->pid = addr->sir_lsap_sel;
796 err = -EOPNOTSUPP;
797 if (self->pid & 0x80) {
798 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__);
799 goto out;
800 }
801 err = irda_open_lsap(self, self->pid);
802 if (err < 0)
803 goto out;
804
805
806 sock->state = SS_CONNECTED;
807 sk->sk_state = TCP_ESTABLISHED;
808 err = 0;
809
810 goto out;
811 }
812#endif
813
814 self->ias_obj = irias_new_object(addr->sir_name, jiffies);
815 err = -ENOMEM;
816 if (self->ias_obj == NULL)
817 goto out;
818
819 err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
820 if (err < 0) {
821 irias_delete_object(self->ias_obj);
822 self->ias_obj = NULL;
823 goto out;
824 }
825
826
827 irias_add_integer_attrib(self->ias_obj, "IrDA:TinyTP:LsapSel",
828 self->stsap_sel, IAS_KERNEL_ATTR);
829 irias_insert_object(self->ias_obj);
830
831 err = 0;
832out:
833 release_sock(sk);
834 return err;
835}

/*
 * Function irda_accept (sock, newsock, flags)
 *
 *    Wait for an incoming connection (queued by irda_connect_indication)
 *    and hand it over to a freshly created socket.
 */
843static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
844{
845 struct sock *sk = sock->sk;
846 struct irda_sock *new, *self = irda_sk(sk);
847 struct sock *newsk;
848 struct sk_buff *skb;
849 int err;
850
851 IRDA_DEBUG(2, "%s()\n", __func__);
852
853 err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
854 if (err)
855 return err;
856
857 err = -EINVAL;
858
859 lock_sock(sk);
860 if (sock->state != SS_UNCONNECTED)
861 goto out;
862
863 if ((sk = sock->sk) == NULL)
864 goto out;
865
866 err = -EOPNOTSUPP;
867 if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
868 (sk->sk_type != SOCK_DGRAM))
869 goto out;
870
871 err = -EINVAL;
872 if (sk->sk_state != TCP_LISTEN)
873 goto out;
        /* The receive queue of a listening socket holds the connect skbs
         * queued by irda_connect_indication(); wait until one arrives,
         * or bail out with -EWOULDBLOCK on non-blocking sockets.
         */
888 while (1) {
889 skb = skb_dequeue(&sk->sk_receive_queue);
890 if (skb)
891 break;
892
893
894 err = -EWOULDBLOCK;
895 if (flags & O_NONBLOCK)
896 goto out;
897
898 err = wait_event_interruptible(*(sk_sleep(sk)),
899 skb_peek(&sk->sk_receive_queue));
900 if (err)
901 goto out;
902 }
903
904 newsk = newsock->sk;
905 err = -EIO;
906 if (newsk == NULL)
907 goto out;
908
909 newsk->sk_state = TCP_ESTABLISHED;
910
911 new = irda_sk(newsk);
912
913
914 new->tsap = irttp_dup(self->tsap, new);
915 err = -EPERM;
916 if (!new->tsap) {
917 IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
918 kfree_skb(skb);
919 goto out;
920 }
921
922 new->stsap_sel = new->tsap->stsap_sel;
923 new->dtsap_sel = new->tsap->dtsap_sel;
924 new->saddr = irttp_get_saddr(new->tsap);
925 new->daddr = irttp_get_daddr(new->tsap);
926
927 new->max_sdu_size_tx = self->max_sdu_size_tx;
928 new->max_sdu_size_rx = self->max_sdu_size_rx;
929 new->max_data_size = self->max_data_size;
930 new->max_header_size = self->max_header_size;
931
932 memcpy(&new->qos_tx, &self->qos_tx, sizeof(struct qos_info));
933
934
935 irttp_listen(self->tsap);
936
937 kfree_skb(skb);
938 sk->sk_ack_backlog--;
939
940 newsock->state = SS_CONNECTED;
941
942 irda_connect_response(new);
943 err = 0;
944out:
945 release_sock(sk);
946 return err;
947}
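
/*
 * Userspace sketch (not part of this file) of the server side of the
 * bind/listen/accept path above. "MyService" is a made-up service name;
 * field and constant names follow <linux/irda.h>, where LSAP_ANY asks
 * the stack to pick a TSAP selector for us.
 *
 *	struct sockaddr_irda me;
 *	int fd = socket(AF_IRDA, SOCK_STREAM, 0);
 *
 *	memset(&me, 0, sizeof(me));
 *	me.sir_family = AF_IRDA;
 *	me.sir_lsap_sel = LSAP_ANY;
 *	strncpy(me.sir_name, "MyService", sizeof(me.sir_name));
 *	bind(fd, (struct sockaddr *) &me, sizeof(me));
 *	listen(fd, 8);
 *	accept(fd, NULL, NULL);
 */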

/*
 * Function irda_connect (sock, uaddr, addr_len, flags)
 *
 *    Connect to a peer. If no device address is given, discover the
 *    (single) device offering the requested service name; then resolve
 *    the destination TSAP via IAS if needed, send the TTP connect
 *    request and, unless O_NONBLOCK is set, sleep until the handshake
 *    finishes.
 */
969static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
970 int addr_len, int flags)
971{
972 struct sock *sk = sock->sk;
973 struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr;
974 struct irda_sock *self = irda_sk(sk);
975 int err;
976
977 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
978
979 lock_sock(sk);
980
981 err = -ESOCKTNOSUPPORT;
982 if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA))
983 goto out;
984
985 if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
986 sock->state = SS_CONNECTED;
987 err = 0;
988 goto out;
989 }
990
991 if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
992 sock->state = SS_UNCONNECTED;
993 err = -ECONNREFUSED;
994 goto out;
995 }
996
997 err = -EISCONN;
998 if (sk->sk_state == TCP_ESTABLISHED)
999 goto out;
1000
1001 sk->sk_state = TCP_CLOSE;
1002 sock->state = SS_UNCONNECTED;
1003
1004 err = -EINVAL;
1005 if (addr_len != sizeof(struct sockaddr_irda))
1006 goto out;
1007
1008
1009 if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) {
1010
1011 err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name);
1012 if (err) {
1013 IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__);
1014 goto out;
1015 }
1016 } else {
1017
1018 self->daddr = addr->sir_addr;
1019 IRDA_DEBUG(1, "%s(), daddr = %08x\n", __func__, self->daddr);
1020
1021
1022
1023
1024 if((addr->sir_name[0] != '\0') ||
1025 (addr->sir_lsap_sel >= 0x70)) {
1026
1027 err = irda_find_lsap_sel(self, addr->sir_name);
1028 if (err) {
1029 IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
1030 goto out;
1031 }
1032 } else {
1033
1034
1035
1036
1037 self->dtsap_sel = addr->sir_lsap_sel;
1038 }
1039 }
1040
1041
1042 if (!self->tsap)
1043 irda_open_tsap(self, LSAP_ANY, addr->sir_name);
1044
1045
1046 sock->state = SS_CONNECTING;
1047 sk->sk_state = TCP_SYN_SENT;
1048
1049
1050 err = irttp_connect_request(self->tsap, self->dtsap_sel,
1051 self->saddr, self->daddr, NULL,
1052 self->max_sdu_size_rx, NULL);
1053 if (err) {
1054 IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
1055 goto out;
1056 }
1057
1058
1059 err = -EINPROGRESS;
1060 if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
1061 goto out;
1062
1063 err = -ERESTARTSYS;
1064 if (wait_event_interruptible(*(sk_sleep(sk)),
1065 (sk->sk_state != TCP_SYN_SENT)))
1066 goto out;
1067
1068 if (sk->sk_state != TCP_ESTABLISHED) {
1069 sock->state = SS_UNCONNECTED;
1070 if (sk->sk_prot->disconnect(sk, flags))
1071 sock->state = SS_DISCONNECTING;
1072 err = sock_error(sk);
1073 if (!err)
1074 err = -ECONNRESET;
1075 goto out;
1076 }
1077
1078 sock->state = SS_CONNECTED;
1079
1080
1081 self->saddr = irttp_get_saddr(self->tsap);
1082 err = 0;
1083out:
1084 release_sock(sk);
1085 return err;
1086}
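
/*
 * Userspace sketch (not part of this file) of a client using the
 * name-based auto-discovery path above: leaving sir_addr at 0 makes
 * irda_connect() call irda_discover_daddr_and_lsap_sel(). "MyService"
 * is a made-up service name; field names follow <linux/irda.h>.
 *
 *	struct sockaddr_irda peer;
 *	int fd = socket(AF_IRDA, SOCK_STREAM, 0);
 *
 *	memset(&peer, 0, sizeof(peer));
 *	peer.sir_family = AF_IRDA;
 *	strncpy(peer.sir_name, "MyService", sizeof(peer.sir_name));
 *	connect(fd, (struct sockaddr *) &peer, sizeof(peer));
 */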
1087
1088static struct proto irda_proto = {
1089 .name = "IRDA",
1090 .owner = THIS_MODULE,
1091 .obj_size = sizeof(struct irda_sock),
1092};

/*
 * Function irda_create (net, sock, protocol, kern)
 *
 *    Create an IrDA socket and select the proto_ops and SAR policy
 *    matching the requested type and protocol.
 */
1100static int irda_create(struct net *net, struct socket *sock, int protocol,
1101 int kern)
1102{
1103 struct sock *sk;
1104 struct irda_sock *self;
1105
1106 IRDA_DEBUG(2, "%s()\n", __func__);
1107
1108 if (net != &init_net)
1109 return -EAFNOSUPPORT;
1110
1111
1112 switch (sock->type) {
1113 case SOCK_STREAM:
1114 case SOCK_SEQPACKET:
1115 case SOCK_DGRAM:
1116 break;
1117 default:
1118 return -ESOCKTNOSUPPORT;
1119 }
1120
1121
1122 sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto);
1123 if (sk == NULL)
1124 return -ENOMEM;
1125
1126 self = irda_sk(sk);
1127 IRDA_DEBUG(2, "%s() : self is %p\n", __func__, self);
1128
1129 init_waitqueue_head(&self->query_wait);
1130
1131 switch (sock->type) {
1132 case SOCK_STREAM:
1133 sock->ops = &irda_stream_ops;
1134 self->max_sdu_size_rx = TTP_SAR_DISABLE;
1135 break;
1136 case SOCK_SEQPACKET:
1137 sock->ops = &irda_seqpacket_ops;
1138 self->max_sdu_size_rx = TTP_SAR_UNBOUND;
1139 break;
1140 case SOCK_DGRAM:
1141 switch (protocol) {
1142#ifdef CONFIG_IRDA_ULTRA
1143 case IRDAPROTO_ULTRA:
1144 sock->ops = &irda_ultra_ops;
1145
1146
1147 self->max_data_size = ULTRA_MAX_DATA - LMP_PID_HEADER;
1148 self->max_header_size = IRDA_MAX_HEADER + LMP_PID_HEADER;
1149 break;
1150#endif
1151 case IRDAPROTO_UNITDATA:
1152 sock->ops = &irda_dgram_ops;
1153
1154 self->max_sdu_size_rx = TTP_SAR_UNBOUND;
1155 break;
1156 default:
1157 sk_free(sk);
1158 return -ESOCKTNOSUPPORT;
1159 }
1160 break;
1161 default:
1162 sk_free(sk);
1163 return -ESOCKTNOSUPPORT;
1164 }
1165
1166
1167 sock_init_data(sock, sk);
1168 sk->sk_family = PF_IRDA;
1169 sk->sk_protocol = protocol;
1170
1171
1172 self->ckey = irlmp_register_client(0, NULL, NULL, NULL);
1173 self->mask.word = 0xffff;
1174 self->rx_flow = self->tx_flow = FLOW_START;
1175 self->nslots = DISCOVERY_DEFAULT_SLOTS;
1176 self->daddr = DEV_ADDR_ANY;
1177 self->saddr = 0x0;
1178 return 0;
1179}
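
/*
 * The flavours created above map to the following userspace calls:
 *
 *	socket(AF_IRDA, SOCK_STREAM, 0);		  TTP stream, SAR disabled
 *	socket(AF_IRDA, SOCK_SEQPACKET, 0);		  TTP seqpacket, SAR unbound
 *	socket(AF_IRDA, SOCK_DGRAM, IRDAPROTO_UNITDATA);  TTP unit data
 *	socket(AF_IRDA, SOCK_DGRAM, IRDAPROTO_ULTRA);	  connectionless Ultra
 *							  (CONFIG_IRDA_ULTRA only)
 */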

/*
 * Function irda_destroy_socket (self)
 *
 *    Release everything attached to the socket: discovery registrations,
 *    the IAS object, any pending IAS query, the TSAP and, for Ultra
 *    sockets, the LSAP.
 */
1187static void irda_destroy_socket(struct irda_sock *self)
1188{
1189 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
1190
1191
1192 irlmp_unregister_client(self->ckey);
1193 irlmp_unregister_service(self->skey);
1194
1195
1196 if (self->ias_obj) {
1197 irias_delete_object(self->ias_obj);
1198 self->ias_obj = NULL;
1199 }
1200
1201 if (self->iriap) {
1202 iriap_close(self->iriap);
1203 self->iriap = NULL;
1204 }
1205
1206 if (self->tsap) {
1207 irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
1208 irttp_close_tsap(self->tsap);
1209 self->tsap = NULL;
1210 }
1211#ifdef CONFIG_IRDA_ULTRA
1212 if (self->lsap) {
1213 irlmp_close_lsap(self->lsap);
1214 self->lsap = NULL;
1215 }
1216#endif
1217}

/*
 * Function irda_release (sock)
 *
 *    Close the socket and free its resources.
 */
1222static int irda_release(struct socket *sock)
1223{
1224 struct sock *sk = sock->sk;
1225
1226 IRDA_DEBUG(2, "%s()\n", __func__);
1227
1228 if (sk == NULL)
1229 return 0;
1230
1231 lock_sock(sk);
1232 sk->sk_state = TCP_CLOSE;
1233 sk->sk_shutdown |= SEND_SHUTDOWN;
1234 sk->sk_state_change(sk);
1235
1236
1237 irda_destroy_socket(irda_sk(sk));
1238
1239 sock_orphan(sk);
1240 sock->sk = NULL;
1241 release_sock(sk);
1242
1243
1244 skb_queue_purge(&sk->sk_receive_queue);
1245
1246
1247
1248 sock_put(sk);
1276 return 0;
1277}

/*
 * Function irda_sendmsg (iocb, sock, msg, len)
 *
 *    Send data on a stream or seqpacket socket. Waits for the IrTTP TX
 *    flow to be on, caps the length at max_data_size and hands the
 *    frame to irttp_data_request().
 */
1286static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
1287 struct msghdr *msg, size_t len)
1288{
1289 struct sock *sk = sock->sk;
1290 struct irda_sock *self;
1291 struct sk_buff *skb;
1292 int err = -EPIPE;
1293
1294 IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
1295
1296
1297 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
1298 MSG_NOSIGNAL)) {
1299 return -EINVAL;
1300 }
1301
1302 lock_sock(sk);
1303
1304 if (sk->sk_shutdown & SEND_SHUTDOWN)
1305 goto out_err;
1306
1307 if (sk->sk_state != TCP_ESTABLISHED) {
1308 err = -ENOTCONN;
1309 goto out;
1310 }
1311
1312 self = irda_sk(sk);
1313
1314
1315
1316 if (wait_event_interruptible(*(sk_sleep(sk)),
1317 (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) {
1318 err = -ERESTARTSYS;
1319 goto out;
1320 }
1321
1322
1323 if (sk->sk_state != TCP_ESTABLISHED) {
1324 err = -ENOTCONN;
1325 goto out;
1326 }
1327
1328
1329 if (len > self->max_data_size) {
1330 IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n",
1331 __func__, len, self->max_data_size);
1332 len = self->max_data_size;
1333 }
1334
1335 skb = sock_alloc_send_skb(sk, len + self->max_header_size + 16,
1336 msg->msg_flags & MSG_DONTWAIT, &err);
1337 if (!skb)
1338 goto out_err;
1339
1340 skb_reserve(skb, self->max_header_size + 16);
1341 skb_reset_transport_header(skb);
1342 skb_put(skb, len);
1343 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
1344 if (err) {
1345 kfree_skb(skb);
1346 goto out_err;
1347 }
1348
1349
1350
1351
1352
1353 err = irttp_data_request(self->tsap, skb);
1354 if (err) {
1355 IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
1356 goto out_err;
1357 }
1358
1359 release_sock(sk);
1360
1361 return len;
1362
1363out_err:
1364 err = sk_stream_error(sk, msg->msg_flags, err);
1365out:
1366 release_sock(sk);
1367 return err;
1368
1369}

/*
 * Function irda_recvmsg_dgram (iocb, sock, msg, size, flags)
 *
 *    Receive a single TTP SDU; restart the IrTTP RX flow once enough
 *    receive buffer space has been freed.
 */
1377static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
1378 struct msghdr *msg, size_t size, int flags)
1379{
1380 struct sock *sk = sock->sk;
1381 struct irda_sock *self = irda_sk(sk);
1382 struct sk_buff *skb;
1383 size_t copied;
1384 int err;
1385
1386 IRDA_DEBUG(4, "%s()\n", __func__);
1387
1388 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1389 flags & MSG_DONTWAIT, &err);
1390 if (!skb)
1391 return err;
1392
1393 skb_reset_transport_header(skb);
1394 copied = skb->len;
1395
1396 if (copied > size) {
                IRDA_DEBUG(2, "%s(), Received truncated frame (%zd > %zd)!\n",
                           __func__, copied, size);
1399 copied = size;
1400 msg->msg_flags |= MSG_TRUNC;
1401 }
1402 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1403
1404 skb_free_datagram(sk, skb);
1405
1406
1407
1408
1409
1410
1411
1412 if (self->rx_flow == FLOW_STOP) {
1413 if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
1414 IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__);
1415 self->rx_flow = FLOW_START;
1416 irttp_flow_request(self->tsap, FLOW_START);
1417 }
1418 }
1419
1420 return copied;
1421}

/*
 * Function irda_recvmsg_stream (iocb, sock, msg, size, flags)
 *
 *    Stream receive: copy data from queued skbs, blocking until at
 *    least the receive low-water mark has been copied.
 */
1426static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1427 struct msghdr *msg, size_t size, int flags)
1428{
1429 struct sock *sk = sock->sk;
1430 struct irda_sock *self = irda_sk(sk);
1431 int noblock = flags & MSG_DONTWAIT;
1432 size_t copied = 0;
1433 int target, err;
1434 long timeo;
1435
1436 IRDA_DEBUG(3, "%s()\n", __func__);
1437
1438 if ((err = sock_error(sk)) < 0)
1439 return err;
1440
1441 if (sock->flags & __SO_ACCEPTCON)
1442 return -EINVAL;
1443
        err = -EOPNOTSUPP;
1445 if (flags & MSG_OOB)
1446 return -EOPNOTSUPP;
1447
1448 err = 0;
1449 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
1450 timeo = sock_rcvtimeo(sk, noblock);
1451
1452 do {
1453 int chunk;
1454 struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
1455
1456 if (skb == NULL) {
1457 DEFINE_WAIT(wait);
1458 err = 0;
1459
1460 if (copied >= target)
1461 break;
1462
1463 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1464
1465
1466
1467
1468 err = sock_error(sk);
1469 if (err)
1470 ;
1471 else if (sk->sk_shutdown & RCV_SHUTDOWN)
1472 ;
1473 else if (noblock)
1474 err = -EAGAIN;
1475 else if (signal_pending(current))
1476 err = sock_intr_errno(timeo);
1477 else if (sk->sk_state != TCP_ESTABLISHED)
1478 err = -ENOTCONN;
1479 else if (skb_peek(&sk->sk_receive_queue) == NULL)
1480
1481 schedule();
1482
1483 finish_wait(sk_sleep(sk), &wait);
1484
1485 if (err)
1486 return err;
1487 if (sk->sk_shutdown & RCV_SHUTDOWN)
1488 break;
1489
1490 continue;
1491 }
1492
1493 chunk = min_t(unsigned int, skb->len, size);
1494 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1495 skb_queue_head(&sk->sk_receive_queue, skb);
1496 if (copied == 0)
1497 copied = -EFAULT;
1498 break;
1499 }
1500 copied += chunk;
1501 size -= chunk;
1502
1503
1504 if (!(flags & MSG_PEEK)) {
1505 skb_pull(skb, chunk);
1506
1507
1508 if (skb->len) {
1509 IRDA_DEBUG(1, "%s(), back on q!\n",
1510 __func__);
1511 skb_queue_head(&sk->sk_receive_queue, skb);
1512 break;
1513 }
1514
1515 kfree_skb(skb);
1516 } else {
1517 IRDA_DEBUG(0, "%s() questionable!?\n", __func__);
1518
1519
1520 skb_queue_head(&sk->sk_receive_queue, skb);
1521 break;
1522 }
1523 } while (size);
1524
1525
1526
1527
1528
1529
1530
1531 if (self->rx_flow == FLOW_STOP) {
1532 if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
1533 IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__);
1534 self->rx_flow = FLOW_START;
1535 irttp_flow_request(self->tsap, FLOW_START);
1536 }
1537 }
1538
1539 return copied;
1540}

/*
 * Function irda_sendmsg_dgram (iocb, sock, msg, len)
 *
 *    Send one unreliable TTP unit-data frame on a SOCK_DGRAM
 *    (IRDAPROTO_UNITDATA) socket.
 */
1549static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
1550 struct msghdr *msg, size_t len)
1551{
1552 struct sock *sk = sock->sk;
1553 struct irda_sock *self;
1554 struct sk_buff *skb;
1555 int err;
1556
1557 IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
1558
1559 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
1560 return -EINVAL;
1561
1562 lock_sock(sk);
1563
1564 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1565 send_sig(SIGPIPE, current, 0);
1566 err = -EPIPE;
1567 goto out;
1568 }
1569
1570 err = -ENOTCONN;
1571 if (sk->sk_state != TCP_ESTABLISHED)
1572 goto out;
1573
1574 self = irda_sk(sk);
1575
1576
1577
1578
1579
1580 if (len > self->max_data_size) {
                IRDA_DEBUG(0, "%s(), Warning, too much data! "
                           "Chopping frame from %zd to %d bytes!\n",
                           __func__, len, self->max_data_size);
1584 len = self->max_data_size;
1585 }
1586
1587 skb = sock_alloc_send_skb(sk, len + self->max_header_size,
1588 msg->msg_flags & MSG_DONTWAIT, &err);
1589 err = -ENOBUFS;
1590 if (!skb)
1591 goto out;
1592
1593 skb_reserve(skb, self->max_header_size);
1594 skb_reset_transport_header(skb);
1595
1596 IRDA_DEBUG(4, "%s(), appending user data\n", __func__);
1597 skb_put(skb, len);
1598 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
1599 if (err) {
1600 kfree_skb(skb);
1601 goto out;
1602 }
1603
1604
1605
1606
1607
1608 err = irttp_udata_request(self->tsap, skb);
1609 if (err) {
1610 IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
1611 goto out;
1612 }
1613
1614 release_sock(sk);
1615 return len;
1616
1617out:
1618 release_sock(sk);
1619 return err;
1620}

/*
 * Function irda_sendmsg_ultra (iocb, sock, msg, len)
 *
 *    Send a connectionless Ultra frame, either to the PID given in
 *    msg_name or on the PID the socket was bound to.
 */
1628#ifdef CONFIG_IRDA_ULTRA
1629static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1630 struct msghdr *msg, size_t len)
1631{
1632 struct sock *sk = sock->sk;
1633 struct irda_sock *self;
1634 __u8 pid = 0;
1635 int bound = 0;
1636 struct sk_buff *skb;
1637 int err;
1638
1639 IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
1640
1641 err = -EINVAL;
1642 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
1643 return -EINVAL;
1644
1645 lock_sock(sk);
1646
1647 err = -EPIPE;
1648 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1649 send_sig(SIGPIPE, current, 0);
1650 goto out;
1651 }
1652
1653 self = irda_sk(sk);
1654
1655
1656 if (msg->msg_name) {
1657 struct sockaddr_irda *addr = (struct sockaddr_irda *) msg->msg_name;
1658 err = -EINVAL;
1659
1660 if (msg->msg_namelen < sizeof(*addr))
1661 goto out;
1662 if (addr->sir_family != AF_IRDA)
1663 goto out;
1664
1665 pid = addr->sir_lsap_sel;
1666 if (pid & 0x80) {
1667 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__);
1668 err = -EOPNOTSUPP;
1669 goto out;
1670 }
1671 } else {
1672
1673
1674 if ((self->lsap == NULL) ||
1675 (sk->sk_state != TCP_ESTABLISHED)) {
1676 IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n",
1677 __func__);
1678 err = -ENOTCONN;
1679 goto out;
1680 }
1681
1682 bound = 1;
1683 }
1684
1685
1686
1687
1688
1689 if (len > self->max_data_size) {
                IRDA_DEBUG(0, "%s(), Warning, too much data! "
                           "Chopping frame from %zd to %d bytes!\n",
                           __func__, len, self->max_data_size);
1693 len = self->max_data_size;
1694 }
1695
1696 skb = sock_alloc_send_skb(sk, len + self->max_header_size,
1697 msg->msg_flags & MSG_DONTWAIT, &err);
1698 err = -ENOBUFS;
1699 if (!skb)
1700 goto out;
1701
1702 skb_reserve(skb, self->max_header_size);
1703 skb_reset_transport_header(skb);
1704
1705 IRDA_DEBUG(4, "%s(), appending user data\n", __func__);
1706 skb_put(skb, len);
1707 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
1708 if (err) {
1709 kfree_skb(skb);
1710 goto out;
1711 }
1712
1713 err = irlmp_connless_data_request((bound ? self->lsap : NULL),
1714 skb, pid);
1715 if (err)
1716 IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
1717out:
1718 release_sock(sk);
1719 return err ? : len;
1720}
1721#endif
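
/*
 * Userspace sketch (not part of this file) of an unbound Ultra sender
 * using the path above: the "PID" travels in sir_lsap_sel, and 0x42 is
 * an arbitrary example value (the top bit must be clear).
 *
 *	struct sockaddr_irda to;
 *	int fd = socket(AF_IRDA, SOCK_DGRAM, IRDAPROTO_ULTRA);
 *
 *	memset(&to, 0, sizeof(to));
 *	to.sir_family = AF_IRDA;
 *	to.sir_lsap_sel = 0x42;
 *	sendto(fd, "hi", 2, 0, (struct sockaddr *) &to, sizeof(to));
 */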

/*
 * Function irda_shutdown (sock, how)
 *
 *    Shut the connection down: disconnect and close the TSAP, then
 *    reset flow state and addresses.
 */
1726static int irda_shutdown(struct socket *sock, int how)
1727{
1728 struct sock *sk = sock->sk;
1729 struct irda_sock *self = irda_sk(sk);
1730
1731 IRDA_DEBUG(1, "%s(%p)\n", __func__, self);
1732
1733 lock_sock(sk);
1734
1735 sk->sk_state = TCP_CLOSE;
1736 sk->sk_shutdown |= SEND_SHUTDOWN;
1737 sk->sk_state_change(sk);
1738
1739 if (self->iriap) {
1740 iriap_close(self->iriap);
1741 self->iriap = NULL;
1742 }
1743
1744 if (self->tsap) {
1745 irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
1746 irttp_close_tsap(self->tsap);
1747 self->tsap = NULL;
1748 }
1749
1750
1751 self->rx_flow = self->tx_flow = FLOW_START;
1752 self->daddr = DEV_ADDR_ANY;
1753 self->saddr = 0x0;
1754
1755 release_sock(sk);
1756
1757 return 0;
1758}

/*
 * Function irda_poll (file, sock, wait)
 *
 *    poll()/select() support; writability also depends on the current
 *    IrTTP TX flow state.
 */
1763static unsigned int irda_poll(struct file * file, struct socket *sock,
1764 poll_table *wait)
1765{
1766 struct sock *sk = sock->sk;
1767 struct irda_sock *self = irda_sk(sk);
1768 unsigned int mask;
1769
1770 IRDA_DEBUG(4, "%s()\n", __func__);
1771
1772 poll_wait(file, sk_sleep(sk), wait);
1773 mask = 0;
1774
1775
1776 if (sk->sk_err)
1777 mask |= POLLERR;
1778 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1779 IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__);
1780 mask |= POLLHUP;
1781 }
1782
1783
1784 if (!skb_queue_empty(&sk->sk_receive_queue)) {
1785 IRDA_DEBUG(4, "Socket is readable\n");
1786 mask |= POLLIN | POLLRDNORM;
1787 }
1788
1789
1790 switch (sk->sk_type) {
1791 case SOCK_STREAM:
1792 if (sk->sk_state == TCP_CLOSE) {
1793 IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__);
1794 mask |= POLLHUP;
1795 }
1796
1797 if (sk->sk_state == TCP_ESTABLISHED) {
1798 if ((self->tx_flow == FLOW_START) &&
1799 sock_writeable(sk))
1800 {
1801 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1802 }
1803 }
1804 break;
1805 case SOCK_SEQPACKET:
1806 if ((self->tx_flow == FLOW_START) &&
1807 sock_writeable(sk))
1808 {
1809 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1810 }
1811 break;
1812 case SOCK_DGRAM:
1813 if (sock_writeable(sk))
1814 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1815 break;
1816 default:
1817 break;
1818 }
1819
1820 return mask;
1821}

/*
 * Function irda_ioctl (sock, cmd, arg)
 *
 *    Socket ioctls: pending output, size of the next frame, timestamps.
 */
1826static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1827{
1828 struct sock *sk = sock->sk;
1829 int err;
1830
1831 IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd);
1832
1833 err = -EINVAL;
1834 switch (cmd) {
1835 case TIOCOUTQ: {
1836 long amount;
1837
1838 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1839 if (amount < 0)
1840 amount = 0;
1841 err = put_user(amount, (unsigned int __user *)arg);
1842 break;
1843 }
1844
1845 case TIOCINQ: {
1846 struct sk_buff *skb;
1847 long amount = 0L;
1848
1849 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1850 amount = skb->len;
1851 err = put_user(amount, (unsigned int __user *)arg);
1852 break;
1853 }
1854
1855 case SIOCGSTAMP:
1856 if (sk != NULL)
1857 err = sock_get_timestamp(sk, (struct timeval __user *)arg);
1858 break;
1859
1860 case SIOCGIFADDR:
1861 case SIOCSIFADDR:
1862 case SIOCGIFDSTADDR:
1863 case SIOCSIFDSTADDR:
1864 case SIOCGIFBRDADDR:
1865 case SIOCSIFBRDADDR:
1866 case SIOCGIFNETMASK:
1867 case SIOCSIFNETMASK:
1868 case SIOCGIFMETRIC:
1869 case SIOCSIFMETRIC:
1870 break;
1871 default:
1872 IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__);
1873 err = -ENOIOCTLCMD;
1874 }
1875
1876 return err;
1877}
1878
1879#ifdef CONFIG_COMPAT
/*
 * Function irda_compat_ioctl (sock, cmd, arg)
 *
 *    No 32-bit compat translation is done here yet.
 */
1883static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1884{
1885
1886
1887
1888 return -ENOIOCTLCMD;
1889}
1890#endif

/*
 * Function irda_setsockopt (sock, level, optname, optval, optlen)
 *
 *    Set IrDA socket options (SOL_IRLMP): add or delete IAS attributes,
 *    set the receive max SDU size, the advertised service hints and the
 *    discovery hint mask.
 */
1898static int irda_setsockopt(struct socket *sock, int level, int optname,
1899 char __user *optval, unsigned int optlen)
1900{
1901 struct sock *sk = sock->sk;
1902 struct irda_sock *self = irda_sk(sk);
1903 struct irda_ias_set *ias_opt;
1904 struct ias_object *ias_obj;
1905 struct ias_attrib * ias_attr;
1906 int opt, free_ias = 0, err = 0;
1907
1908 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
1909
1910 if (level != SOL_IRLMP)
1911 return -ENOPROTOOPT;
1912
1913 lock_sock(sk);
1914
1915 switch (optname) {
1916 case IRLMP_IAS_SET:
1917
1918
1919
1920
1921
1922
1923
1924 if (optlen != sizeof(struct irda_ias_set)) {
1925 err = -EINVAL;
1926 goto out;
1927 }
1928
1929 ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC);
1930 if (ias_opt == NULL) {
1931 err = -ENOMEM;
1932 goto out;
1933 }
1934
1935
1936 if (copy_from_user(ias_opt, optval, optlen)) {
1937 kfree(ias_opt);
1938 err = -EFAULT;
1939 goto out;
1940 }
1941
1942
1943
1944
1945
1946 if(ias_opt->irda_class_name[0] == '\0') {
1947 if(self->ias_obj == NULL) {
1948 kfree(ias_opt);
1949 err = -EINVAL;
1950 goto out;
1951 }
1952 ias_obj = self->ias_obj;
1953 } else
1954 ias_obj = irias_find_object(ias_opt->irda_class_name);
1955
1956
1957
1958
1959 if((!capable(CAP_NET_ADMIN)) &&
1960 ((ias_obj == NULL) || (ias_obj != self->ias_obj))) {
1961 kfree(ias_opt);
1962 err = -EPERM;
1963 goto out;
1964 }
1965
1966
1967 if(ias_obj == (struct ias_object *) NULL) {
1968
1969 ias_obj = irias_new_object(ias_opt->irda_class_name,
1970 jiffies);
1971 if (ias_obj == NULL) {
1972 kfree(ias_opt);
1973 err = -ENOMEM;
1974 goto out;
1975 }
1976 free_ias = 1;
1977 }
1978
1979
1980 if(irias_find_attrib(ias_obj, ias_opt->irda_attrib_name)) {
1981 kfree(ias_opt);
1982 if (free_ias) {
1983 kfree(ias_obj->name);
1984 kfree(ias_obj);
1985 }
1986 err = -EINVAL;
1987 goto out;
1988 }
1989
1990
1991 switch(ias_opt->irda_attrib_type) {
1992 case IAS_INTEGER:
1993
1994 irias_add_integer_attrib(
1995 ias_obj,
1996 ias_opt->irda_attrib_name,
1997 ias_opt->attribute.irda_attrib_int,
1998 IAS_USER_ATTR);
1999 break;
2000 case IAS_OCT_SEQ:
2001
2002 if(ias_opt->attribute.irda_attrib_octet_seq.len >
2003 IAS_MAX_OCTET_STRING) {
2004 kfree(ias_opt);
2005 if (free_ias) {
2006 kfree(ias_obj->name);
2007 kfree(ias_obj);
2008 }
2009
2010 err = -EINVAL;
2011 goto out;
2012 }
2013
2014 irias_add_octseq_attrib(
2015 ias_obj,
2016 ias_opt->irda_attrib_name,
2017 ias_opt->attribute.irda_attrib_octet_seq.octet_seq,
2018 ias_opt->attribute.irda_attrib_octet_seq.len,
2019 IAS_USER_ATTR);
2020 break;
2021 case IAS_STRING:
2022
2023
2024
2025
2026
2027
2028
2029 ias_opt->attribute.irda_attrib_string.string[ias_opt->attribute.irda_attrib_string.len] = '\0';
2030
2031 irias_add_string_attrib(
2032 ias_obj,
2033 ias_opt->irda_attrib_name,
2034 ias_opt->attribute.irda_attrib_string.string,
2035 IAS_USER_ATTR);
2036 break;
2037 default :
2038 kfree(ias_opt);
2039 if (free_ias) {
2040 kfree(ias_obj->name);
2041 kfree(ias_obj);
2042 }
2043 err = -EINVAL;
2044 goto out;
2045 }
2046 irias_insert_object(ias_obj);
2047 kfree(ias_opt);
2048 break;
2049 case IRLMP_IAS_DEL:
2050
2051
2052
2053
2054
2055 if (optlen != sizeof(struct irda_ias_set)) {
2056 err = -EINVAL;
2057 goto out;
2058 }
2059
2060 ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC);
2061 if (ias_opt == NULL) {
2062 err = -ENOMEM;
2063 goto out;
2064 }
2065
2066
2067 if (copy_from_user(ias_opt, optval, optlen)) {
2068 kfree(ias_opt);
2069 err = -EFAULT;
2070 goto out;
2071 }
2072
2073
2074
2075
2076
2077 if(ias_opt->irda_class_name[0] == '\0')
2078 ias_obj = self->ias_obj;
2079 else
2080 ias_obj = irias_find_object(ias_opt->irda_class_name);
2081 if(ias_obj == (struct ias_object *) NULL) {
2082 kfree(ias_opt);
2083 err = -EINVAL;
2084 goto out;
2085 }
2086
2087
2088
2089
2090 if((!capable(CAP_NET_ADMIN)) &&
2091 ((ias_obj == NULL) || (ias_obj != self->ias_obj))) {
2092 kfree(ias_opt);
2093 err = -EPERM;
2094 goto out;
2095 }
2096
2097
2098 ias_attr = irias_find_attrib(ias_obj,
2099 ias_opt->irda_attrib_name);
2100 if(ias_attr == (struct ias_attrib *) NULL) {
2101 kfree(ias_opt);
2102 err = -EINVAL;
2103 goto out;
2104 }
2105
2106
2107 if(ias_attr->value->owner != IAS_USER_ATTR) {
2108 IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", __func__);
2109 kfree(ias_opt);
2110 err = -EPERM;
2111 goto out;
2112 }
2113
2114
2115 irias_delete_attrib(ias_obj, ias_attr, 1);
2116 kfree(ias_opt);
2117 break;
2118 case IRLMP_MAX_SDU_SIZE:
2119 if (optlen < sizeof(int)) {
2120 err = -EINVAL;
2121 goto out;
2122 }
2123
2124 if (get_user(opt, (int __user *)optval)) {
2125 err = -EFAULT;
2126 goto out;
2127 }
2128
2129
2130 if (sk->sk_type != SOCK_SEQPACKET) {
2131 IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n",
2132 __func__, opt);
2133 self->max_sdu_size_rx = opt;
2134 } else {
2135 IRDA_WARNING("%s: not allowed to set MAXSDUSIZE for this socket type!\n",
2136 __func__);
2137 err = -ENOPROTOOPT;
2138 goto out;
2139 }
2140 break;
2141 case IRLMP_HINTS_SET:
2142 if (optlen < sizeof(int)) {
2143 err = -EINVAL;
2144 goto out;
2145 }
2146
2147
2148 if (get_user(opt, (int __user *)optval)) {
2149 err = -EFAULT;
2150 goto out;
2151 }
2152
2153
2154 if (self->skey)
2155 irlmp_unregister_service(self->skey);
2156
2157 self->skey = irlmp_register_service((__u16) opt);
2158 break;
2159 case IRLMP_HINT_MASK_SET:
2160
2161
2162
2163
2164
2165 if (optlen < sizeof(int)) {
2166 err = -EINVAL;
2167 goto out;
2168 }
2169
2170
2171 if (get_user(opt, (int __user *)optval)) {
2172 err = -EFAULT;
2173 goto out;
2174 }
2175
2176
2177 self->mask.word = (__u16) opt;
2178
2179 self->mask.word &= 0x7f7f;
2180
2181 if(!self->mask.word)
2182 self->mask.word = 0xFFFF;
2183
2184 break;
2185 default:
2186 err = -ENOPROTOOPT;
2187 break;
2188 }
2189
2190out:
2191 release_sock(sk);
2192
2193 return err;
2194}
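
/*
 * Userspace sketch (not part of this file) of IRLMP_IAS_SET as handled
 * above, on an already bound AF_IRDA socket fd (binding creates the
 * socket's own IAS object). "MyAttribute" and 42 are made up; an empty
 * class name selects that object, and struct irda_ias_set comes from
 * <linux/irda.h>.
 *
 *	struct irda_ias_set ias;
 *
 *	memset(&ias, 0, sizeof(ias));
 *	strcpy(ias.irda_attrib_name, "MyAttribute");
 *	ias.irda_attrib_type = IAS_INTEGER;
 *	ias.attribute.irda_attrib_int = 42;
 *	setsockopt(fd, SOL_IRLMP, IRLMP_IAS_SET, &ias, sizeof(ias));
 */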

/*
 * Function irda_extract_ias_value (ias_opt, ias_value)
 *
 *    Translate an ias_value returned by the IAS layer into the flat
 *    struct irda_ias_set that gets copied back to user space.
 */
2208static int irda_extract_ias_value(struct irda_ias_set *ias_opt,
2209 struct ias_value *ias_value)
2210{
2211
2212 switch (ias_value->type) {
2213 case IAS_INTEGER:
2214
2215 ias_opt->attribute.irda_attrib_int = ias_value->t.integer;
2216 break;
2217 case IAS_OCT_SEQ:
2218
2219 ias_opt->attribute.irda_attrib_octet_seq.len = ias_value->len;
2220
2221 memcpy(ias_opt->attribute.irda_attrib_octet_seq.octet_seq,
2222 ias_value->t.oct_seq, ias_value->len);
2223 break;
2224 case IAS_STRING:
2225
2226 ias_opt->attribute.irda_attrib_string.len = ias_value->len;
2227 ias_opt->attribute.irda_attrib_string.charset = ias_value->charset;
2228
2229 memcpy(ias_opt->attribute.irda_attrib_string.string,
2230 ias_value->t.string, ias_value->len);
2231
2232 ias_opt->attribute.irda_attrib_string.string[ias_value->len] = '\0';
2233 break;
2234 case IAS_MISSING:
2235 default :
2236 return -EINVAL;
2237 }
2238
2239
2240 ias_opt->irda_attrib_type = ias_value->type;
2241
2242 return 0;
2243}

/*
 * Function irda_getsockopt (sock, level, optname, optval, optlen)
 *
 *    Get IrDA socket options (SOL_IRLMP): enumerate discovered devices,
 *    read IAS attributes locally or from a peer, or wait for a matching
 *    device to show up (IRLMP_WAITDEVICE).
 */
2248static int irda_getsockopt(struct socket *sock, int level, int optname,
2249 char __user *optval, int __user *optlen)
2250{
2251 struct sock *sk = sock->sk;
2252 struct irda_sock *self = irda_sk(sk);
2253 struct irda_device_list list;
2254 struct irda_device_info *discoveries;
2255 struct irda_ias_set * ias_opt;
2256 struct ias_object * ias_obj;
2257 struct ias_attrib * ias_attr;
2258 int daddr = DEV_ADDR_ANY;
2259 int val = 0;
2260 int len = 0;
2261 int err = 0;
2262 int offset, total;
2263
2264 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
2265
2266 if (level != SOL_IRLMP)
2267 return -ENOPROTOOPT;
2268
2269 if (get_user(len, optlen))
2270 return -EFAULT;
2271
2272 if(len < 0)
2273 return -EINVAL;
2274
2275 lock_sock(sk);
2276
2277 switch (optname) {
2278 case IRLMP_ENUMDEVICES:
2279
2280
2281 offset = sizeof(struct irda_device_list) -
2282 sizeof(struct irda_device_info);
2283
2284 if (len < offset) {
2285 err = -EINVAL;
2286 goto out;
2287 }
2288
2289
2290 discoveries = irlmp_get_discoveries(&list.len, self->mask.word,
2291 self->nslots);
2292
2293 if (discoveries == NULL) {
2294 err = -EAGAIN;
2295 goto out;
2296 }
2297
2298
2299 if (copy_to_user(optval, &list, offset))
2300 err = -EFAULT;
2301
2302
2303 if (list.len > 2048) {
2304 err = -EINVAL;
2305 goto bed;
2306 }
2307 total = offset + (list.len * sizeof(struct irda_device_info));
2308 if (total > len)
2309 total = len;
2310 if (copy_to_user(optval+offset, discoveries, total - offset))
2311 err = -EFAULT;
2312
2313
2314 if (put_user(total, optlen))
2315 err = -EFAULT;
2316bed:
2317
2318 kfree(discoveries);
2319 break;
2320 case IRLMP_MAX_SDU_SIZE:
2321 val = self->max_data_size;
2322 len = sizeof(int);
2323 if (put_user(len, optlen)) {
2324 err = -EFAULT;
2325 goto out;
2326 }
2327
2328 if (copy_to_user(optval, &val, len)) {
2329 err = -EFAULT;
2330 goto out;
2331 }
2332
2333 break;
2334 case IRLMP_IAS_GET:
2335
2336
2337
2338
2339
2340 if (len != sizeof(struct irda_ias_set)) {
2341 err = -EINVAL;
2342 goto out;
2343 }
2344
2345 ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC);
2346 if (ias_opt == NULL) {
2347 err = -ENOMEM;
2348 goto out;
2349 }
2350
2351
2352 if (copy_from_user(ias_opt, optval, len)) {
2353 kfree(ias_opt);
2354 err = -EFAULT;
2355 goto out;
2356 }
2357
2358
2359
2360
2361
2362 if(ias_opt->irda_class_name[0] == '\0')
2363 ias_obj = self->ias_obj;
2364 else
2365 ias_obj = irias_find_object(ias_opt->irda_class_name);
2366 if(ias_obj == (struct ias_object *) NULL) {
2367 kfree(ias_opt);
2368 err = -EINVAL;
2369 goto out;
2370 }
2371
2372
2373 ias_attr = irias_find_attrib(ias_obj,
2374 ias_opt->irda_attrib_name);
2375 if(ias_attr == (struct ias_attrib *) NULL) {
2376 kfree(ias_opt);
2377 err = -EINVAL;
2378 goto out;
2379 }
2380
2381
2382 err = irda_extract_ias_value(ias_opt, ias_attr->value);
2383 if(err) {
2384 kfree(ias_opt);
2385 goto out;
2386 }
2387
2388
2389 if (copy_to_user(optval, ias_opt,
2390 sizeof(struct irda_ias_set))) {
2391 kfree(ias_opt);
2392 err = -EFAULT;
2393 goto out;
2394 }
2395
2396 kfree(ias_opt);
2397 break;
2398 case IRLMP_IAS_QUERY:
2399
2400
2401
2402
2403
2404 if (len != sizeof(struct irda_ias_set)) {
2405 err = -EINVAL;
2406 goto out;
2407 }
2408
2409 ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC);
2410 if (ias_opt == NULL) {
2411 err = -ENOMEM;
2412 goto out;
2413 }
2414
2415
2416 if (copy_from_user(ias_opt, optval, len)) {
2417 kfree(ias_opt);
2418 err = -EFAULT;
2419 goto out;
2420 }
        /* If this socket is connected, query the peer we are connected
         * to; otherwise the caller must supply a valid device address
         * in ias_opt->daddr.
         */
2431 if(self->daddr != DEV_ADDR_ANY) {
2432
2433 daddr = self->daddr;
2434 } else {
2435
2436
2437 daddr = ias_opt->daddr;
2438 if((!daddr) || (daddr == DEV_ADDR_ANY)) {
2439 kfree(ias_opt);
2440 err = -EINVAL;
2441 goto out;
2442 }
2443 }
2444
2445
2446 if (self->iriap) {
2447 IRDA_WARNING("%s: busy with a previous query\n",
2448 __func__);
2449 kfree(ias_opt);
2450 err = -EBUSY;
2451 goto out;
2452 }
2453
2454 self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
2455 irda_getvalue_confirm);
2456
2457 if (self->iriap == NULL) {
2458 kfree(ias_opt);
2459 err = -ENOMEM;
2460 goto out;
2461 }
2462
2463
2464 self->errno = -EHOSTUNREACH;
2465
2466
2467 iriap_getvaluebyclass_request(self->iriap,
2468 self->saddr, daddr,
2469 ias_opt->irda_class_name,
2470 ias_opt->irda_attrib_name);
2471
2472
2473 if (wait_event_interruptible(self->query_wait,
2474 (self->iriap == NULL))) {
2475
2476
2477 kfree(ias_opt);
2478
2479 err = -EHOSTUNREACH;
2480 goto out;
2481 }
2482
2483
2484 if (self->errno)
2485 {
2486 kfree(ias_opt);
2487
2488 if((self->errno == IAS_CLASS_UNKNOWN) ||
2489 (self->errno == IAS_ATTRIB_UNKNOWN))
2490 err = -EADDRNOTAVAIL;
2491 else
2492 err = -EHOSTUNREACH;
2493
2494 goto out;
2495 }
2496
2497
2498 err = irda_extract_ias_value(ias_opt, self->ias_result);
2499 if (self->ias_result)
2500 irias_delete_value(self->ias_result);
2501 if (err) {
2502 kfree(ias_opt);
2503 goto out;
2504 }
2505
2506
2507 if (copy_to_user(optval, ias_opt,
2508 sizeof(struct irda_ias_set))) {
2509 kfree(ias_opt);
2510 err = -EFAULT;
2511 goto out;
2512 }
2513
2514 kfree(ias_opt);
2515 break;
2516 case IRLMP_WAITDEVICE:
        /* Wait, with a timeout given in ms by the caller, until a
         * device matching our hint mask shows up in discovery, and
         * return its address.
         */
2532 if (len != sizeof(int)) {
2533 err = -EINVAL;
2534 goto out;
2535 }
2536
2537 if (get_user(val, (int __user *)optval)) {
2538 err = -EFAULT;
2539 goto out;
2540 }
2541
2542
2543 irlmp_update_client(self->ckey, self->mask.word,
2544 irda_selective_discovery_indication,
2545 NULL, (void *) self);
2546
2547
2548 irlmp_discovery_request(self->nslots);
2549
2550
2551 if (!self->cachedaddr) {
2552 IRDA_DEBUG(1, "%s(), nothing discovered yet, going to sleep...\n", __func__);
2553
2554
2555 self->errno = 0;
2556 setup_timer(&self->watchdog, irda_discovery_timeout,
2557 (unsigned long)self);
2558 mod_timer(&self->watchdog,
2559 jiffies + msecs_to_jiffies(val));
2560
2561
2562 err = __wait_event_interruptible(self->query_wait,
2563 (self->cachedaddr != 0 || self->errno == -ETIME));
2564
2565
2566 del_timer(&(self->watchdog));
2567
2568 IRDA_DEBUG(1, "%s(), ...waking up !\n", __func__);
2569
2570 if (err != 0)
2571 goto out;
2572 }
2573 else
2574 IRDA_DEBUG(1, "%s(), found immediately !\n",
2575 __func__);
2576
2577
2578 irlmp_update_client(self->ckey, self->mask.word,
2579 NULL, NULL, NULL);
2580
2581
2582 if (!self->cachedaddr) {
2583 err = -EAGAIN;
2584 goto out;
2585 }
2586 daddr = self->cachedaddr;
2587
2588 self->cachedaddr = 0;
2589
2590
2591
2592
2593
2594
2595
2596 if (put_user(daddr, (int __user *)optval)) {
2597 err = -EFAULT;
2598 goto out;
2599 }
2600
2601 break;
2602 default:
2603 err = -ENOPROTOOPT;
2604 }
2605
2606out:
2607
2608 release_sock(sk);
2609
2610 return err;
2611}
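
/*
 * Userspace sketch (not part of this file) of IRLMP_ENUMDEVICES as
 * handled above, on an open AF_IRDA socket fd. The kernel copies back a
 * struct irda_device_list header followed by the discovered entries;
 * the "dev", "daddr" and "info" field names are assumed to match
 * <linux/irda.h>, and the nine-entry buffer size is arbitrary.
 *
 *	unsigned char buf[sizeof(struct irda_device_list) +
 *			  9 * sizeof(struct irda_device_info)];
 *	struct irda_device_list *list = (struct irda_device_list *) buf;
 *	socklen_t len = sizeof(buf);
 *	unsigned int i;
 *
 *	if (getsockopt(fd, SOL_IRLMP, IRLMP_ENUMDEVICES, buf, &len) == 0)
 *		for (i = 0; i < list->len; i++)
 *			printf("%08x %s\n", list->dev[i].daddr,
 *			       list->dev[i].info);
 */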
2612
2613static const struct net_proto_family irda_family_ops = {
2614 .family = PF_IRDA,
2615 .create = irda_create,
2616 .owner = THIS_MODULE,
2617};
2618
2619static const struct proto_ops irda_stream_ops = {
2620 .family = PF_IRDA,
2621 .owner = THIS_MODULE,
2622 .release = irda_release,
2623 .bind = irda_bind,
2624 .connect = irda_connect,
2625 .socketpair = sock_no_socketpair,
2626 .accept = irda_accept,
2627 .getname = irda_getname,
2628 .poll = irda_poll,
2629 .ioctl = irda_ioctl,
2630#ifdef CONFIG_COMPAT
2631 .compat_ioctl = irda_compat_ioctl,
2632#endif
2633 .listen = irda_listen,
2634 .shutdown = irda_shutdown,
2635 .setsockopt = irda_setsockopt,
2636 .getsockopt = irda_getsockopt,
2637 .sendmsg = irda_sendmsg,
2638 .recvmsg = irda_recvmsg_stream,
2639 .mmap = sock_no_mmap,
2640 .sendpage = sock_no_sendpage,
2641};
2642
2643static const struct proto_ops irda_seqpacket_ops = {
2644 .family = PF_IRDA,
2645 .owner = THIS_MODULE,
2646 .release = irda_release,
2647 .bind = irda_bind,
2648 .connect = irda_connect,
2649 .socketpair = sock_no_socketpair,
2650 .accept = irda_accept,
2651 .getname = irda_getname,
2652 .poll = datagram_poll,
2653 .ioctl = irda_ioctl,
2654#ifdef CONFIG_COMPAT
2655 .compat_ioctl = irda_compat_ioctl,
2656#endif
2657 .listen = irda_listen,
2658 .shutdown = irda_shutdown,
2659 .setsockopt = irda_setsockopt,
2660 .getsockopt = irda_getsockopt,
2661 .sendmsg = irda_sendmsg,
2662 .recvmsg = irda_recvmsg_dgram,
2663 .mmap = sock_no_mmap,
2664 .sendpage = sock_no_sendpage,
2665};
2666
2667static const struct proto_ops irda_dgram_ops = {
2668 .family = PF_IRDA,
2669 .owner = THIS_MODULE,
2670 .release = irda_release,
2671 .bind = irda_bind,
2672 .connect = irda_connect,
2673 .socketpair = sock_no_socketpair,
2674 .accept = irda_accept,
2675 .getname = irda_getname,
2676 .poll = datagram_poll,
2677 .ioctl = irda_ioctl,
2678#ifdef CONFIG_COMPAT
2679 .compat_ioctl = irda_compat_ioctl,
2680#endif
2681 .listen = irda_listen,
2682 .shutdown = irda_shutdown,
2683 .setsockopt = irda_setsockopt,
2684 .getsockopt = irda_getsockopt,
2685 .sendmsg = irda_sendmsg_dgram,
2686 .recvmsg = irda_recvmsg_dgram,
2687 .mmap = sock_no_mmap,
2688 .sendpage = sock_no_sendpage,
2689};
2690
2691#ifdef CONFIG_IRDA_ULTRA
2692static const struct proto_ops irda_ultra_ops = {
2693 .family = PF_IRDA,
2694 .owner = THIS_MODULE,
2695 .release = irda_release,
2696 .bind = irda_bind,
2697 .connect = sock_no_connect,
2698 .socketpair = sock_no_socketpair,
2699 .accept = sock_no_accept,
2700 .getname = irda_getname,
2701 .poll = datagram_poll,
2702 .ioctl = irda_ioctl,
2703#ifdef CONFIG_COMPAT
2704 .compat_ioctl = irda_compat_ioctl,
2705#endif
2706 .listen = sock_no_listen,
2707 .shutdown = irda_shutdown,
2708 .setsockopt = irda_setsockopt,
2709 .getsockopt = irda_getsockopt,
2710 .sendmsg = irda_sendmsg_ultra,
2711 .recvmsg = irda_recvmsg_dgram,
2712 .mmap = sock_no_mmap,
2713 .sendpage = sock_no_sendpage,
2714};
2715#endif

/*
 * Function irsock_init (void)
 *
 *    Register the IrDA protocol and socket family with the network core.
 */
2723int __init irsock_init(void)
2724{
2725 int rc = proto_register(&irda_proto, 0);
2726
2727 if (rc == 0)
2728 rc = sock_register(&irda_family_ops);
2729
2730 return rc;
2731}

/*
 * Function irsock_cleanup (void)
 *
 *    Unregister the IrDA socket family and protocol.
 */
2739void irsock_cleanup(void)
2740{
2741 sock_unregister(PF_IRDA);
2742 proto_unregister(&irda_proto);
2743}
2744