/*
 *  IUCV protocol stack for Linux on zSeries (AF_IUCV sockets)
 *
 *  Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
 */
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define VERSION "1.1"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

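/* special IPRM message used to signal a shutdown of the send direction
 * of a socket (sent in iucv_sock_shutdown() and checked for in
 * iucv_process_message()) */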
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

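/* macros to set/get the socket control buffer (skb->cb) at the correct
 * offset: the iucv message tag is stored first, then the target class */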
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)

#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk->sk_sleep, &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

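/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */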
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		switch (sk->sk_state) {
		case IUCV_SEVERED:
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			if (iucv->path) {
				err = iucv_path_sever(iucv->path, NULL);
				iucv_path_free(iucv->path);
				iucv->path = NULL;
			}
			break;
		case IUCV_OPEN:
		case IUCV_BOUND:
		case IUCV_LISTEN:
		case IUCV_CLOSED:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}

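/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */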
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		case IUCV_DISCONN:
		case IUCV_SEVERED:
		case IUCV_CLOSING:
		case IUCV_LISTEN:
		case IUCV_BOUND:
		case IUCV_OPEN:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = &iucv_bus,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

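/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message
 *
 * For IPRM data messages the payload is stored in the message parameter
 * list (msg->rmmsg) and the length is encoded in the last byte as
 * 0xff minus the data length (see iucv_send_iprm()); at most seven
 * bytes of payload fit into the parameter list.  For all other messages
 * the length field of the message is returned.
 */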
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

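/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */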
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

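/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */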
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
}

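/*
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */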
static void iucv_sock_wake_msglim(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk_has_sleeper(sk))
		wake_up_interruptible_all(sk->sk_sleep);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	read_unlock(&sk->sk_callback_lock);
}

/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket link */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}
		/* fall through */

	case IUCV_CLOSING:
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		break;

	default:
		/* nothing to do here */
		break;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;
	iucv_sk(sk)->flags = 0;
	iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
	iucv_sk(sk)->path = NULL;
	memset(&iucv_sk(sk)->src_user_id, 0, 32);

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    sk->sk_state == IUCV_DISCONN ||	/* due to PM restore */
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* query the z/VM user id of this guest */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	/* pick a unique name from the autobind counter */
	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}

/* Connect an unbound socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections (local) exceeded */
		case 0x0e:	/* Max connections (target) exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN)
		err = -ECONNREFUSED;

	if (err) {
		iucv_path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}
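/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the
 * parameter list and the socket data length, encoded as 0xff - skb->len,
 * at index 7 (last byte); see iucv_msg_length() for the decoding.
 */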
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
				(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if outstanding messages are above msg limit */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	      && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			iucv_path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s"
			       " exceeds message limit\n",
			       appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
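/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */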
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
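/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */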
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
					  skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * SOCK_SEQPACKET reports truncation via MSG_TRUNC instead
		 * (see iucv_sock_recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
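/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */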
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_SEVERED:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
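/* Callback wrappers - called from iucv base support */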
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += iucv_msg_length(msg) + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;	/* do not return with the lock held */
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}
	BUG_ON(!this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}
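/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data */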
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		pr_err("The af_iucv module cannot be loaded"
		       " without z/VM\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		WARN_ON(err);
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	/* establish dummy device */
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_sock;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = &iucv_bus;
	af_iucv_dev->parent = iucv_root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_driver;

	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	device_unregister(af_iucv_dev);
	driver_unregister(&af_iucv_driver);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);