linux/net/iucv/af_iucv.c
   1/*
   2 *  IUCV protocol stack for Linux on zSeries
   3 *
   4 *  Copyright IBM Corp. 2006, 2009
   5 *
   6 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
   7 *              Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
   8 *  PM functions:
   9 *              Ursula Braun <ursula.braun@de.ibm.com>
  10 */
  11
  12#define KMSG_COMPONENT "af_iucv"
  13#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  14
  15#include <linux/module.h>
  16#include <linux/types.h>
  17#include <linux/list.h>
  18#include <linux/errno.h>
  19#include <linux/kernel.h>
  20#include <linux/sched.h>
  21#include <linux/slab.h>
  22#include <linux/skbuff.h>
  23#include <linux/init.h>
  24#include <linux/poll.h>
  25#include <net/sock.h>
  26#include <asm/ebcdic.h>
  27#include <asm/cpcmd.h>
  28#include <linux/kmod.h>
  29
  30#include <net/iucv/af_iucv.h>
  31
  32#define VERSION "1.2"
  33
  34static char iucv_userid[80];
  35
  36static const struct proto_ops iucv_sock_ops;
  37
  38static struct proto iucv_proto = {
  39        .name           = "AF_IUCV",
  40        .owner          = THIS_MODULE,
  41        .obj_size       = sizeof(struct iucv_sock),
  42};
  43
  44static struct iucv_interface *pr_iucv;
  45
  46/* special AF_IUCV IPRM messages */
  47static const u8 iprm_shutdown[8] =
  48        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
  49
  50#define TRGCLS_SIZE     (sizeof(((struct iucv_message *)0)->class))
  51
  52#define __iucv_sock_wait(sk, condition, timeo, ret)                     \
  53do {                                                                    \
  54        DEFINE_WAIT(__wait);                                            \
  55        long __timeo = timeo;                                           \
  56        ret = 0;                                                        \
  57        prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);     \
  58        while (!(condition)) {                                          \
  59                if (!__timeo) {                                         \
  60                        ret = -EAGAIN;                                  \
  61                        break;                                          \
  62                }                                                       \
  63                if (signal_pending(current)) {                          \
  64                        ret = sock_intr_errno(__timeo);                 \
  65                        break;                                          \
  66                }                                                       \
  67                release_sock(sk);                                       \
  68                __timeo = schedule_timeout(__timeo);                    \
  69                lock_sock(sk);                                          \
  70                ret = sock_error(sk);                                   \
  71                if (ret)                                                \
  72                        break;                                          \
  73        }                                                               \
  74        finish_wait(sk_sleep(sk), &__wait);                             \
  75} while (0)
  76
  77#define iucv_sock_wait(sk, condition, timeo)                            \
  78({                                                                      \
  79        int __ret = 0;                                                  \
  80        if (!(condition))                                               \
  81                __iucv_sock_wait(sk, condition, timeo, __ret);          \
  82        __ret;                                                          \
  83})
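    /*
     * Illustrative usage only (a sketch, not part of the original source):
     * iucv_sock_wait() is called with the socket lock held and re-evaluates
     * the condition whenever it wakes up, e.g. as done in iucv_sock_sendmsg()
     * below to wait for room below the message limit:
     *
     *	timeo = sock_sndtimeo(sk, noblock);
     *	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
     */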
  84
  85static void iucv_sock_kill(struct sock *sk);
  86static void iucv_sock_close(struct sock *sk);
  87static void iucv_sever_path(struct sock *, int);
  88
  89static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
  90        struct packet_type *pt, struct net_device *orig_dev);
  91static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
  92                   struct sk_buff *skb, u8 flags);
  93static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
  94
  95/* Call Back functions */
  96static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
  97static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
  98static void iucv_callback_connack(struct iucv_path *, u8 *);
  99static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
 100static void iucv_callback_connrej(struct iucv_path *, u8 *);
 101static void iucv_callback_shutdown(struct iucv_path *, u8 *);
 102
 103static struct iucv_sock_list iucv_sk_list = {
 104        .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
 105        .autobind_name = ATOMIC_INIT(0)
 106};
 107
 108static struct iucv_handler af_iucv_handler = {
 109        .path_pending     = iucv_callback_connreq,
 110        .path_complete    = iucv_callback_connack,
 111        .path_severed     = iucv_callback_connrej,
 112        .message_pending  = iucv_callback_rx,
 113        .message_complete = iucv_callback_txdone,
 114        .path_quiesced    = iucv_callback_shutdown,
 115};
 116
 117static inline void high_nmcpy(unsigned char *dst, char *src)
 118{
 119       memcpy(dst, src, 8);
 120}
 121
 122static inline void low_nmcpy(unsigned char *dst, char *src)
 123{
 124       memcpy(&dst[8], src, 8);
 125}
 126
 127static int afiucv_pm_prepare(struct device *dev)
 128{
 129#ifdef CONFIG_PM_DEBUG
 130        printk(KERN_WARNING "afiucv_pm_prepare\n");
 131#endif
 132        return 0;
 133}
 134
 135static void afiucv_pm_complete(struct device *dev)
 136{
 137#ifdef CONFIG_PM_DEBUG
 138        printk(KERN_WARNING "afiucv_pm_complete\n");
 139#endif
 140}
 141
 142/**
 143 * afiucv_pm_freeze() - Freeze PM callback
 144 * @dev:        AFIUCV dummy device
 145 *
 146 * Sever all established IUCV communication paths
 147 */
 148static int afiucv_pm_freeze(struct device *dev)
 149{
 150        struct iucv_sock *iucv;
 151        struct sock *sk;
 152        int err = 0;
 153
 154#ifdef CONFIG_PM_DEBUG
 155        printk(KERN_WARNING "afiucv_pm_freeze\n");
 156#endif
 157        read_lock(&iucv_sk_list.lock);
 158        sk_for_each(sk, &iucv_sk_list.head) {
 159                iucv = iucv_sk(sk);
 160                switch (sk->sk_state) {
 161                case IUCV_DISCONN:
 162                case IUCV_CLOSING:
 163                case IUCV_CONNECTED:
 164                        iucv_sever_path(sk, 0);
 165                        break;
 166                case IUCV_OPEN:
 167                case IUCV_BOUND:
 168                case IUCV_LISTEN:
 169                case IUCV_CLOSED:
 170                default:
 171                        break;
 172                }
 173                skb_queue_purge(&iucv->send_skb_q);
 174                skb_queue_purge(&iucv->backlog_skb_q);
 175        }
 176        read_unlock(&iucv_sk_list.lock);
 177        return err;
 178}
 179
 180/**
 181 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 182 * @dev:        AFIUCV dummy device
 183 *
 184 * Socket clean-up after freeze
 185 */
 186static int afiucv_pm_restore_thaw(struct device *dev)
 187{
 188        struct sock *sk;
 189
 190#ifdef CONFIG_PM_DEBUG
 191        printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
 192#endif
 193        read_lock(&iucv_sk_list.lock);
 194        sk_for_each(sk, &iucv_sk_list.head) {
 195                switch (sk->sk_state) {
 196                case IUCV_CONNECTED:
 197                        sk->sk_err = EPIPE;
 198                        sk->sk_state = IUCV_DISCONN;
 199                        sk->sk_state_change(sk);
 200                        break;
 201                case IUCV_DISCONN:
 202                case IUCV_CLOSING:
 203                case IUCV_LISTEN:
 204                case IUCV_BOUND:
 205                case IUCV_OPEN:
 206                default:
 207                        break;
 208                }
 209        }
 210        read_unlock(&iucv_sk_list.lock);
 211        return 0;
 212}
 213
 214static const struct dev_pm_ops afiucv_pm_ops = {
 215        .prepare = afiucv_pm_prepare,
 216        .complete = afiucv_pm_complete,
 217        .freeze = afiucv_pm_freeze,
 218        .thaw = afiucv_pm_restore_thaw,
 219        .restore = afiucv_pm_restore_thaw,
 220};
 221
 222static struct device_driver af_iucv_driver = {
 223        .owner = THIS_MODULE,
 224        .name = "afiucv",
 225        .bus  = NULL,
 226        .pm   = &afiucv_pm_ops,
 227};
 228
 229/* dummy device used as trigger for PM functions */
 230static struct device *af_iucv_dev;
 231
 232/**
 233 * iucv_msg_length() - Returns the length of an iucv message.
 234 * @msg:        Pointer to struct iucv_message, MUST NOT be NULL
 235 *
 236 * The function returns the length of the specified iucv message @msg, whether
 237 * the data is stored in a buffer or in the parameter list (PRMDATA).
 238 *
 239 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 240 * data:
 241 *      PRMDATA[0..6]   socket data (max 7 bytes);
 242 *      PRMDATA[7]      socket data length value (len is 0xff - PRMDATA[7])
 243 *
 244 * The socket data length is computed by subtracting the socket data length
 245 * value from 0xFF.
 246 * If the socket data length is greater than 7, then PRMDATA can be used for
 247 * special notifications (see iucv_sock_shutdown); in that case, the function
 248 * returns 8.
 249 *
 250 * Use this function to allocate socket buffers to store iucv message data.
 251 */
 252static inline size_t iucv_msg_length(struct iucv_message *msg)
 253{
 254        size_t datalen;
 255
 256        if (msg->flags & IUCV_IPRMDATA) {
 257                datalen = 0xff - msg->rmmsg[7];
 258                return (datalen < 8) ? datalen : 8;
 259        }
 260        return msg->length;
 261}
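    /*
     * Worked example (added for illustration): an IPRM message carrying five
     * bytes of socket data has msg->rmmsg[7] == 0xff - 5 == 0xfa, so
     * iucv_msg_length() returns 0xff - 0xfa == 5.  A typical use is sizing a
     * receive buffer, as in iucv_process_message_q() below:
     *
     *	skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
     */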
 262
 263/**
 264 * iucv_sock_in_state() - check for specific states
 265 * @sk:         sock structure
 266 * @state:      first iucv sk state
 267 * @state2:     second iucv sk state
 268 *
 269 * Returns true if the socket is in either the first or the second state.
 270 */
 271static int iucv_sock_in_state(struct sock *sk, int state, int state2)
 272{
 273        return (sk->sk_state == state || sk->sk_state == state2);
 274}
 275
 276/**
 277 * iucv_below_msglim() - function to check if messages can be sent
 278 * @sk:         sock structure
 279 *
 280 * Returns true if the send queue length is lower than the message limit.
 281 * Always returns true if the socket is not connected (no iucv path for
 282 * checking the message limit).
 283 */
 284static inline int iucv_below_msglim(struct sock *sk)
 285{
 286        struct iucv_sock *iucv = iucv_sk(sk);
 287
 288        if (sk->sk_state != IUCV_CONNECTED)
 289                return 1;
 290        if (iucv->transport == AF_IUCV_TRANS_IUCV)
 291                return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
 292        else
 293                return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
 294                        (atomic_read(&iucv->pendings) <= 0));
 295}
 296
 297/**
 298 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 299 */
 300static void iucv_sock_wake_msglim(struct sock *sk)
 301{
 302        struct socket_wq *wq;
 303
 304        rcu_read_lock();
 305        wq = rcu_dereference(sk->sk_wq);
 306        if (wq_has_sleeper(wq))
 307                wake_up_interruptible_all(&wq->wait);
 308        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 309        rcu_read_unlock();
 310}
 311
 312/**
 313 * afiucv_hs_send() - send a message through HiperSockets transport
 314 */
 315static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 316                   struct sk_buff *skb, u8 flags)
 317{
 318        struct iucv_sock *iucv = iucv_sk(sock);
 319        struct af_iucv_trans_hdr *phs_hdr;
 320        struct sk_buff *nskb;
 321        int err, confirm_recv = 0;
 322
 323        memset(skb->head, 0, ETH_HLEN);
 324        phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
 325                                        sizeof(struct af_iucv_trans_hdr));
 326        skb_reset_mac_header(skb);
 327        skb_reset_network_header(skb);
 328        skb_push(skb, ETH_HLEN);
 329        skb_reset_mac_header(skb);
 330        memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
 331
 332        phs_hdr->magic = ETH_P_AF_IUCV;
 333        phs_hdr->version = 1;
 334        phs_hdr->flags = flags;
 335        if (flags == AF_IUCV_FLAG_SYN)
 336                phs_hdr->window = iucv->msglimit;
 337        else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
 338                confirm_recv = atomic_read(&iucv->msg_recv);
 339                phs_hdr->window = confirm_recv;
 340                if (confirm_recv)
 341                        phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
 342        }
 343        memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
 344        memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
 345        memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
 346        memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
 347        ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
 348        ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
 349        ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
 350        ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
 351        if (imsg)
 352                memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 353
 354        skb->dev = iucv->hs_dev;
 355        if (!skb->dev)
 356                return -ENODEV;
 357        if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
 358                return -ENETDOWN;
 359        if (skb->len > skb->dev->mtu) {
 360                if (sock->sk_type == SOCK_SEQPACKET)
 361                        return -EMSGSIZE;
 362                else
 363                        skb_trim(skb, skb->dev->mtu);
 364        }
 365        skb->protocol = ETH_P_AF_IUCV;
 366        nskb = skb_clone(skb, GFP_ATOMIC);
 367        if (!nskb)
 368                return -ENOMEM;
 369        skb_queue_tail(&iucv->send_skb_q, nskb);
 370        err = dev_queue_xmit(skb);
 371        if (net_xmit_eval(err)) {
 372                skb_unlink(nskb, &iucv->send_skb_q);
 373                kfree_skb(nskb);
 374        } else {
 375                atomic_sub(confirm_recv, &iucv->msg_recv);
 376                WARN_ON(atomic_read(&iucv->msg_recv) < 0);
 377        }
 378        return net_xmit_eval(err);
 379}
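    /*
     * Frame layout sketch (added for illustration, derived from the code
     * above): afiucv_hs_send() prepends the transport header and an Ethernet
     * header, so the skb handed to dev_queue_xmit() looks like:
     *
     *	| ETH_HLEN bytes | struct af_iucv_trans_hdr | socket data (optional) |
     *
     * A clone of the skb stays on iucv->send_skb_q; it is dropped again if
     * dev_queue_xmit() fails and is otherwise left for the tx notification
     * handler (sk_txnotify) to clean up.
     */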
 380
 381static struct sock *__iucv_get_sock_by_name(char *nm)
 382{
 383        struct sock *sk;
 384
 385        sk_for_each(sk, &iucv_sk_list.head)
 386                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
 387                        return sk;
 388
 389        return NULL;
 390}
 391
 392static void iucv_sock_destruct(struct sock *sk)
 393{
 394        skb_queue_purge(&sk->sk_receive_queue);
 395        skb_queue_purge(&sk->sk_error_queue);
 396
 397        sk_mem_reclaim(sk);
 398
 399        if (!sock_flag(sk, SOCK_DEAD)) {
 400                pr_err("Attempt to release alive iucv socket %p\n", sk);
 401                return;
 402        }
 403
 404        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
 405        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
 406        WARN_ON(sk->sk_wmem_queued);
 407        WARN_ON(sk->sk_forward_alloc);
 408}
 409
 410/* Cleanup Listen */
 411static void iucv_sock_cleanup_listen(struct sock *parent)
 412{
 413        struct sock *sk;
 414
 415        /* Close non-accepted connections */
 416        while ((sk = iucv_accept_dequeue(parent, NULL))) {
 417                iucv_sock_close(sk);
 418                iucv_sock_kill(sk);
 419        }
 420
 421        parent->sk_state = IUCV_CLOSED;
 422}
 423
 424/* Kill socket (only if zapped and orphaned) */
 425static void iucv_sock_kill(struct sock *sk)
 426{
 427        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
 428                return;
 429
 430        iucv_sock_unlink(&iucv_sk_list, sk);
 431        sock_set_flag(sk, SOCK_DEAD);
 432        sock_put(sk);
 433}
 434
 435/* Terminate an IUCV path */
 436static void iucv_sever_path(struct sock *sk, int with_user_data)
 437{
 438        unsigned char user_data[16];
 439        struct iucv_sock *iucv = iucv_sk(sk);
 440        struct iucv_path *path = iucv->path;
 441
 442        if (iucv->path) {
 443                iucv->path = NULL;
 444                if (with_user_data) {
 445                        low_nmcpy(user_data, iucv->src_name);
 446                        high_nmcpy(user_data, iucv->dst_name);
 447                        ASCEBC(user_data, sizeof(user_data));
 448                        pr_iucv->path_sever(path, user_data);
 449                } else
 450                        pr_iucv->path_sever(path, NULL);
 451                iucv_path_free(path);
 452        }
 453}
 454
 455/* Send controlling flags through an IUCV socket for HIPER transport */
 456static int iucv_send_ctrl(struct sock *sk, u8 flags)
 457{
 458        int err = 0;
 459        int blen;
 460        struct sk_buff *skb;
 461
 462        blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
 463        skb = sock_alloc_send_skb(sk, blen, 1, &err);
 464        if (skb) {
 465                skb_reserve(skb, blen);
 466                err = afiucv_hs_send(NULL, sk, skb, flags);
 467        }
 468        return err;
 469}
 470
 471/* Close an IUCV socket */
 472static void iucv_sock_close(struct sock *sk)
 473{
 474        struct iucv_sock *iucv = iucv_sk(sk);
 475        unsigned long timeo;
 476        int err = 0;
 477
 478        lock_sock(sk);
 479
 480        switch (sk->sk_state) {
 481        case IUCV_LISTEN:
 482                iucv_sock_cleanup_listen(sk);
 483                break;
 484
 485        case IUCV_CONNECTED:
 486                if (iucv->transport == AF_IUCV_TRANS_HIPER) {
 487                        err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
 488                        sk->sk_state = IUCV_DISCONN;
 489                        sk->sk_state_change(sk);
 490                }
 491        case IUCV_DISCONN:   /* fall through */
 492                sk->sk_state = IUCV_CLOSING;
 493                sk->sk_state_change(sk);
 494
 495                if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
 496                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
 497                                timeo = sk->sk_lingertime;
 498                        else
 499                                timeo = IUCV_DISCONN_TIMEOUT;
 500                        iucv_sock_wait(sk,
 501                                        iucv_sock_in_state(sk, IUCV_CLOSED, 0),
 502                                        timeo);
 503                }
 504
 505        case IUCV_CLOSING:   /* fall through */
 506                sk->sk_state = IUCV_CLOSED;
 507                sk->sk_state_change(sk);
 508
 509                sk->sk_err = ECONNRESET;
 510                sk->sk_state_change(sk);
 511
 512                skb_queue_purge(&iucv->send_skb_q);
 513                skb_queue_purge(&iucv->backlog_skb_q);
 514
 515        default:   /* fall through */
 516                iucv_sever_path(sk, 1);
 517        }
 518
 519        if (iucv->hs_dev) {
 520                dev_put(iucv->hs_dev);
 521                iucv->hs_dev = NULL;
 522                sk->sk_bound_dev_if = 0;
 523        }
 524
 525        /* mark socket for deletion by iucv_sock_kill() */
 526        sock_set_flag(sk, SOCK_ZAPPED);
 527
 528        release_sock(sk);
 529}
 530
 531static void iucv_sock_init(struct sock *sk, struct sock *parent)
 532{
 533        if (parent)
 534                sk->sk_type = parent->sk_type;
 535}
 536
 537static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
 538{
 539        struct sock *sk;
 540        struct iucv_sock *iucv;
 541
 542        sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
 543        if (!sk)
 544                return NULL;
 545        iucv = iucv_sk(sk);
 546
 547        sock_init_data(sock, sk);
 548        INIT_LIST_HEAD(&iucv->accept_q);
 549        spin_lock_init(&iucv->accept_q_lock);
 550        skb_queue_head_init(&iucv->send_skb_q);
 551        INIT_LIST_HEAD(&iucv->message_q.list);
 552        spin_lock_init(&iucv->message_q.lock);
 553        skb_queue_head_init(&iucv->backlog_skb_q);
 554        iucv->send_tag = 0;
 555        atomic_set(&iucv->pendings, 0);
 556        iucv->flags = 0;
 557        iucv->msglimit = 0;
 558        atomic_set(&iucv->msg_sent, 0);
 559        atomic_set(&iucv->msg_recv, 0);
 560        iucv->path = NULL;
 561        iucv->sk_txnotify = afiucv_hs_callback_txnotify;
 562        memset(&iucv->src_user_id , 0, 32);
 563        if (pr_iucv)
 564                iucv->transport = AF_IUCV_TRANS_IUCV;
 565        else
 566                iucv->transport = AF_IUCV_TRANS_HIPER;
 567
 568        sk->sk_destruct = iucv_sock_destruct;
 569        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
 570        sk->sk_allocation = GFP_DMA;
 571
 572        sock_reset_flag(sk, SOCK_ZAPPED);
 573
 574        sk->sk_protocol = proto;
 575        sk->sk_state    = IUCV_OPEN;
 576
 577        iucv_sock_link(&iucv_sk_list, sk);
 578        return sk;
 579}
 580
 581/* Create an IUCV socket */
 582static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
 583                            int kern)
 584{
 585        struct sock *sk;
 586
 587        if (protocol && protocol != PF_IUCV)
 588                return -EPROTONOSUPPORT;
 589
 590        sock->state = SS_UNCONNECTED;
 591
 592        switch (sock->type) {
 593        case SOCK_STREAM:
 594                sock->ops = &iucv_sock_ops;
 595                break;
 596        case SOCK_SEQPACKET:
 597                /* currently, proto ops can handle both sk types */
 598                sock->ops = &iucv_sock_ops;
 599                break;
 600        default:
 601                return -ESOCKTNOSUPPORT;
 602        }
 603
 604        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
 605        if (!sk)
 606                return -ENOMEM;
 607
 608        iucv_sock_init(sk, NULL);
 609
 610        return 0;
 611}
 612
 613void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
 614{
 615        write_lock_bh(&l->lock);
 616        sk_add_node(sk, &l->head);
 617        write_unlock_bh(&l->lock);
 618}
 619
 620void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
 621{
 622        write_lock_bh(&l->lock);
 623        sk_del_node_init(sk);
 624        write_unlock_bh(&l->lock);
 625}
 626
 627void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
 628{
 629        unsigned long flags;
 630        struct iucv_sock *par = iucv_sk(parent);
 631
 632        sock_hold(sk);
 633        spin_lock_irqsave(&par->accept_q_lock, flags);
 634        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
 635        spin_unlock_irqrestore(&par->accept_q_lock, flags);
 636        iucv_sk(sk)->parent = parent;
 637        sk_acceptq_added(parent);
 638}
 639
 640void iucv_accept_unlink(struct sock *sk)
 641{
 642        unsigned long flags;
 643        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
 644
 645        spin_lock_irqsave(&par->accept_q_lock, flags);
 646        list_del_init(&iucv_sk(sk)->accept_q);
 647        spin_unlock_irqrestore(&par->accept_q_lock, flags);
 648        sk_acceptq_removed(iucv_sk(sk)->parent);
 649        iucv_sk(sk)->parent = NULL;
 650        sock_put(sk);
 651}
 652
 653struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
 654{
 655        struct iucv_sock *isk, *n;
 656        struct sock *sk;
 657
 658        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
 659                sk = (struct sock *) isk;
 660                lock_sock(sk);
 661
 662                if (sk->sk_state == IUCV_CLOSED) {
 663                        iucv_accept_unlink(sk);
 664                        release_sock(sk);
 665                        continue;
 666                }
 667
 668                if (sk->sk_state == IUCV_CONNECTED ||
 669                    sk->sk_state == IUCV_DISCONN ||
 670                    !newsock) {
 671                        iucv_accept_unlink(sk);
 672                        if (newsock)
 673                                sock_graft(sk, newsock);
 674
 675                        release_sock(sk);
 676                        return sk;
 677                }
 678
 679                release_sock(sk);
 680        }
 681        return NULL;
 682}
 683
 684static void __iucv_auto_name(struct iucv_sock *iucv)
 685{
 686        char name[12];
 687
 688        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
 689        while (__iucv_get_sock_by_name(name)) {
 690                sprintf(name, "%08x",
 691                        atomic_inc_return(&iucv_sk_list.autobind_name));
 692        }
 693        memcpy(iucv->src_name, name, 8);
 694}
 695
 696/* Bind an unbound socket */
 697static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
 698                          int addr_len)
 699{
 700        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
 701        struct sock *sk = sock->sk;
 702        struct iucv_sock *iucv;
 703        int err = 0;
 704        struct net_device *dev;
 705        char uid[9];
 706
 707        /* Verify the input sockaddr */
 708        if (!addr || addr->sa_family != AF_IUCV)
 709                return -EINVAL;
 710
 711        lock_sock(sk);
 712        if (sk->sk_state != IUCV_OPEN) {
 713                err = -EBADFD;
 714                goto done;
 715        }
 716
 717        write_lock_bh(&iucv_sk_list.lock);
 718
 719        iucv = iucv_sk(sk);
 720        if (__iucv_get_sock_by_name(sa->siucv_name)) {
 721                err = -EADDRINUSE;
 722                goto done_unlock;
 723        }
 724        if (iucv->path)
 725                goto done_unlock;
 726
 727        /* Bind the socket */
 728        if (pr_iucv)
 729                if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
 730                        goto vm_bind; /* VM IUCV transport */
 731
 732        /* try hiper transport */
 733        memcpy(uid, sa->siucv_user_id, sizeof(uid));
 734        ASCEBC(uid, 8);
 735        rcu_read_lock();
 736        for_each_netdev_rcu(&init_net, dev) {
 737                if (!memcmp(dev->perm_addr, uid, 8)) {
 738                        memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
 739                        /* Check for uninitialized siucv_name */
 740                        if (strncmp(sa->siucv_name, "        ", 8) == 0)
 741                                __iucv_auto_name(iucv);
 742                        else
 743                                memcpy(iucv->src_name, sa->siucv_name, 8);
 744                        sk->sk_bound_dev_if = dev->ifindex;
 745                        iucv->hs_dev = dev;
 746                        dev_hold(dev);
 747                        sk->sk_state = IUCV_BOUND;
 748                        iucv->transport = AF_IUCV_TRANS_HIPER;
 749                        if (!iucv->msglimit)
 750                                iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
 751                        rcu_read_unlock();
 752                        goto done_unlock;
 753                }
 754        }
 755        rcu_read_unlock();
 756vm_bind:
 757        if (pr_iucv) {
 758                /* use local userid for backward compat */
 759                memcpy(iucv->src_name, sa->siucv_name, 8);
 760                memcpy(iucv->src_user_id, iucv_userid, 8);
 761                sk->sk_state = IUCV_BOUND;
 762                iucv->transport = AF_IUCV_TRANS_IUCV;
 763                if (!iucv->msglimit)
 764                        iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
 765                goto done_unlock;
 766        }
 767        /* found no dev to bind */
 768        err = -ENODEV;
 769done_unlock:
 770        /* Release the socket list lock */
 771        write_unlock_bh(&iucv_sk_list.lock);
 772done:
 773        release_sock(sk);
 774        return err;
 775}
 776
 777/* Automatically bind an unbound socket */
 778static int iucv_sock_autobind(struct sock *sk)
 779{
 780        struct iucv_sock *iucv = iucv_sk(sk);
 781        int err = 0;
 782
 783        if (unlikely(!pr_iucv))
 784                return -EPROTO;
 785
 786        memcpy(iucv->src_user_id, iucv_userid, 8);
 787
 788        write_lock_bh(&iucv_sk_list.lock);
 789        __iucv_auto_name(iucv);
 790        write_unlock_bh(&iucv_sk_list.lock);
 791
 792        if (!iucv->msglimit)
 793                iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
 794
 795        return err;
 796}
 797
 798static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
 799{
 800        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
 801        struct sock *sk = sock->sk;
 802        struct iucv_sock *iucv = iucv_sk(sk);
 803        unsigned char user_data[16];
 804        int err;
 805
 806        high_nmcpy(user_data, sa->siucv_name);
 807        low_nmcpy(user_data, iucv->src_name);
 808        ASCEBC(user_data, sizeof(user_data));
 809
 810        /* Create path. */
 811        iucv->path = iucv_path_alloc(iucv->msglimit,
 812                                     IUCV_IPRMDATA, GFP_KERNEL);
 813        if (!iucv->path) {
 814                err = -ENOMEM;
 815                goto done;
 816        }
 817        err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
 818                                    sa->siucv_user_id, NULL, user_data,
 819                                    sk);
 820        if (err) {
 821                iucv_path_free(iucv->path);
 822                iucv->path = NULL;
 823                switch (err) {
 824                case 0x0b:      /* Target communicator is not logged on */
 825                        err = -ENETUNREACH;
 826                        break;
 827                case 0x0d:      /* Max connections for this guest exceeded */
 828                case 0x0e:      /* Max connections for target guest exceeded */
 829                        err = -EAGAIN;
 830                        break;
 831                case 0x0f:      /* Missing IUCV authorization */
 832                        err = -EACCES;
 833                        break;
 834                default:
 835                        err = -ECONNREFUSED;
 836                        break;
 837                }
 838        }
 839done:
 840        return err;
 841}
 842
 843/* Connect an unconnected socket */
 844static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
 845                             int alen, int flags)
 846{
 847        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
 848        struct sock *sk = sock->sk;
 849        struct iucv_sock *iucv = iucv_sk(sk);
 850        int err;
 851
 852        if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
 853                return -EINVAL;
 854
 855        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
 856                return -EBADFD;
 857
 858        if (sk->sk_state == IUCV_OPEN &&
 859            iucv->transport == AF_IUCV_TRANS_HIPER)
 860                return -EBADFD; /* explicit bind required */
 861
 862        if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
 863                return -EINVAL;
 864
 865        if (sk->sk_state == IUCV_OPEN) {
 866                err = iucv_sock_autobind(sk);
 867                if (unlikely(err))
 868                        return err;
 869        }
 870
 871        lock_sock(sk);
 872
 873        /* Set the destination information */
 874        memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
 875        memcpy(iucv->dst_name, sa->siucv_name, 8);
 876
 877        if (iucv->transport == AF_IUCV_TRANS_HIPER)
 878                err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
 879        else
 880                err = afiucv_path_connect(sock, addr);
 881        if (err)
 882                goto done;
 883
 884        if (sk->sk_state != IUCV_CONNECTED)
 885                err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
 886                                                            IUCV_DISCONN),
 887                                     sock_sndtimeo(sk, flags & O_NONBLOCK));
 888
 889        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
 890                err = -ECONNREFUSED;
 891
 892        if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
 893                iucv_sever_path(sk, 0);
 894
 895done:
 896        release_sock(sk);
 897        return err;
 898}
 899
 900/* Move a socket into listening state. */
 901static int iucv_sock_listen(struct socket *sock, int backlog)
 902{
 903        struct sock *sk = sock->sk;
 904        int err;
 905
 906        lock_sock(sk);
 907
 908        err = -EINVAL;
 909        if (sk->sk_state != IUCV_BOUND)
 910                goto done;
 911
 912        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
 913                goto done;
 914
 915        sk->sk_max_ack_backlog = backlog;
 916        sk->sk_ack_backlog = 0;
 917        sk->sk_state = IUCV_LISTEN;
 918        err = 0;
 919
 920done:
 921        release_sock(sk);
 922        return err;
 923}
 924
 925/* Accept a pending connection */
 926static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
 927                            int flags)
 928{
 929        DECLARE_WAITQUEUE(wait, current);
 930        struct sock *sk = sock->sk, *nsk;
 931        long timeo;
 932        int err = 0;
 933
 934        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 935
 936        if (sk->sk_state != IUCV_LISTEN) {
 937                err = -EBADFD;
 938                goto done;
 939        }
 940
 941        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 942
 943        /* Wait for an incoming connection */
 944        add_wait_queue_exclusive(sk_sleep(sk), &wait);
 945        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
 946                set_current_state(TASK_INTERRUPTIBLE);
 947                if (!timeo) {
 948                        err = -EAGAIN;
 949                        break;
 950                }
 951
 952                release_sock(sk);
 953                timeo = schedule_timeout(timeo);
 954                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 955
 956                if (sk->sk_state != IUCV_LISTEN) {
 957                        err = -EBADFD;
 958                        break;
 959                }
 960
 961                if (signal_pending(current)) {
 962                        err = sock_intr_errno(timeo);
 963                        break;
 964                }
 965        }
 966
 967        set_current_state(TASK_RUNNING);
 968        remove_wait_queue(sk_sleep(sk), &wait);
 969
 970        if (err)
 971                goto done;
 972
 973        newsock->state = SS_CONNECTED;
 974
 975done:
 976        release_sock(sk);
 977        return err;
 978}
 979
 980static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
 981                             int *len, int peer)
 982{
 983        struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
 984        struct sock *sk = sock->sk;
 985        struct iucv_sock *iucv = iucv_sk(sk);
 986
 987        addr->sa_family = AF_IUCV;
 988        *len = sizeof(struct sockaddr_iucv);
 989
 990        if (peer) {
 991                memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
 992                memcpy(siucv->siucv_name, iucv->dst_name, 8);
 993        } else {
 994                memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
 995                memcpy(siucv->siucv_name, iucv->src_name, 8);
 996        }
 997        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
 998        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
 999        memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
1000
1001        return 0;
1002}
1003
1004/**
1005 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
1006 * @path:       IUCV path
1007 * @msg:        Pointer to a struct iucv_message
1008 * @skb:        The socket data to send, skb->len MUST BE <= 7
1009 *
1010 * Send the socket data in the parameter list of the iucv message
1011 * (IUCV_IPRMDATA). The socket data is stored at indices 0 to 6 of the
1012 * parameter list and the socket data length value at index 7 (last byte).
1013 * See also iucv_msg_length().
1014 *
1015 * Returns the error code from the iucv_message_send() call.
1016 */
1017static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
1018                          struct sk_buff *skb)
1019{
1020        u8 prmdata[8];
1021
1022        memcpy(prmdata, (void *) skb->data, skb->len);
1023        prmdata[7] = 0xff - (u8) skb->len;
1024        return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
1025                                 (void *) prmdata, 8);
1026}
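    /*
     * Example (illustrative only): sending a 3-byte payload "abc" as IPRM data
     * places 'a', 'b', 'c' in prmdata[0..2] and 0xff - 3 == 0xfc in prmdata[7];
     * the receiver recovers the length as 0xff - 0xfc == 3 (see
     * iucv_msg_length() above).
     */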
1027
1028static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1029                             size_t len)
1030{
1031        struct sock *sk = sock->sk;
1032        struct iucv_sock *iucv = iucv_sk(sk);
1033        struct sk_buff *skb;
1034        struct iucv_message txmsg;
1035        struct cmsghdr *cmsg;
1036        int cmsg_done;
1037        long timeo;
1038        char user_id[9];
1039        char appl_id[9];
1040        int err;
1041        int noblock = msg->msg_flags & MSG_DONTWAIT;
1042
1043        err = sock_error(sk);
1044        if (err)
1045                return err;
1046
1047        if (msg->msg_flags & MSG_OOB)
1048                return -EOPNOTSUPP;
1049
1050        /* SOCK_SEQPACKET: we do not support segmented records */
1051        if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
1052                return -EOPNOTSUPP;
1053
1054        lock_sock(sk);
1055
1056        if (sk->sk_shutdown & SEND_SHUTDOWN) {
1057                err = -EPIPE;
1058                goto out;
1059        }
1060
1061        /* Return if the socket is not in connected state */
1062        if (sk->sk_state != IUCV_CONNECTED) {
1063                err = -ENOTCONN;
1064                goto out;
1065        }
1066
1067        /* initialize defaults */
1068        cmsg_done   = 0;        /* check for duplicate headers */
1069        txmsg.class = 0;
1070
1071        /* iterate over control messages */
1072        for_each_cmsghdr(cmsg, msg) {
1073                if (!CMSG_OK(msg, cmsg)) {
1074                        err = -EINVAL;
1075                        goto out;
1076                }
1077
1078                if (cmsg->cmsg_level != SOL_IUCV)
1079                        continue;
1080
1081                if (cmsg->cmsg_type & cmsg_done) {
1082                        err = -EINVAL;
1083                        goto out;
1084                }
1085                cmsg_done |= cmsg->cmsg_type;
1086
1087                switch (cmsg->cmsg_type) {
1088                case SCM_IUCV_TRGCLS:
1089                        if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
1090                                err = -EINVAL;
1091                                goto out;
1092                        }
1093
1094                        /* set iucv message target class */
1095                        memcpy(&txmsg.class,
1096                                (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
1097
1098                        break;
1099
1100                default:
1101                        err = -EINVAL;
1102                        goto out;
1103                }
1104        }
1105
1106        /* allocate one skb for each iucv message:
1107         * this is fine for SOCK_SEQPACKET (unless we want to support
1108         * segmented records using the MSG_EOR flag), but
1109         * for SOCK_STREAM we might want to improve it in future */
1110        if (iucv->transport == AF_IUCV_TRANS_HIPER)
1111                skb = sock_alloc_send_skb(sk,
1112                        len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
1113                        noblock, &err);
1114        else
1115                skb = sock_alloc_send_skb(sk, len, noblock, &err);
1116        if (!skb)
1117                goto out;
1118        if (iucv->transport == AF_IUCV_TRANS_HIPER)
1119                skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
1120        if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1121                err = -EFAULT;
1122                goto fail;
1123        }
1124
1125        /* wait if the message limit for the iucv path has been reached */
1126        timeo = sock_sndtimeo(sk, noblock);
1127        err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
1128        if (err)
1129                goto fail;
1130
1131        /* return -ECONNRESET if the socket is no longer connected */
1132        if (sk->sk_state != IUCV_CONNECTED) {
1133                err = -ECONNRESET;
1134                goto fail;
1135        }
1136
1137        /* increment and save iucv message tag for msg_completion cbk */
1138        txmsg.tag = iucv->send_tag++;
1139        IUCV_SKB_CB(skb)->tag = txmsg.tag;
1140
1141        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1142                atomic_inc(&iucv->msg_sent);
1143                err = afiucv_hs_send(&txmsg, sk, skb, 0);
1144                if (err) {
1145                        atomic_dec(&iucv->msg_sent);
1146                        goto fail;
1147                }
1148                goto release;
1149        }
1150        skb_queue_tail(&iucv->send_skb_q, skb);
1151
1152        if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
1153              && skb->len <= 7) {
1154                err = iucv_send_iprm(iucv->path, &txmsg, skb);
1155
1156                /* on success: there is no message_complete callback
1157                 * for an IPRMDATA msg; remove skb from send queue */
1158                if (err == 0) {
1159                        skb_unlink(skb, &iucv->send_skb_q);
1160                        kfree_skb(skb);
1161                }
1162
1163                /* this error should never happen since the
1164                 * IUCV_IPRMDATA path flag is set... sever path */
1165                if (err == 0x15) {
1166                        pr_iucv->path_sever(iucv->path, NULL);
1167                        skb_unlink(skb, &iucv->send_skb_q);
1168                        err = -EPIPE;
1169                        goto fail;
1170                }
1171        } else
1172                err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
1173                                        (void *) skb->data, skb->len);
1174        if (err) {
1175                if (err == 3) {
1176                        user_id[8] = 0;
1177                        memcpy(user_id, iucv->dst_user_id, 8);
1178                        appl_id[8] = 0;
1179                        memcpy(appl_id, iucv->dst_name, 8);
1180                        pr_err("Application %s on z/VM guest %s"
1181                                " exceeds message limit\n",
1182                                appl_id, user_id);
1183                        err = -EAGAIN;
1184                } else
1185                        err = -EPIPE;
1186                skb_unlink(skb, &iucv->send_skb_q);
1187                goto fail;
1188        }
1189
1190release:
1191        release_sock(sk);
1192        return len;
1193
1194fail:
1195        kfree_skb(skb);
1196out:
1197        release_sock(sk);
1198        return err;
1199}
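    /*
     * Userspace sketch (illustrative, not part of this file; assumes the
     * SOL_IUCV/SCM_IUCV_TRGCLS definitions are available): selecting the iucv
     * message target class for a send via a control message:
     *
     *	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
     *	struct cmsghdr *cm;
     *
     *	msg.msg_control = cbuf;
     *	msg.msg_controllen = sizeof(cbuf);
     *	cm = CMSG_FIRSTHDR(&msg);
     *	cm->cmsg_level = SOL_IUCV;
     *	cm->cmsg_type = SCM_IUCV_TRGCLS;
     *	cm->cmsg_len = CMSG_LEN(sizeof(uint32_t));
     *	memcpy(CMSG_DATA(cm), &trgcls, sizeof(uint32_t));
     *	sendmsg(fd, &msg, 0);
     */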
1200
1201/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skbs
1202 *
1203 * Locking: must be called with message_q.lock held
1204 */
1205static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
1206{
1207        int dataleft, size, copied = 0;
1208        struct sk_buff *nskb;
1209
1210        dataleft = len;
1211        while (dataleft) {
1212                if (dataleft >= sk->sk_rcvbuf / 4)
1213                        size = sk->sk_rcvbuf / 4;
1214                else
1215                        size = dataleft;
1216
1217                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
1218                if (!nskb)
1219                        return -ENOMEM;
1220
1221                /* copy target class to control buffer of new skb */
1222                IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
1223
1224                /* copy data fragment */
1225                memcpy(nskb->data, skb->data + copied, size);
1226                copied += size;
1227                dataleft -= size;
1228
1229                skb_reset_transport_header(nskb);
1230                skb_reset_network_header(nskb);
1231                nskb->len = size;
1232
1233                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
1234        }
1235
1236        return 0;
1237}
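    /*
     * Worked example (added for illustration): with sk->sk_rcvbuf == 65536,
     * each fragment is at most 16384 bytes, so a 40960-byte message is queued
     * on backlog_skb_q as 16384 + 16384 + 8192 bytes.
     */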
1238
1239/* iucv_process_message() - Receive a single outstanding IUCV message
1240 *
1241 * Locking: must be called with message_q.lock held
1242 */
1243static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1244                                 struct iucv_path *path,
1245                                 struct iucv_message *msg)
1246{
1247        int rc;
1248        unsigned int len;
1249
1250        len = iucv_msg_length(msg);
1251
1252        /* store msg target class in the second 4 bytes of skb ctrl buffer */
1253        /* Note: the first 4 bytes are reserved for msg tag */
1254        IUCV_SKB_CB(skb)->class = msg->class;
1255
1256        /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
1257        if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
1258                if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
1259                        skb->data = NULL;
1260                        skb->len = 0;
1261                }
1262        } else {
1263                rc = pr_iucv->message_receive(path, msg,
1264                                              msg->flags & IUCV_IPRMDATA,
1265                                              skb->data, len, NULL);
1266                if (rc) {
1267                        kfree_skb(skb);
1268                        return;
1269                }
1270                /* we need to fragment iucv messages for SOCK_STREAM only;
1271                 * for SOCK_SEQPACKET, it is only relevant if we support
1272                 * record segmentation using MSG_EOR (see also recvmsg()) */
1273                if (sk->sk_type == SOCK_STREAM &&
1274                    skb->truesize >= sk->sk_rcvbuf / 4) {
1275                        rc = iucv_fragment_skb(sk, skb, len);
1276                        kfree_skb(skb);
1277                        skb = NULL;
1278                        if (rc) {
1279                                pr_iucv->path_sever(path, NULL);
1280                                return;
1281                        }
1282                        skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
1283                } else {
1284                        skb_reset_transport_header(skb);
1285                        skb_reset_network_header(skb);
1286                        skb->len = len;
1287                }
1288        }
1289
1290        IUCV_SKB_CB(skb)->offset = 0;
1291        if (sock_queue_rcv_skb(sk, skb))
1292                skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
1293}
1294
1295/* iucv_process_message_q() - Process outstanding IUCV messages
1296 *
1297 * Locking: must be called with message_q.lock held
1298 */
1299static void iucv_process_message_q(struct sock *sk)
1300{
1301        struct iucv_sock *iucv = iucv_sk(sk);
1302        struct sk_buff *skb;
1303        struct sock_msg_q *p, *n;
1304
1305        list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
1306                skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
1307                if (!skb)
1308                        break;
1309                iucv_process_message(sk, skb, p->path, &p->msg);
1310                list_del(&p->list);
1311                kfree(p);
1312                if (!skb_queue_empty(&iucv->backlog_skb_q))
1313                        break;
1314        }
1315}
1316
1317static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1318                             size_t len, int flags)
1319{
1320        int noblock = flags & MSG_DONTWAIT;
1321        struct sock *sk = sock->sk;
1322        struct iucv_sock *iucv = iucv_sk(sk);
1323        unsigned int copied, rlen;
1324        struct sk_buff *skb, *rskb, *cskb;
1325        int err = 0;
1326        u32 offset;
1327
1328        if ((sk->sk_state == IUCV_DISCONN) &&
1329            skb_queue_empty(&iucv->backlog_skb_q) &&
1330            skb_queue_empty(&sk->sk_receive_queue) &&
1331            list_empty(&iucv->message_q.list))
1332                return 0;
1333
1334        if (flags & (MSG_OOB))
1335                return -EOPNOTSUPP;
1336
1337        /* receive/dequeue next skb:
1338         * the function understands MSG_PEEK and, thus, does not dequeue the skb when peeking */
1339        skb = skb_recv_datagram(sk, flags, noblock, &err);
1340        if (!skb) {
1341                if (sk->sk_shutdown & RCV_SHUTDOWN)
1342                        return 0;
1343                return err;
1344        }
1345
1346        offset = IUCV_SKB_CB(skb)->offset;
1347        rlen   = skb->len - offset;             /* real length of skb */
1348        copied = min_t(unsigned int, rlen, len);
1349        if (!rlen)
1350                sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
1351
1352        cskb = skb;
1353        if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
1354                if (!(flags & MSG_PEEK))
1355                        skb_queue_head(&sk->sk_receive_queue, skb);
1356                return -EFAULT;
1357        }
1358
1359        /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
1360        if (sk->sk_type == SOCK_SEQPACKET) {
1361                if (copied < rlen)
1362                        msg->msg_flags |= MSG_TRUNC;
1363                /* each iucv message contains a complete record */
1364                msg->msg_flags |= MSG_EOR;
1365        }
1366
1367        /* create control message to store iucv msg target class:
1368         * get the trgcls from the control buffer of the skb, because the
1369         * original iucv message may have been fragmented. */
1370        err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
1371                       sizeof(IUCV_SKB_CB(skb)->class),
1372                       (void *)&IUCV_SKB_CB(skb)->class);
1373        if (err) {
1374                if (!(flags & MSG_PEEK))
1375                        skb_queue_head(&sk->sk_receive_queue, skb);
1376                return err;
1377        }
1378
1379        /* Mark read part of skb as used */
1380        if (!(flags & MSG_PEEK)) {
1381
1382                /* SOCK_STREAM: re-queue skb if it contains unreceived data */
1383                if (sk->sk_type == SOCK_STREAM) {
1384                        if (copied < rlen) {
1385                                IUCV_SKB_CB(skb)->offset = offset + copied;
1386                                skb_queue_head(&sk->sk_receive_queue, skb);
1387                                goto done;
1388                        }
1389                }
1390
1391                kfree_skb(skb);
1392                if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1393                        atomic_inc(&iucv->msg_recv);
1394                        if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
1395                                WARN_ON(1);
1396                                iucv_sock_close(sk);
1397                                return -EFAULT;
1398                        }
1399                }
1400
1401                /* Queue backlog skbs */
1402                spin_lock_bh(&iucv->message_q.lock);
1403                rskb = skb_dequeue(&iucv->backlog_skb_q);
1404                while (rskb) {
1405                        IUCV_SKB_CB(rskb)->offset = 0;
1406                        if (sock_queue_rcv_skb(sk, rskb)) {
1407                                skb_queue_head(&iucv->backlog_skb_q,
1408                                                rskb);
1409                                break;
1410                        } else {
1411                                rskb = skb_dequeue(&iucv->backlog_skb_q);
1412                        }
1413                }
1414                if (skb_queue_empty(&iucv->backlog_skb_q)) {
1415                        if (!list_empty(&iucv->message_q.list))
1416                                iucv_process_message_q(sk);
1417                        if (atomic_read(&iucv->msg_recv) >=
1418                                                        iucv->msglimit / 2) {
1419                                err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
1420                                if (err) {
1421                                        sk->sk_state = IUCV_DISCONN;
1422                                        sk->sk_state_change(sk);
1423                                }
1424                        }
1425                }
1426                spin_unlock_bh(&iucv->message_q.lock);
1427        }
1428
1429done:
1430        /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
1431        if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
1432                copied = rlen;
1433
1434        return copied;
1435}
1436
1437static inline unsigned int iucv_accept_poll(struct sock *parent)
1438{
1439        struct iucv_sock *isk, *n;
1440        struct sock *sk;
1441
1442        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
1443                sk = (struct sock *) isk;
1444
1445                if (sk->sk_state == IUCV_CONNECTED)
1446                        return POLLIN | POLLRDNORM;
1447        }
1448
1449        return 0;
1450}
1451
1452unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
1453                            poll_table *wait)
1454{
1455        struct sock *sk = sock->sk;
1456        unsigned int mask = 0;
1457
1458        sock_poll_wait(file, sk_sleep(sk), wait);
1459
1460        if (sk->sk_state == IUCV_LISTEN)
1461                return iucv_accept_poll(sk);
1462
1463        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
1464                mask |= POLLERR |
1465                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
1466
1467        if (sk->sk_shutdown & RCV_SHUTDOWN)
1468                mask |= POLLRDHUP;
1469
1470        if (sk->sk_shutdown == SHUTDOWN_MASK)
1471                mask |= POLLHUP;
1472
1473        if (!skb_queue_empty(&sk->sk_receive_queue) ||
1474            (sk->sk_shutdown & RCV_SHUTDOWN))
1475                mask |= POLLIN | POLLRDNORM;
1476
1477        if (sk->sk_state == IUCV_CLOSED)
1478                mask |= POLLHUP;
1479
1480        if (sk->sk_state == IUCV_DISCONN)
1481                mask |= POLLIN;
1482
1483        if (sock_writeable(sk) && iucv_below_msglim(sk))
1484                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1485        else
1486                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1487
1488        return mask;
1489}
1490
1491static int iucv_sock_shutdown(struct socket *sock, int how)
1492{
1493        struct sock *sk = sock->sk;
1494        struct iucv_sock *iucv = iucv_sk(sk);
1495        struct iucv_message txmsg;
1496        int err = 0;
1497
1498        how++;
1499
1500        if ((how & ~SHUTDOWN_MASK) || !how)
1501                return -EINVAL;
1502
1503        lock_sock(sk);
1504        switch (sk->sk_state) {
1505        case IUCV_LISTEN:
1506        case IUCV_DISCONN:
1507        case IUCV_CLOSING:
1508        case IUCV_CLOSED:
1509                err = -ENOTCONN;
1510                goto fail;
1511        default:
1512                break;
1513        }
1514
1515        if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
1516                if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1517                        txmsg.class = 0;
1518                        txmsg.tag = 0;
1519                        err = pr_iucv->message_send(iucv->path, &txmsg,
1520                                IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
1521                        if (err) {
1522                                switch (err) {
1523                                case 1:
1524                                        err = -ENOTCONN;
1525                                        break;
1526                                case 2:
1527                                        err = -ECONNRESET;
1528                                        break;
1529                                default:
1530                                        err = -ENOTCONN;
1531                                        break;
1532                                }
1533                        }
1534                } else
1535                        iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
1536        }
1537
1538        sk->sk_shutdown |= how;
1539        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
1540                if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
1541                    iucv->path) {
1542                        err = pr_iucv->path_quiesce(iucv->path, NULL);
1543                        if (err)
1544                                err = -ENOTCONN;
1545/*                      skb_queue_purge(&sk->sk_receive_queue); */
1546                }
1547                skb_queue_purge(&sk->sk_receive_queue);
1548        }
1549
1550        /* Wake up anyone sleeping in poll */
1551        sk->sk_state_change(sk);
1552
1553fail:
1554        release_sock(sk);
1555        return err;
1556}
1557
1558static int iucv_sock_release(struct socket *sock)
1559{
1560        struct sock *sk = sock->sk;
1561        int err = 0;
1562
1563        if (!sk)
1564                return 0;
1565
1566        iucv_sock_close(sk);
1567
1568        sock_orphan(sk);
1569        iucv_sock_kill(sk);
1570        return err;
1571}
1572
1573/* getsockopt and setsockopt */
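    /* Illustrative userspace usage (hypothetical descriptor fd; assumes the
     * AF_IUCV SOL_IUCV/SO_MSGLIMIT definitions are visible to the
     * application):
     *
     *   int limit = 10;
     *   setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
     *
     * SO_MSGLIMIT can only be changed while the socket is still unconnected
     * (IUCV_OPEN or IUCV_BOUND), see iucv_sock_setsockopt() below.
     */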
1574static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1575                                char __user *optval, unsigned int optlen)
1576{
1577        struct sock *sk = sock->sk;
1578        struct iucv_sock *iucv = iucv_sk(sk);
1579        int val;
1580        int rc;
1581
1582        if (level != SOL_IUCV)
1583                return -ENOPROTOOPT;
1584
1585        if (optlen < sizeof(int))
1586                return -EINVAL;
1587
1588        if (get_user(val, (int __user *) optval))
1589                return -EFAULT;
1590
1591        rc = 0;
1592
1593        lock_sock(sk);
1594        switch (optname) {
1595        case SO_IPRMDATA_MSG:
1596                if (val)
1597                        iucv->flags |= IUCV_IPRMDATA;
1598                else
1599                        iucv->flags &= ~IUCV_IPRMDATA;
1600                break;
1601        case SO_MSGLIMIT:
1602                switch (sk->sk_state) {
1603                case IUCV_OPEN:
1604                case IUCV_BOUND:
1605                        if (val < 1 || val > (u16)(~0))
1606                                rc = -EINVAL;
1607                        else
1608                                iucv->msglimit = val;
1609                        break;
1610                default:
1611                        rc = -EINVAL;
1612                        break;
1613                }
1614                break;
1615        default:
1616                rc = -ENOPROTOOPT;
1617                break;
1618        }
1619        release_sock(sk);
1620
1621        return rc;
1622}
1623
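    /* iucv_sock_getsockopt() - return SO_IPRMDATA_MSG, SO_MSGLIMIT or
     * SO_MSGSIZE; SO_MSGSIZE is the device MTU minus the transport and
     * ethernet headers on HiperSockets, or 0x7fffffff when no HiperSockets
     * device is attached.
     */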
1624static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1625                                char __user *optval, int __user *optlen)
1626{
1627        struct sock *sk = sock->sk;
1628        struct iucv_sock *iucv = iucv_sk(sk);
1629        unsigned int val;
1630        int len;
1631
1632        if (level != SOL_IUCV)
1633                return -ENOPROTOOPT;
1634
1635        if (get_user(len, optlen))
1636                return -EFAULT;
1637
1638        if (len < 0)
1639                return -EINVAL;
1640
1641        len = min_t(unsigned int, len, sizeof(int));
1642
1643        switch (optname) {
1644        case SO_IPRMDATA_MSG:
1645                val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1646                break;
1647        case SO_MSGLIMIT:
1648                lock_sock(sk);
1649                val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1650                                           : iucv->msglimit;    /* default */
1651                release_sock(sk);
1652                break;
1653        case SO_MSGSIZE:
1654                if (sk->sk_state == IUCV_OPEN)
1655                        return -EBADFD;
1656                val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
1657                                sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
1658                                0x7fffffff;
1659                break;
1660        default:
1661                return -ENOPROTOOPT;
1662        }
1663
1664        if (put_user(len, optlen))
1665                return -EFAULT;
1666        if (copy_to_user(optval, &val, len))
1667                return -EFAULT;
1668
1669        return 0;
1670}
1671
1672
1673/* Callback wrappers - called from iucv base support */
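    /* iucv_callback_connreq() - handle an incoming IUCV connect request:
     * find the matching listening socket, allocate and initialize a child
     * socket, accept the path and enqueue the child on the accept queue.
     */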
1674static int iucv_callback_connreq(struct iucv_path *path,
1675                                 u8 ipvmid[8], u8 ipuser[16])
1676{
1677        unsigned char user_data[16];
1678        unsigned char nuser_data[16];
1679        unsigned char src_name[8];
1680        struct sock *sk, *nsk;
1681        struct iucv_sock *iucv, *niucv;
1682        int err;
1683
1684        memcpy(src_name, ipuser, 8);
1685        EBCASC(src_name, 8);
1686        /* Find out if this path belongs to af_iucv. */
1687        read_lock(&iucv_sk_list.lock);
1688        iucv = NULL;
1689        sk = NULL;
1690        sk_for_each(sk, &iucv_sk_list.head)
1691                if (sk->sk_state == IUCV_LISTEN &&
1692                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
1693                        /*
1694                         * Found a listening socket with
1695                         * src_name == ipuser[0-7].
1696                         */
1697                        iucv = iucv_sk(sk);
1698                        break;
1699                }
1700        read_unlock(&iucv_sk_list.lock);
1701        if (!iucv)
1702                /* No socket found, not one of our paths. */
1703                return -EINVAL;
1704
1705        bh_lock_sock(sk);
1706
1707        /* Check if parent socket is listening */
1708        low_nmcpy(user_data, iucv->src_name);
1709        high_nmcpy(user_data, iucv->dst_name);
1710        ASCEBC(user_data, sizeof(user_data));
1711        if (sk->sk_state != IUCV_LISTEN) {
1712                err = pr_iucv->path_sever(path, user_data);
1713                iucv_path_free(path);
1714                goto fail;
1715        }
1716
1717        /* Check for backlog size */
1718        if (sk_acceptq_is_full(sk)) {
1719                err = pr_iucv->path_sever(path, user_data);
1720                iucv_path_free(path);
1721                goto fail;
1722        }
1723
1724        /* Create the new socket */
1725        nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
1726        if (!nsk) {
1727                err = pr_iucv->path_sever(path, user_data);
1728                iucv_path_free(path);
1729                goto fail;
1730        }
1731
1732        niucv = iucv_sk(nsk);
1733        iucv_sock_init(nsk, sk);
1734
1735        /* Set the new iucv_sock */
1736        memcpy(niucv->dst_name, ipuser + 8, 8);
1737        EBCASC(niucv->dst_name, 8);
1738        memcpy(niucv->dst_user_id, ipvmid, 8);
1739        memcpy(niucv->src_name, iucv->src_name, 8);
1740        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1741        niucv->path = path;
1742
1743        /* Prepare user data for accepting the path */
1744        high_nmcpy(nuser_data, ipuser + 8);
1745        memcpy(nuser_data + 8, niucv->src_name, 8);
1746        ASCEBC(nuser_data + 8, 8);
1747
1748        /* set message limit for path based on msglimit of accepting socket */
1749        niucv->msglimit = iucv->msglimit;
1750        path->msglim = iucv->msglimit;
1751        err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1752        if (err) {
1753                iucv_sever_path(nsk, 1);
1754                iucv_sock_kill(nsk);
1755                goto fail;
1756        }
1757
1758        iucv_accept_enqueue(sk, nsk);
1759
1760        /* Wake up accept */
1761        nsk->sk_state = IUCV_CONNECTED;
1762        sk->sk_data_ready(sk);
1763        err = 0;
1764fail:
1765        bh_unlock_sock(sk);
1766        return 0;
1767}
1768
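    /* iucv_callback_connack() - the peer accepted our path; mark the socket
     * connected and wake up any waiter.
     */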
1769static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
1770{
1771        struct sock *sk = path->private;
1772
1773        sk->sk_state = IUCV_CONNECTED;
1774        sk->sk_state_change(sk);
1775}
1776
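    /* iucv_callback_rx() - a message arrived on the IUCV path; reject it if
     * the receive side is shut down, receive it into an skb right away if
     * the receive buffer allows, otherwise queue it on message_q for later
     * delivery.
     */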
1777static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1778{
1779        struct sock *sk = path->private;
1780        struct iucv_sock *iucv = iucv_sk(sk);
1781        struct sk_buff *skb;
1782        struct sock_msg_q *save_msg;
1783        int len;
1784
1785        if (sk->sk_shutdown & RCV_SHUTDOWN) {
1786                pr_iucv->message_reject(path, msg);
1787                return;
1788        }
1789
1790        spin_lock(&iucv->message_q.lock);
1791
1792        if (!list_empty(&iucv->message_q.list) ||
1793            !skb_queue_empty(&iucv->backlog_skb_q))
1794                goto save_message;
1795
1796        len = atomic_read(&sk->sk_rmem_alloc);
1797        len += SKB_TRUESIZE(iucv_msg_length(msg));
1798        if (len > sk->sk_rcvbuf)
1799                goto save_message;
1800
1801        skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
1802        if (!skb)
1803                goto save_message;
1804
1805        iucv_process_message(sk, skb, path, msg);
1806        goto out_unlock;
1807
1808save_message:
1809        save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
1810        if (!save_msg)
1811                goto out_unlock;
1812        save_msg->path = path;
1813        save_msg->msg = *msg;
1814
1815        list_add_tail(&save_msg->list, &iucv->message_q.list);
1816
1817out_unlock:
1818        spin_unlock(&iucv->message_q.lock);
1819}
1820
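    /* iucv_callback_txdone() - a sent message completed; drop the matching
     * skb from the send queue, wake up senders waiting on the message limit
     * and finish a pending close once the queue is empty.
     */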
1821static void iucv_callback_txdone(struct iucv_path *path,
1822                                 struct iucv_message *msg)
1823{
1824        struct sock *sk = path->private;
1825        struct sk_buff *this = NULL;
1826        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
1827        struct sk_buff *list_skb = list->next;
1828        unsigned long flags;
1829
1830        bh_lock_sock(sk);
1831        if (!skb_queue_empty(list)) {
1832                spin_lock_irqsave(&list->lock, flags);
1833
1834                while (list_skb != (struct sk_buff *)list) {
1835                        if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
1836                                this = list_skb;
1837                                break;
1838                        }
1839                        list_skb = list_skb->next;
1840                }
1841                if (this)
1842                        __skb_unlink(this, list);
1843
1844                spin_unlock_irqrestore(&list->lock, flags);
1845
1846                if (this) {
1847                        kfree_skb(this);
1848                        /* wake up any process waiting to send */
1849                        iucv_sock_wake_msglim(sk);
1850                }
1851        }
1852
1853        if (sk->sk_state == IUCV_CLOSING) {
1854                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
1855                        sk->sk_state = IUCV_CLOSED;
1856                        sk->sk_state_change(sk);
1857                }
1858        }
1859        bh_unlock_sock(sk);
1860
1861}
1862
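    /* iucv_callback_connrej() - the peer severed the path; tear down our
     * side of it and mark the socket disconnected.
     */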
1863static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1864{
1865        struct sock *sk = path->private;
1866
1867        if (sk->sk_state == IUCV_CLOSED)
1868                return;
1869
1870        bh_lock_sock(sk);
1871        iucv_sever_path(sk, 1);
1872        sk->sk_state = IUCV_DISCONN;
1873
1874        sk->sk_state_change(sk);
1875        bh_unlock_sock(sk);
1876}
1877
1878/* called if the other communication side shuts down its RECV direction;
1879 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
1880 */
1881static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1882{
1883        struct sock *sk = path->private;
1884
1885        bh_lock_sock(sk);
1886        if (sk->sk_state != IUCV_CLOSED) {
1887                sk->sk_shutdown |= SEND_SHUTDOWN;
1888                sk->sk_state_change(sk);
1889        }
1890        bh_unlock_sock(sk);
1891}
1892
1893/***************** HiperSockets transport callbacks ********************/
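    /* afiucv_swap_src_dest() - turn a received transport header into a reply
     * header by swapping source and destination identifiers and prepending a
     * zeroed ethernet header.
     */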
1894static void afiucv_swap_src_dest(struct sk_buff *skb)
1895{
1896        struct af_iucv_trans_hdr *trans_hdr =
1897                                (struct af_iucv_trans_hdr *)skb->data;
1898        char tmpID[8];
1899        char tmpName[8];
1900
1901        ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1902        ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1903        ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1904        ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1905        memcpy(tmpID, trans_hdr->srcUserID, 8);
1906        memcpy(tmpName, trans_hdr->srcAppName, 8);
1907        memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1908        memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1909        memcpy(trans_hdr->destUserID, tmpID, 8);
1910        memcpy(trans_hdr->destAppName, tmpName, 8);
1911        skb_push(skb, ETH_HLEN);
1912        memset(skb->data, 0, ETH_HLEN);
1913}
1914
1915/**
1916 * afiucv_hs_callback_syn() - react on received SYN
1917 **/
1918static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1919{
1920        struct sock *nsk;
1921        struct iucv_sock *iucv, *niucv;
1922        struct af_iucv_trans_hdr *trans_hdr;
1923        int err;
1924
1925        iucv = iucv_sk(sk);
1926        trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
1927        if (!iucv) {
1928                /* no sock - connection refused */
1929                afiucv_swap_src_dest(skb);
1930                trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1931                err = dev_queue_xmit(skb);
1932                goto out;
1933        }
1934
1935        nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
1936        bh_lock_sock(sk);
1937        if ((sk->sk_state != IUCV_LISTEN) ||
1938            sk_acceptq_is_full(sk) ||
1939            !nsk) {
1940                /* error on server socket - connection refused */
1941                afiucv_swap_src_dest(skb);
1942                trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1943                err = dev_queue_xmit(skb);
1944                iucv_sock_kill(nsk);
1945                bh_unlock_sock(sk);
1946                goto out;
1947        }
1948
1949        niucv = iucv_sk(nsk);
1950        iucv_sock_init(nsk, sk);
1951        niucv->transport = AF_IUCV_TRANS_HIPER;
1952        niucv->msglimit = iucv->msglimit;
1953        if (!trans_hdr->window)
1954                niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
1955        else
1956                niucv->msglimit_peer = trans_hdr->window;
1957        memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
1958        memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
1959        memcpy(niucv->src_name, iucv->src_name, 8);
1960        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1961        nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1962        niucv->hs_dev = iucv->hs_dev;
1963        dev_hold(niucv->hs_dev);
1964        afiucv_swap_src_dest(skb);
1965        trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
1966        trans_hdr->window = niucv->msglimit;
1967        /* if the receiver acks the transmission, the connection is established */
1968        err = dev_queue_xmit(skb);
1969        if (!err) {
1970                iucv_accept_enqueue(sk, nsk);
1971                nsk->sk_state = IUCV_CONNECTED;
1972                sk->sk_data_ready(sk);
1973        } else
1974                iucv_sock_kill(nsk);
1975        bh_unlock_sock(sk);
1976
1977out:
1978        return NET_RX_SUCCESS;
1979}
1980
1981/**
1982 * afiucv_hs_callback_synack() - react on received SYN-ACK
1983 **/
1984static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
1985{
1986        struct iucv_sock *iucv = iucv_sk(sk);
1987        struct af_iucv_trans_hdr *trans_hdr =
1988                                        (struct af_iucv_trans_hdr *)skb->data;
1989
1990        if (!iucv)
1991                goto out;
1992        if (sk->sk_state != IUCV_BOUND)
1993                goto out;
1994        bh_lock_sock(sk);
1995        iucv->msglimit_peer = trans_hdr->window;
1996        sk->sk_state = IUCV_CONNECTED;
1997        sk->sk_state_change(sk);
1998        bh_unlock_sock(sk);
1999out:
2000        kfree_skb(skb);
2001        return NET_RX_SUCCESS;
2002}
2003
2004/**
2005 * afiucv_hs_callback_synfin() - react on received SYN_FIN
2006 **/
2007static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2008{
2009        struct iucv_sock *iucv = iucv_sk(sk);
2010
2011        if (!iucv)
2012                goto out;
2013        if (sk->sk_state != IUCV_BOUND)
2014                goto out;
2015        bh_lock_sock(sk);
2016        sk->sk_state = IUCV_DISCONN;
2017        sk->sk_state_change(sk);
2018        bh_unlock_sock(sk);
2019out:
2020        kfree_skb(skb);
2021        return NET_RX_SUCCESS;
2022}
2023
2024/**
2025 * afiucv_hs_callback_fin() - react on received FIN
2026 **/
2027static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2028{
2029        struct iucv_sock *iucv = iucv_sk(sk);
2030
2031        /* other end of connection closed */
2032        if (!iucv)
2033                goto out;
2034        bh_lock_sock(sk);
2035        if (sk->sk_state == IUCV_CONNECTED) {
2036                sk->sk_state = IUCV_DISCONN;
2037                sk->sk_state_change(sk);
2038        }
2039        bh_unlock_sock(sk);
2040out:
2041        kfree_skb(skb);
2042        return NET_RX_SUCCESS;
2043}
2044
2045/**
2046 * afiucv_hs_callback_win() - react on received WIN
2047 **/
2048static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2049{
2050        struct iucv_sock *iucv = iucv_sk(sk);
2051        struct af_iucv_trans_hdr *trans_hdr =
2052                                        (struct af_iucv_trans_hdr *)skb->data;
2053
2054        if (!iucv)
2055                return NET_RX_SUCCESS;
2056
2057        if (sk->sk_state != IUCV_CONNECTED)
2058                return NET_RX_SUCCESS;
2059
2060        atomic_sub(trans_hdr->window, &iucv->msg_sent);
2061        iucv_sock_wake_msglim(sk);
2062        return NET_RX_SUCCESS;
2063}
2064
2065/**
2066 * afiucv_hs_callback_rx() - react on received data
2067 **/
2068static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2069{
2070        struct iucv_sock *iucv = iucv_sk(sk);
2071
2072        if (!iucv) {
2073                kfree_skb(skb);
2074                return NET_RX_SUCCESS;
2075        }
2076
2077        if (sk->sk_state != IUCV_CONNECTED) {
2078                kfree_skb(skb);
2079                return NET_RX_SUCCESS;
2080        }
2081
2082        if (sk->sk_shutdown & RCV_SHUTDOWN) {
2083                kfree_skb(skb);
2084                return NET_RX_SUCCESS;
2085        }
2086
2087        /* strip the transport header and queue the data for the receiver */
2088        if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
2089                kfree_skb(skb);
2090                return NET_RX_SUCCESS;
2091        }
2092        skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2093        skb_reset_transport_header(skb);
2094        skb_reset_network_header(skb);
2095        IUCV_SKB_CB(skb)->offset = 0;
2096        spin_lock(&iucv->message_q.lock);
2097        if (skb_queue_empty(&iucv->backlog_skb_q)) {
2098                if (sock_queue_rcv_skb(sk, skb)) {
2099                        /* handle rcv queue full */
2100                        skb_queue_tail(&iucv->backlog_skb_q, skb);
2101                }
2102        } else
2103                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
2104        spin_unlock(&iucv->message_q.lock);
2105        return NET_RX_SUCCESS;
2106}
2107
2108/**
2109 * afiucv_hs_rcv() - receive handler for data arriving through the
2110 *                   HiperSockets transport;
2111 *                   called from the netif RX softirq
2112 **/
2113static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2114        struct packet_type *pt, struct net_device *orig_dev)
2115{
2116        struct sock *sk;
2117        struct iucv_sock *iucv;
2118        struct af_iucv_trans_hdr *trans_hdr;
2119        char nullstring[8];
2120        int err = 0;
2121
2122        skb_pull(skb, ETH_HLEN);
2123        trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
2124        EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2125        EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2126        EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2127        EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2128        memset(nullstring, 0, sizeof(nullstring));
2129        iucv = NULL;
2130        sk = NULL;
2131        read_lock(&iucv_sk_list.lock);
2132        sk_for_each(sk, &iucv_sk_list.head) {
2133                if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2134                        if ((!memcmp(&iucv_sk(sk)->src_name,
2135                                     trans_hdr->destAppName, 8)) &&
2136                            (!memcmp(&iucv_sk(sk)->src_user_id,
2137                                     trans_hdr->destUserID, 8)) &&
2138                            (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2139                            (!memcmp(&iucv_sk(sk)->dst_user_id,
2140                                     nullstring, 8))) {
2141                                iucv = iucv_sk(sk);
2142                                break;
2143                        }
2144                } else {
2145                        if ((!memcmp(&iucv_sk(sk)->src_name,
2146                                     trans_hdr->destAppName, 8)) &&
2147                            (!memcmp(&iucv_sk(sk)->src_user_id,
2148                                     trans_hdr->destUserID, 8)) &&
2149                            (!memcmp(&iucv_sk(sk)->dst_name,
2150                                     trans_hdr->srcAppName, 8)) &&
2151                            (!memcmp(&iucv_sk(sk)->dst_user_id,
2152                                     trans_hdr->srcUserID, 8))) {
2153                                iucv = iucv_sk(sk);
2154                                break;
2155                        }
2156                }
2157        }
2158        read_unlock(&iucv_sk_list.lock);
2159        if (!iucv)
2160                sk = NULL;
2161
2162        /* no matching sock:
2163         * there is currently no clean way to reply without a sock, either
2164         * 1) send without a sock and skip return-code checking, or
2165         * 2) introduce a default sock to handle these cases;
2166         *
2167         * SYN -> send SYN|ACK in the good case, SYN|FIN in the bad case
2168         * data -> send FIN
2169         * SYN|ACK, SYN|FIN, FIN -> no action? */
2170
2171        switch (trans_hdr->flags) {
2172        case AF_IUCV_FLAG_SYN:
2173                /* connect request */
2174                err = afiucv_hs_callback_syn(sk, skb);
2175                break;
2176        case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2177                /* connect request confirmed */
2178                err = afiucv_hs_callback_synack(sk, skb);
2179                break;
2180        case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2181                /* connect request refused */
2182                err = afiucv_hs_callback_synfin(sk, skb);
2183                break;
2184        case (AF_IUCV_FLAG_FIN):
2185                /* close request */
2186                err = afiucv_hs_callback_fin(sk, skb);
2187                break;
2188        case (AF_IUCV_FLAG_WIN):
2189                err = afiucv_hs_callback_win(sk, skb);
2190                if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2191                        kfree_skb(skb);
2192                        break;
2193                }
2194                /* fall through and receive non-zero length data */
2195        case (AF_IUCV_FLAG_SHT):
2196                /* shutdown request */
2197                /* fall through and receive zero length data */
2198        case 0:
2199                /* plain data frame */
2200                IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
2201                err = afiucv_hs_callback_rx(sk, skb);
2202                break;
2203        default:
2204                ;
2205        }
2206
2207        return err;
2208}
2209
2210/**
2211 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
2212 *                                 transport
2213 **/
2214static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2215                                        enum iucv_tx_notify n)
2216{
2217        struct sock *isk = skb->sk;
2218        struct sock *sk = NULL;
2219        struct iucv_sock *iucv = NULL;
2220        struct sk_buff_head *list;
2221        struct sk_buff *list_skb;
2222        struct sk_buff *nskb;
2223        unsigned long flags;
2224
2225        read_lock_irqsave(&iucv_sk_list.lock, flags);
2226        sk_for_each(sk, &iucv_sk_list.head)
2227                if (sk == isk) {
2228                        iucv = iucv_sk(sk);
2229                        break;
2230                }
2231        read_unlock_irqrestore(&iucv_sk_list.lock, flags);
2232
2233        if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2234                return;
2235
2236        list = &iucv->send_skb_q;
2237        spin_lock_irqsave(&list->lock, flags);
2238        if (skb_queue_empty(list))
2239                goto out_unlock;
2240        list_skb = list->next;
2241        nskb = list_skb->next;
2242        while (list_skb != (struct sk_buff *)list) {
2243                if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2244                        switch (n) {
2245                        case TX_NOTIFY_OK:
2246                                __skb_unlink(list_skb, list);
2247                                kfree_skb(list_skb);
2248                                iucv_sock_wake_msglim(sk);
2249                                break;
2250                        case TX_NOTIFY_PENDING:
2251                                atomic_inc(&iucv->pendings);
2252                                break;
2253                        case TX_NOTIFY_DELAYED_OK:
2254                                __skb_unlink(list_skb, list);
2255                                atomic_dec(&iucv->pendings);
2256                                if (atomic_read(&iucv->pendings) <= 0)
2257                                        iucv_sock_wake_msglim(sk);
2258                                kfree_skb(list_skb);
2259                                break;
2260                        case TX_NOTIFY_UNREACHABLE:
2261                        case TX_NOTIFY_DELAYED_UNREACHABLE:
2262                        case TX_NOTIFY_TPQFULL: /* not yet used */
2263                        case TX_NOTIFY_GENERALERROR:
2264                        case TX_NOTIFY_DELAYED_GENERALERROR:
2265                                __skb_unlink(list_skb, list);
2266                                kfree_skb(list_skb);
2267                                if (sk->sk_state == IUCV_CONNECTED) {
2268                                        sk->sk_state = IUCV_DISCONN;
2269                                        sk->sk_state_change(sk);
2270                                }
2271                                break;
2272                        }
2273                        break;
2274                }
2275                list_skb = nskb;
2276                nskb = nskb->next;
2277        }
2278out_unlock:
2279        spin_unlock_irqrestore(&list->lock, flags);
2280
2281        if (sk->sk_state == IUCV_CLOSING) {
2282                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
2283                        sk->sk_state = IUCV_CLOSED;
2284                        sk->sk_state_change(sk);
2285                }
2286        }
2287
2288}
2289
2290/*
2291 * afiucv_netdev_event: handle netdev notifier chain events
2292 */
2293static int afiucv_netdev_event(struct notifier_block *this,
2294                               unsigned long event, void *ptr)
2295{
2296        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2297        struct sock *sk;
2298        struct iucv_sock *iucv;
2299
2300        switch (event) {
2301        case NETDEV_REBOOT:
2302        case NETDEV_GOING_DOWN:
2303                sk_for_each(sk, &iucv_sk_list.head) {
2304                        iucv = iucv_sk(sk);
2305                        if ((iucv->hs_dev == event_dev) &&
2306                            (sk->sk_state == IUCV_CONNECTED)) {
2307                                if (event == NETDEV_GOING_DOWN)
2308                                        iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
2309                                sk->sk_state = IUCV_DISCONN;
2310                                sk->sk_state_change(sk);
2311                        }
2312                }
2313                break;
2314        case NETDEV_DOWN:
2315        case NETDEV_UNREGISTER:
2316        default:
2317                break;
2318        }
2319        return NOTIFY_DONE;
2320}
2321
2322static struct notifier_block afiucv_netdev_notifier = {
2323        .notifier_call = afiucv_netdev_event,
2324};
2325
2326static const struct proto_ops iucv_sock_ops = {
2327        .family         = PF_IUCV,
2328        .owner          = THIS_MODULE,
2329        .release        = iucv_sock_release,
2330        .bind           = iucv_sock_bind,
2331        .connect        = iucv_sock_connect,
2332        .listen         = iucv_sock_listen,
2333        .accept         = iucv_sock_accept,
2334        .getname        = iucv_sock_getname,
2335        .sendmsg        = iucv_sock_sendmsg,
2336        .recvmsg        = iucv_sock_recvmsg,
2337        .poll           = iucv_sock_poll,
2338        .ioctl          = sock_no_ioctl,
2339        .mmap           = sock_no_mmap,
2340        .socketpair     = sock_no_socketpair,
2341        .shutdown       = iucv_sock_shutdown,
2342        .setsockopt     = iucv_sock_setsockopt,
2343        .getsockopt     = iucv_sock_getsockopt,
2344};
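    /* Illustrative userspace view of the ops above (hypothetical addresses;
     * assumes struct sockaddr_iucv from the AF_IUCV header is available):
     *
     *   int fd = socket(AF_IUCV, SOCK_STREAM, 0);
     *   bind(fd, (struct sockaddr *)&my_addr, sizeof(my_addr));
     *   connect(fd, (struct sockaddr *)&peer_addr, sizeof(peer_addr));
     */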
2345
2346static const struct net_proto_family iucv_sock_family_ops = {
2347        .family = AF_IUCV,
2348        .owner  = THIS_MODULE,
2349        .create = iucv_sock_create,
2350};
2351
2352static struct packet_type iucv_packet_type = {
2353        .type = cpu_to_be16(ETH_P_AF_IUCV),
2354        .func = afiucv_hs_rcv,
2355};
2356
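    /* afiucv_iucv_init() - register the AF_IUCV handler with the IUCV base
     * layer and set up the dummy af_iucv driver and device on the IUCV bus.
     */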
2357static int afiucv_iucv_init(void)
2358{
2359        int err;
2360
2361        err = pr_iucv->iucv_register(&af_iucv_handler, 0);
2362        if (err)
2363                goto out;
2364        /* establish dummy device */
2365        af_iucv_driver.bus = pr_iucv->bus;
2366        err = driver_register(&af_iucv_driver);
2367        if (err)
2368                goto out_iucv;
2369        af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2370        if (!af_iucv_dev) {
2371                err = -ENOMEM;
2372                goto out_driver;
2373        }
2374        dev_set_name(af_iucv_dev, "af_iucv");
2375        af_iucv_dev->bus = pr_iucv->bus;
2376        af_iucv_dev->parent = pr_iucv->root;
2377        af_iucv_dev->release = (void (*)(struct device *))kfree;
2378        af_iucv_dev->driver = &af_iucv_driver;
2379        err = device_register(af_iucv_dev);
2380        if (err)
2381                goto out_driver;
2382        return 0;
2383
2384out_driver:
2385        driver_unregister(&af_iucv_driver);
2386out_iucv:
2387        pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2388out:
2389        return err;
2390}
2391
2392static int __init afiucv_init(void)
2393{
2394        int err;
2395
2396        if (MACHINE_IS_VM) {
2397                cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2398                if (unlikely(err)) {
2399                        WARN_ON(err);
2400                        err = -EPROTONOSUPPORT;
2401                        goto out;
2402                }
2403
2404                pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2405                if (!pr_iucv) {
2406                        printk(KERN_WARNING "iucv_if lookup failed\n");
2407                        memset(&iucv_userid, 0, sizeof(iucv_userid));
2408                }
2409        } else {
2410                memset(&iucv_userid, 0, sizeof(iucv_userid));
2411                pr_iucv = NULL;
2412        }
2413
2414        err = proto_register(&iucv_proto, 0);
2415        if (err)
2416                goto out;
2417        err = sock_register(&iucv_sock_family_ops);
2418        if (err)
2419                goto out_proto;
2420
2421        if (pr_iucv) {
2422                err = afiucv_iucv_init();
2423                if (err)
2424                        goto out_sock;
2425        } else
2426                register_netdevice_notifier(&afiucv_netdev_notifier);
2427        dev_add_pack(&iucv_packet_type);
2428        return 0;
2429
2430out_sock:
2431        sock_unregister(PF_IUCV);
2432out_proto:
2433        proto_unregister(&iucv_proto);
2434out:
2435        if (pr_iucv)
2436                symbol_put(iucv_if);
2437        return err;
2438}
2439
2440static void __exit afiucv_exit(void)
2441{
2442        if (pr_iucv) {
2443                device_unregister(af_iucv_dev);
2444                driver_unregister(&af_iucv_driver);
2445                pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2446                symbol_put(iucv_if);
2447        } else
2448                unregister_netdevice_notifier(&afiucv_netdev_notifier);
2449        dev_remove_pack(&iucv_packet_type);
2450        sock_unregister(PF_IUCV);
2451        proto_unregister(&iucv_proto);
2452}
2453
2454module_init(afiucv_init);
2455module_exit(afiucv_exit);
2456
2457MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
2458MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
2459MODULE_VERSION(VERSION);
2460MODULE_LICENSE("GPL");
2461MODULE_ALIAS_NETPROTO(PF_IUCV);
2462
2463