linux/net/iucv/af_iucv.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
 *              Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *              Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static struct proto iucv_proto = {
        .name           = "AF_IUCV",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;
static struct iucv_handler af_iucv_handler;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
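/* (rmmsg[7] == 0x01 encodes a socket data length of 0xff - 0x01 = 0xfe,
 * which is greater than 7 and thus marks a special notification rather
 * than real socket data; see iucv_msg_length())
 */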

#define TRGCLS_SIZE     sizeof_field(struct iucv_message, class)

#define __iucv_sock_wait(sk, condition, timeo, ret)                     \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
        long __timeo = timeo;                                           \
        ret = 0;                                                        \
        prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);     \
        while (!(condition)) {                                          \
                if (!__timeo) {                                         \
                        ret = -EAGAIN;                                  \
                        break;                                          \
                }                                                       \
                if (signal_pending(current)) {                          \
                        ret = sock_intr_errno(__timeo);                 \
                        break;                                          \
                }                                                       \
                release_sock(sk);                                       \
                __timeo = schedule_timeout(__timeo);                    \
                lock_sock(sk);                                          \
                ret = sock_error(sk);                                   \
                if (ret)                                                \
                        break;                                          \
        }                                                               \
        finish_wait(sk_sleep(sk), &__wait);                             \
} while (0)

#define iucv_sock_wait(sk, condition, timeo)                            \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __iucv_sock_wait(sk, condition, timeo, __ret);          \
        __ret;                                                          \
})
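
/*
 * Usage sketch (illustrative only): with the socket lock held, callers
 * sleep until the condition holds, the timeout expires, or a signal
 * arrives, e.g. as in iucv_sock_close():
 *
 *      iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CLOSED, 0), timeo);
 *
 * The lock is dropped around schedule_timeout() and re-acquired before
 * the condition is re-checked.
 */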

static struct sock *iucv_accept_dequeue(struct sock *parent,
                                        struct socket *newsock);
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);

static struct iucv_sock_list iucv_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
        .autobind_name = ATOMIC_INIT(0)
};

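/* helpers to compose the 16-byte IUCV user data: high_nmcpy() fills
 * bytes 0..7, low_nmcpy() fills bytes 8..15 with an 8-char name each
 */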
static inline void high_nmcpy(unsigned char *dst, char *src)
{
       memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
       memcpy(&dst[8], src, 8);
}

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:        Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg,
 * whether the data is stored in a buffer or in the parameter list
 * (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *      PRMDATA[0..6]   socket data (max 7 bytes);
 *      PRMDATA[7]      socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data length is greater than 7, PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); in that case, the
 * function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
        size_t datalen;

        if (msg->flags & IUCV_IPRMDATA) {
                datalen = 0xff - msg->rmmsg[7];
                return (datalen < 8) ? datalen : 8;
        }
        return msg->length;
}
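
/*
 * Worked example (illustrative only): an IPRM message carrying 5 bytes
 * of socket data has rmmsg[7] = 0xff - 5 = 0xfa, so iucv_msg_length()
 * returns 5; the iprm_shutdown notification (rmmsg[7] = 0x01) yields
 * 0xfe, which is clamped to 8.
 */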

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:         sock structure
 * @state:      first iucv sk state
 * @state2:     second iucv sk state
 *
 * Returns true if the socket is in either the first or the second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
        return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:         sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);

        if (sk->sk_state != IUCV_CONNECTED)
                return 1;
        if (iucv->transport == AF_IUCV_TRANS_IUCV)
                return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
        else
                return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
                        (atomic_read(&iucv->pendings) <= 0));
}

/**
 * iucv_sock_wake_msglim() - Wake up threads waiting on msg limit
 * @sk:         sock structure
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 * @imsg:       iucv message to embed in the transport header, or NULL
 * @sock:       the sending sock
 * @skb:        the socket data to send
 * @flags:      AF_IUCV control flags (e.g. AF_IUCV_FLAG_SYN)
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                   struct sk_buff *skb, u8 flags)
{
        struct iucv_sock *iucv = iucv_sk(sock);
        struct af_iucv_trans_hdr *phs_hdr;
        int err, confirm_recv = 0;

        phs_hdr = skb_push(skb, sizeof(*phs_hdr));
        memset(phs_hdr, 0, sizeof(*phs_hdr));
        skb_reset_network_header(skb);

        phs_hdr->magic = ETH_P_AF_IUCV;
        phs_hdr->version = 1;
        phs_hdr->flags = flags;
        if (flags == AF_IUCV_FLAG_SYN)
                phs_hdr->window = iucv->msglimit;
        else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
                confirm_recv = atomic_read(&iucv->msg_recv);
                phs_hdr->window = confirm_recv;
                if (confirm_recv)
                        phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
        }
        memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
        memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
        memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
        memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
        ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
        ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
        ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
        ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
        if (imsg)
                memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

        skb->dev = iucv->hs_dev;
        if (!skb->dev) {
                err = -ENODEV;
                goto err_free;
        }

        dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);

        if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
                err = -ENETDOWN;
                goto err_free;
        }
        if (skb->len > skb->dev->mtu) {
                if (sock->sk_type == SOCK_SEQPACKET) {
                        err = -EMSGSIZE;
                        goto err_free;
                }
                err = pskb_trim(skb, skb->dev->mtu);
                if (err)
                        goto err_free;
        }
        skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);

        atomic_inc(&iucv->skbs_in_xmit);
        err = dev_queue_xmit(skb);
        if (net_xmit_eval(err)) {
                atomic_dec(&iucv->skbs_in_xmit);
        } else {
                atomic_sub(confirm_recv, &iucv->msg_recv);
                WARN_ON(atomic_read(&iucv->msg_recv) < 0);
        }
        return net_xmit_eval(err);

err_free:
        kfree_skb(skb);
        return err;
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
        struct sock *sk;

        sk_for_each(sk, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_error_queue);

        sk_mem_reclaim(sk);

        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_err("Attempt to release alive iucv socket %p\n", sk);
                return;
        }

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(refcount_read(&sk->sk_wmem_alloc));
        WARN_ON(sk->sk_wmem_queued);
        WARN_ON(sk->sk_forward_alloc);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
        struct sock *sk;

        /* Close non-accepted connections */
        while ((sk = iucv_accept_dequeue(parent, NULL))) {
                iucv_sock_close(sk);
                iucv_sock_kill(sk);
        }

        parent->sk_state = IUCV_CLOSED;
}

static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock_bh(&l->lock);
}

static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l->lock);
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;

        iucv_sock_unlink(&iucv_sk_list, sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_path *path = iucv->path;

        if (iucv->path) {
                iucv->path = NULL;
                if (with_user_data) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
                        ASCEBC(user_data, sizeof(user_data));
                        pr_iucv->path_sever(path, user_data);
                } else
                        pr_iucv->path_sever(path, NULL);
                iucv_path_free(path);
        }
}

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        int err = 0;
        int blen;
        struct sk_buff *skb;
        u8 shutdown = 0;

        blen = sizeof(struct af_iucv_trans_hdr) +
               LL_RESERVED_SPACE(iucv->hs_dev);
        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                /* controlling flags should be sent anyway */
                shutdown = sk->sk_shutdown;
                sk->sk_shutdown &= RCV_SHUTDOWN;
        }
        skb = sock_alloc_send_skb(sk, blen, 1, &err);
        if (skb) {
                skb_reserve(skb, blen);
                err = afiucv_hs_send(NULL, sk, skb, flags);
        }
        if (shutdown)
                sk->sk_shutdown = shutdown;
        return err;
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned long timeo;
        int err = 0;

        lock_sock(sk);

        switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;

        case IUCV_CONNECTED:
                if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                        err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
                        sk->sk_state = IUCV_DISCONN;
                        sk->sk_state_change(sk);
                }
                fallthrough;

        case IUCV_DISCONN:
                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);

                if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                timeo = sk->sk_lingertime;
                        else
                                timeo = IUCV_DISCONN_TIMEOUT;
                        iucv_sock_wait(sk,
                                        iucv_sock_in_state(sk, IUCV_CLOSED, 0),
                                        timeo);
                }
                fallthrough;

        case IUCV_CLOSING:
                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);

                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);

                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);
                fallthrough;

        default:
                iucv_sever_path(sk, 1);
        }

        if (iucv->hs_dev) {
                dev_put(iucv->hs_dev);
                iucv->hs_dev = NULL;
                sk->sk_bound_dev_if = 0;
        }

        /* mark socket for deletion by iucv_sock_kill() */
        sock_set_flag(sk, SOCK_ZAPPED);

        release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
        if (parent) {
                sk->sk_type = parent->sk_type;
                security_sk_clone(parent, sk);
        }
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
        struct sock *sk;
        struct iucv_sock *iucv;

        sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
        if (!sk)
                return NULL;
        iucv = iucv_sk(sk);

        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv->accept_q);
        spin_lock_init(&iucv->accept_q_lock);
        skb_queue_head_init(&iucv->send_skb_q);
        INIT_LIST_HEAD(&iucv->message_q.list);
        spin_lock_init(&iucv->message_q.lock);
        skb_queue_head_init(&iucv->backlog_skb_q);
        iucv->send_tag = 0;
        atomic_set(&iucv->pendings, 0);
        iucv->flags = 0;
        iucv->msglimit = 0;
        atomic_set(&iucv->skbs_in_xmit, 0);
        atomic_set(&iucv->msg_sent, 0);
        atomic_set(&iucv->msg_recv, 0);
        iucv->path = NULL;
        iucv->sk_txnotify = afiucv_hs_callback_txnotify;
        memset(&iucv->src_user_id, 0, 32);
        if (pr_iucv)
                iucv->transport = AF_IUCV_TRANS_IUCV;
        else
                iucv->transport = AF_IUCV_TRANS_HIPER;

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}

static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(parent);

        sock_hold(sk);
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        sk_acceptq_added(parent);
}

static void iucv_accept_unlink(struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        sk_acceptq_removed(iucv_sk(sk)->parent);
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
}

static struct sock *iucv_accept_dequeue(struct sock *parent,
                                        struct socket *newsock)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);

                if (sk->sk_state == IUCV_CLOSED) {
                        iucv_accept_unlink(sk);
                        release_sock(sk);
                        continue;
                }

                if (sk->sk_state == IUCV_CONNECTED ||
                    sk->sk_state == IUCV_DISCONN ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }
        return NULL;
}

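/* assign a unique auto-generated source name; the caller must hold
 * iucv_sk_list.lock to serialize against __iucv_get_sock_by_name()
 */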
static void __iucv_auto_name(struct iucv_sock *iucv)
{
        char name[12];

        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
                        atomic_inc_return(&iucv_sk_list.autobind_name));
        }
        memcpy(iucv->src_name, name, 8);
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
{
        DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
        char uid[sizeof(sa->siucv_user_id)];
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        int err = 0;
        struct net_device *dev;

        /* Verify the input sockaddr */
        if (addr_len < sizeof(struct sockaddr_iucv) ||
            addr->sa_family != AF_IUCV)
                return -EINVAL;

        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
                goto done;
        }

        write_lock_bh(&iucv_sk_list.lock);

        iucv = iucv_sk(sk);
        if (__iucv_get_sock_by_name(sa->siucv_name)) {
                err = -EADDRINUSE;
                goto done_unlock;
        }
        if (iucv->path)
                goto done_unlock;

        /* Bind the socket */
        if (pr_iucv)
                if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
                        goto vm_bind; /* VM IUCV transport */

        /* try hiper transport */
        memcpy(uid, sa->siucv_user_id, sizeof(uid));
        ASCEBC(uid, 8);
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
                if (!memcmp(dev->perm_addr, uid, 8)) {
                        memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
                        /* Check for uninitialized siucv_name */
                        if (strncmp(sa->siucv_name, "        ", 8) == 0)
                                __iucv_auto_name(iucv);
                        else
                                memcpy(iucv->src_name, sa->siucv_name, 8);
                        sk->sk_bound_dev_if = dev->ifindex;
                        iucv->hs_dev = dev;
                        dev_hold(dev);
                        sk->sk_state = IUCV_BOUND;
                        iucv->transport = AF_IUCV_TRANS_HIPER;
                        if (!iucv->msglimit)
                                iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
                        rcu_read_unlock();
                        goto done_unlock;
                }
        }
        rcu_read_unlock();
vm_bind:
        if (pr_iucv) {
                /* use local userid for backward compat */
                memcpy(iucv->src_name, sa->siucv_name, 8);
                memcpy(iucv->src_user_id, iucv_userid, 8);
                sk->sk_state = IUCV_BOUND;
                iucv->transport = AF_IUCV_TRANS_IUCV;
                sk->sk_allocation |= GFP_DMA;
                if (!iucv->msglimit)
                        iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
                goto done_unlock;
        }
        /* found no dev to bind */
        err = -ENODEV;
done_unlock:
        /* Release the socket list lock */
        write_unlock_bh(&iucv_sk_list.lock);
done:
        release_sock(sk);
        return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        int err = 0;

        if (unlikely(!pr_iucv))
                return -EPROTO;

        memcpy(iucv->src_user_id, iucv_userid, 8);
        iucv->transport = AF_IUCV_TRANS_IUCV;
        sk->sk_allocation |= GFP_DMA;

        write_lock_bh(&iucv_sk_list.lock);
        __iucv_auto_name(iucv);
        write_unlock_bh(&iucv_sk_list.lock);

        if (!iucv->msglimit)
                iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

        return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
        DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned char user_data[16];
        int err;

        high_nmcpy(user_data, sa->siucv_name);
        low_nmcpy(user_data, iucv->src_name);
        ASCEBC(user_data, sizeof(user_data));

        /* Create path. */
        iucv->path = iucv_path_alloc(iucv->msglimit,
                                     IUCV_IPRMDATA, GFP_KERNEL);
        if (!iucv->path) {
                err = -ENOMEM;
                goto done;
        }
        err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
                                    sa->siucv_user_id, NULL, user_data,
                                    sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
                switch (err) {
                case 0x0b:      /* Target communicator is not logged on */
                        err = -ENETUNREACH;
                        break;
                case 0x0d:      /* Max connections for this guest exceeded */
                case 0x0e:      /* Max connections for target guest exceeded */
                        err = -EAGAIN;
                        break;
                case 0x0f:      /* Missing IUCV authorization */
                        err = -EACCES;
                        break;
                default:
                        err = -ECONNREFUSED;
                        break;
                }
        }
done:
        return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                             int alen, int flags)
{
        DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;

        if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
                return -EINVAL;

        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
                return -EBADFD;

        if (sk->sk_state == IUCV_OPEN &&
            iucv->transport == AF_IUCV_TRANS_HIPER)
                return -EBADFD; /* explicit bind required */

        if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
                return -EINVAL;

        if (sk->sk_state == IUCV_OPEN) {
                err = iucv_sock_autobind(sk);
                if (unlikely(err))
                        return err;
        }

        lock_sock(sk);

        /* Set the destination information */
        memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
        memcpy(iucv->dst_name, sa->siucv_name, 8);

        if (iucv->transport == AF_IUCV_TRANS_HIPER)
                err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
        else
                err = afiucv_path_connect(sock, addr);
        if (err)
                goto done;

        if (sk->sk_state != IUCV_CONNECTED)
                err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
                                                            IUCV_DISCONN),
                                     sock_sndtimeo(sk, flags & O_NONBLOCK));

        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
                err = -ECONNREFUSED;

        if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
                iucv_sever_path(sk, 0);

done:
        release_sock(sk);
        return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sk->sk_state != IUCV_BOUND)
                goto done;

        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto done;

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = IUCV_LISTEN;
        err = 0;

done:
        release_sock(sk);
        return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags, bool kern)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;

        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
                goto done;
        }

        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);

        if (err)
                goto done;

        newsock->state = SS_CONNECTED;

done:
        release_sock(sk);
        return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                             int peer)
{
        DECLARE_SOCKADDR(struct sockaddr_iucv *, siucv, addr);
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);

        addr->sa_family = AF_IUCV;

        if (peer) {
                memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
                memcpy(siucv->siucv_name, iucv->dst_name, 8);
        } else {
                memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
                memcpy(siucv->siucv_name, iucv->src_name, 8);
        }
        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
        memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

        return sizeof(struct sockaddr_iucv);
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:       IUCV path
 * @msg:        Pointer to a struct iucv_message
 * @skb:        The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list of the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list, and the socket data length at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
                          struct sk_buff *skb)
{
        u8 prmdata[8];

        memcpy(prmdata, (void *) skb->data, skb->len);
        prmdata[7] = 0xff - (u8) skb->len;
        return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
                                 (void *) prmdata, 8);
}
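
/*
 * Layout illustration (not executed code): for a 3-byte payload "abc",
 * prmdata above ends up as { 'a', 'b', 'c', x, x, x, x, 0xfc }, where
 * 0xfc = 0xff - 3 and the x bytes are don't-care; the receiver recovers
 * the length via iucv_msg_length().
 */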

static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                             size_t len)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        size_t headroom = 0;
        size_t linear;
        struct sk_buff *skb;
        struct iucv_message txmsg = {0};
        struct cmsghdr *cmsg;
        int cmsg_done;
        long timeo;
        char user_id[9];
        char appl_id[9];
        int err;
        int noblock = msg->msg_flags & MSG_DONTWAIT;

        err = sock_error(sk);
        if (err)
                return err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        /* SOCK_SEQPACKET: we do not support segmented records */
        if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
                return -EOPNOTSUPP;

        lock_sock(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        /* Return if the socket is not in connected state */
        if (sk->sk_state != IUCV_CONNECTED) {
                err = -ENOTCONN;
                goto out;
        }

        /* initialize defaults */
        cmsg_done   = 0;        /* check for duplicate headers */

        /* iterate over control messages */
        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg)) {
                        err = -EINVAL;
                        goto out;
                }

                if (cmsg->cmsg_level != SOL_IUCV)
                        continue;

                if (cmsg->cmsg_type & cmsg_done) {
                        err = -EINVAL;
                        goto out;
                }
                cmsg_done |= cmsg->cmsg_type;

                switch (cmsg->cmsg_type) {
                case SCM_IUCV_TRGCLS:
                        if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
                                err = -EINVAL;
                                goto out;
                        }

                        /* set iucv message target class */
                        memcpy(&txmsg.class,
                                (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

                        break;

                default:
                        err = -EINVAL;
                        goto out;
                }
        }
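
        /*
         * For reference, a userspace sender would build this ancillary
         * data roughly as follows (illustrative sketch, not part of this
         * file):
         *
         *      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
         *      cmsg->cmsg_level = SOL_IUCV;
         *      cmsg->cmsg_type  = SCM_IUCV_TRGCLS;
         *      cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
         *      memcpy(CMSG_DATA(cmsg), &trgcls, sizeof(__u32));
         *
         * which the loop above copies into txmsg.class.
         */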

        /* allocate one skb for each iucv message:
         * this is fine for SOCK_SEQPACKET (unless we want to support
         * segmented records using the MSG_EOR flag), but
         * for SOCK_STREAM we might want to improve it in future */
        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                headroom = sizeof(struct af_iucv_trans_hdr) +
                           LL_RESERVED_SPACE(iucv->hs_dev);
                linear = min(len, PAGE_SIZE - headroom);
        } else {
                if (len < PAGE_SIZE) {
                        linear = len;
                } else {
                        /* In nonlinear "classic" iucv skb,
                         * reserve space for iucv_array
                         */
                        headroom = sizeof(struct iucv_array) *
                                   (MAX_SKB_FRAGS + 1);
                        linear = PAGE_SIZE - headroom;
                }
        }
        skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
                                   noblock, &err, 0);
        if (!skb)
                goto out;
        if (headroom)
                skb_reserve(skb, headroom);
        skb_put(skb, linear);
        skb->len = len;
        skb->data_len = len - linear;
        err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
        if (err)
                goto fail;

        /* wait if the number of outstanding messages on the iucv path
         * has reached the message limit
         */
        timeo = sock_sndtimeo(sk, noblock);
        err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
        if (err)
                goto fail;

        /* return -ECONNRESET if the socket is no longer connected */
        if (sk->sk_state != IUCV_CONNECTED) {
                err = -ECONNRESET;
                goto fail;
        }

        /* increment and save iucv message tag for msg_completion cbk */
        txmsg.tag = iucv->send_tag++;
        IUCV_SKB_CB(skb)->tag = txmsg.tag;

        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                atomic_inc(&iucv->msg_sent);
                err = afiucv_hs_send(&txmsg, sk, skb, 0);
                if (err) {
                        atomic_dec(&iucv->msg_sent);
                        goto out;
                }
        } else { /* Classic VM IUCV transport */
                skb_queue_tail(&iucv->send_skb_q, skb);
                atomic_inc(&iucv->skbs_in_xmit);

                if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
                    skb->len <= 7) {
                        err = iucv_send_iprm(iucv->path, &txmsg, skb);

                        /* on success: there is no message_complete callback */
                        /* for an IPRMDATA msg; remove skb from send queue   */
                        if (err == 0) {
                                atomic_dec(&iucv->skbs_in_xmit);
                                skb_unlink(skb, &iucv->send_skb_q);
                                consume_skb(skb);
                        }

                        /* this error should never happen since the     */
                        /* IUCV_IPRMDATA path flag is set... sever path */
                        if (err == 0x15) {
                                pr_iucv->path_sever(iucv->path, NULL);
                                atomic_dec(&iucv->skbs_in_xmit);
                                skb_unlink(skb, &iucv->send_skb_q);
                                err = -EPIPE;
                                goto fail;
                        }
                } else if (skb_is_nonlinear(skb)) {
                        struct iucv_array *iba = (struct iucv_array *)skb->head;
                        int i;

                        /* skip iucv_array lying in the headroom */
                        iba[0].address = (u32)(addr_t)skb->data;
                        iba[0].length = (u32)skb_headlen(skb);
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                                iba[i + 1].address =
                                        (u32)(addr_t)skb_frag_address(frag);
                                iba[i + 1].length = (u32)skb_frag_size(frag);
                        }
                        err = pr_iucv->message_send(iucv->path, &txmsg,
                                                    IUCV_IPBUFLST, 0,
                                                    (void *)iba, skb->len);
                } else { /* non-IPRM Linear skb */
                        err = pr_iucv->message_send(iucv->path, &txmsg,
                                        0, 0, (void *)skb->data, skb->len);
                }
                if (err) {
                        if (err == 3) {
                                user_id[8] = 0;
                                memcpy(user_id, iucv->dst_user_id, 8);
                                appl_id[8] = 0;
                                memcpy(appl_id, iucv->dst_name, 8);
                                pr_err(
                "Application %s on z/VM guest %s exceeds message limit\n",
                                        appl_id, user_id);
                                err = -EAGAIN;
                        } else {
                                err = -EPIPE;
                        }

                        atomic_dec(&iucv->skbs_in_xmit);
                        skb_unlink(skb, &iucv->send_skb_q);
                        goto fail;
                }
        }

        release_sock(sk);
        return len;

fail:
        kfree_skb(skb);
out:
        release_sock(sk);
        return err;
}

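/* allocate an skb for an incoming iucv message; for messages larger than
 * one page, headroom for an iucv_array buffer list is reserved, which
 * iucv_process_message() fills in before calling message_receive
 */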
static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
        size_t headroom, linear;
        struct sk_buff *skb;
        int err;

        if (len < PAGE_SIZE) {
                headroom = 0;
                linear = len;
        } else {
                headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
                linear = PAGE_SIZE - headroom;
        }
        skb = alloc_skb_with_frags(headroom + linear, len - linear,
                                   0, &err, GFP_ATOMIC | GFP_DMA);
        WARN_ONCE(!skb,
                  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
                  len, err);
        if (skb) {
                if (headroom)
                        skb_reserve(skb, headroom);
                skb_put(skb, linear);
                skb->len = len;
                skb->data_len = len - linear;
        }
        return skb;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
                                 struct iucv_path *path,
                                 struct iucv_message *msg)
{
        int rc;
        unsigned int len;

        len = iucv_msg_length(msg);

        /* store msg target class in the second 4 bytes of skb ctrl buffer */
        /* Note: the first 4 bytes are reserved for msg tag */
        IUCV_SKB_CB(skb)->class = msg->class;

        /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
        if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
                if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
                        skb->data = NULL;
                        skb->len = 0;
                }
        } else {
                if (skb_is_nonlinear(skb)) {
                        struct iucv_array *iba = (struct iucv_array *)skb->head;
                        int i;

                        iba[0].address = (u32)(addr_t)skb->data;
                        iba[0].length = (u32)skb_headlen(skb);
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                                iba[i + 1].address =
                                        (u32)(addr_t)skb_frag_address(frag);
                                iba[i + 1].length = (u32)skb_frag_size(frag);
                        }
                        rc = pr_iucv->message_receive(path, msg,
                                              IUCV_IPBUFLST,
                                              (void *)iba, len, NULL);
                } else {
                        rc = pr_iucv->message_receive(path, msg,
                                              msg->flags & IUCV_IPRMDATA,
                                              skb->data, len, NULL);
                }
                if (rc) {
                        kfree_skb(skb);
                        return;
                }
                WARN_ON_ONCE(skb->len != len);
        }

        IUCV_SKB_CB(skb)->offset = 0;
        if (sk_filter(sk, skb)) {
                atomic_inc(&sk->sk_drops);      /* skb rejected by filter */
                kfree_skb(skb);
                return;
        }
        if (__sock_queue_rcv_skb(sk, skb))      /* handle rcv queue full */
                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *p, *n;

        list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
                skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
                if (!skb)
                        break;
                iucv_process_message(sk, skb, p->path, &p->msg);
                list_del(&p->list);
                kfree(p);
                if (!skb_queue_empty(&iucv->backlog_skb_q))
                        break;
        }
}

static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
                             size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned int copied, rlen;
        struct sk_buff *skb, *rskb, *cskb;
        int err = 0;
        u32 offset;

        if ((sk->sk_state == IUCV_DISCONN) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
            list_empty(&iucv->message_q.list))
                return 0;

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        /* receive/dequeue next skb:
         * the function understands MSG_PEEK and, thus, does not dequeue the skb */
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
        }

        offset = IUCV_SKB_CB(skb)->offset;
        rlen   = skb->len - offset;             /* real length of skb */
        copied = min_t(unsigned int, rlen, len);
        if (!rlen)
                sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

        cskb = skb;
        if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return -EFAULT;
        }

        /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
        if (sk->sk_type == SOCK_SEQPACKET) {
                if (copied < rlen)
                        msg->msg_flags |= MSG_TRUNC;
                /* each iucv message contains a complete record */
                msg->msg_flags |= MSG_EOR;
        }

        /* create control message to store iucv msg target class:
         * get the trgcls from the control buffer of the skb due to
         * fragmentation of original iucv message. */
        err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
                       sizeof(IUCV_SKB_CB(skb)->class),
                       (void *)&IUCV_SKB_CB(skb)->class);
        if (err) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return err;
        }

        /* Mark read part of skb as used */
        if (!(flags & MSG_PEEK)) {

                /* SOCK_STREAM: re-queue skb if it contains unreceived data */
                if (sk->sk_type == SOCK_STREAM) {
                        if (copied < rlen) {
                                IUCV_SKB_CB(skb)->offset = offset + copied;
                                skb_queue_head(&sk->sk_receive_queue, skb);
                                goto done;
                        }
                }

                consume_skb(skb);
                if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                        atomic_inc(&iucv->msg_recv);
                        if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
                                WARN_ON(1);
                                iucv_sock_close(sk);
                                return -EFAULT;
                        }
                }

                /* Queue backlog skbs */
                spin_lock_bh(&iucv->message_q.lock);
                rskb = skb_dequeue(&iucv->backlog_skb_q);
                while (rskb) {
                        IUCV_SKB_CB(rskb)->offset = 0;
                        if (__sock_queue_rcv_skb(sk, rskb)) {
                                /* handle rcv queue full */
                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
                                break;
                        }
                        rskb = skb_dequeue(&iucv->backlog_skb_q);
                }
                if (skb_queue_empty(&iucv->backlog_skb_q)) {
                        if (!list_empty(&iucv->message_q.list))
                                iucv_process_message_q(sk);
                        if (atomic_read(&iucv->msg_recv) >=
                                                        iucv->msglimit / 2) {
                                err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
                                if (err) {
                                        sk->sk_state = IUCV_DISCONN;
                                        sk->sk_state_change(sk);
                                }
                        }
                }
                spin_unlock_bh(&iucv->message_q.lock);
        }

done:
        /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
        if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
                copied = rlen;

        return copied;
}

static inline __poll_t iucv_accept_poll(struct sock *parent)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;

                if (sk->sk_state == IUCV_CONNECTED)
                        return EPOLLIN | EPOLLRDNORM;
        }

        return 0;
}

static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
                               poll_table *wait)
{
        struct sock *sk = sock->sk;
        __poll_t mask = 0;

        sock_poll_wait(file, sock, wait);

        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= EPOLLRDHUP;

        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= EPOLLHUP;

        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= EPOLLIN | EPOLLRDNORM;

        if (sk->sk_state == IUCV_CLOSED)
                mask |= EPOLLHUP;

        if (sk->sk_state == IUCV_DISCONN)
                mask |= EPOLLIN;

        if (sock_writeable(sk) && iucv_below_msglim(sk))
                mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
        else
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_message txmsg;
        int err = 0;

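        /* map SHUT_RD/SHUT_WR/SHUT_RDWR (0/1/2) onto the corresponding
         * RCV_SHUTDOWN/SEND_SHUTDOWN/SHUTDOWN_MASK bit values (1/2/3)
         */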
1403        how++;
1404
1405        if ((how & ~SHUTDOWN_MASK) || !how)
1406                return -EINVAL;
1407
1408        lock_sock(sk);
1409        switch (sk->sk_state) {
1410        case IUCV_LISTEN:
1411        case IUCV_DISCONN:
1412        case IUCV_CLOSING:
1413        case IUCV_CLOSED:
1414                err = -ENOTCONN;
1415                goto fail;
1416        default:
1417                break;
1418        }
1419
1420        if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
1421            sk->sk_state == IUCV_CONNECTED) {
1422                if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1423                        txmsg.class = 0;
1424                        txmsg.tag = 0;
1425                        err = pr_iucv->message_send(iucv->path, &txmsg,
1426                                IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
1427                        if (err) {
1428                                switch (err) {
1429                                case 1:
1430                                        err = -ENOTCONN;
1431                                        break;
1432                                case 2:
1433                                        err = -ECONNRESET;
1434                                        break;
1435                                default:
1436                                        err = -ENOTCONN;
1437                                        break;
1438                                }
1439                        }
1440                } else
1441                        iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
1442        }
1443
1444        sk->sk_shutdown |= how;
1445        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
1446                if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
1447                    iucv->path) {
1448                        err = pr_iucv->path_quiesce(iucv->path, NULL);
1449                        if (err)
1450                                err = -ENOTCONN;
1451/*                      skb_queue_purge(&sk->sk_receive_queue); */
1452                }
1453                skb_queue_purge(&sk->sk_receive_queue);
1454        }
1455
1456        /* Wake up anyone sleeping in poll */
1457        sk->sk_state_change(sk);
1458
1459fail:
1460        release_sock(sk);
1461        return err;
1462}
1463
1464static int iucv_sock_release(struct socket *sock)
1465{
1466        struct sock *sk = sock->sk;
1467        int err = 0;
1468
1469        if (!sk)
1470                return 0;
1471
1472        iucv_sock_close(sk);
1473
1474        sock_orphan(sk);
1475        iucv_sock_kill(sk);
1476        return err;
1477}
1478
1479/* getsockopt and setsockopt */
1480static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1481                                sockptr_t optval, unsigned int optlen)
1482{
1483        struct sock *sk = sock->sk;
1484        struct iucv_sock *iucv = iucv_sk(sk);
1485        int val;
1486        int rc;
1487
1488        if (level != SOL_IUCV)
1489                return -ENOPROTOOPT;
1490
1491        if (optlen < sizeof(int))
1492                return -EINVAL;
1493
1494        if (copy_from_sockptr(&val, optval, sizeof(int)))
1495                return -EFAULT;
1496
1497        rc = 0;
1498
1499        lock_sock(sk);
1500        switch (optname) {
1501        case SO_IPRMDATA_MSG:
1502                if (val)
1503                        iucv->flags |= IUCV_IPRMDATA;
1504                else
1505                        iucv->flags &= ~IUCV_IPRMDATA;
1506                break;
1507        case SO_MSGLIMIT:
1508                switch (sk->sk_state) {
1509                case IUCV_OPEN:
1510                case IUCV_BOUND:
1511                        if (val < 1 || val > U16_MAX)
1512                                rc = -EINVAL;
1513                        else
1514                                iucv->msglimit = val;
1515                        break;
1516                default:
1517                        rc = -EINVAL;
1518                        break;
1519                }
1520                break;
1521        default:
1522                rc = -ENOPROTOOPT;
1523                break;
1524        }
1525        release_sock(sk);
1526
1527        return rc;
1528}
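
    /* Hypothetical userspace sketch (not part of this kernel module):
     * both options take an int at level SOL_IUCV, and SO_MSGLIMIT is
     * accepted only before the connection exists (IUCV_OPEN/IUCV_BOUND):
     *
     *	int limit = 10;
     *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
     */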
1529
1530static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1531                                char __user *optval, int __user *optlen)
1532{
1533        struct sock *sk = sock->sk;
1534        struct iucv_sock *iucv = iucv_sk(sk);
1535        unsigned int val;
1536        int len;
1537
1538        if (level != SOL_IUCV)
1539                return -ENOPROTOOPT;
1540
1541        if (get_user(len, optlen))
1542                return -EFAULT;
1543
1544        if (len < 0)
1545                return -EINVAL;
1546
1547        len = min_t(unsigned int, len, sizeof(int));
1548
1549        switch (optname) {
1550        case SO_IPRMDATA_MSG:
1551                val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1552                break;
1553        case SO_MSGLIMIT:
1554                lock_sock(sk);
1555                val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1556                                           : iucv->msglimit;    /* default */
1557                release_sock(sk);
1558                break;
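            /* SO_MSGSIZE: on the HiperSockets transport the maximum message
             * size is bounded by the device MTU minus the af_iucv transport
             * header and the Ethernet header; classic IUCV reports INT_MAX.
             */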
1559        case SO_MSGSIZE:
1560                if (sk->sk_state == IUCV_OPEN)
1561                        return -EBADFD;
1562                val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
1563                                sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
1564                                0x7fffffff;
1565                break;
1566        default:
1567                return -ENOPROTOOPT;
1568        }
1569
1570        if (put_user(len, optlen))
1571                return -EFAULT;
1572        if (copy_to_user(optval, &val, len))
1573                return -EFAULT;
1574
1575        return 0;
1576}
1577
1579/* Callback wrappers - called from iucv base support */
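    /* iucv_callback_connreq() - handle an incoming connection request
     * (path_pending): look up the listening af_iucv socket for the target
     * application name, clone a new socket for the connection, accept the
     * path on its behalf and queue it on the listener's accept queue.
     */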
1580static int iucv_callback_connreq(struct iucv_path *path,
1581                                 u8 ipvmid[8], u8 ipuser[16])
1582{
1583        unsigned char user_data[16];
1584        unsigned char nuser_data[16];
1585        unsigned char src_name[8];
1586        struct sock *sk, *nsk;
1587        struct iucv_sock *iucv, *niucv;
1588        int err;
1589
1590        memcpy(src_name, ipuser, 8);
1591        EBCASC(src_name, 8);
1592        /* Find out if this path belongs to af_iucv. */
1593        read_lock(&iucv_sk_list.lock);
1594        iucv = NULL;
1595        sk = NULL;
1596        sk_for_each(sk, &iucv_sk_list.head)
1597                if (sk->sk_state == IUCV_LISTEN &&
1598                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
1599                        /*
1600                         * Found a listening socket with
1601                         * src_name == ipuser[0-7].
1602                         */
1603                        iucv = iucv_sk(sk);
1604                        break;
1605                }
1606        read_unlock(&iucv_sk_list.lock);
1607        if (!iucv)
1608                /* No socket found, not one of our paths. */
1609                return -EINVAL;
1610
1611        bh_lock_sock(sk);
1612
1613        /* Check if parent socket is listening */
1614        low_nmcpy(user_data, iucv->src_name);
1615        high_nmcpy(user_data, iucv->dst_name);
1616        ASCEBC(user_data, sizeof(user_data));
1617        if (sk->sk_state != IUCV_LISTEN) {
1618                err = pr_iucv->path_sever(path, user_data);
1619                iucv_path_free(path);
1620                goto fail;
1621        }
1622
1623        /* Check for backlog size */
1624        if (sk_acceptq_is_full(sk)) {
1625                err = pr_iucv->path_sever(path, user_data);
1626                iucv_path_free(path);
1627                goto fail;
1628        }
1629
1630        /* Create the new socket */
1631        nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
1632        if (!nsk) {
1633                err = pr_iucv->path_sever(path, user_data);
1634                iucv_path_free(path);
1635                goto fail;
1636        }
1637
1638        niucv = iucv_sk(nsk);
1639        iucv_sock_init(nsk, sk);
1640        niucv->transport = AF_IUCV_TRANS_IUCV;
1641        nsk->sk_allocation |= GFP_DMA;
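            /* classic IUCV needs 31-bit addressable (below 2 GB) data
             * buffers on s390, hence allocate with GFP_DMA
             */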
1642
1643        /* Set the new iucv_sock */
1644        memcpy(niucv->dst_name, ipuser + 8, 8);
1645        EBCASC(niucv->dst_name, 8);
1646        memcpy(niucv->dst_user_id, ipvmid, 8);
1647        memcpy(niucv->src_name, iucv->src_name, 8);
1648        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1649        niucv->path = path;
1650
1651        /* Call iucv_accept */
1652        high_nmcpy(nuser_data, ipuser + 8);
1653        memcpy(nuser_data + 8, niucv->src_name, 8);
1654        ASCEBC(nuser_data + 8, 8);
1655
1656        /* set message limit for path based on msglimit of accepting socket */
1657        niucv->msglimit = iucv->msglimit;
1658        path->msglim = iucv->msglimit;
1659        err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1660        if (err) {
1661                iucv_sever_path(nsk, 1);
1662                iucv_sock_kill(nsk);
1663                goto fail;
1664        }
1665
1666        iucv_accept_enqueue(sk, nsk);
1667
1668        /* Wake up accept */
1669        nsk->sk_state = IUCV_CONNECTED;
1670        sk->sk_data_ready(sk);
1671        err = 0;
1672fail:
1673        bh_unlock_sock(sk);
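            /* always return 0: a nonzero return code would offer the
             * pending path to the next registered IUCV handler
             */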
1674        return 0;
1675}
1676
1677static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
1678{
1679        struct sock *sk = path->private;
1680
1681        sk->sk_state = IUCV_CONNECTED;
1682        sk->sk_state_change(sk);
1683}
1684
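    /* iucv_callback_rx() - a message is pending on the path: deliver it to
     * the socket's receive queue if it fits, otherwise park the message
     * descriptor on message_q and fetch the data later from the receive
     * path.
     */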
1685static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1686{
1687        struct sock *sk = path->private;
1688        struct iucv_sock *iucv = iucv_sk(sk);
1689        struct sk_buff *skb;
1690        struct sock_msg_q *save_msg;
1691        int len;
1692
1693        if (sk->sk_shutdown & RCV_SHUTDOWN) {
1694                pr_iucv->message_reject(path, msg);
1695                return;
1696        }
1697
1698        spin_lock(&iucv->message_q.lock);
1699
1700        if (!list_empty(&iucv->message_q.list) ||
1701            !skb_queue_empty(&iucv->backlog_skb_q))
1702                goto save_message;
1703
1704        len = atomic_read(&sk->sk_rmem_alloc);
1705        len += SKB_TRUESIZE(iucv_msg_length(msg));
1706        if (len > sk->sk_rcvbuf)
1707                goto save_message;
1708
1709        skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
1710        if (!skb)
1711                goto save_message;
1712
1713        iucv_process_message(sk, skb, path, msg);
1714        goto out_unlock;
1715
1716save_message:
1717        save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
1718        if (!save_msg)
1719                goto out_unlock;
1720        save_msg->path = path;
1721        save_msg->msg = *msg;
1722
1723        list_add_tail(&save_msg->list, &iucv->message_q.list);
1724
1725out_unlock:
1726        spin_unlock(&iucv->message_q.lock);
1727}
1728
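    /* iucv_callback_txdone() - transmission of a message completed: find
     * the matching skb on the send queue by message tag, release it and
     * wake up senders blocked on the message limit.
     */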
1729static void iucv_callback_txdone(struct iucv_path *path,
1730                                 struct iucv_message *msg)
1731{
1732        struct sock *sk = path->private;
1733        struct sk_buff *this = NULL;
1734        struct sk_buff_head *list;
1735        struct sk_buff *list_skb;
1736        struct iucv_sock *iucv;
1737        unsigned long flags;
1738
1739        iucv = iucv_sk(sk);
1740        list = &iucv->send_skb_q;
1741
1742        bh_lock_sock(sk);
1743
1744        spin_lock_irqsave(&list->lock, flags);
1745        skb_queue_walk(list, list_skb) {
1746                if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
1747                        this = list_skb;
1748                        break;
1749                }
1750        }
1751        if (this) {
1752                atomic_dec(&iucv->skbs_in_xmit);
1753                __skb_unlink(this, list);
1754        }
1755
1756        spin_unlock_irqrestore(&list->lock, flags);
1757
1758        if (this) {
1759                consume_skb(this);
1760                /* wake up any process waiting to send */
1761                iucv_sock_wake_msglim(sk);
1762        }
1763
1764        if (sk->sk_state == IUCV_CLOSING) {
1765                if (atomic_read(&iucv->skbs_in_xmit) == 0) {
1766                        sk->sk_state = IUCV_CLOSED;
1767                        sk->sk_state_change(sk);
1768                }
1769        }
1770        bh_unlock_sock(sk);
1772}
1773
1774static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1775{
1776        struct sock *sk = path->private;
1777
1778        if (sk->sk_state == IUCV_CLOSED)
1779                return;
1780
1781        bh_lock_sock(sk);
1782        iucv_sever_path(sk, 1);
1783        sk->sk_state = IUCV_DISCONN;
1784
1785        sk->sk_state_change(sk);
1786        bh_unlock_sock(sk);
1787}
1788
1789/* called if the peer shuts down its RECV direction; in turn, the
1790 * callback sets SEND_SHUTDOWN to disable further sending of data.
1791 */
1792static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1793{
1794        struct sock *sk = path->private;
1795
1796        bh_lock_sock(sk);
1797        if (sk->sk_state != IUCV_CLOSED) {
1798                sk->sk_shutdown |= SEND_SHUTDOWN;
1799                sk->sk_state_change(sk);
1800        }
1801        bh_unlock_sock(sk);
1802}
1803
1804static struct iucv_handler af_iucv_handler = {
1805        .path_pending           = iucv_callback_connreq,
1806        .path_complete          = iucv_callback_connack,
1807        .path_severed           = iucv_callback_connrej,
1808        .message_pending        = iucv_callback_rx,
1809        .message_complete       = iucv_callback_txdone,
1810        .path_quiesced          = iucv_callback_shutdown,
1811};
1812
1813/***************** HiperSockets transport callbacks ********************/
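    /* Turn a received frame into a reply frame: convert the identifiers
     * back to EBCDIC, swap source and destination, and prepend a zeroed
     * Ethernet header so the same skb can be sent back via dev_queue_xmit.
     */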
1814static void afiucv_swap_src_dest(struct sk_buff *skb)
1815{
1816        struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
1817        char tmpID[8];
1818        char tmpName[8];
1819
1820        ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1821        ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1822        ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1823        ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1824        memcpy(tmpID, trans_hdr->srcUserID, 8);
1825        memcpy(tmpName, trans_hdr->srcAppName, 8);
1826        memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1827        memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1828        memcpy(trans_hdr->destUserID, tmpID, 8);
1829        memcpy(trans_hdr->destAppName, tmpName, 8);
1830        skb_push(skb, ETH_HLEN);
1831        memset(skb->data, 0, ETH_HLEN);
1832}
1833
1834/**
1835 * afiucv_hs_callback_syn - react to a received SYN
1836 **/
1837static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1838{
1839        struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
1840        struct sock *nsk;
1841        struct iucv_sock *iucv, *niucv;
1842        int err;
1843
1844        iucv = iucv_sk(sk);
1845        if (!iucv) {
1846                /* no sock - connection refused */
1847                afiucv_swap_src_dest(skb);
1848                trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1849                err = dev_queue_xmit(skb);
1850                goto out;
1851        }
1852
1853        nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
1854        bh_lock_sock(sk);
1855        if ((sk->sk_state != IUCV_LISTEN) ||
1856            sk_acceptq_is_full(sk) ||
1857            !nsk) {
1858                /* error on server socket - connection refused */
1859                afiucv_swap_src_dest(skb);
1860                trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1861                err = dev_queue_xmit(skb);
1862                iucv_sock_kill(nsk);
1863                bh_unlock_sock(sk);
1864                goto out;
1865        }
1866
1867        niucv = iucv_sk(nsk);
1868        iucv_sock_init(nsk, sk);
1869        niucv->transport = AF_IUCV_TRANS_HIPER;
1870        niucv->msglimit = iucv->msglimit;
1871        if (!trans_hdr->window)
1872                niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
1873        else
1874                niucv->msglimit_peer = trans_hdr->window;
1875        memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
1876        memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
1877        memcpy(niucv->src_name, iucv->src_name, 8);
1878        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1879        nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1880        niucv->hs_dev = iucv->hs_dev;
1881        dev_hold(niucv->hs_dev);
1882        afiucv_swap_src_dest(skb);
1883        trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
1884        trans_hdr->window = niucv->msglimit;
1885        /* if the receiver acks the xmit, the connection is established */
1886        err = dev_queue_xmit(skb);
1887        if (!err) {
1888                iucv_accept_enqueue(sk, nsk);
1889                nsk->sk_state = IUCV_CONNECTED;
1890                sk->sk_data_ready(sk);
1891        } else
1892                iucv_sock_kill(nsk);
1893        bh_unlock_sock(sk);
1894
1895out:
1896        return NET_RX_SUCCESS;
1897}
1898
1899/**
1900 * afiucv_hs_callback_synack() - react to a received SYN|ACK
1901 **/
1902static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
1903{
1904        struct iucv_sock *iucv = iucv_sk(sk);
1905
1906        if (!iucv || sk->sk_state != IUCV_BOUND) {
1907                kfree_skb(skb);
1908                return NET_RX_SUCCESS;
1909        }
1910
1911        bh_lock_sock(sk);
1912        iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
1913        sk->sk_state = IUCV_CONNECTED;
1914        sk->sk_state_change(sk);
1915        bh_unlock_sock(sk);
1916        consume_skb(skb);
1917        return NET_RX_SUCCESS;
1918}
1919
1920/**
1921 * afiucv_hs_callback_synfin() - react to a received SYN|FIN
1922 **/
1923static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
1924{
1925        struct iucv_sock *iucv = iucv_sk(sk);
1926
1927        if (!iucv || sk->sk_state != IUCV_BOUND) {
1928                kfree_skb(skb);
1929                return NET_RX_SUCCESS;
1930        }
1931
1932        bh_lock_sock(sk);
1933        sk->sk_state = IUCV_DISCONN;
1934        sk->sk_state_change(sk);
1935        bh_unlock_sock(sk);
1936        consume_skb(skb);
1937        return NET_RX_SUCCESS;
1938}
1939
1940/**
1941 * afiucv_hs_callback_fin() - react to a received FIN
1942 **/
1943static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
1944{
1945        struct iucv_sock *iucv = iucv_sk(sk);
1946
1947        /* other end of connection closed */
1948        if (!iucv) {
1949                kfree_skb(skb);
1950                return NET_RX_SUCCESS;
1951        }
1952
1953        bh_lock_sock(sk);
1954        if (sk->sk_state == IUCV_CONNECTED) {
1955                sk->sk_state = IUCV_DISCONN;
1956                sk->sk_state_change(sk);
1957        }
1958        bh_unlock_sock(sk);
1959        consume_skb(skb);
1960        return NET_RX_SUCCESS;
1961}
1962
1963/**
1964 * afiucv_hs_callback_win() - react to a received WIN
1965 **/
1966static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
1967{
1968        struct iucv_sock *iucv = iucv_sk(sk);
1969
1970        if (!iucv)
1971                return NET_RX_SUCCESS;
1972
1973        if (sk->sk_state != IUCV_CONNECTED)
1974                return NET_RX_SUCCESS;
1975
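            /* the peer grants fresh send credits: subtract the advertised
             * window from the in-flight message counter and wake up
             * senders blocked on the message limit
             */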
1976        atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
1977        iucv_sock_wake_msglim(sk);
1978        return NET_RX_SUCCESS;
1979}
1980
1981/**
1982 * afiucv_hs_callback_rx() - react to received data
1983 **/
1984static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
1985{
1986        struct iucv_sock *iucv = iucv_sk(sk);
1987
1988        if (!iucv) {
1989                kfree_skb(skb);
1990                return NET_RX_SUCCESS;
1991        }
1992
1993        if (sk->sk_state != IUCV_CONNECTED) {
1994                kfree_skb(skb);
1995                return NET_RX_SUCCESS;
1996        }
1997
1998        if (sk->sk_shutdown & RCV_SHUTDOWN) {
1999                kfree_skb(skb);
2000                return NET_RX_SUCCESS;
2001        }
2002
2003        /* strip the transport header and set up the skb control buffer */
2004        skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2005        skb_reset_transport_header(skb);
2006        skb_reset_network_header(skb);
2007        IUCV_SKB_CB(skb)->offset = 0;
2008        if (sk_filter(sk, skb)) {
2009                atomic_inc(&sk->sk_drops);      /* skb rejected by filter */
2010                kfree_skb(skb);
2011                return NET_RX_SUCCESS;
2012        }
2013
2014        spin_lock(&iucv->message_q.lock);
2015        if (skb_queue_empty(&iucv->backlog_skb_q)) {
2016                if (__sock_queue_rcv_skb(sk, skb))
2017                        /* handle rcv queue full */
2018                        skb_queue_tail(&iucv->backlog_skb_q, skb);
2019        } else
2020                skb_queue_tail(&iucv->backlog_skb_q, skb);
2021        spin_unlock(&iucv->message_q.lock);
2022        return NET_RX_SUCCESS;
2023}
2024
2025/**
2026 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
2027 *                   transport
2028 *                   called from netif RX softirq
2029 **/
2030static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2031        struct packet_type *pt, struct net_device *orig_dev)
2032{
2033        struct sock *sk;
2034        struct iucv_sock *iucv;
2035        struct af_iucv_trans_hdr *trans_hdr;
2036        int err = NET_RX_SUCCESS;
2037        char nullstring[8];
2038
2039        if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
2040                kfree_skb(skb);
2041                return NET_RX_SUCCESS;
2042        }
2043
2044        trans_hdr = iucv_trans_hdr(skb);
2045        EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2046        EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2047        EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2048        EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2049        memset(nullstring, 0, sizeof(nullstring));
2050        iucv = NULL;
2051        sk = NULL;
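            /* find the socket this frame belongs to: a SYN must match a
             * bound socket that has no destination set yet, everything
             * else must match an established src/dst name pairing
             */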
2052        read_lock(&iucv_sk_list.lock);
2053        sk_for_each(sk, &iucv_sk_list.head) {
2054                if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2055                        if ((!memcmp(&iucv_sk(sk)->src_name,
2056                                     trans_hdr->destAppName, 8)) &&
2057                            (!memcmp(&iucv_sk(sk)->src_user_id,
2058                                     trans_hdr->destUserID, 8)) &&
2059                            (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2060                            (!memcmp(&iucv_sk(sk)->dst_user_id,
2061                                     nullstring, 8))) {
2062                                iucv = iucv_sk(sk);
2063                                break;
2064                        }
2065                } else {
2066                        if ((!memcmp(&iucv_sk(sk)->src_name,
2067                                     trans_hdr->destAppName, 8)) &&
2068                            (!memcmp(&iucv_sk(sk)->src_user_id,
2069                                     trans_hdr->destUserID, 8)) &&
2070                            (!memcmp(&iucv_sk(sk)->dst_name,
2071                                     trans_hdr->srcAppName, 8)) &&
2072                            (!memcmp(&iucv_sk(sk)->dst_user_id,
2073                                     trans_hdr->srcUserID, 8))) {
2074                                iucv = iucv_sk(sk);
2075                                break;
2076                        }
2077                }
2078        }
2079        read_unlock(&iucv_sk_list.lock);
2080        if (!iucv)
2081                sk = NULL;
2082
2083        /* no sock found - how should we reply without one?
2084         * 1) send without a sock and skip send rc checking?
2085         * 2) introduce a default sock to handle such cases?
2086         *
2087         * SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
2088         * data -> send FIN
2089         * SYN|ACK, SYN|FIN, FIN -> no action?
2090         */
2091
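            /* dispatch on the pseudo handshake encoded in the flags field:
             * SYN = connect request, SYN|ACK = connect confirmed,
             * SYN|FIN = connect refused, FIN = close, WIN = credit update,
             * SHT = shutdown, 0 = plain data frame
             */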
2092        switch (trans_hdr->flags) {
2093        case AF_IUCV_FLAG_SYN:
2094                /* connect request */
2095                err = afiucv_hs_callback_syn(sk, skb);
2096                break;
2097        case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2098                /* connect request confirmed */
2099                err = afiucv_hs_callback_synack(sk, skb);
2100                break;
2101        case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2102                /* connect request refused */
2103                err = afiucv_hs_callback_synfin(sk, skb);
2104                break;
2105        case (AF_IUCV_FLAG_FIN):
2106                /* close request */
2107                err = afiucv_hs_callback_fin(sk, skb);
2108                break;
2109        case (AF_IUCV_FLAG_WIN):
2110                err = afiucv_hs_callback_win(sk, skb);
2111                if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2112                        consume_skb(skb);
2113                        break;
2114                }
2115                fallthrough;    /* and receive non-zero length data */
2116        case (AF_IUCV_FLAG_SHT):
2117                /* shutdown request */
2118                fallthrough;    /* and receive zero length data */
2119        case 0:
2120                /* plain data frame */
2121                IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
2122                err = afiucv_hs_callback_rx(sk, skb);
2123                break;
2124        default:
2125                kfree_skb(skb);
2126        }
2127
2128        return err;
2129}
2130
2131/**
2132 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
2133 *                                 transport
2134 **/
2135static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
2136{
2137        struct iucv_sock *iucv = iucv_sk(sk);
2138
2139        if (sock_flag(sk, SOCK_ZAPPED))
2140                return;
2141
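            /* TX_NOTIFY_OK and TX_NOTIFY_DELAYED_OK complete a transmit and
             * may wake blocked senders; TX_NOTIFY_PENDING defers that
             * wakeup; any other state is treated as a transport failure
             * and disconnects the socket
             */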
2142        switch (n) {
2143        case TX_NOTIFY_OK:
2144                atomic_dec(&iucv->skbs_in_xmit);
2145                iucv_sock_wake_msglim(sk);
2146                break;
2147        case TX_NOTIFY_PENDING:
2148                atomic_inc(&iucv->pendings);
2149                break;
2150        case TX_NOTIFY_DELAYED_OK:
2151                atomic_dec(&iucv->skbs_in_xmit);
2152                if (atomic_dec_return(&iucv->pendings) <= 0)
2153                        iucv_sock_wake_msglim(sk);
2154                break;
2155        default:
2156                atomic_dec(&iucv->skbs_in_xmit);
2157                if (sk->sk_state == IUCV_CONNECTED) {
2158                        sk->sk_state = IUCV_DISCONN;
2159                        sk->sk_state_change(sk);
2160                }
2161        }
2162
2163        if (sk->sk_state == IUCV_CLOSING) {
2164                if (atomic_read(&iucv->skbs_in_xmit) == 0) {
2165                        sk->sk_state = IUCV_CLOSED;
2166                        sk->sk_state_change(sk);
2167                }
2168        }
2169}
2170
2171/*
2172 * afiucv_netdev_event: handle netdev notifier chain events
2173 */
2174static int afiucv_netdev_event(struct notifier_block *this,
2175                               unsigned long event, void *ptr)
2176{
2177        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2178        struct sock *sk;
2179        struct iucv_sock *iucv;
2180
2181        switch (event) {
2182        case NETDEV_REBOOT:
2183        case NETDEV_GOING_DOWN:
2184                sk_for_each(sk, &iucv_sk_list.head) {
2185                        iucv = iucv_sk(sk);
2186                        if ((iucv->hs_dev == event_dev) &&
2187                            (sk->sk_state == IUCV_CONNECTED)) {
2188                                if (event == NETDEV_GOING_DOWN)
2189                                        iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
2190                                sk->sk_state = IUCV_DISCONN;
2191                                sk->sk_state_change(sk);
2192                        }
2193                }
2194                break;
2195        case NETDEV_DOWN:
2196        case NETDEV_UNREGISTER:
2197        default:
2198                break;
2199        }
2200        return NOTIFY_DONE;
2201}
2202
2203static struct notifier_block afiucv_netdev_notifier = {
2204        .notifier_call = afiucv_netdev_event,
2205};
2206
2207static const struct proto_ops iucv_sock_ops = {
2208        .family         = PF_IUCV,
2209        .owner          = THIS_MODULE,
2210        .release        = iucv_sock_release,
2211        .bind           = iucv_sock_bind,
2212        .connect        = iucv_sock_connect,
2213        .listen         = iucv_sock_listen,
2214        .accept         = iucv_sock_accept,
2215        .getname        = iucv_sock_getname,
2216        .sendmsg        = iucv_sock_sendmsg,
2217        .recvmsg        = iucv_sock_recvmsg,
2218        .poll           = iucv_sock_poll,
2219        .ioctl          = sock_no_ioctl,
2220        .mmap           = sock_no_mmap,
2221        .socketpair     = sock_no_socketpair,
2222        .shutdown       = iucv_sock_shutdown,
2223        .setsockopt     = iucv_sock_setsockopt,
2224        .getsockopt     = iucv_sock_getsockopt,
2225};
2226
2227static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
2228                            int kern)
2229{
2230        struct sock *sk;
2231
2232        if (protocol && protocol != PF_IUCV)
2233                return -EPROTONOSUPPORT;
2234
2235        sock->state = SS_UNCONNECTED;
2236
2237        switch (sock->type) {
2238        case SOCK_STREAM:
2239        case SOCK_SEQPACKET:
2240                /* currently, proto ops can handle both sk types */
2241                sock->ops = &iucv_sock_ops;
2242                break;
2243        default:
2244                return -ESOCKTNOSUPPORT;
2245        }
2246
2247        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
2248        if (!sk)
2249                return -ENOMEM;
2250
2251        iucv_sock_init(sk, NULL);
2252
2253        return 0;
2254}
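
    /* Hypothetical userspace sketch (not part of this kernel module):
     * stream and seqpacket sockets share the same proto_ops, e.g.
     *
     *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
     */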
2255
2256static const struct net_proto_family iucv_sock_family_ops = {
2257        .family = AF_IUCV,
2258        .owner  = THIS_MODULE,
2259        .create = iucv_sock_create,
2260};
2261
2262static struct packet_type iucv_packet_type = {
2263        .type = cpu_to_be16(ETH_P_AF_IUCV),
2264        .func = afiucv_hs_rcv,
2265};
2266
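    /* Module setup registers, in order: the proto, the socket family, the
     * IUCV handler (z/VM transport only), the netdev notifier and finally
     * the ETH_P_AF_IUCV packet type for the HiperSockets transport;
     * afiucv_init() unwinds in reverse order on failure.
     */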
2267static int __init afiucv_init(void)
2268{
2269        int err;
2270
2271        if (MACHINE_IS_VM && IS_ENABLED(CONFIG_IUCV)) {
2272                cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2273                if (unlikely(err)) {
2274                        WARN_ON(err);
2275                        err = -EPROTONOSUPPORT;
2276                        goto out;
2277                }
2278
2279                pr_iucv = &iucv_if;
2280        } else {
2281                memset(&iucv_userid, 0, sizeof(iucv_userid));
2282                pr_iucv = NULL;
2283        }
2284
2285        err = proto_register(&iucv_proto, 0);
2286        if (err)
2287                goto out;
2288        err = sock_register(&iucv_sock_family_ops);
2289        if (err)
2290                goto out_proto;
2291
2292        if (pr_iucv) {
2293                err = pr_iucv->iucv_register(&af_iucv_handler, 0);
2294                if (err)
2295                        goto out_sock;
2296        }
2297
2298        err = register_netdevice_notifier(&afiucv_netdev_notifier);
2299        if (err)
2300                goto out_notifier;
2301
2302        dev_add_pack(&iucv_packet_type);
2303        return 0;
2304
2305out_notifier:
2306        if (pr_iucv)
2307                pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2308out_sock:
2309        sock_unregister(PF_IUCV);
2310out_proto:
2311        proto_unregister(&iucv_proto);
2312out:
2313        return err;
2314}
2315
2316static void __exit afiucv_exit(void)
2317{
2318        if (pr_iucv)
2319                pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2320
2321        unregister_netdevice_notifier(&afiucv_netdev_notifier);
2322        dev_remove_pack(&iucv_packet_type);
2323        sock_unregister(PF_IUCV);
2324        proto_unregister(&iucv_proto);
2325}
2326
2327module_init(afiucv_init);
2328module_exit(afiucv_exit);
2329
2330MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
2331MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
2332MODULE_VERSION(VERSION);
2333MODULE_LICENSE("GPL");
2334MODULE_ALIAS_NETPROTO(PF_IUCV);
2335