linux/net/iucv/af_iucv.c
/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
 *              Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *              Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
        .name           = "AF_IUCV",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE     (sizeof(((struct iucv_message *)0)->class))

#define __iucv_sock_wait(sk, condition, timeo, ret)                     \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
        long __timeo = timeo;                                           \
        ret = 0;                                                        \
        prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);     \
        while (!(condition)) {                                          \
                if (!__timeo) {                                         \
                        ret = -EAGAIN;                                  \
                        break;                                          \
                }                                                       \
                if (signal_pending(current)) {                          \
                        ret = sock_intr_errno(__timeo);                 \
                        break;                                          \
                }                                                       \
                release_sock(sk);                                       \
                __timeo = schedule_timeout(__timeo);                    \
                lock_sock(sk);                                          \
                ret = sock_error(sk);                                   \
                if (ret)                                                \
                        break;                                          \
        }                                                               \
        finish_wait(sk_sleep(sk), &__wait);                             \
} while (0)

#define iucv_sock_wait(sk, condition, timeo)                            \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __iucv_sock_wait(sk, condition, timeo, __ret);          \
        __ret;                                                          \
})
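/*
 * Illustrative sketch (not part of the driver): iucv_sock_wait() is
 * called with the socket lock held and sleeps, dropping and retaking
 * the lock around each schedule_timeout(), until the condition holds,
 * the timeout expires (-EAGAIN) or a signal arrives. A typical call,
 * as used in iucv_sock_connect() further below, looks like:
 *
 *      err = iucv_sock_wait(sk,
 *                           iucv_sock_in_state(sk, IUCV_CONNECTED,
 *                                              IUCV_DISCONN),
 *                           sock_sndtimeo(sk, flags & O_NONBLOCK));
 */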

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
        struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                   struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Callback functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);

static struct iucv_sock_list iucv_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
        .autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
        .path_pending     = iucv_callback_connreq,
        .path_complete    = iucv_callback_connack,
        .path_severed     = iucv_callback_connrej,
        .message_pending  = iucv_callback_rx,
        .message_complete = iucv_callback_txdone,
        .path_quiesced    = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
       memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
       memcpy(&dst[8], src, 8);
}
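/*
 * Illustrative sketch (not part of the driver): the two helpers above
 * compose the 16-byte IUCV user data from two 8-byte application
 * names, e.g. as done in iucv_sever_path() below:
 *
 *      unsigned char user_data[16];
 *
 *      low_nmcpy(user_data, iucv->src_name);   // bytes 8..15
 *      high_nmcpy(user_data, iucv->dst_name);  // bytes 0..7
 *      ASCEBC(user_data, sizeof(user_data));   // convert to EBCDIC
 */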

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
        return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:        AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
        struct iucv_sock *iucv;
        struct sock *sk;
        int err = 0;

#ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
        read_lock(&iucv_sk_list.lock);
        sk_for_each(sk, &iucv_sk_list.head) {
                iucv = iucv_sk(sk);
                switch (sk->sk_state) {
                case IUCV_DISCONN:
                case IUCV_CLOSING:
                case IUCV_CONNECTED:
                        iucv_sever_path(sk, 0);
                        break;
                case IUCV_OPEN:
                case IUCV_BOUND:
                case IUCV_LISTEN:
                case IUCV_CLOSED:
                default:
                        break;
                }
                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);
        }
        read_unlock(&iucv_sk_list.lock);
        return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:        AFIUCV dummy device
 *
 * Socket cleanup after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
        struct sock *sk;

#ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
        read_lock(&iucv_sk_list.lock);
        sk_for_each(sk, &iucv_sk_list.head) {
                switch (sk->sk_state) {
                case IUCV_CONNECTED:
                        sk->sk_err = EPIPE;
                        sk->sk_state = IUCV_DISCONN;
                        sk->sk_state_change(sk);
                        break;
                case IUCV_DISCONN:
                case IUCV_CLOSING:
                case IUCV_LISTEN:
                case IUCV_BOUND:
                case IUCV_OPEN:
                default:
                        break;
                }
        }
        read_unlock(&iucv_sk_list.lock);
        return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
        .prepare = afiucv_pm_prepare,
        .complete = afiucv_pm_complete,
        .freeze = afiucv_pm_freeze,
        .thaw = afiucv_pm_restore_thaw,
        .restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
        .owner = THIS_MODULE,
        .name = "afiucv",
        .bus  = NULL,
        .pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:        Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *      PRMDATA[0..6]   socket data (max 7 bytes);
 *      PRMDATA[7]      socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data length is greater than 7, PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); in that case the
 * function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
        size_t datalen;

        if (msg->flags & IUCV_IPRMDATA) {
                datalen = 0xff - msg->rmmsg[7];
                return (datalen < 8) ? datalen : 8;
        }
        return msg->length;
}
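/*
 * Worked example (illustrative only): a 5-byte payload sent as IPRM
 * data is stored in PRMDATA[0..4] with PRMDATA[7] = 0xff - 5 = 0xfa,
 * so iucv_msg_length() computes 0xff - 0xfa = 5. For the special
 * shutdown message iprm_shutdown above, PRMDATA[7] = 0x01 yields
 * 0xff - 0x01 = 0xfe, which is clamped to 8 (no regular socket data).
 */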

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:         sock structure
 * @state:      first iucv sk state
 * @state2:     second iucv sk state
 *
 * Returns true if the socket is in either the first or the second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
        return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:         sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);

        if (sk->sk_state != IUCV_CONNECTED)
                return 1;
        if (iucv->transport == AF_IUCV_TRANS_IUCV)
                return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
        else
                return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
                        (atomic_read(&iucv->pendings) <= 0));
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                   struct sk_buff *skb, u8 flags)
{
        struct iucv_sock *iucv = iucv_sk(sock);
        struct af_iucv_trans_hdr *phs_hdr;
        struct sk_buff *nskb;
        int err, confirm_recv = 0;

        memset(skb->head, 0, ETH_HLEN);
        phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
                                        sizeof(struct af_iucv_trans_hdr));
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

        phs_hdr->magic = ETH_P_AF_IUCV;
        phs_hdr->version = 1;
        phs_hdr->flags = flags;
        if (flags == AF_IUCV_FLAG_SYN)
                phs_hdr->window = iucv->msglimit;
        else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
                confirm_recv = atomic_read(&iucv->msg_recv);
                phs_hdr->window = confirm_recv;
                if (confirm_recv)
                        phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
        }
        memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
        memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
        memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
        memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
        ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
        ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
        ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
        ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
        if (imsg)
                memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

        skb->dev = iucv->hs_dev;
        if (!skb->dev)
                return -ENODEV;
        if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
                return -ENETDOWN;
        if (skb->len > skb->dev->mtu) {
                if (sock->sk_type == SOCK_SEQPACKET)
                        return -EMSGSIZE;
                else
                        skb_trim(skb, skb->dev->mtu);
        }
        skb->protocol = ETH_P_AF_IUCV;
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return -ENOMEM;
        skb_queue_tail(&iucv->send_skb_q, nskb);
        err = dev_queue_xmit(skb);
        if (net_xmit_eval(err)) {
                skb_unlink(nskb, &iucv->send_skb_q);
                kfree_skb(nskb);
        } else {
                atomic_sub(confirm_recv, &iucv->msg_recv);
                WARN_ON(atomic_read(&iucv->msg_recv) < 0);
        }
        return net_xmit_eval(err);
}
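/*
 * Illustrative sketch (not part of the driver): each frame built by
 * afiucv_hs_send() for the HiperSockets transport is laid out as
 *
 *      | ETH_HLEN bytes (zeroed) | struct af_iucv_trans_hdr | payload |
 *
 * where the transport header carries the magic ETH_P_AF_IUCV, a
 * version, the control flags, the flow-control window and the
 * EBCDIC-converted source/destination user IDs and application names.
 */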

static struct sock *__iucv_get_sock_by_name(char *nm)
{
        struct sock *sk;

        sk_for_each(sk, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_error_queue);

        sk_mem_reclaim(sk);

        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_err("Attempt to release alive iucv socket %p\n", sk);
                return;
        }

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(sk->sk_wmem_queued);
        WARN_ON(sk->sk_forward_alloc);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
        struct sock *sk;

        /* Close non-accepted connections */
        while ((sk = iucv_accept_dequeue(parent, NULL))) {
                iucv_sock_close(sk);
                iucv_sock_kill(sk);
        }

        parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;

        iucv_sock_unlink(&iucv_sk_list, sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_path *path = iucv->path;

        if (iucv->path) {
                iucv->path = NULL;
                if (with_user_data) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
                        ASCEBC(user_data, sizeof(user_data));
                        pr_iucv->path_sever(path, user_data);
                } else
                        pr_iucv->path_sever(path, NULL);
                iucv_path_free(path);
        }
}

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
        int err = 0;
        int blen;
        struct sk_buff *skb;
        u8 shutdown = 0;

        blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                /* controlling flags should be sent anyway */
                shutdown = sk->sk_shutdown;
                sk->sk_shutdown &= RCV_SHUTDOWN;
        }
        skb = sock_alloc_send_skb(sk, blen, 1, &err);
        if (skb) {
                skb_reserve(skb, blen);
                err = afiucv_hs_send(NULL, sk, skb, flags);
        }
        if (shutdown)
                sk->sk_shutdown = shutdown;
        return err;
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned long timeo;
        int err = 0;

        lock_sock(sk);

        switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;

        case IUCV_CONNECTED:
                if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                        err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
                        sk->sk_state = IUCV_DISCONN;
                        sk->sk_state_change(sk);
                }
        case IUCV_DISCONN:   /* fall through */
                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);

                if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                timeo = sk->sk_lingertime;
                        else
                                timeo = IUCV_DISCONN_TIMEOUT;
                        iucv_sock_wait(sk,
                                        iucv_sock_in_state(sk, IUCV_CLOSED, 0),
                                        timeo);
                }

        case IUCV_CLOSING:   /* fall through */
                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);

                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);

                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);

        default:   /* fall through */
                iucv_sever_path(sk, 1);
        }

        if (iucv->hs_dev) {
                dev_put(iucv->hs_dev);
                iucv->hs_dev = NULL;
                sk->sk_bound_dev_if = 0;
        }

        /* mark socket for deletion by iucv_sock_kill() */
        sock_set_flag(sk, SOCK_ZAPPED);

        release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
        if (parent) {
                sk->sk_type = parent->sk_type;
                security_sk_clone(parent, sk);
        }
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
        struct sock *sk;
        struct iucv_sock *iucv;

        sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
        if (!sk)
                return NULL;
        iucv = iucv_sk(sk);

        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv->accept_q);
        spin_lock_init(&iucv->accept_q_lock);
        skb_queue_head_init(&iucv->send_skb_q);
        INIT_LIST_HEAD(&iucv->message_q.list);
        spin_lock_init(&iucv->message_q.lock);
        skb_queue_head_init(&iucv->backlog_skb_q);
        iucv->send_tag = 0;
        atomic_set(&iucv->pendings, 0);
        iucv->flags = 0;
        iucv->msglimit = 0;
        atomic_set(&iucv->msg_sent, 0);
        atomic_set(&iucv->msg_recv, 0);
        iucv->path = NULL;
        iucv->sk_txnotify = afiucv_hs_callback_txnotify;
        memset(&iucv->src_user_id, 0, 32);
        if (pr_iucv)
                iucv->transport = AF_IUCV_TRANS_IUCV;
        else
                iucv->transport = AF_IUCV_TRANS_HIPER;

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
        sk->sk_allocation = GFP_DMA;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
                            int kern)
{
        struct sock *sk;

        if (protocol && protocol != PF_IUCV)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        switch (sock->type) {
        case SOCK_STREAM:
                sock->ops = &iucv_sock_ops;
                break;
        case SOCK_SEQPACKET:
                /* currently, proto ops can handle both sk types */
                sock->ops = &iucv_sock_ops;
                break;
        default:
                return -ESOCKTNOSUPPORT;
        }

        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
        if (!sk)
                return -ENOMEM;

        iucv_sock_init(sk, NULL);

        return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(parent);

        sock_hold(sk);
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        sk_acceptq_removed(iucv_sk(sk)->parent);
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);

                if (sk->sk_state == IUCV_CLOSED) {
                        iucv_accept_unlink(sk);
                        release_sock(sk);
                        continue;
                }

                if (sk->sk_state == IUCV_CONNECTED ||
                    sk->sk_state == IUCV_DISCONN ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }
        return NULL;
}

static void __iucv_auto_name(struct iucv_sock *iucv)
{
        char name[12];

        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
                        atomic_inc_return(&iucv_sk_list.autobind_name));
        }
        memcpy(iucv->src_name, name, 8);
}
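/*
 * Illustrative note (not part of the driver): __iucv_auto_name()
 * derives the 8-character application name from a global counter, so
 * the first autobound sockets get names "00000001", "00000002", ...,
 * skipping any value that is already bound.
 */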

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        int err = 0;
        struct net_device *dev;
        char uid[9];

        /* Verify the input sockaddr */
        if (!addr || addr->sa_family != AF_IUCV)
                return -EINVAL;

        if (addr_len < sizeof(struct sockaddr_iucv))
                return -EINVAL;

        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
                goto done;
        }

        write_lock_bh(&iucv_sk_list.lock);

        iucv = iucv_sk(sk);
        if (__iucv_get_sock_by_name(sa->siucv_name)) {
                err = -EADDRINUSE;
                goto done_unlock;
        }
        if (iucv->path)
                goto done_unlock;

        /* Bind the socket */
        if (pr_iucv)
                if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
                        goto vm_bind; /* VM IUCV transport */

        /* try hiper transport */
        memcpy(uid, sa->siucv_user_id, sizeof(uid));
        ASCEBC(uid, 8);
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
                if (!memcmp(dev->perm_addr, uid, 8)) {
                        memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
                        /* Check for uninitialized siucv_name */
                        if (strncmp(sa->siucv_name, "        ", 8) == 0)
                                __iucv_auto_name(iucv);
                        else
                                memcpy(iucv->src_name, sa->siucv_name, 8);
                        sk->sk_bound_dev_if = dev->ifindex;
                        iucv->hs_dev = dev;
                        dev_hold(dev);
                        sk->sk_state = IUCV_BOUND;
                        iucv->transport = AF_IUCV_TRANS_HIPER;
                        if (!iucv->msglimit)
                                iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
                        rcu_read_unlock();
                        goto done_unlock;
                }
        }
        rcu_read_unlock();
vm_bind:
        if (pr_iucv) {
                /* use local userid for backward compat */
                memcpy(iucv->src_name, sa->siucv_name, 8);
                memcpy(iucv->src_user_id, iucv_userid, 8);
                sk->sk_state = IUCV_BOUND;
                iucv->transport = AF_IUCV_TRANS_IUCV;
                if (!iucv->msglimit)
                        iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
                goto done_unlock;
        }
        /* found no dev to bind */
        err = -ENODEV;
done_unlock:
        /* Release the socket list lock */
        write_unlock_bh(&iucv_sk_list.lock);
done:
        release_sock(sk);
        return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        int err = 0;

        if (unlikely(!pr_iucv))
                return -EPROTO;

        memcpy(iucv->src_user_id, iucv_userid, 8);

        write_lock_bh(&iucv_sk_list.lock);
        __iucv_auto_name(iucv);
        write_unlock_bh(&iucv_sk_list.lock);

        if (!iucv->msglimit)
                iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

        return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned char user_data[16];
        int err;

        high_nmcpy(user_data, sa->siucv_name);
        low_nmcpy(user_data, iucv->src_name);
        ASCEBC(user_data, sizeof(user_data));

        /* Create path. */
        iucv->path = iucv_path_alloc(iucv->msglimit,
                                     IUCV_IPRMDATA, GFP_KERNEL);
        if (!iucv->path) {
                err = -ENOMEM;
                goto done;
        }
        err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
                                    sa->siucv_user_id, NULL, user_data,
                                    sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
                switch (err) {
                case 0x0b:      /* Target communicator is not logged on */
                        err = -ENETUNREACH;
                        break;
                case 0x0d:      /* Max connections for this guest exceeded */
                case 0x0e:      /* Max connections for target guest exceeded */
                        err = -EAGAIN;
                        break;
                case 0x0f:      /* Missing IUCV authorization */
                        err = -EACCES;
                        break;
                default:
                        err = -ECONNREFUSED;
                        break;
                }
        }
done:
        return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                             int alen, int flags)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;

        if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
                return -EINVAL;

        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
                return -EBADFD;

        if (sk->sk_state == IUCV_OPEN &&
            iucv->transport == AF_IUCV_TRANS_HIPER)
                return -EBADFD; /* explicit bind required */

        if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
                return -EINVAL;

        if (sk->sk_state == IUCV_OPEN) {
                err = iucv_sock_autobind(sk);
                if (unlikely(err))
                        return err;
        }

        lock_sock(sk);

        /* Set the destination information */
        memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
        memcpy(iucv->dst_name, sa->siucv_name, 8);

        if (iucv->transport == AF_IUCV_TRANS_HIPER)
                err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
        else
                err = afiucv_path_connect(sock, addr);
        if (err)
                goto done;

        if (sk->sk_state != IUCV_CONNECTED)
                err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
                                                            IUCV_DISCONN),
                                     sock_sndtimeo(sk, flags & O_NONBLOCK));

        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
                err = -ECONNREFUSED;

        if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
                iucv_sever_path(sk, 0);

done:
        release_sock(sk);
        return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sk->sk_state != IUCV_BOUND)
                goto done;

        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto done;

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = IUCV_LISTEN;
        err = 0;

done:
        release_sock(sk);
        return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;

        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
                goto done;
        }

        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);

        if (err)
                goto done;

        newsock->state = SS_CONNECTED;

done:
        release_sock(sk);
        return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                             int *len, int peer)
{
        struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);

        addr->sa_family = AF_IUCV;
        *len = sizeof(struct sockaddr_iucv);

        if (peer) {
                memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
                memcpy(siucv->siucv_name, iucv->dst_name, 8);
        } else {
                memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
                memcpy(siucv->siucv_name, iucv->src_name, 8);
        }
        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
        memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

        return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:       IUCV path
 * @msg:        Pointer to a struct iucv_message
 * @skb:        The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
                          struct sk_buff *skb)
{
        u8 prmdata[8];

        memcpy(prmdata, (void *) skb->data, skb->len);
        prmdata[7] = 0xff - (u8) skb->len;
        return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
                                 (void *) prmdata, 8);
}
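/*
 * Worked example (illustrative only): sending the 3 bytes "abc" as
 * IPRM data yields prmdata[0..2] = 'a', 'b', 'c' and
 * prmdata[7] = 0xff - 3 = 0xfc (bytes 3..6 are don't-care); the
 * receiver's iucv_msg_length() recovers the length as 0xff - 0xfc = 3.
 */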

static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                             size_t len)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        size_t headroom = 0;
        size_t linear;
        struct sk_buff *skb;
        struct iucv_message txmsg = {0};
        struct cmsghdr *cmsg;
        int cmsg_done;
        long timeo;
        char user_id[9];
        char appl_id[9];
        int err;
        int noblock = msg->msg_flags & MSG_DONTWAIT;

        err = sock_error(sk);
        if (err)
                return err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        /* SOCK_SEQPACKET: we do not support segmented records */
        if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
                return -EOPNOTSUPP;

        lock_sock(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        /* Return if the socket is not in connected state */
        if (sk->sk_state != IUCV_CONNECTED) {
                err = -ENOTCONN;
                goto out;
        }

        /* initialize defaults */
        cmsg_done   = 0;        /* check for duplicate headers */
        txmsg.class = 0;

        /* iterate over control messages */
        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg)) {
                        err = -EINVAL;
                        goto out;
                }

                if (cmsg->cmsg_level != SOL_IUCV)
                        continue;

                if (cmsg->cmsg_type & cmsg_done) {
                        err = -EINVAL;
                        goto out;
                }
                cmsg_done |= cmsg->cmsg_type;

                switch (cmsg->cmsg_type) {
                case SCM_IUCV_TRGCLS:
                        if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
                                err = -EINVAL;
                                goto out;
                        }

                        /* set iucv message target class */
                        memcpy(&txmsg.class,
                                (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

                        break;

                default:
                        err = -EINVAL;
                        goto out;
                }
        }

        /* allocate one skb for each iucv message:
         * this is fine for SOCK_SEQPACKET (unless we want to support
         * segmented records using the MSG_EOR flag), but
         * for SOCK_STREAM we might want to improve it in future */
        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
                linear = len;
        } else {
                if (len < PAGE_SIZE) {
                        linear = len;
                } else {
                        /* In nonlinear "classic" iucv skb,
                         * reserve space for iucv_array
                         */
                        headroom = sizeof(struct iucv_array) *
                                   (MAX_SKB_FRAGS + 1);
                        linear = PAGE_SIZE - headroom;
                }
        }
        skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
                                   noblock, &err, 0);
        if (!skb)
                goto out;
        if (headroom)
                skb_reserve(skb, headroom);
        skb_put(skb, linear);
        skb->len = len;
        skb->data_len = len - linear;
        err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
        if (err)
                goto fail;

        /* wait if the outstanding-message limit for the iucv path
         * has been reached */
        timeo = sock_sndtimeo(sk, noblock);
        err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
        if (err)
                goto fail;
1159
        /* return -ECONNRESET if the socket is no longer connected */
        if (sk->sk_state != IUCV_CONNECTED) {
                err = -ECONNRESET;
                goto fail;
        }

        /* increment and save iucv message tag for msg_completion cbk */
        txmsg.tag = iucv->send_tag++;
        IUCV_SKB_CB(skb)->tag = txmsg.tag;

        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                atomic_inc(&iucv->msg_sent);
                err = afiucv_hs_send(&txmsg, sk, skb, 0);
                if (err) {
                        atomic_dec(&iucv->msg_sent);
                        goto fail;
                }
        } else { /* Classic VM IUCV transport */
                skb_queue_tail(&iucv->send_skb_q, skb);

                if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
                    skb->len <= 7) {
                        err = iucv_send_iprm(iucv->path, &txmsg, skb);

                        /* on success: there is no message_complete callback */
                        /* for an IPRMDATA msg; remove skb from send queue   */
                        if (err == 0) {
                                skb_unlink(skb, &iucv->send_skb_q);
                                kfree_skb(skb);
                        }

                        /* this error should never happen since the     */
                        /* IUCV_IPRMDATA path flag is set... sever path */
                        if (err == 0x15) {
                                pr_iucv->path_sever(iucv->path, NULL);
                                skb_unlink(skb, &iucv->send_skb_q);
                                err = -EPIPE;
                                goto fail;
                        }
                } else if (skb_is_nonlinear(skb)) {
                        struct iucv_array *iba = (struct iucv_array *)skb->head;
                        int i;

                        /* skip iucv_array lying in the headroom */
                        iba[0].address = (u32)(addr_t)skb->data;
                        iba[0].length = (u32)skb_headlen(skb);
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                                iba[i + 1].address =
                                        (u32)(addr_t)skb_frag_address(frag);
                                iba[i + 1].length = (u32)skb_frag_size(frag);
                        }
                        err = pr_iucv->message_send(iucv->path, &txmsg,
                                                    IUCV_IPBUFLST, 0,
                                                    (void *)iba, skb->len);
                } else { /* non-IPRM Linear skb */
                        err = pr_iucv->message_send(iucv->path, &txmsg,
                                        0, 0, (void *)skb->data, skb->len);
                }
                if (err) {
                        if (err == 3) {
                                user_id[8] = 0;
                                memcpy(user_id, iucv->dst_user_id, 8);
                                appl_id[8] = 0;
                                memcpy(appl_id, iucv->dst_name, 8);
                                pr_err(
                "Application %s on z/VM guest %s exceeds message limit\n",
                                        appl_id, user_id);
                                err = -EAGAIN;
                        } else {
                                err = -EPIPE;
                        }
                        skb_unlink(skb, &iucv->send_skb_q);
                        goto fail;
                }
        }

        release_sock(sk);
        return len;

fail:
        kfree_skb(skb);
out:
        release_sock(sk);
        return err;
}

static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
        size_t headroom, linear;
        struct sk_buff *skb;
        int err;

        if (len < PAGE_SIZE) {
                headroom = 0;
                linear = len;
        } else {
                headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
                linear = PAGE_SIZE - headroom;
        }
        skb = alloc_skb_with_frags(headroom + linear, len - linear,
                                   0, &err, GFP_ATOMIC | GFP_DMA);
        WARN_ONCE(!skb,
                  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
                  len, err);
        if (skb) {
                if (headroom)
                        skb_reserve(skb, headroom);
                skb_put(skb, linear);
                skb->len = len;
                skb->data_len = len - linear;
        }
        return skb;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
                                 struct iucv_path *path,
                                 struct iucv_message *msg)
{
        int rc;
        unsigned int len;

        len = iucv_msg_length(msg);

        /* store msg target class in the second 4 bytes of skb ctrl buffer */
        /* Note: the first 4 bytes are reserved for msg tag */
        IUCV_SKB_CB(skb)->class = msg->class;

        /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
        if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
                if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
                        skb->data = NULL;
                        skb->len = 0;
                }
        } else {
                if (skb_is_nonlinear(skb)) {
                        struct iucv_array *iba = (struct iucv_array *)skb->head;
                        int i;

                        iba[0].address = (u32)(addr_t)skb->data;
                        iba[0].length = (u32)skb_headlen(skb);
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                                iba[i + 1].address =
                                        (u32)(addr_t)skb_frag_address(frag);
                                iba[i + 1].length = (u32)skb_frag_size(frag);
                        }
                        rc = pr_iucv->message_receive(path, msg,
                                              IUCV_IPBUFLST,
                                              (void *)iba, len, NULL);
                } else {
                        rc = pr_iucv->message_receive(path, msg,
                                              msg->flags & IUCV_IPRMDATA,
                                              skb->data, len, NULL);
                }
                if (rc) {
                        kfree_skb(skb);
                        return;
                }
                WARN_ON_ONCE(skb->len != len);
        }

        IUCV_SKB_CB(skb)->offset = 0;
        if (sk_filter(sk, skb)) {
                atomic_inc(&sk->sk_drops);      /* skb rejected by filter */
                kfree_skb(skb);
                return;
        }
        if (__sock_queue_rcv_skb(sk, skb))      /* handle rcv queue full */
                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *p, *n;

        list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
                skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
                if (!skb)
                        break;
                iucv_process_message(sk, skb, p->path, &p->msg);
                list_del(&p->list);
                kfree(p);
                if (!skb_queue_empty(&iucv->backlog_skb_q))
                        break;
        }
}

static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
                             size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned int copied, rlen;
        struct sk_buff *skb, *rskb, *cskb;
        int err = 0;
        u32 offset;

        if ((sk->sk_state == IUCV_DISCONN) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
            list_empty(&iucv->message_q.list))
                return 0;

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        /* receive/dequeue next skb:
         * the function understands MSG_PEEK and, thus, does not dequeue skb */
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
        }

        offset = IUCV_SKB_CB(skb)->offset;
        rlen   = skb->len - offset;             /* real length of skb */
        copied = min_t(unsigned int, rlen, len);
        if (!rlen)
                sk->sk_shutdown |= RCV_SHUTDOWN;

        cskb = skb;
        if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return -EFAULT;
        }

        /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
        if (sk->sk_type == SOCK_SEQPACKET) {
                if (copied < rlen)
                        msg->msg_flags |= MSG_TRUNC;
                /* each iucv message contains a complete record */
                msg->msg_flags |= MSG_EOR;
        }

        /* create control message to store iucv msg target class:
         * get the trgcls from the control buffer of the skb due to
         * fragmentation of original iucv message. */
        err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
                       sizeof(IUCV_SKB_CB(skb)->class),
                       (void *)&IUCV_SKB_CB(skb)->class);
        if (err) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return err;
        }

        /* Mark read part of skb as used */
        if (!(flags & MSG_PEEK)) {

                /* SOCK_STREAM: re-queue skb if it contains unreceived data */
                if (sk->sk_type == SOCK_STREAM) {
                        if (copied < rlen) {
                                IUCV_SKB_CB(skb)->offset = offset + copied;
                                skb_queue_head(&sk->sk_receive_queue, skb);
                                goto done;
                        }
                }

                kfree_skb(skb);
                if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                        atomic_inc(&iucv->msg_recv);
                        if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
                                WARN_ON(1);
                                iucv_sock_close(sk);
                                return -EFAULT;
                        }
                }

                /* Queue backlog skbs */
                spin_lock_bh(&iucv->message_q.lock);
                rskb = skb_dequeue(&iucv->backlog_skb_q);
                while (rskb) {
                        IUCV_SKB_CB(rskb)->offset = 0;
                        if (__sock_queue_rcv_skb(sk, rskb)) {
                                /* handle rcv queue full */
                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
                                break;
                        }
                        rskb = skb_dequeue(&iucv->backlog_skb_q);
                }
                if (skb_queue_empty(&iucv->backlog_skb_q)) {
                        if (!list_empty(&iucv->message_q.list))
                                iucv_process_message_q(sk);
                        if (atomic_read(&iucv->msg_recv) >=
                                                        iucv->msglimit / 2) {
                                err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
                                if (err) {
                                        sk->sk_state = IUCV_DISCONN;
                                        sk->sk_state_change(sk);
                                }
                        }
                }
                spin_unlock_bh(&iucv->message_q.lock);
        }

done:
        /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
        if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
                copied = rlen;

        return copied;
}
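/*
 * Illustrative note (not part of the driver): on the HiperSockets
 * transport, receive flow control is credit based. Each delivered
 * message bumps msg_recv; once at least half of msglimit has been
 * consumed, recvmsg() above sends an AF_IUCV_FLAG_WIN frame whose
 * window field (filled in by afiucv_hs_send()) confirms the consumed
 * credits back to the peer, which may then send again. E.g. with
 * msglimit = 8, credits are confirmed after every 4 received messages.
 */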
1479
1480static inline unsigned int iucv_accept_poll(struct sock *parent)
1481{
1482        struct iucv_sock *isk, *n;
1483        struct sock *sk;
1484
1485        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
1486                sk = (struct sock *) isk;
1487
1488                if (sk->sk_state == IUCV_CONNECTED)
1489                        return POLLIN | POLLRDNORM;
1490        }
1491
1492        return 0;
1493}
1494
1495unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
1496                            poll_table *wait)
1497{
1498        struct sock *sk = sock->sk;
1499        unsigned int mask = 0;
1500
1501        sock_poll_wait(file, sk_sleep(sk), wait);
1502
1503        if (sk->sk_state == IUCV_LISTEN)
1504                return iucv_accept_poll(sk);
1505
1506        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
1507                mask |= POLLERR |
1508                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
1509
1510        if (sk->sk_shutdown & RCV_SHUTDOWN)
1511                mask |= POLLRDHUP;
1512
1513        if (sk->sk_shutdown == SHUTDOWN_MASK)
1514                mask |= POLLHUP;
1515
1516        if (!skb_queue_empty(&sk->sk_receive_queue) ||
1517            (sk->sk_shutdown & RCV_SHUTDOWN))
1518                mask |= POLLIN | POLLRDNORM;
1519
1520        if (sk->sk_state == IUCV_CLOSED)
1521                mask |= POLLHUP;
1522
1523        if (sk->sk_state == IUCV_DISCONN)
1524                mask |= POLLIN;
1525
1526        if (sock_writeable(sk) && iucv_below_msglim(sk))
1527                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1528        else
1529                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1530
1531        return mask;
1532}
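/* Illustration only, not part of this file: how the mask computed above
 * reaches userspace via poll(2); "fd" is assumed to be a connected AF_IUCV
 * socket.  POLLOUT is only reported while iucv_below_msglim() holds, so a
 * poll loop naturally throttles senders to the peer's message limit.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLIN)
 *			;	// data queued, or peer gone (IUCV_DISCONN)
 *		if (pfd.revents & POLLOUT)
 *			;	// room below the message limit to send
 *	}
 */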
1533
1534static int iucv_sock_shutdown(struct socket *sock, int how)
1535{
1536        struct sock *sk = sock->sk;
1537        struct iucv_sock *iucv = iucv_sk(sk);
1538        struct iucv_message txmsg;
1539        int err = 0;
1540
1541        how++;  /* SHUT_RD/WR/RDWR (0/1/2) -> shutdown mask bits (1/2/3) */
1542
1543        if ((how & ~SHUTDOWN_MASK) || !how)
1544                return -EINVAL;
1545
1546        lock_sock(sk);
1547        switch (sk->sk_state) {
1548        case IUCV_LISTEN:
1549        case IUCV_DISCONN:
1550        case IUCV_CLOSING:
1551        case IUCV_CLOSED:
1552                err = -ENOTCONN;
1553                goto fail;
1554        default:
1555                break;
1556        }
1557
1558        if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
1559                if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1560                        txmsg.class = 0;
1561                        txmsg.tag = 0;
1562                        err = pr_iucv->message_send(iucv->path, &txmsg,
1563                                IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
1564                        if (err) {
1565                                switch (err) {
1566                                case 1:
1567                                        err = -ENOTCONN;
1568                                        break;
1569                                case 2:
1570                                        err = -ECONNRESET;
1571                                        break;
1572                                default:
1573                                        err = -ENOTCONN;
1574                                        break;
1575                                }
1576                        }
1577                } else
1578                        iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
1579        }
1580
1581        sk->sk_shutdown |= how;
1582        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
1583                if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
1584                    iucv->path) {
1585                        err = pr_iucv->path_quiesce(iucv->path, NULL);
1586                        if (err)
1587                                err = -ENOTCONN;
1589                }
1590                skb_queue_purge(&sk->sk_receive_queue);
1591        }
1592
1593        /* Wake up anyone sleeping in poll */
1594        sk->sk_state_change(sk);
1595
1596fail:
1597        release_sock(sk);
1598        return err;
1599}
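/* Illustration only, not part of this file: a hedged userspace sketch of
 * the half-close implemented above.  On the native IUCV transport, SHUT_WR
 * sends the special IPRM shutdown message, so the peer's recvmsg() reports
 * end-of-file while this side may keep receiving.
 *
 *	shutdown(fd, SHUT_WR);			// stop sending only
 *	while ((n = recv(fd, buf, sizeof(buf), 0)) > 0)
 *		handle_data(buf, n);		// handle_data() is hypothetical
 */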
1600
1601static int iucv_sock_release(struct socket *sock)
1602{
1603        struct sock *sk = sock->sk;
1604        int err = 0;
1605
1606        if (!sk)
1607                return 0;
1608
1609        iucv_sock_close(sk);
1610
1611        sock_orphan(sk);
1612        iucv_sock_kill(sk);
1613        return err;
1614}
1615
1616/* getsockopt and setsockopt */
1617static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1618                                char __user *optval, unsigned int optlen)
1619{
1620        struct sock *sk = sock->sk;
1621        struct iucv_sock *iucv = iucv_sk(sk);
1622        int val;
1623        int rc;
1624
1625        if (level != SOL_IUCV)
1626                return -ENOPROTOOPT;
1627
1628        if (optlen < sizeof(int))
1629                return -EINVAL;
1630
1631        if (get_user(val, (int __user *) optval))
1632                return -EFAULT;
1633
1634        rc = 0;
1635
1636        lock_sock(sk);
1637        switch (optname) {
1638        case SO_IPRMDATA_MSG:
1639                if (val)
1640                        iucv->flags |= IUCV_IPRMDATA;
1641                else
1642                        iucv->flags &= ~IUCV_IPRMDATA;
1643                break;
1644        case SO_MSGLIMIT:
1645                switch (sk->sk_state) {
1646                case IUCV_OPEN:
1647                case IUCV_BOUND:
1648                        if (val < 1 || val > (u16)(~0))
1649                                rc = -EINVAL;
1650                        else
1651                                iucv->msglimit = val;
1652                        break;
1653                default:
1654                        rc = -EINVAL;
1655                        break;
1656                }
1657                break;
1658        default:
1659                rc = -ENOPROTOOPT;
1660                break;
1661        }
1662        release_sock(sk);
1663
1664        return rc;
1665}
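/* Illustration only, not part of this file: setting the message limit from
 * userspace.  Per the state check above this must happen while the socket
 * is still in IUCV_OPEN or IUCV_BOUND, i.e. before connect() or listen();
 * valid values are 1..65535.
 *
 *	int limit = 16;
 *	if (setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit)))
 *		perror("SO_MSGLIMIT");
 */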
1666
1667static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1668                                char __user *optval, int __user *optlen)
1669{
1670        struct sock *sk = sock->sk;
1671        struct iucv_sock *iucv = iucv_sk(sk);
1672        unsigned int val;
1673        int len;
1674
1675        if (level != SOL_IUCV)
1676                return -ENOPROTOOPT;
1677
1678        if (get_user(len, optlen))
1679                return -EFAULT;
1680
1681        if (len < 0)
1682                return -EINVAL;
1683
1684        len = min_t(unsigned int, len, sizeof(int));
1685
1686        switch (optname) {
1687        case SO_IPRMDATA_MSG:
1688                val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1689                break;
1690        case SO_MSGLIMIT:
1691                lock_sock(sk);
1692                val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1693                                           : iucv->msglimit;    /* default */
1694                release_sock(sk);
1695                break;
1696        case SO_MSGSIZE:
1697                if (sk->sk_state == IUCV_OPEN)
1698                        return -EBADFD;
1699                val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
1700                                sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
1701                                0x7fffffff;
1702                break;
1703        default:
1704                return -ENOPROTOOPT;
1705        }
1706
1707        if (put_user(len, optlen))
1708                return -EFAULT;
1709        if (copy_to_user(optval, &val, len))
1710                return -EFAULT;
1711
1712        return 0;
1713}
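/* Illustration only, not part of this file: querying the maximum message
 * size.  Per the code above, SO_MSGSIZE fails with EBADFD on an unbound
 * (IUCV_OPEN) socket and otherwise reports the HiperSockets MTU minus the
 * transport overhead, or 0x7fffffff on the native IUCV transport.
 *
 *	unsigned int msgsize;
 *	socklen_t len = sizeof(msgsize);
 *	if (!getsockopt(fd, SOL_IUCV, SO_MSGSIZE, &msgsize, &len))
 *		printf("max message size: %u\n", msgsize);
 */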
1714
1715
1716/* Callback wrappers - called from iucv base support */
1717static int iucv_callback_connreq(struct iucv_path *path,
1718                                 u8 ipvmid[8], u8 ipuser[16])
1719{
1720        unsigned char user_data[16];
1721        unsigned char nuser_data[16];
1722        unsigned char src_name[8];
1723        struct sock *sk, *nsk;
1724        struct iucv_sock *iucv, *niucv;
1725        int err;
1726
1727        memcpy(src_name, ipuser, 8);
1728        EBCASC(src_name, 8);
1729        /* Find out if this path belongs to af_iucv. */
1730        read_lock(&iucv_sk_list.lock);
1731        iucv = NULL;
1732        sk = NULL;
1733        sk_for_each(sk, &iucv_sk_list.head)
1734                if (sk->sk_state == IUCV_LISTEN &&
1735                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
1736                        /*
1737                         * Found a listening socket with
1738                         * src_name == ipuser[0-7].
1739                         */
1740                        iucv = iucv_sk(sk);
1741                        break;
1742                }
1743        read_unlock(&iucv_sk_list.lock);
1744        if (!iucv)
1745                /* No socket found, not one of our paths. */
1746                return -EINVAL;
1747
1748        bh_lock_sock(sk);
1749
1750        /* Check if parent socket is listening */
1751        low_nmcpy(user_data, iucv->src_name);
1752        high_nmcpy(user_data, iucv->dst_name);
1753        ASCEBC(user_data, sizeof(user_data));
1754        if (sk->sk_state != IUCV_LISTEN) {
1755                err = pr_iucv->path_sever(path, user_data);
1756                iucv_path_free(path);
1757                goto fail;
1758        }
1759
1760        /* Check for backlog size */
1761        if (sk_acceptq_is_full(sk)) {
1762                err = pr_iucv->path_sever(path, user_data);
1763                iucv_path_free(path);
1764                goto fail;
1765        }
1766
1767        /* Create the new socket */
1768        nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
1769        if (!nsk) {
1770                err = pr_iucv->path_sever(path, user_data);
1771                iucv_path_free(path);
1772                goto fail;
1773        }
1774
1775        niucv = iucv_sk(nsk);
1776        iucv_sock_init(nsk, sk);
1777
1778        /* Set the new iucv_sock */
1779        memcpy(niucv->dst_name, ipuser + 8, 8);
1780        EBCASC(niucv->dst_name, 8);
1781        memcpy(niucv->dst_user_id, ipvmid, 8);
1782        memcpy(niucv->src_name, iucv->src_name, 8);
1783        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1784        niucv->path = path;
1785
1786        /* Call iucv_accept */
1787        high_nmcpy(nuser_data, ipuser + 8);
1788        memcpy(nuser_data + 8, niucv->src_name, 8);
1789        ASCEBC(nuser_data + 8, 8);
1790
1791        /* set message limit for path based on msglimit of accepting socket */
1792        niucv->msglimit = iucv->msglimit;
1793        path->msglim = iucv->msglimit;
1794        err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1795        if (err) {
1796                iucv_sever_path(nsk, 1);
1797                iucv_sock_kill(nsk);
1798                goto fail;
1799        }
1800
1801        iucv_accept_enqueue(sk, nsk);
1802
1803        /* Wake up accept */
1804        nsk->sk_state = IUCV_CONNECTED;
1805        sk->sk_data_ready(sk);
1806        err = 0;
1807fail:
1808        bh_unlock_sock(sk);
1809        return 0;
1810}
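/* Connection-request handling above, in short: the listening socket is
 * looked up by the EBCDIC application name in ipuser[0..7]; a child socket
 * is allocated, initialised from the parent and given the parent's message
 * limit; the path is accepted and the child queued on the parent's accept
 * queue, so a blocking accept(2) returns a socket already in
 * IUCV_CONNECTED.  The function reports 0 to the IUCV base code even on
 * the failure paths, since the path has then already been severed and
 * freed locally.
 */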
1811
1812static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
1813{
1814        struct sock *sk = path->private;
1815
1816        sk->sk_state = IUCV_CONNECTED;
1817        sk->sk_state_change(sk);
1818}
1819
1820static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1821{
1822        struct sock *sk = path->private;
1823        struct iucv_sock *iucv = iucv_sk(sk);
1824        struct sk_buff *skb;
1825        struct sock_msg_q *save_msg;
1826        int len;
1827
1828        if (sk->sk_shutdown & RCV_SHUTDOWN) {
1829                pr_iucv->message_reject(path, msg);
1830                return;
1831        }
1832
1833        spin_lock(&iucv->message_q.lock);
1834
1835        if (!list_empty(&iucv->message_q.list) ||
1836            !skb_queue_empty(&iucv->backlog_skb_q))
1837                goto save_message;
1838
1839        len = atomic_read(&sk->sk_rmem_alloc);
1840        len += SKB_TRUESIZE(iucv_msg_length(msg));
1841        if (len > sk->sk_rcvbuf)
1842                goto save_message;
1843
1844        skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
1845        if (!skb)
1846                goto save_message;
1847
1848        iucv_process_message(sk, skb, path, msg);
1849        goto out_unlock;
1850
1851save_message:
1852        save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
1853        if (!save_msg)
1854                goto out_unlock;
1855        save_msg->path = path;
1856        save_msg->msg = *msg;
1857
1858        list_add_tail(&save_msg->list, &iucv->message_q.list);
1859
1860out_unlock:
1861        spin_unlock(&iucv->message_q.lock);
1862}
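/* Receive-path buffering above, in short: an incoming message is turned
 * into an skb immediately only if ordering is preserved (no backlog or
 * queued messages pending) and it fits the receive buffer; otherwise the
 * (path, message) pair is parked on message_q and replayed later by
 * iucv_process_message_q() from recvmsg() once the backlog drains:
 *
 *	backlog/message_q nonempty		-> save_message
 *	rmem_alloc + truesize > sk_rcvbuf	-> save_message
 *	skb allocation fails			-> save_message
 *	otherwise				-> iucv_process_message()
 */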
1863
1864static void iucv_callback_txdone(struct iucv_path *path,
1865                                 struct iucv_message *msg)
1866{
1867        struct sock *sk = path->private;
1868        struct sk_buff *this = NULL;
1869        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
1870        struct sk_buff *list_skb = list->next;
1871        unsigned long flags;
1872
1873        bh_lock_sock(sk);
1874        if (!skb_queue_empty(list)) {
1875                spin_lock_irqsave(&list->lock, flags);
1876
1877                while (list_skb != (struct sk_buff *)list) {
1878                        if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
1879                                this = list_skb;
1880                                break;
1881                        }
1882                        list_skb = list_skb->next;
1883                }
1884                if (this)
1885                        __skb_unlink(this, list);
1886
1887                spin_unlock_irqrestore(&list->lock, flags);
1888
1889                if (this) {
1890                        kfree_skb(this);
1891                        /* wake up any process waiting for sending */
1892                        iucv_sock_wake_msglim(sk);
1893                }
1894        }
1895
1896        if (sk->sk_state == IUCV_CLOSING) {
1897                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
1898                        sk->sk_state = IUCV_CLOSED;
1899                        sk->sk_state_change(sk);
1900                }
1901        }
1902        bh_unlock_sock(sk);
1904}
1905
1906static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1907{
1908        struct sock *sk = path->private;
1909
1910        if (sk->sk_state == IUCV_CLOSED)
1911                return;
1912
1913        bh_lock_sock(sk);
1914        iucv_sever_path(sk, 1);
1915        sk->sk_state = IUCV_DISCONN;
1916
1917        sk->sk_state_change(sk);
1918        bh_unlock_sock(sk);
1919}
1920
1921/* called if the other communication side shuts down its RECV direction;
1922 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
1923 */
1924static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1925{
1926        struct sock *sk = path->private;
1927
1928        bh_lock_sock(sk);
1929        if (sk->sk_state != IUCV_CLOSED) {
1930                sk->sk_shutdown |= SEND_SHUTDOWN;
1931                sk->sk_state_change(sk);
1932        }
1933        bh_unlock_sock(sk);
1934}
1935
1936/***************** HiperSockets transport callbacks ********************/
1937static void afiucv_swap_src_dest(struct sk_buff *skb)
1938{
1939        struct af_iucv_trans_hdr *trans_hdr =
1940                                (struct af_iucv_trans_hdr *)skb->data;
1941        char tmpID[8];
1942        char tmpName[8];
1943
1944        ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1945        ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1946        ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1947        ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1948        memcpy(tmpID, trans_hdr->srcUserID, 8);
1949        memcpy(tmpName, trans_hdr->srcAppName, 8);
1950        memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1951        memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1952        memcpy(trans_hdr->destUserID, tmpID, 8);
1953        memcpy(trans_hdr->destAppName, tmpName, 8);
1954        skb_push(skb, ETH_HLEN);
1955        memset(skb->data, 0, ETH_HLEN);
1956}
1957
1958/**
1959 * afiucv_hs_callback_syn - react on received SYN
1960 **/
1961static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1962{
1963        struct sock *nsk;
1964        struct iucv_sock *iucv, *niucv;
1965        struct af_iucv_trans_hdr *trans_hdr;
1966        int err;
1967
1968        iucv = iucv_sk(sk);
1969        trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
1970        if (!iucv) {
1971                /* no sock - connection refused */
1972                afiucv_swap_src_dest(skb);
1973                trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1974                err = dev_queue_xmit(skb);
1975                goto out;
1976        }
1977
1978        nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
1979        bh_lock_sock(sk);
1980        if ((sk->sk_state != IUCV_LISTEN) ||
1981            sk_acceptq_is_full(sk) ||
1982            !nsk) {
1983                /* error on server socket - connection refused */
1984                afiucv_swap_src_dest(skb);
1985                trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1986                err = dev_queue_xmit(skb);
1987                iucv_sock_kill(nsk);
1988                bh_unlock_sock(sk);
1989                goto out;
1990        }
1991
1992        niucv = iucv_sk(nsk);
1993        iucv_sock_init(nsk, sk);
1994        niucv->transport = AF_IUCV_TRANS_HIPER;
1995        niucv->msglimit = iucv->msglimit;
1996        if (!trans_hdr->window)
1997                niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
1998        else
1999                niucv->msglimit_peer = trans_hdr->window;
2000        memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
2001        memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
2002        memcpy(niucv->src_name, iucv->src_name, 8);
2003        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
2004        nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
2005        niucv->hs_dev = iucv->hs_dev;
2006        dev_hold(niucv->hs_dev);
2007        afiucv_swap_src_dest(skb);
2008        trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
2009        trans_hdr->window = niucv->msglimit;
2010        /* if the receiver acks the xmit, the connection is established */
2011        err = dev_queue_xmit(skb);
2012        if (!err) {
2013                iucv_accept_enqueue(sk, nsk);
2014                nsk->sk_state = IUCV_CONNECTED;
2015                sk->sk_data_ready(sk);
2016        } else
2017                iucv_sock_kill(nsk);
2018        bh_unlock_sock(sk);
2019
2020out:
2021        return NET_RX_SUCCESS;
2022}
2023
2024/**
2025 * afiucv_hs_callback_synack() - react on received SYN-ACK
2026 **/
2027static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
2028{
2029        struct iucv_sock *iucv = iucv_sk(sk);
2030        struct af_iucv_trans_hdr *trans_hdr =
2031                                        (struct af_iucv_trans_hdr *)skb->data;
2032
2033        if (!iucv)
2034                goto out;
2035        if (sk->sk_state != IUCV_BOUND)
2036                goto out;
2037        bh_lock_sock(sk);
2038        iucv->msglimit_peer = trans_hdr->window;
2039        sk->sk_state = IUCV_CONNECTED;
2040        sk->sk_state_change(sk);
2041        bh_unlock_sock(sk);
2042out:
2043        kfree_skb(skb);
2044        return NET_RX_SUCCESS;
2045}
2046
2047/**
2048 * afiucv_hs_callback_synfin() - react on received SYN_FIN
2049 **/
2050static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2051{
2052        struct iucv_sock *iucv = iucv_sk(sk);
2053
2054        if (!iucv)
2055                goto out;
2056        if (sk->sk_state != IUCV_BOUND)
2057                goto out;
2058        bh_lock_sock(sk);
2059        sk->sk_state = IUCV_DISCONN;
2060        sk->sk_state_change(sk);
2061        bh_unlock_sock(sk);
2062out:
2063        kfree_skb(skb);
2064        return NET_RX_SUCCESS;
2065}
2066
2067/**
2068 * afiucv_hs_callback_fin() - react on received FIN
2069 **/
2070static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2071{
2072        struct iucv_sock *iucv = iucv_sk(sk);
2073
2074        /* other end of connection closed */
2075        if (!iucv)
2076                goto out;
2077        bh_lock_sock(sk);
2078        if (sk->sk_state == IUCV_CONNECTED) {
2079                sk->sk_state = IUCV_DISCONN;
2080                sk->sk_state_change(sk);
2081        }
2082        bh_unlock_sock(sk);
2083out:
2084        kfree_skb(skb);
2085        return NET_RX_SUCCESS;
2086}
2087
2088/**
2089 * afiucv_hs_callback_win() - react on received WIN
2090 **/
2091static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2092{
2093        struct iucv_sock *iucv = iucv_sk(sk);
2094        struct af_iucv_trans_hdr *trans_hdr =
2095                                        (struct af_iucv_trans_hdr *)skb->data;
2096
2097        if (!iucv)
2098                return NET_RX_SUCCESS;
2099
2100        if (sk->sk_state != IUCV_CONNECTED)
2101                return NET_RX_SUCCESS;
2102
2103        atomic_sub(trans_hdr->window, &iucv->msg_sent);
2104        iucv_sock_wake_msglim(sk);
2105        return NET_RX_SUCCESS;
2106}
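/* HiperSockets flow control, for context: sendmsg() increments msg_sent
 * and blocks once it reaches the peer's advertised limit; recvmsg()
 * increments msg_recv and, after msglimit/2 consumed messages, emits an
 * AF_IUCV_FLAG_WIN frame carrying that count in the window field.  The
 * subtraction above then credits the sender again.  E.g. with a message
 * limit of 16, a WIN frame after 8 consumed messages frees 8 send slots.
 */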
2107
2108/**
2109 * afiucv_hs_callback_rx() - react on received data
2110 **/
2111static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2112{
2113        struct iucv_sock *iucv = iucv_sk(sk);
2114
2115        if (!iucv) {
2116                kfree_skb(skb);
2117                return NET_RX_SUCCESS;
2118        }
2119
2120        if (sk->sk_state != IUCV_CONNECTED) {
2121                kfree_skb(skb);
2122                return NET_RX_SUCCESS;
2123        }
2124
2125        if (sk->sk_shutdown & RCV_SHUTDOWN) {
2126                kfree_skb(skb);
2127                return NET_RX_SUCCESS;
2128        }
2129
2130        /* strip the transport header and initialise the skb control block */
2131        skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2132        skb_reset_transport_header(skb);
2133        skb_reset_network_header(skb);
2134        IUCV_SKB_CB(skb)->offset = 0;
2135        if (sk_filter(sk, skb)) {
2136                atomic_inc(&sk->sk_drops);      /* skb rejected by filter */
2137                kfree_skb(skb);
2138                return NET_RX_SUCCESS;
2139        }
2140
2141        spin_lock(&iucv->message_q.lock);
2142        if (skb_queue_empty(&iucv->backlog_skb_q)) {
2143                if (__sock_queue_rcv_skb(sk, skb))
2144                        /* handle rcv queue full */
2145                        skb_queue_tail(&iucv->backlog_skb_q, skb);
2146        } else
2147                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
2148        spin_unlock(&iucv->message_q.lock);
2149        return NET_RX_SUCCESS;
2150}
2151
2152/**
2153 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
2154 *                   transport
2155 *                   called from netif RX softirq
2156 **/
2157static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2158        struct packet_type *pt, struct net_device *orig_dev)
2159{
2160        struct sock *sk;
2161        struct iucv_sock *iucv;
2162        struct af_iucv_trans_hdr *trans_hdr;
2163        char nullstring[8];
2164        int err = 0;
2165
2166        if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
2167                WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
2168                          (int)skb->len,
2169                          (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr)));
2170                kfree_skb(skb);
2171                return NET_RX_SUCCESS;
2172        }
2173        if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr)))
2174                if (skb_linearize(skb)) {
2175                        WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d",
2176                                  (int)skb->len);
2177                        kfree_skb(skb);
2178                        return NET_RX_SUCCESS;
2179                }
2180        skb_pull(skb, ETH_HLEN);
2181        trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
2182        EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2183        EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2184        EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2185        EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2186        memset(nullstring, 0, sizeof(nullstring));
2187        iucv = NULL;
2188        sk = NULL;
2189        read_lock(&iucv_sk_list.lock);
2190        sk_for_each(sk, &iucv_sk_list.head) {
2191                if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2192                        if ((!memcmp(&iucv_sk(sk)->src_name,
2193                                     trans_hdr->destAppName, 8)) &&
2194                            (!memcmp(&iucv_sk(sk)->src_user_id,
2195                                     trans_hdr->destUserID, 8)) &&
2196                            (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2197                            (!memcmp(&iucv_sk(sk)->dst_user_id,
2198                                     nullstring, 8))) {
2199                                iucv = iucv_sk(sk);
2200                                break;
2201                        }
2202                } else {
2203                        if ((!memcmp(&iucv_sk(sk)->src_name,
2204                                     trans_hdr->destAppName, 8)) &&
2205                            (!memcmp(&iucv_sk(sk)->src_user_id,
2206                                     trans_hdr->destUserID, 8)) &&
2207                            (!memcmp(&iucv_sk(sk)->dst_name,
2208                                     trans_hdr->srcAppName, 8)) &&
2209                            (!memcmp(&iucv_sk(sk)->dst_user_id,
2210                                     trans_hdr->srcUserID, 8))) {
2211                                iucv = iucv_sk(sk);
2212                                break;
2213                        }
2214                }
2215        }
2216        read_unlock(&iucv_sk_list.lock);
2217        if (!iucv)
2218                sk = NULL;
2219
2220        /* No matching socket found.  Open question: how should we respond
2221         * without a socket context?
2222         * 1) send without a sock, with no send-rc checking?
2223         * 2) introduce a default sock to handle these cases?
2224         *
2225         * SYN -> send SYN|ACK in the good case, SYN|FIN in the bad case
2226         * data -> send FIN
2227         * SYN|ACK, SYN|FIN, FIN -> no action? */
2228
2229        switch (trans_hdr->flags) {
2230        case AF_IUCV_FLAG_SYN:
2231                /* connect request */
2232                err = afiucv_hs_callback_syn(sk, skb);
2233                break;
2234        case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2235                /* connect request confirmed */
2236                err = afiucv_hs_callback_synack(sk, skb);
2237                break;
2238        case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2239                /* connect request refused */
2240                err = afiucv_hs_callback_synfin(sk, skb);
2241                break;
2242        case (AF_IUCV_FLAG_FIN):
2243                /* close request */
2244                err = afiucv_hs_callback_fin(sk, skb);
2245                break;
2246        case (AF_IUCV_FLAG_WIN):
2247                err = afiucv_hs_callback_win(sk, skb);
2248                if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2249                        kfree_skb(skb);
2250                        break;
2251                }
2252                /* fall through and receive non-zero length data */
2253        case (AF_IUCV_FLAG_SHT):
2254                /* shutdown request */
2255                /* fall through and receive zero length data */
2256        case 0:
2257                /* plain data frame */
2258                IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
2259                err = afiucv_hs_callback_rx(sk, skb);
2260                break;
2261        default:
2262                kfree_skb(skb); /* unknown flag combination: drop frame */
2263        }
2264
2265        return err;
2266}
2267
2268/**
2269 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
2270 *                                 transport
2271 **/
2272static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2273                                        enum iucv_tx_notify n)
2274{
2275        struct sock *isk = skb->sk;
2276        struct sock *sk = NULL;
2277        struct iucv_sock *iucv = NULL;
2278        struct sk_buff_head *list;
2279        struct sk_buff *list_skb;
2280        struct sk_buff *nskb;
2281        unsigned long flags;
2282
2283        read_lock_irqsave(&iucv_sk_list.lock, flags);
2284        sk_for_each(sk, &iucv_sk_list.head)
2285                if (sk == isk) {
2286                        iucv = iucv_sk(sk);
2287                        break;
2288                }
2289        read_unlock_irqrestore(&iucv_sk_list.lock, flags);
2290
2291        if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2292                return;
2293
2294        list = &iucv->send_skb_q;
2295        spin_lock_irqsave(&list->lock, flags);
2296        if (skb_queue_empty(list))
2297                goto out_unlock;
2298        list_skb = list->next;
2299        nskb = list_skb->next;
2300        while (list_skb != (struct sk_buff *)list) {
2301                if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2302                        switch (n) {
2303                        case TX_NOTIFY_OK:
2304                                __skb_unlink(list_skb, list);
2305                                kfree_skb(list_skb);
2306                                iucv_sock_wake_msglim(sk);
2307                                break;
2308                        case TX_NOTIFY_PENDING:
2309                                atomic_inc(&iucv->pendings);
2310                                break;
2311                        case TX_NOTIFY_DELAYED_OK:
2312                                __skb_unlink(list_skb, list);
2313                                atomic_dec(&iucv->pendings);
2314                                if (atomic_read(&iucv->pendings) <= 0)
2315                                        iucv_sock_wake_msglim(sk);
2316                                kfree_skb(list_skb);
2317                                break;
2318                        case TX_NOTIFY_UNREACHABLE:
2319                        case TX_NOTIFY_DELAYED_UNREACHABLE:
2320                        case TX_NOTIFY_TPQFULL: /* not yet used */
2321                        case TX_NOTIFY_GENERALERROR:
2322                        case TX_NOTIFY_DELAYED_GENERALERROR:
2323                                __skb_unlink(list_skb, list);
2324                                kfree_skb(list_skb);
2325                                if (sk->sk_state == IUCV_CONNECTED) {
2326                                        sk->sk_state = IUCV_DISCONN;
2327                                        sk->sk_state_change(sk);
2328                                }
2329                                break;
2330                        }
2331                        break;
2332                }
2333                list_skb = nskb;
2334                nskb = nskb->next;
2335        }
2336out_unlock:
2337        spin_unlock_irqrestore(&list->lock, flags);
2338
2339        if (sk->sk_state == IUCV_CLOSING) {
2340                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
2341                        sk->sk_state = IUCV_CLOSED;
2342                        sk->sk_state_change(sk);
2343                }
2344        }
2346}
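/* TX notification handling above, for context: the send_skb_q entry is
 * matched by shared skb_shinfo() rather than by pointer, because the
 * queued skb is a clone of the one handed to the device.  OK and
 * DELAYED_OK free the entry and wake blocked senders; PENDING only
 * counts; the unreachable/error notifications drop the entry and move a
 * connected socket to IUCV_DISCONN.
 */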
2347
2348/*
2349 * afiucv_netdev_event: handle netdev notifier chain events
2350 */
2351static int afiucv_netdev_event(struct notifier_block *this,
2352                               unsigned long event, void *ptr)
2353{
2354        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2355        struct sock *sk;
2356        struct iucv_sock *iucv;
2357
2358        switch (event) {
2359        case NETDEV_REBOOT:
2360        case NETDEV_GOING_DOWN:
2361                sk_for_each(sk, &iucv_sk_list.head) {
2362                        iucv = iucv_sk(sk);
2363                        if ((iucv->hs_dev == event_dev) &&
2364                            (sk->sk_state == IUCV_CONNECTED)) {
2365                                if (event == NETDEV_GOING_DOWN)
2366                                        iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
2367                                sk->sk_state = IUCV_DISCONN;
2368                                sk->sk_state_change(sk);
2369                        }
2370                }
2371                break;
2372        case NETDEV_DOWN:
2373        case NETDEV_UNREGISTER:
2374        default:
2375                break;
2376        }
2377        return NOTIFY_DONE;
2378}
2379
2380static struct notifier_block afiucv_netdev_notifier = {
2381        .notifier_call = afiucv_netdev_event,
2382};
2383
2384static const struct proto_ops iucv_sock_ops = {
2385        .family         = PF_IUCV,
2386        .owner          = THIS_MODULE,
2387        .release        = iucv_sock_release,
2388        .bind           = iucv_sock_bind,
2389        .connect        = iucv_sock_connect,
2390        .listen         = iucv_sock_listen,
2391        .accept         = iucv_sock_accept,
2392        .getname        = iucv_sock_getname,
2393        .sendmsg        = iucv_sock_sendmsg,
2394        .recvmsg        = iucv_sock_recvmsg,
2395        .poll           = iucv_sock_poll,
2396        .ioctl          = sock_no_ioctl,
2397        .mmap           = sock_no_mmap,
2398        .socketpair     = sock_no_socketpair,
2399        .shutdown       = iucv_sock_shutdown,
2400        .setsockopt     = iucv_sock_setsockopt,
2401        .getsockopt     = iucv_sock_getsockopt,
2402};
2403
2404static const struct net_proto_family iucv_sock_family_ops = {
2405        .family = AF_IUCV,
2406        .owner  = THIS_MODULE,
2407        .create = iucv_sock_create,
2408};
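/* Illustration only, not part of this file: the family registered below is
 * used like any other socket family.  struct sockaddr_iucv comes from the
 * AF_IUCV uapi header; siucv_user_id and siucv_name are 8 bytes,
 * blank-padded ("LNXUSER"/"APPSRV" below are hypothetical names).
 *
 *	struct sockaddr_iucv addr = { .siucv_family = AF_IUCV };
 *	memcpy(addr.siucv_user_id, "LNXUSER ", 8);
 *	memcpy(addr.siucv_name,    "APPSRV  ", 8);
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 10);
 */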
2409
2410static struct packet_type iucv_packet_type = {
2411        .type = cpu_to_be16(ETH_P_AF_IUCV),
2412        .func = afiucv_hs_rcv,
2413};
2414
2415static int afiucv_iucv_init(void)
2416{
2417        int err;
2418
2419        err = pr_iucv->iucv_register(&af_iucv_handler, 0);
2420        if (err)
2421                goto out;
2422        /* establish dummy device */
2423        af_iucv_driver.bus = pr_iucv->bus;
2424        err = driver_register(&af_iucv_driver);
2425        if (err)
2426                goto out_iucv;
2427        af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2428        if (!af_iucv_dev) {
2429                err = -ENOMEM;
2430                goto out_driver;
2431        }
2432        dev_set_name(af_iucv_dev, "af_iucv");
2433        af_iucv_dev->bus = pr_iucv->bus;
2434        af_iucv_dev->parent = pr_iucv->root;
2435        af_iucv_dev->release = (void (*)(struct device *))kfree;
2436        af_iucv_dev->driver = &af_iucv_driver;
2437        err = device_register(af_iucv_dev);
2438        if (err)
2439                goto out_driver;
2440        return 0;
2441
2442out_driver:
2443        driver_unregister(&af_iucv_driver);
2444out_iucv:
2445        pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2446out:
2447        return err;
2448}
2449
2450static int __init afiucv_init(void)
2451{
2452        int err;
2453
2454        if (MACHINE_IS_VM) {
2455                cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2456                if (unlikely(err)) {
2457                        WARN_ON(err);
2458                        err = -EPROTONOSUPPORT;
2459                        goto out;
2460                }
2461
2462                pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2463                if (!pr_iucv) {
2464                        printk(KERN_WARNING "iucv_if lookup failed\n");
2465                        memset(&iucv_userid, 0, sizeof(iucv_userid));
2466                }
2467        } else {
2468                memset(&iucv_userid, 0, sizeof(iucv_userid));
2469                pr_iucv = NULL;
2470        }
2471
2472        err = proto_register(&iucv_proto, 0);
2473        if (err)
2474                goto out;
2475        err = sock_register(&iucv_sock_family_ops);
2476        if (err)
2477                goto out_proto;
2478
2479        if (pr_iucv) {
2480                err = afiucv_iucv_init();
2481                if (err)
2482                        goto out_sock;
2483        } else
2484                register_netdevice_notifier(&afiucv_netdev_notifier);
2485        dev_add_pack(&iucv_packet_type);
2486        return 0;
2487
2488out_sock:
2489        sock_unregister(PF_IUCV);
2490out_proto:
2491        proto_unregister(&iucv_proto);
2492out:
2493        if (pr_iucv)
2494                symbol_put(iucv_if);
2495        return err;
2496}
2497
2498static void __exit afiucv_exit(void)
2499{
2500        if (pr_iucv) {
2501                device_unregister(af_iucv_dev);
2502                driver_unregister(&af_iucv_driver);
2503                pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2504                symbol_put(iucv_if);
2505        } else
2506                unregister_netdevice_notifier(&afiucv_netdev_notifier);
2507        dev_remove_pack(&iucv_packet_type);
2508        sock_unregister(PF_IUCV);
2509        proto_unregister(&iucv_proto);
2510}
2511
2512module_init(afiucv_init);
2513module_exit(afiucv_exit);
2514
2515MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
2516MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
2517MODULE_VERSION(VERSION);
2518MODULE_LICENSE("GPL");
2519MODULE_ALIAS_NETPROTO(PF_IUCV);