linux/net/iucv/af_iucv.c
/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
 *              Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *              Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
        .name           = "AF_IUCV",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE     (sizeof(((struct iucv_message *)0)->class))

#define __iucv_sock_wait(sk, condition, timeo, ret)                     \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
        long __timeo = timeo;                                           \
        ret = 0;                                                        \
        prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);     \
        while (!(condition)) {                                          \
                if (!__timeo) {                                         \
                        ret = -EAGAIN;                                  \
                        break;                                          \
                }                                                       \
                if (signal_pending(current)) {                          \
                        ret = sock_intr_errno(__timeo);                 \
                        break;                                          \
                }                                                       \
                release_sock(sk);                                       \
                __timeo = schedule_timeout(__timeo);                    \
                lock_sock(sk);                                          \
                ret = sock_error(sk);                                   \
                if (ret)                                                \
                        break;                                          \
        }                                                               \
        finish_wait(sk_sleep(sk), &__wait);                             \
} while (0)

#define iucv_sock_wait(sk, condition, timeo)                            \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __iucv_sock_wait(sk, condition, timeo, __ret);          \
        __ret;                                                          \
})
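
/* Usage sketch (illustrative; mirrors the call in iucv_sock_connect()
 * below): the caller holds the socket lock, and __iucv_sock_wait()
 * drops and re-takes it around each schedule_timeout() so that other
 * contexts may update sk->sk_state while we sleep.
 *
 *      err = iucv_sock_wait(sk,
 *                           iucv_sock_in_state(sk, IUCV_CONNECTED,
 *                                              IUCV_DISCONN),
 *                           sock_sndtimeo(sk, flags & O_NONBLOCK));
 */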

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
        struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                   struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Callback functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);

static struct iucv_sock_list iucv_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
        .autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
        .path_pending     = iucv_callback_connreq,
        .path_complete    = iucv_callback_connack,
        .path_severed     = iucv_callback_connrej,
        .message_pending  = iucv_callback_rx,
        .message_complete = iucv_callback_txdone,
        .path_quiesced    = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
        memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
        memcpy(&dst[8], src, 8);
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
        return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:        AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
        struct iucv_sock *iucv;
        struct sock *sk;
        int err = 0;

#ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
        read_lock(&iucv_sk_list.lock);
        sk_for_each(sk, &iucv_sk_list.head) {
                iucv = iucv_sk(sk);
                switch (sk->sk_state) {
                case IUCV_DISCONN:
                case IUCV_CLOSING:
                case IUCV_CONNECTED:
                        iucv_sever_path(sk, 0);
                        break;
                case IUCV_OPEN:
                case IUCV_BOUND:
                case IUCV_LISTEN:
                case IUCV_CLOSED:
                default:
                        break;
                }
                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);
        }
        read_unlock(&iucv_sk_list.lock);
        return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:        AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
        struct sock *sk;

#ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
        read_lock(&iucv_sk_list.lock);
        sk_for_each(sk, &iucv_sk_list.head) {
                switch (sk->sk_state) {
                case IUCV_CONNECTED:
                        sk->sk_err = EPIPE;
                        sk->sk_state = IUCV_DISCONN;
                        sk->sk_state_change(sk);
                        break;
                case IUCV_DISCONN:
                case IUCV_CLOSING:
                case IUCV_LISTEN:
                case IUCV_BOUND:
                case IUCV_OPEN:
                default:
                        break;
                }
        }
        read_unlock(&iucv_sk_list.lock);
        return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
        .prepare = afiucv_pm_prepare,
        .complete = afiucv_pm_complete,
        .freeze = afiucv_pm_freeze,
        .thaw = afiucv_pm_restore_thaw,
        .restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
        .owner = THIS_MODULE,
        .name = "afiucv",
        .bus  = NULL,
        .pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:        Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *      PRMDATA[0..6]   socket data (max 7 bytes);
 *      PRMDATA[7]      socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
        size_t datalen;

        if (msg->flags & IUCV_IPRMDATA) {
                datalen = 0xff - msg->rmmsg[7];
                return (datalen < 8) ? datalen : 8;
        }
        return msg->length;
}
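
/* Worked example of the IPRM length convention (illustrative, not part
 * of the original source): a 5-byte payload is announced as
 * PRMDATA[7] = 0xff - 5 = 0xfa, and iucv_msg_length() recovers
 * 0xff - 0xfa = 5.  The special iprm_shutdown message carries
 * PRMDATA[7] = 0x01, i.e. a nominal length of 0xfe > 7, so the function
 * caps the result at 8 and the receive path can recognize the
 * notification (see iucv_process_message()).
 */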

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:         sock structure
 * @state:      first iucv sk state
 * @state2:     second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
        return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:         sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);

        if (sk->sk_state != IUCV_CONNECTED)
                return 1;
        if (iucv->transport == AF_IUCV_TRANS_IUCV)
                return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
        else
                return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
                        (atomic_read(&iucv->pendings) <= 0));
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                   struct sk_buff *skb, u8 flags)
{
        struct iucv_sock *iucv = iucv_sk(sock);
        struct af_iucv_trans_hdr *phs_hdr;
        struct sk_buff *nskb;
        int err, confirm_recv = 0;

        memset(skb->head, 0, ETH_HLEN);
        phs_hdr = skb_push(skb, sizeof(struct af_iucv_trans_hdr));
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

        phs_hdr->magic = ETH_P_AF_IUCV;
        phs_hdr->version = 1;
        phs_hdr->flags = flags;
        if (flags == AF_IUCV_FLAG_SYN)
                phs_hdr->window = iucv->msglimit;
        else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
                confirm_recv = atomic_read(&iucv->msg_recv);
                phs_hdr->window = confirm_recv;
                if (confirm_recv)
                        phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
        }
        memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
        memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
        memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
        memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
        ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
        ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
        ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
        ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
        if (imsg)
                memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

        skb->dev = iucv->hs_dev;
        if (!skb->dev)
                return -ENODEV;
        if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
                return -ENETDOWN;
        if (skb->len > skb->dev->mtu) {
                if (sock->sk_type == SOCK_SEQPACKET)
                        return -EMSGSIZE;
                else
                        skb_trim(skb, skb->dev->mtu);
        }
        skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return -ENOMEM;
        skb_queue_tail(&iucv->send_skb_q, nskb);
        err = dev_queue_xmit(skb);
        if (net_xmit_eval(err)) {
                skb_unlink(nskb, &iucv->send_skb_q);
                kfree_skb(nskb);
        } else {
                atomic_sub(confirm_recv, &iucv->msg_recv);
                WARN_ON(atomic_read(&iucv->msg_recv) < 0);
        }
        return net_xmit_eval(err);
}
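
/* On-wire layout built by afiucv_hs_send() (summary derived from the
 * skb_push() calls above; illustrative only):
 *
 *      +------------+--------------------------+------------------+
 *      | ETH header | struct af_iucv_trans_hdr | message payload  |
 *      | (ETH_HLEN) | magic, version, flags,   | (absent for pure |
 *      |            | window, IDs, iucv_hdr    |  control frames) |
 *      +------------+--------------------------+------------------+
 *
 * The frame is sent with protocol ETH_P_AF_IUCV; a clone is kept on
 * send_skb_q until the transmit notification arrives.
 */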

static struct sock *__iucv_get_sock_by_name(char *nm)
{
        struct sock *sk;

        sk_for_each(sk, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_error_queue);

        sk_mem_reclaim(sk);

        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_err("Attempt to release alive iucv socket %p\n", sk);
                return;
        }

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(refcount_read(&sk->sk_wmem_alloc));
        WARN_ON(sk->sk_wmem_queued);
        WARN_ON(sk->sk_forward_alloc);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
        struct sock *sk;

        /* Close non-accepted connections */
        while ((sk = iucv_accept_dequeue(parent, NULL))) {
                iucv_sock_close(sk);
                iucv_sock_kill(sk);
        }

        parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;

        iucv_sock_unlink(&iucv_sk_list, sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_path *path = iucv->path;

        if (iucv->path) {
                iucv->path = NULL;
                if (with_user_data) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
                        ASCEBC(user_data, sizeof(user_data));
                        pr_iucv->path_sever(path, user_data);
                } else
                        pr_iucv->path_sever(path, NULL);
                iucv_path_free(path);
        }
}

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
        int err = 0;
        int blen;
        struct sk_buff *skb;
        u8 shutdown = 0;

        blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                /* controlling flags should be sent anyway */
                shutdown = sk->sk_shutdown;
                sk->sk_shutdown &= RCV_SHUTDOWN;
        }
        skb = sock_alloc_send_skb(sk, blen, 1, &err);
        if (skb) {
                skb_reserve(skb, blen);
                err = afiucv_hs_send(NULL, sk, skb, flags);
        }
        if (shutdown)
                sk->sk_shutdown = shutdown;
        return err;
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned long timeo;
        int err = 0;

        lock_sock(sk);

        switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;

        case IUCV_CONNECTED:
                if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                        err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
                        sk->sk_state = IUCV_DISCONN;
                        sk->sk_state_change(sk);
                }
        case IUCV_DISCONN:   /* fall through */
                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);

                if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                timeo = sk->sk_lingertime;
                        else
                                timeo = IUCV_DISCONN_TIMEOUT;
                        iucv_sock_wait(sk,
                                        iucv_sock_in_state(sk, IUCV_CLOSED, 0),
                                        timeo);
                }

        case IUCV_CLOSING:   /* fall through */
                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);

                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);

                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);

        default:   /* fall through */
                iucv_sever_path(sk, 1);
        }

        if (iucv->hs_dev) {
                dev_put(iucv->hs_dev);
                iucv->hs_dev = NULL;
                sk->sk_bound_dev_if = 0;
        }

        /* mark socket for deletion by iucv_sock_kill() */
        sock_set_flag(sk, SOCK_ZAPPED);

        release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
        if (parent) {
                sk->sk_type = parent->sk_type;
                security_sk_clone(parent, sk);
        }
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
        struct sock *sk;
        struct iucv_sock *iucv;

        sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
        if (!sk)
                return NULL;
        iucv = iucv_sk(sk);

        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv->accept_q);
        spin_lock_init(&iucv->accept_q_lock);
        skb_queue_head_init(&iucv->send_skb_q);
        INIT_LIST_HEAD(&iucv->message_q.list);
        spin_lock_init(&iucv->message_q.lock);
        skb_queue_head_init(&iucv->backlog_skb_q);
        iucv->send_tag = 0;
        atomic_set(&iucv->pendings, 0);
        iucv->flags = 0;
        iucv->msglimit = 0;
        atomic_set(&iucv->msg_sent, 0);
        atomic_set(&iucv->msg_recv, 0);
        iucv->path = NULL;
        iucv->sk_txnotify = afiucv_hs_callback_txnotify;
        memset(&iucv->src_user_id, 0, 32);
        if (pr_iucv)
                iucv->transport = AF_IUCV_TRANS_IUCV;
        else
                iucv->transport = AF_IUCV_TRANS_HIPER;

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
        sk->sk_allocation = GFP_DMA;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
                            int kern)
{
        struct sock *sk;

        if (protocol && protocol != PF_IUCV)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        switch (sock->type) {
        case SOCK_STREAM:
                sock->ops = &iucv_sock_ops;
                break;
        case SOCK_SEQPACKET:
                /* currently, proto ops can handle both sk types */
                sock->ops = &iucv_sock_ops;
                break;
        default:
                return -ESOCKTNOSUPPORT;
        }

        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
        if (!sk)
                return -ENOMEM;

        iucv_sock_init(sk, NULL);

        return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(parent);

        sock_hold(sk);
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        sk_acceptq_removed(iucv_sk(sk)->parent);
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);

                if (sk->sk_state == IUCV_CLOSED) {
                        iucv_accept_unlink(sk);
                        release_sock(sk);
                        continue;
                }

                if (sk->sk_state == IUCV_CONNECTED ||
                    sk->sk_state == IUCV_DISCONN ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }
        return NULL;
}

static void __iucv_auto_name(struct iucv_sock *iucv)
{
        char name[12];

        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
                        atomic_inc_return(&iucv_sk_list.autobind_name));
        }
        memcpy(iucv->src_name, name, 8);
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        int err = 0;
        struct net_device *dev;
        char uid[9];

        /* Verify the input sockaddr */
        if (addr_len < sizeof(struct sockaddr_iucv) ||
            addr->sa_family != AF_IUCV)
                return -EINVAL;

        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
                goto done;
        }

        write_lock_bh(&iucv_sk_list.lock);

        iucv = iucv_sk(sk);
        if (__iucv_get_sock_by_name(sa->siucv_name)) {
                err = -EADDRINUSE;
                goto done_unlock;
        }
        if (iucv->path)
                goto done_unlock;

        /* Bind the socket */
        if (pr_iucv)
                if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
                        goto vm_bind; /* VM IUCV transport */

        /* try hiper transport */
        memcpy(uid, sa->siucv_user_id, sizeof(uid));
        ASCEBC(uid, 8);
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
                if (!memcmp(dev->perm_addr, uid, 8)) {
                        memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
                        /* Check for uninitialized siucv_name */
                        if (strncmp(sa->siucv_name, "        ", 8) == 0)
                                __iucv_auto_name(iucv);
                        else
                                memcpy(iucv->src_name, sa->siucv_name, 8);
                        sk->sk_bound_dev_if = dev->ifindex;
                        iucv->hs_dev = dev;
                        dev_hold(dev);
                        sk->sk_state = IUCV_BOUND;
                        iucv->transport = AF_IUCV_TRANS_HIPER;
                        if (!iucv->msglimit)
                                iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
                        rcu_read_unlock();
                        goto done_unlock;
                }
        }
        rcu_read_unlock();
vm_bind:
        if (pr_iucv) {
                /* use local userid for backward compat */
                memcpy(iucv->src_name, sa->siucv_name, 8);
                memcpy(iucv->src_user_id, iucv_userid, 8);
                sk->sk_state = IUCV_BOUND;
                iucv->transport = AF_IUCV_TRANS_IUCV;
                if (!iucv->msglimit)
                        iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
                goto done_unlock;
        }
        /* found no dev to bind */
        err = -ENODEV;
done_unlock:
        /* Release the socket list lock */
        write_unlock_bh(&iucv_sk_list.lock);
done:
        release_sock(sk);
        return err;
}
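
/* Userspace view of the bind above (illustrative sketch, not part of
 * this file): siucv_user_id and siucv_name are fixed 8-byte,
 * blank-padded fields; an all-blank siucv_name makes the kernel pick a
 * name via __iucv_auto_name() on the HiperSockets path.
 *
 *      struct sockaddr_iucv addr = {
 *              .siucv_family  = AF_IUCV,
 *              .siucv_user_id = "MYVMID  ",    (z/VM user ID, blank padded)
 *              .siucv_name    = "MYAPP   ",    (application name)
 *      };
 *      int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *      bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */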

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        int err = 0;

        if (unlikely(!pr_iucv))
                return -EPROTO;

        memcpy(iucv->src_user_id, iucv_userid, 8);

        write_lock_bh(&iucv_sk_list.lock);
        __iucv_auto_name(iucv);
        write_unlock_bh(&iucv_sk_list.lock);

        if (!iucv->msglimit)
                iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

        return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned char user_data[16];
        int err;

        high_nmcpy(user_data, sa->siucv_name);
        low_nmcpy(user_data, iucv->src_name);
        ASCEBC(user_data, sizeof(user_data));

        /* Create path. */
        iucv->path = iucv_path_alloc(iucv->msglimit,
                                     IUCV_IPRMDATA, GFP_KERNEL);
        if (!iucv->path) {
                err = -ENOMEM;
                goto done;
        }
        err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
                                    sa->siucv_user_id, NULL, user_data,
                                    sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
                switch (err) {
                case 0x0b:      /* Target communicator is not logged on */
                        err = -ENETUNREACH;
                        break;
                case 0x0d:      /* Max connections for this guest exceeded */
                case 0x0e:      /* Max connections for target guest exceeded */
                        err = -EAGAIN;
                        break;
                case 0x0f:      /* Missing IUCV authorization */
                        err = -EACCES;
                        break;
                default:
                        err = -ECONNREFUSED;
                        break;
                }
        }
done:
        return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                             int alen, int flags)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;

        if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
                return -EINVAL;

        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
                return -EBADFD;

        if (sk->sk_state == IUCV_OPEN &&
            iucv->transport == AF_IUCV_TRANS_HIPER)
                return -EBADFD; /* explicit bind required */

        if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
                return -EINVAL;

        if (sk->sk_state == IUCV_OPEN) {
                err = iucv_sock_autobind(sk);
                if (unlikely(err))
                        return err;
        }

        lock_sock(sk);

        /* Set the destination information */
        memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
        memcpy(iucv->dst_name, sa->siucv_name, 8);

        if (iucv->transport == AF_IUCV_TRANS_HIPER)
                err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
        else
                err = afiucv_path_connect(sock, addr);
        if (err)
                goto done;

        if (sk->sk_state != IUCV_CONNECTED)
                err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
                                                            IUCV_DISCONN),
                                     sock_sndtimeo(sk, flags & O_NONBLOCK));

        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
                err = -ECONNREFUSED;

        if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
                iucv_sever_path(sk, 0);

done:
        release_sock(sk);
        return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sk->sk_state != IUCV_BOUND)
                goto done;

        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto done;

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = IUCV_LISTEN;
        err = 0;

done:
        release_sock(sk);
        return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags, bool kern)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;

        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
                goto done;
        }

        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);

        if (err)
                goto done;

        newsock->state = SS_CONNECTED;

done:
        release_sock(sk);
        return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                             int *len, int peer)
{
        struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);

        addr->sa_family = AF_IUCV;
        *len = sizeof(struct sockaddr_iucv);

        if (peer) {
                memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
                memcpy(siucv->siucv_name, iucv->dst_name, 8);
        } else {
                memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
                memcpy(siucv->siucv_name, iucv->src_name, 8);
        }
        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
        memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

        return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:       IUCV path
 * @msg:        Pointer to a struct iucv_message
 * @skb:        The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
                          struct sk_buff *skb)
{
        u8 prmdata[8];

        memcpy(prmdata, (void *) skb->data, skb->len);
        prmdata[7] = 0xff - (u8) skb->len;
        return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
                                 (void *) prmdata, 8);
}
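
/* Worked example (illustrative): for a 5-byte payload "hello",
 * iucv_send_iprm() builds the 8-byte parameter list
 *      prmdata[0..4] = 'h','e','l','l','o'
 *      prmdata[5..6] = unused (only skb->len bytes are copied in)
 *      prmdata[7]    = 0xff - 5 = 0xfa
 * which the receiver maps back to a length of 5 via iucv_msg_length().
 */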

static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                             size_t len)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        size_t headroom = 0;
        size_t linear;
        struct sk_buff *skb;
        struct iucv_message txmsg = {0};
        struct cmsghdr *cmsg;
        int cmsg_done;
        long timeo;
        char user_id[9];
        char appl_id[9];
        int err;
        int noblock = msg->msg_flags & MSG_DONTWAIT;

        err = sock_error(sk);
        if (err)
                return err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        /* SOCK_SEQPACKET: we do not support segmented records */
        if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
                return -EOPNOTSUPP;

        lock_sock(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        /* Return if the socket is not in connected state */
        if (sk->sk_state != IUCV_CONNECTED) {
                err = -ENOTCONN;
                goto out;
        }

        /* initialize defaults */
        cmsg_done   = 0;        /* check for duplicate headers */
        txmsg.class = 0;

        /* iterate over control messages */
        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg)) {
                        err = -EINVAL;
                        goto out;
                }

                if (cmsg->cmsg_level != SOL_IUCV)
                        continue;

                if (cmsg->cmsg_type & cmsg_done) {
                        err = -EINVAL;
                        goto out;
                }
                cmsg_done |= cmsg->cmsg_type;

                switch (cmsg->cmsg_type) {
                case SCM_IUCV_TRGCLS:
                        if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
                                err = -EINVAL;
                                goto out;
                        }

                        /* set iucv message target class */
                        memcpy(&txmsg.class,
                                (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

                        break;

                default:
                        err = -EINVAL;
                        goto out;
                }
        }

        /* allocate one skb for each iucv message:
         * this is fine for SOCK_SEQPACKET (unless we want to support
         * segmented records using the MSG_EOR flag), but
         * for SOCK_STREAM we might want to improve it in future */
        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
                linear = len;
        } else {
                if (len < PAGE_SIZE) {
                        linear = len;
                } else {
                        /* In nonlinear "classic" iucv skb,
                         * reserve space for iucv_array
                         */
                        headroom = sizeof(struct iucv_array) *
                                   (MAX_SKB_FRAGS + 1);
                        linear = PAGE_SIZE - headroom;
                }
        }
        skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
                                   noblock, &err, 0);
        if (!skb)
                goto out;
        if (headroom)
                skb_reserve(skb, headroom);
        skb_put(skb, linear);
        skb->len = len;
        skb->data_len = len - linear;
        err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
        if (err)
                goto fail;

        /* wait if the limit of outstanding messages for this iucv path
         * has been reached */
        timeo = sock_sndtimeo(sk, noblock);
        err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
        if (err)
                goto fail;

        /* return -ECONNRESET if the socket is no longer connected */
        if (sk->sk_state != IUCV_CONNECTED) {
                err = -ECONNRESET;
                goto fail;
        }

        /* increment and save iucv message tag for msg_completion cbk */
        txmsg.tag = iucv->send_tag++;
        IUCV_SKB_CB(skb)->tag = txmsg.tag;

        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                atomic_inc(&iucv->msg_sent);
                err = afiucv_hs_send(&txmsg, sk, skb, 0);
                if (err) {
                        atomic_dec(&iucv->msg_sent);
                        goto fail;
                }
        } else { /* Classic VM IUCV transport */
                skb_queue_tail(&iucv->send_skb_q, skb);

                if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
                    skb->len <= 7) {
                        err = iucv_send_iprm(iucv->path, &txmsg, skb);

                        /* on success: there is no message_complete callback */
                        /* for an IPRMDATA msg; remove skb from send queue   */
                        if (err == 0) {
                                skb_unlink(skb, &iucv->send_skb_q);
                                kfree_skb(skb);
                        }

                        /* this error should never happen since the     */
                        /* IUCV_IPRMDATA path flag is set... sever path */
                        if (err == 0x15) {
                                pr_iucv->path_sever(iucv->path, NULL);
                                skb_unlink(skb, &iucv->send_skb_q);
                                err = -EPIPE;
                                goto fail;
                        }
                } else if (skb_is_nonlinear(skb)) {
                        struct iucv_array *iba = (struct iucv_array *)skb->head;
                        int i;

                        /* skip iucv_array lying in the headroom */
                        iba[0].address = (u32)(addr_t)skb->data;
                        iba[0].length = (u32)skb_headlen(skb);
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                                iba[i + 1].address =
                                        (u32)(addr_t)skb_frag_address(frag);
                                iba[i + 1].length = (u32)skb_frag_size(frag);
                        }
                        err = pr_iucv->message_send(iucv->path, &txmsg,
                                                    IUCV_IPBUFLST, 0,
                                                    (void *)iba, skb->len);
                } else { /* non-IPRM Linear skb */
                        err = pr_iucv->message_send(iucv->path, &txmsg,
                                        0, 0, (void *)skb->data, skb->len);
                }
                if (err) {
                        if (err == 3) {
                                user_id[8] = 0;
                                memcpy(user_id, iucv->dst_user_id, 8);
                                appl_id[8] = 0;
                                memcpy(appl_id, iucv->dst_name, 8);
                                pr_err(
                "Application %s on z/VM guest %s exceeds message limit\n",
                                        appl_id, user_id);
                                err = -EAGAIN;
                        } else {
                                err = -EPIPE;
                        }
                        skb_unlink(skb, &iucv->send_skb_q);
                        goto fail;
                }
        }

        release_sock(sk);
        return len;

fail:
        kfree_skb(skb);
out:
        release_sock(sk);
        return err;
}
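
/* Userspace sketch of the SCM_IUCV_TRGCLS handling above (illustrative,
 * not part of this file): the sender passes the 4-byte target class as
 * ancillary data at cmsg level SOL_IUCV, which is exactly the layout
 * checked by the for_each_cmsghdr() loop in iucv_sock_sendmsg().
 *
 *      u32 trgcls = 1;
 *      char ctl[CMSG_SPACE(sizeof(trgcls))];
 *      struct iovec iov = { .iov_base = data, .iov_len = datalen };
 *      struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *                          .msg_control = ctl,
 *                          .msg_controllen = sizeof(ctl) };
 *      struct cmsghdr *cm = CMSG_FIRSTHDR(&m);
 *
 *      cm->cmsg_level = SOL_IUCV;
 *      cm->cmsg_type  = SCM_IUCV_TRGCLS;
 *      cm->cmsg_len   = CMSG_LEN(sizeof(trgcls));
 *      memcpy(CMSG_DATA(cm), &trgcls, sizeof(trgcls));
 *      sendmsg(fd, &m, 0);
 */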

static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
        size_t headroom, linear;
        struct sk_buff *skb;
        int err;

        if (len < PAGE_SIZE) {
                headroom = 0;
                linear = len;
        } else {
                headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
                linear = PAGE_SIZE - headroom;
        }
        skb = alloc_skb_with_frags(headroom + linear, len - linear,
                                   0, &err, GFP_ATOMIC | GFP_DMA);
        WARN_ONCE(!skb,
                  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
                  len, err);
        if (skb) {
                if (headroom)
                        skb_reserve(skb, headroom);
                skb_put(skb, linear);
                skb->len = len;
                skb->data_len = len - linear;
        }
        return skb;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
                                 struct iucv_path *path,
                                 struct iucv_message *msg)
{
        int rc;
        unsigned int len;

        len = iucv_msg_length(msg);

        /* store msg target class in the second 4 bytes of skb ctrl buffer */
        /* Note: the first 4 bytes are reserved for msg tag */
        IUCV_SKB_CB(skb)->class = msg->class;

        /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
        if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
                if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
                        skb->data = NULL;
                        skb->len = 0;
                }
        } else {
                if (skb_is_nonlinear(skb)) {
                        struct iucv_array *iba = (struct iucv_array *)skb->head;
                        int i;

                        iba[0].address = (u32)(addr_t)skb->data;
                        iba[0].length = (u32)skb_headlen(skb);
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                                iba[i + 1].address =
                                        (u32)(addr_t)skb_frag_address(frag);
                                iba[i + 1].length = (u32)skb_frag_size(frag);
                        }
                        rc = pr_iucv->message_receive(path, msg,
                                              IUCV_IPBUFLST,
                                              (void *)iba, len, NULL);
                } else {
                        rc = pr_iucv->message_receive(path, msg,
                                              msg->flags & IUCV_IPRMDATA,
                                              skb->data, len, NULL);
                }
                if (rc) {
                        kfree_skb(skb);
                        return;
                }
                WARN_ON_ONCE(skb->len != len);
        }

        IUCV_SKB_CB(skb)->offset = 0;
        if (sk_filter(sk, skb)) {
                atomic_inc(&sk->sk_drops);      /* skb rejected by filter */
                kfree_skb(skb);
                return;
        }
        if (__sock_queue_rcv_skb(sk, skb))      /* handle rcv queue full */
                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *p, *n;

        list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
                skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
                if (!skb)
                        break;
                iucv_process_message(sk, skb, p->path, &p->msg);
                list_del(&p->list);
                kfree(p);
                if (!skb_queue_empty(&iucv->backlog_skb_q))
                        break;
        }
}

static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
                             size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned int copied, rlen;
        struct sk_buff *skb, *rskb, *cskb;
        int err = 0;
        u32 offset;

        if ((sk->sk_state == IUCV_DISCONN) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
            list_empty(&iucv->message_q.list))
                return 0;

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        /* receive/dequeue next skb:
         * the function understands MSG_PEEK and, thus, does not dequeue skb */
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
        }

        offset = IUCV_SKB_CB(skb)->offset;
        rlen   = skb->len - offset;             /* real length of skb */
        copied = min_t(unsigned int, rlen, len);
        if (!rlen)
                sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

        cskb = skb;
        if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return -EFAULT;
        }

        /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
        if (sk->sk_type == SOCK_SEQPACKET) {
                if (copied < rlen)
                        msg->msg_flags |= MSG_TRUNC;
                /* each iucv message contains a complete record */
                msg->msg_flags |= MSG_EOR;
        }

        /* create control message to store iucv msg target class:
         * get the trgcls from the control buffer of the skb due to
         * fragmentation of original iucv message. */
        err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
                       sizeof(IUCV_SKB_CB(skb)->class),
                       (void *)&IUCV_SKB_CB(skb)->class);
        if (err) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return err;
        }

        /* Mark read part of skb as used */
        if (!(flags & MSG_PEEK)) {

                /* SOCK_STREAM: re-queue skb if it contains unreceived data */
                if (sk->sk_type == SOCK_STREAM) {
                        if (copied < rlen) {
                                IUCV_SKB_CB(skb)->offset = offset + copied;
                                skb_queue_head(&sk->sk_receive_queue, skb);
                                goto done;
                        }
                }

                kfree_skb(skb);
                if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                        atomic_inc(&iucv->msg_recv);
                        if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
                                WARN_ON(1);
                                iucv_sock_close(sk);
                                return -EFAULT;
                        }
                }

                /* Queue backlog skbs */
                spin_lock_bh(&iucv->message_q.lock);
                rskb = skb_dequeue(&iucv->backlog_skb_q);
                while (rskb) {
                        IUCV_SKB_CB(rskb)->offset = 0;
                        if (__sock_queue_rcv_skb(sk, rskb)) {
                                /* handle rcv queue full */
                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
                                break;
                        }
                        rskb = skb_dequeue(&iucv->backlog_skb_q);
                }
                if (skb_queue_empty(&iucv->backlog_skb_q)) {
                        if (!list_empty(&iucv->message_q.list))
                                iucv_process_message_q(sk);
                        if (atomic_read(&iucv->msg_recv) >=
                                                        iucv->msglimit / 2) {
                                err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
                                if (err) {
                                        sk->sk_state = IUCV_DISCONN;
                                        sk->sk_state_change(sk);
                                }
                        }
                }
                spin_unlock_bh(&iucv->message_q.lock);
        }

done:
        /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
        if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
                copied = rlen;

        return copied;
}
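
/* Receive-side counterpart (illustrative): the target class stored in
 * IUCV_SKB_CB(skb)->class above is delivered to userspace as a
 * SOL_IUCV/SCM_IUCV_TRGCLS control message and can be read back after
 * recvmsg() with CMSG_FIRSTHDR()/CMSG_DATA(), provided msg_control
 * points to a buffer of at least CMSG_SPACE(sizeof(u32)) bytes.
 */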
1476
1477static inline unsigned int iucv_accept_poll(struct sock *parent)
1478{
1479        struct iucv_sock *isk, *n;
1480        struct sock *sk;
1481
1482        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
1483                sk = (struct sock *) isk;
1484
1485                if (sk->sk_state == IUCV_CONNECTED)
1486                        return POLLIN | POLLRDNORM;
1487        }
1488
1489        return 0;
1490}
1491
1492unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
1493                            poll_table *wait)
1494{
1495        struct sock *sk = sock->sk;
1496        unsigned int mask = 0;
1497
1498        sock_poll_wait(file, sk_sleep(sk), wait);
1499
1500        if (sk->sk_state == IUCV_LISTEN)
1501                return iucv_accept_poll(sk);
1502
1503        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
1504                mask |= POLLERR |
1505                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
1506
1507        if (sk->sk_shutdown & RCV_SHUTDOWN)
1508                mask |= POLLRDHUP;
1509
1510        if (sk->sk_shutdown == SHUTDOWN_MASK)
1511                mask |= POLLHUP;
1512
1513        if (!skb_queue_empty(&sk->sk_receive_queue) ||
1514            (sk->sk_shutdown & RCV_SHUTDOWN))
1515                mask |= POLLIN | POLLRDNORM;
1516
1517        if (sk->sk_state == IUCV_CLOSED)
1518                mask |= POLLHUP;
1519
1520        if (sk->sk_state == IUCV_DISCONN)
1521                mask |= POLLIN;
1522
1523        if (sock_writeable(sk) && iucv_below_msglim(sk))
1524                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1525        else
1526                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1527
1528        return mask;
1529}
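
/* A poll() caller sees POLLIN both for queued data and for a vanished peer
 * (IUCV_DISCONN); POLLOUT additionally requires a free slot in the peer's
 * message window (iucv_below_msglim()).  Hypothetical user-space sketch,
 * with s being a connected AF_IUCV socket and handle_input() a placeholder:
 *
 *      struct pollfd pfd = { .fd = s, .events = POLLIN | POLLOUT };
 *
 *      if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *              handle_input(s);
 */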
1530
1531static int iucv_sock_shutdown(struct socket *sock, int how)
1532{
1533        struct sock *sk = sock->sk;
1534        struct iucv_sock *iucv = iucv_sk(sk);
1535        struct iucv_message txmsg;
1536        int err = 0;
1537
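        /* Map SHUT_RD (0), SHUT_WR (1) and SHUT_RDWR (2) from user space
         * onto the RCV_SHUTDOWN/SEND_SHUTDOWN bit mask values 1, 2 and 3.
         */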
1538        how++;
1539
1540        if ((how & ~SHUTDOWN_MASK) || !how)
1541                return -EINVAL;
1542
1543        lock_sock(sk);
1544        switch (sk->sk_state) {
1545        case IUCV_LISTEN:
1546        case IUCV_DISCONN:
1547        case IUCV_CLOSING:
1548        case IUCV_CLOSED:
1549                err = -ENOTCONN;
1550                goto fail;
1551        default:
1552                break;
1553        }
1554
1555        if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
1556                if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1557                        txmsg.class = 0;
1558                        txmsg.tag = 0;
1559                        err = pr_iucv->message_send(iucv->path, &txmsg,
1560                                IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
1561                        if (err) {
1562                                switch (err) {
1563                                case 1:
1564                                        err = -ENOTCONN;
1565                                        break;
1566                                case 2:
1567                                        err = -ECONNRESET;
1568                                        break;
1569                                default:
1570                                        err = -ENOTCONN;
1571                                        break;
1572                                }
1573                        }
1574                } else
1575                        iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
1576        }
1577
1578        sk->sk_shutdown |= how;
1579        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
1580                if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
1581                    iucv->path) {
1582                        err = pr_iucv->path_quiesce(iucv->path, NULL);
1583                        if (err)
1584                                err = -ENOTCONN;
1586                }
1587                skb_queue_purge(&sk->sk_receive_queue);
1588        }
1589
1590        /* Wake up anyone sleeping in poll */
1591        sk->sk_state_change(sk);
1592
1593fail:
1594        release_sock(sk);
1595        return err;
1596}
1597
1598static int iucv_sock_release(struct socket *sock)
1599{
1600        struct sock *sk = sock->sk;
1601        int err = 0;
1602
1603        if (!sk)
1604                return 0;
1605
1606        iucv_sock_close(sk);
1607
1608        sock_orphan(sk);
1609        iucv_sock_kill(sk);
1610        return err;
1611}
1612
/* setsockopt and getsockopt */
1614static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1615                                char __user *optval, unsigned int optlen)
1616{
1617        struct sock *sk = sock->sk;
1618        struct iucv_sock *iucv = iucv_sk(sk);
1619        int val;
1620        int rc;
1621
1622        if (level != SOL_IUCV)
1623                return -ENOPROTOOPT;
1624
1625        if (optlen < sizeof(int))
1626                return -EINVAL;
1627
1628        if (get_user(val, (int __user *) optval))
1629                return -EFAULT;
1630
1631        rc = 0;
1632
1633        lock_sock(sk);
1634        switch (optname) {
1635        case SO_IPRMDATA_MSG:
1636                if (val)
1637                        iucv->flags |= IUCV_IPRMDATA;
1638                else
1639                        iucv->flags &= ~IUCV_IPRMDATA;
1640                break;
1641        case SO_MSGLIMIT:
1642                switch (sk->sk_state) {
1643                case IUCV_OPEN:
1644                case IUCV_BOUND:
1645                        if (val < 1 || val > (u16)(~0))
1646                                rc = -EINVAL;
1647                        else
1648                                iucv->msglimit = val;
1649                        break;
1650                default:
1651                        rc = -EINVAL;
1652                        break;
1653                }
1654                break;
1655        default:
1656                rc = -ENOPROTOOPT;
1657                break;
1658        }
1659        release_sock(sk);
1660
1661        return rc;
1662}
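
/* SO_MSGLIMIT is accepted only while the socket is still in IUCV_OPEN or
 * IUCV_BOUND state, i.e. before connect()/listen().  Hypothetical user-space
 * sketch; SOL_IUCV and SO_MSGLIMIT must match the kernel's definitions, as
 * there is no exported uapi header:
 *
 *      int limit = 10;
 *
 *      if (setsockopt(s, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit)) < 0)
 *              perror("SO_MSGLIMIT");
 */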
1663
1664static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1665                                char __user *optval, int __user *optlen)
1666{
1667        struct sock *sk = sock->sk;
1668        struct iucv_sock *iucv = iucv_sk(sk);
1669        unsigned int val;
1670        int len;
1671
1672        if (level != SOL_IUCV)
1673                return -ENOPROTOOPT;
1674
1675        if (get_user(len, optlen))
1676                return -EFAULT;
1677
1678        if (len < 0)
1679                return -EINVAL;
1680
1681        len = min_t(unsigned int, len, sizeof(int));
1682
1683        switch (optname) {
1684        case SO_IPRMDATA_MSG:
1685                val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1686                break;
1687        case SO_MSGLIMIT:
1688                lock_sock(sk);
1689                val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1690                                           : iucv->msglimit;    /* default */
1691                release_sock(sk);
1692                break;
1693        case SO_MSGSIZE:
1694                if (sk->sk_state == IUCV_OPEN)
1695                        return -EBADFD;
1696                val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
1697                                sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
1698                                0x7fffffff;
1699                break;
1700        default:
1701                return -ENOPROTOOPT;
1702        }
1703
1704        if (put_user(len, optlen))
1705                return -EFAULT;
1706        if (copy_to_user(optval, &val, len))
1707                return -EFAULT;
1708
1709        return 0;
1710}
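
/* SO_MSGSIZE reports the largest usable message: for the HiperSockets
 * transport the device MTU minus the transport headers, otherwise 2^31 - 1.
 * Hypothetical user-space sketch:
 *
 *      int msgsize;
 *      socklen_t optlen = sizeof(msgsize);
 *
 *      if (getsockopt(s, SOL_IUCV, SO_MSGSIZE, &msgsize, &optlen) == 0)
 *              printf("max message size: %d\n", msgsize);
 */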
1711
1712
1713/* Callback wrappers - called from iucv base support */
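/* These callbacks run in softirq context (IUCV tasklet), which is why the
 * socket is taken with bh_lock_sock() and allocations use GFP_ATOMIC.
 */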
1714static int iucv_callback_connreq(struct iucv_path *path,
1715                                 u8 ipvmid[8], u8 ipuser[16])
1716{
1717        unsigned char user_data[16];
1718        unsigned char nuser_data[16];
1719        unsigned char src_name[8];
1720        struct sock *sk, *nsk;
1721        struct iucv_sock *iucv, *niucv;
1722        int err;
1723
1724        memcpy(src_name, ipuser, 8);
1725        EBCASC(src_name, 8);
1726        /* Find out if this path belongs to af_iucv. */
1727        read_lock(&iucv_sk_list.lock);
1728        iucv = NULL;
1729        sk = NULL;
1730        sk_for_each(sk, &iucv_sk_list.head)
1731                if (sk->sk_state == IUCV_LISTEN &&
1732                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
1733                        /*
1734                         * Found a listening socket with
1735                         * src_name == ipuser[0-7].
1736                         */
1737                        iucv = iucv_sk(sk);
1738                        break;
1739                }
1740        read_unlock(&iucv_sk_list.lock);
1741        if (!iucv)
1742                /* No socket found, not one of our paths. */
1743                return -EINVAL;
1744
1745        bh_lock_sock(sk);
1746
1747        /* Check if parent socket is listening */
1748        low_nmcpy(user_data, iucv->src_name);
1749        high_nmcpy(user_data, iucv->dst_name);
1750        ASCEBC(user_data, sizeof(user_data));
1751        if (sk->sk_state != IUCV_LISTEN) {
1752                err = pr_iucv->path_sever(path, user_data);
1753                iucv_path_free(path);
1754                goto fail;
1755        }
1756
1757        /* Check for backlog size */
1758        if (sk_acceptq_is_full(sk)) {
1759                err = pr_iucv->path_sever(path, user_data);
1760                iucv_path_free(path);
1761                goto fail;
1762        }
1763
1764        /* Create the new socket */
1765        nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
1766        if (!nsk) {
1767                err = pr_iucv->path_sever(path, user_data);
1768                iucv_path_free(path);
1769                goto fail;
1770        }
1771
1772        niucv = iucv_sk(nsk);
1773        iucv_sock_init(nsk, sk);
1774
1775        /* Set the new iucv_sock */
1776        memcpy(niucv->dst_name, ipuser + 8, 8);
1777        EBCASC(niucv->dst_name, 8);
1778        memcpy(niucv->dst_user_id, ipvmid, 8);
1779        memcpy(niucv->src_name, iucv->src_name, 8);
1780        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1781        niucv->path = path;
1782
1783        /* Call iucv_accept */
1784        high_nmcpy(nuser_data, ipuser + 8);
1785        memcpy(nuser_data + 8, niucv->src_name, 8);
1786        ASCEBC(nuser_data + 8, 8);
1787
1788        /* set message limit for path based on msglimit of accepting socket */
1789        niucv->msglimit = iucv->msglimit;
1790        path->msglim = iucv->msglimit;
1791        err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1792        if (err) {
1793                iucv_sever_path(nsk, 1);
1794                iucv_sock_kill(nsk);
1795                goto fail;
1796        }
1797
1798        iucv_accept_enqueue(sk, nsk);
1799
1800        /* Wake up accept */
1801        nsk->sk_state = IUCV_CONNECTED;
1802        sk->sk_data_ready(sk);
1803        err = 0;
1804fail:
1805        bh_unlock_sock(sk);
1806        return 0;
1807}
1808
1809static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
1810{
1811        struct sock *sk = path->private;
1812
1813        sk->sk_state = IUCV_CONNECTED;
1814        sk->sk_state_change(sk);
1815}
1816
1817static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1818{
1819        struct sock *sk = path->private;
1820        struct iucv_sock *iucv = iucv_sk(sk);
1821        struct sk_buff *skb;
1822        struct sock_msg_q *save_msg;
1823        int len;
1824
1825        if (sk->sk_shutdown & RCV_SHUTDOWN) {
1826                pr_iucv->message_reject(path, msg);
1827                return;
1828        }
1829
1830        spin_lock(&iucv->message_q.lock);
1831
1832        if (!list_empty(&iucv->message_q.list) ||
1833            !skb_queue_empty(&iucv->backlog_skb_q))
1834                goto save_message;
1835
1836        len = atomic_read(&sk->sk_rmem_alloc);
1837        len += SKB_TRUESIZE(iucv_msg_length(msg));
1838        if (len > sk->sk_rcvbuf)
1839                goto save_message;
1840
1841        skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
1842        if (!skb)
1843                goto save_message;
1844
1845        iucv_process_message(sk, skb, path, msg);
1846        goto out_unlock;
1847
1848save_message:
1849        save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
1850        if (!save_msg)
1851                goto out_unlock;
1852        save_msg->path = path;
1853        save_msg->msg = *msg;
1854
1855        list_add_tail(&save_msg->list, &iucv->message_q.list);
1856
1857out_unlock:
1858        spin_unlock(&iucv->message_q.lock);
1859}
1860
1861static void iucv_callback_txdone(struct iucv_path *path,
1862                                 struct iucv_message *msg)
1863{
1864        struct sock *sk = path->private;
1865        struct sk_buff *this = NULL;
1866        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
1867        struct sk_buff *list_skb = list->next;
1868        unsigned long flags;
1869
1870        bh_lock_sock(sk);
1871        if (!skb_queue_empty(list)) {
1872                spin_lock_irqsave(&list->lock, flags);
1873
1874                while (list_skb != (struct sk_buff *)list) {
1875                        if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
1876                                this = list_skb;
1877                                break;
1878                        }
1879                        list_skb = list_skb->next;
1880                }
1881                if (this)
1882                        __skb_unlink(this, list);
1883
1884                spin_unlock_irqrestore(&list->lock, flags);
1885
1886                if (this) {
1887                        kfree_skb(this);
1888                        /* wake up any process waiting for sending */
1889                        iucv_sock_wake_msglim(sk);
1890                }
1891        }
1892
1893        if (sk->sk_state == IUCV_CLOSING) {
1894                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
1895                        sk->sk_state = IUCV_CLOSED;
1896                        sk->sk_state_change(sk);
1897                }
1898        }
1899        bh_unlock_sock(sk);
1900
1901}
1902
1903static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1904{
1905        struct sock *sk = path->private;
1906
1907        if (sk->sk_state == IUCV_CLOSED)
1908                return;
1909
1910        bh_lock_sock(sk);
1911        iucv_sever_path(sk, 1);
1912        sk->sk_state = IUCV_DISCONN;
1913
1914        sk->sk_state_change(sk);
1915        bh_unlock_sock(sk);
1916}
1917
1918/* called if the other communication side shuts down its RECV direction;
1919 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
1920 */
1921static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1922{
1923        struct sock *sk = path->private;
1924
1925        bh_lock_sock(sk);
1926        if (sk->sk_state != IUCV_CLOSED) {
1927                sk->sk_shutdown |= SEND_SHUTDOWN;
1928                sk->sk_state_change(sk);
1929        }
1930        bh_unlock_sock(sk);
1931}
1932
1933/***************** HiperSockets transport callbacks ********************/
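/* Turn a received frame into a reply frame in place: convert the addresses
 * back to EBCDIC, swap source and destination, and prepend a cleared
 * ethernet header for the subsequent dev_queue_xmit().
 */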
1934static void afiucv_swap_src_dest(struct sk_buff *skb)
1935{
1936        struct af_iucv_trans_hdr *trans_hdr =
1937                                (struct af_iucv_trans_hdr *)skb->data;
1938        char tmpID[8];
1939        char tmpName[8];
1940
1941        ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1942        ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1943        ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1944        ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1945        memcpy(tmpID, trans_hdr->srcUserID, 8);
1946        memcpy(tmpName, trans_hdr->srcAppName, 8);
1947        memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1948        memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1949        memcpy(trans_hdr->destUserID, tmpID, 8);
1950        memcpy(trans_hdr->destAppName, tmpName, 8);
1951        skb_push(skb, ETH_HLEN);
1952        memset(skb->data, 0, ETH_HLEN);
1953}
1954
1955/**
 * afiucv_hs_callback_syn() - react on received SYN
 */
1958static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1959{
1960        struct sock *nsk;
1961        struct iucv_sock *iucv, *niucv;
1962        struct af_iucv_trans_hdr *trans_hdr;
1963        int err;
1964
1965        iucv = iucv_sk(sk);
1966        trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
1967        if (!iucv) {
1968                /* no sock - connection refused */
1969                afiucv_swap_src_dest(skb);
1970                trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1971                err = dev_queue_xmit(skb);
1972                goto out;
1973        }
1974
1975        nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
1976        bh_lock_sock(sk);
1977        if ((sk->sk_state != IUCV_LISTEN) ||
1978            sk_acceptq_is_full(sk) ||
1979            !nsk) {
1980                /* error on server socket - connection refused */
1981                afiucv_swap_src_dest(skb);
1982                trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1983                err = dev_queue_xmit(skb);
1984                iucv_sock_kill(nsk);
1985                bh_unlock_sock(sk);
1986                goto out;
1987        }
1988
1989        niucv = iucv_sk(nsk);
1990        iucv_sock_init(nsk, sk);
1991        niucv->transport = AF_IUCV_TRANS_HIPER;
1992        niucv->msglimit = iucv->msglimit;
1993        if (!trans_hdr->window)
1994                niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
1995        else
1996                niucv->msglimit_peer = trans_hdr->window;
1997        memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
1998        memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
1999        memcpy(niucv->src_name, iucv->src_name, 8);
2000        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
2001        nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
2002        niucv->hs_dev = iucv->hs_dev;
2003        dev_hold(niucv->hs_dev);
2004        afiucv_swap_src_dest(skb);
2005        trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
2006        trans_hdr->window = niucv->msglimit;
2007        /* if receiver acks the xmit connection is established */
2008        err = dev_queue_xmit(skb);
2009        if (!err) {
2010                iucv_accept_enqueue(sk, nsk);
2011                nsk->sk_state = IUCV_CONNECTED;
2012                sk->sk_data_ready(sk);
2013        } else
2014                iucv_sock_kill(nsk);
2015        bh_unlock_sock(sk);
2016
2017out:
2018        return NET_RX_SUCCESS;
2019}
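
/* Connection setup over the HiperSockets transport, roughly analogous to a
 * TCP handshake (flag values from struct af_iucv_trans_hdr):
 *
 *      client                       server (IUCV_LISTEN)
 *      SYN            ------------>  allocate child socket
 *                     <------------  SYN|ACK  (accepted, window granted)
 *                 or  <------------  SYN|FIN  (refused or backlog full)
 */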
2020
2021/**
 * afiucv_hs_callback_synack() - react on received SYN|ACK
 */
2024static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
2025{
2026        struct iucv_sock *iucv = iucv_sk(sk);
2027        struct af_iucv_trans_hdr *trans_hdr =
2028                                        (struct af_iucv_trans_hdr *)skb->data;
2029
2030        if (!iucv)
2031                goto out;
2032        if (sk->sk_state != IUCV_BOUND)
2033                goto out;
2034        bh_lock_sock(sk);
2035        iucv->msglimit_peer = trans_hdr->window;
2036        sk->sk_state = IUCV_CONNECTED;
2037        sk->sk_state_change(sk);
2038        bh_unlock_sock(sk);
2039out:
2040        kfree_skb(skb);
2041        return NET_RX_SUCCESS;
2042}
2043
2044/**
 * afiucv_hs_callback_synfin() - react on received SYN|FIN
 */
2047static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2048{
2049        struct iucv_sock *iucv = iucv_sk(sk);
2050
2051        if (!iucv)
2052                goto out;
2053        if (sk->sk_state != IUCV_BOUND)
2054                goto out;
2055        bh_lock_sock(sk);
2056        sk->sk_state = IUCV_DISCONN;
2057        sk->sk_state_change(sk);
2058        bh_unlock_sock(sk);
2059out:
2060        kfree_skb(skb);
2061        return NET_RX_SUCCESS;
2062}
2063
2064/**
2065 * afiucv_hs_callback_fin() - react on received FIN
 */
2067static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2068{
2069        struct iucv_sock *iucv = iucv_sk(sk);
2070
2071        /* other end of connection closed */
2072        if (!iucv)
2073                goto out;
2074        bh_lock_sock(sk);
2075        if (sk->sk_state == IUCV_CONNECTED) {
2076                sk->sk_state = IUCV_DISCONN;
2077                sk->sk_state_change(sk);
2078        }
2079        bh_unlock_sock(sk);
2080out:
2081        kfree_skb(skb);
2082        return NET_RX_SUCCESS;
2083}
2084
2085/**
2086 * afiucv_hs_callback_win() - react on received WIN
 */
2088static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2089{
2090        struct iucv_sock *iucv = iucv_sk(sk);
2091        struct af_iucv_trans_hdr *trans_hdr =
2092                                        (struct af_iucv_trans_hdr *)skb->data;
2093
2094        if (!iucv)
2095                return NET_RX_SUCCESS;
2096
2097        if (sk->sk_state != IUCV_CONNECTED)
2098                return NET_RX_SUCCESS;
2099
2100        atomic_sub(trans_hdr->window, &iucv->msg_sent);
2101        iucv_sock_wake_msglim(sk);
2102        return NET_RX_SUCCESS;
2103}
2104
2105/**
2106 * afiucv_hs_callback_rx() - react on received data
 */
2108static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2109{
2110        struct iucv_sock *iucv = iucv_sk(sk);
2111
2112        if (!iucv) {
2113                kfree_skb(skb);
2114                return NET_RX_SUCCESS;
2115        }
2116
2117        if (sk->sk_state != IUCV_CONNECTED) {
2118                kfree_skb(skb);
2119                return NET_RX_SUCCESS;
2120        }
2121
2122        if (sk->sk_shutdown & RCV_SHUTDOWN) {
2123                kfree_skb(skb);
2124                return NET_RX_SUCCESS;
2125        }
2126
2127        /* write stuff from iucv_msg to skb cb */
2128        skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2129        skb_reset_transport_header(skb);
2130        skb_reset_network_header(skb);
2131        IUCV_SKB_CB(skb)->offset = 0;
2132        if (sk_filter(sk, skb)) {
2133                atomic_inc(&sk->sk_drops);      /* skb rejected by filter */
2134                kfree_skb(skb);
2135                return NET_RX_SUCCESS;
2136        }
2137
2138        spin_lock(&iucv->message_q.lock);
2139        if (skb_queue_empty(&iucv->backlog_skb_q)) {
2140                if (__sock_queue_rcv_skb(sk, skb))
2141                        /* handle rcv queue full */
2142                        skb_queue_tail(&iucv->backlog_skb_q, skb);
2143        } else
2144                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
2145        spin_unlock(&iucv->message_q.lock);
2146        return NET_RX_SUCCESS;
2147}
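
/* Ordering: once backlog_skb_q is non-empty, every new skb is appended
 * there as well, so data is delivered in sequence when the receive queue
 * drains again (see iucv_sock_recvmsg()).
 */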
2148
2149/**
2150 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
2151 *                   transport
2152 *                   called from netif RX softirq
 */
2154static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2155        struct packet_type *pt, struct net_device *orig_dev)
2156{
2157        struct sock *sk;
2158        struct iucv_sock *iucv;
2159        struct af_iucv_trans_hdr *trans_hdr;
2160        char nullstring[8];
2161        int err = 0;
2162
2163        if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
2164                WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
2165                          (int)skb->len,
2166                          (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr)));
2167                kfree_skb(skb);
2168                return NET_RX_SUCCESS;
2169        }
2170        if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr)))
2171                if (skb_linearize(skb)) {
2172                        WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d",
2173                                  (int)skb->len);
2174                        kfree_skb(skb);
2175                        return NET_RX_SUCCESS;
2176                }
2177        skb_pull(skb, ETH_HLEN);
2178        trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
2179        EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2180        EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2181        EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2182        EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2183        memset(nullstring, 0, sizeof(nullstring));
2184        iucv = NULL;
2185        sk = NULL;
2186        read_lock(&iucv_sk_list.lock);
2187        sk_for_each(sk, &iucv_sk_list.head) {
2188                if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2189                        if ((!memcmp(&iucv_sk(sk)->src_name,
2190                                     trans_hdr->destAppName, 8)) &&
2191                            (!memcmp(&iucv_sk(sk)->src_user_id,
2192                                     trans_hdr->destUserID, 8)) &&
2193                            (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2194                            (!memcmp(&iucv_sk(sk)->dst_user_id,
2195                                     nullstring, 8))) {
2196                                iucv = iucv_sk(sk);
2197                                break;
2198                        }
2199                } else {
2200                        if ((!memcmp(&iucv_sk(sk)->src_name,
2201                                     trans_hdr->destAppName, 8)) &&
2202                            (!memcmp(&iucv_sk(sk)->src_user_id,
2203                                     trans_hdr->destUserID, 8)) &&
2204                            (!memcmp(&iucv_sk(sk)->dst_name,
2205                                     trans_hdr->srcAppName, 8)) &&
2206                            (!memcmp(&iucv_sk(sk)->dst_user_id,
2207                                     trans_hdr->srcUserID, 8))) {
2208                                iucv = iucv_sk(sk);
2209                                break;
2210                        }
2211                }
2212        }
2213        read_unlock(&iucv_sk_list.lock);
2214        if (!iucv)
2215                sk = NULL;
2216
        /* No matching socket was found.  There is no general way to reply
         * from here: options would be to send without a socket (with no
         * check of the send return code) or to introduce a default socket
         * for such cases.  Desired reactions:
         *      SYN  -> reply SYN|ACK in the good case, SYN|FIN in the bad
         *      data -> reply FIN
         *      SYN|ACK, SYN|FIN, FIN -> no action
         */
2225
2226        switch (trans_hdr->flags) {
2227        case AF_IUCV_FLAG_SYN:
2228                /* connect request */
2229                err = afiucv_hs_callback_syn(sk, skb);
2230                break;
2231        case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2232                /* connect request confirmed */
2233                err = afiucv_hs_callback_synack(sk, skb);
2234                break;
2235        case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2236                /* connect request refused */
2237                err = afiucv_hs_callback_synfin(sk, skb);
2238                break;
2239        case (AF_IUCV_FLAG_FIN):
2240                /* close request */
2241                err = afiucv_hs_callback_fin(sk, skb);
2242                break;
2243        case (AF_IUCV_FLAG_WIN):
2244                err = afiucv_hs_callback_win(sk, skb);
2245                if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2246                        kfree_skb(skb);
2247                        break;
2248                }
2249                /* fall through and receive non-zero length data */
2250        case (AF_IUCV_FLAG_SHT):
2251                /* shutdown request */
2252                /* fall through and receive zero length data */
2253        case 0:
2254                /* plain data frame */
2255                IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
2256                err = afiucv_hs_callback_rx(sk, skb);
2257                break;
        default:
                /* unexpected flag combination: free the skb to avoid a leak */
                kfree_skb(skb);
                break;
2260        }
2261
2262        return err;
2263}
2264
2265/**
 * afiucv_hs_callback_txnotify() - handle send notifications from the
 *                                 HiperSockets transport
 */
2269static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2270                                        enum iucv_tx_notify n)
2271{
2272        struct sock *isk = skb->sk;
2273        struct sock *sk = NULL;
2274        struct iucv_sock *iucv = NULL;
2275        struct sk_buff_head *list;
2276        struct sk_buff *list_skb;
2277        struct sk_buff *nskb;
2278        unsigned long flags;
2279
2280        read_lock_irqsave(&iucv_sk_list.lock, flags);
2281        sk_for_each(sk, &iucv_sk_list.head)
2282                if (sk == isk) {
2283                        iucv = iucv_sk(sk);
2284                        break;
2285                }
2286        read_unlock_irqrestore(&iucv_sk_list.lock, flags);
2287
2288        if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2289                return;
2290
2291        list = &iucv->send_skb_q;
2292        spin_lock_irqsave(&list->lock, flags);
2293        if (skb_queue_empty(list))
2294                goto out_unlock;
2295        list_skb = list->next;
2296        nskb = list_skb->next;
2297        while (list_skb != (struct sk_buff *)list) {
2298                if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2299                        switch (n) {
2300                        case TX_NOTIFY_OK:
2301                                __skb_unlink(list_skb, list);
2302                                kfree_skb(list_skb);
2303                                iucv_sock_wake_msglim(sk);
2304                                break;
2305                        case TX_NOTIFY_PENDING:
2306                                atomic_inc(&iucv->pendings);
2307                                break;
2308                        case TX_NOTIFY_DELAYED_OK:
2309                                __skb_unlink(list_skb, list);
2310                                atomic_dec(&iucv->pendings);
2311                                if (atomic_read(&iucv->pendings) <= 0)
2312                                        iucv_sock_wake_msglim(sk);
2313                                kfree_skb(list_skb);
2314                                break;
2315                        case TX_NOTIFY_UNREACHABLE:
2316                        case TX_NOTIFY_DELAYED_UNREACHABLE:
2317                        case TX_NOTIFY_TPQFULL: /* not yet used */
2318                        case TX_NOTIFY_GENERALERROR:
2319                        case TX_NOTIFY_DELAYED_GENERALERROR:
2320                                __skb_unlink(list_skb, list);
2321                                kfree_skb(list_skb);
2322                                if (sk->sk_state == IUCV_CONNECTED) {
2323                                        sk->sk_state = IUCV_DISCONN;
2324                                        sk->sk_state_change(sk);
2325                                }
2326                                break;
2327                        }
2328                        break;
2329                }
2330                list_skb = nskb;
2331                nskb = nskb->next;
2332        }
2333out_unlock:
2334        spin_unlock_irqrestore(&list->lock, flags);
2335
2336        if (sk->sk_state == IUCV_CLOSING) {
2337                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
2338                        sk->sk_state = IUCV_CLOSED;
2339                        sk->sk_state_change(sk);
2340                }
2341        }
2342
2343}
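
/* TX_NOTIFY_PENDING and the TX_NOTIFY_DELAYED_* values arrive when the
 * HiperSockets device completes a buffer asynchronously: a send slot is
 * handed back to the application (iucv_sock_wake_msglim()) only once no
 * completions are pending anymore.
 */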
2344
2345/*
2346 * afiucv_netdev_event: handle netdev notifier chain events
2347 */
2348static int afiucv_netdev_event(struct notifier_block *this,
2349                               unsigned long event, void *ptr)
2350{
2351        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2352        struct sock *sk;
2353        struct iucv_sock *iucv;
2354
2355        switch (event) {
2356        case NETDEV_REBOOT:
2357        case NETDEV_GOING_DOWN:
2358                sk_for_each(sk, &iucv_sk_list.head) {
2359                        iucv = iucv_sk(sk);
2360                        if ((iucv->hs_dev == event_dev) &&
2361                            (sk->sk_state == IUCV_CONNECTED)) {
2362                                if (event == NETDEV_GOING_DOWN)
2363                                        iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
2364                                sk->sk_state = IUCV_DISCONN;
2365                                sk->sk_state_change(sk);
2366                        }
2367                }
2368                break;
2369        case NETDEV_DOWN:
2370        case NETDEV_UNREGISTER:
2371        default:
2372                break;
2373        }
2374        return NOTIFY_DONE;
2375}
2376
2377static struct notifier_block afiucv_netdev_notifier = {
2378        .notifier_call = afiucv_netdev_event,
2379};
2380
2381static const struct proto_ops iucv_sock_ops = {
2382        .family         = PF_IUCV,
2383        .owner          = THIS_MODULE,
2384        .release        = iucv_sock_release,
2385        .bind           = iucv_sock_bind,
2386        .connect        = iucv_sock_connect,
2387        .listen         = iucv_sock_listen,
2388        .accept         = iucv_sock_accept,
2389        .getname        = iucv_sock_getname,
2390        .sendmsg        = iucv_sock_sendmsg,
2391        .recvmsg        = iucv_sock_recvmsg,
2392        .poll           = iucv_sock_poll,
2393        .ioctl          = sock_no_ioctl,
2394        .mmap           = sock_no_mmap,
2395        .socketpair     = sock_no_socketpair,
2396        .shutdown       = iucv_sock_shutdown,
2397        .setsockopt     = iucv_sock_setsockopt,
2398        .getsockopt     = iucv_sock_getsockopt,
2399};
2400
2401static const struct net_proto_family iucv_sock_family_ops = {
2402        .family = AF_IUCV,
2403        .owner  = THIS_MODULE,
2404        .create = iucv_sock_create,
2405};
2406
2407static struct packet_type iucv_packet_type = {
2408        .type = cpu_to_be16(ETH_P_AF_IUCV),
2409        .func = afiucv_hs_rcv,
2410};
2411
2412static int afiucv_iucv_init(void)
2413{
2414        int err;
2415
2416        err = pr_iucv->iucv_register(&af_iucv_handler, 0);
2417        if (err)
2418                goto out;
2419        /* establish dummy device */
2420        af_iucv_driver.bus = pr_iucv->bus;
2421        err = driver_register(&af_iucv_driver);
2422        if (err)
2423                goto out_iucv;
2424        af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2425        if (!af_iucv_dev) {
2426                err = -ENOMEM;
2427                goto out_driver;
2428        }
2429        dev_set_name(af_iucv_dev, "af_iucv");
2430        af_iucv_dev->bus = pr_iucv->bus;
2431        af_iucv_dev->parent = pr_iucv->root;
2432        af_iucv_dev->release = (void (*)(struct device *))kfree;
2433        af_iucv_dev->driver = &af_iucv_driver;
2434        err = device_register(af_iucv_dev);
2435        if (err)
2436                goto out_driver;
2437        return 0;
2438
2439out_driver:
2440        driver_unregister(&af_iucv_driver);
2441out_iucv:
2442        pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2443out:
2444        return err;
2445}
2446
2447static int __init afiucv_init(void)
2448{
2449        int err;
2450
2451        if (MACHINE_IS_VM) {
2452                cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2453                if (unlikely(err)) {
2454                        WARN_ON(err);
2455                        err = -EPROTONOSUPPORT;
2456                        goto out;
2457                }
2458
2459                pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2460                if (!pr_iucv) {
                        pr_warn("iucv_if lookup failed\n");
2462                        memset(&iucv_userid, 0, sizeof(iucv_userid));
2463                }
2464        } else {
2465                memset(&iucv_userid, 0, sizeof(iucv_userid));
2466                pr_iucv = NULL;
2467        }
2468
2469        err = proto_register(&iucv_proto, 0);
2470        if (err)
2471                goto out;
2472        err = sock_register(&iucv_sock_family_ops);
2473        if (err)
2474                goto out_proto;
2475
2476        if (pr_iucv) {
2477                err = afiucv_iucv_init();
2478                if (err)
2479                        goto out_sock;
2480        } else
2481                register_netdevice_notifier(&afiucv_netdev_notifier);
2482        dev_add_pack(&iucv_packet_type);
2483        return 0;
2484
2485out_sock:
2486        sock_unregister(PF_IUCV);
2487out_proto:
2488        proto_unregister(&iucv_proto);
2489out:
2490        if (pr_iucv)
2491                symbol_put(iucv_if);
2492        return err;
2493}
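
/* With the module loaded, user space reaches this family through a plain
 * socket(2) call.  Hypothetical sketch (struct sockaddr_iucv as defined in
 * the kernel's af_iucv.h; IDs are blank-padded, not NUL-terminated):
 *
 *      struct sockaddr_iucv addr = {
 *              .siucv_family  = AF_IUCV,
 *              .siucv_user_id = "LNXGUEST",
 *              .siucv_name    = "APPSRV  ",
 *      };
 *      int s = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *      bind(s, (struct sockaddr *)&addr, sizeof(addr));
 */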
2494
2495static void __exit afiucv_exit(void)
2496{
2497        if (pr_iucv) {
2498                device_unregister(af_iucv_dev);
2499                driver_unregister(&af_iucv_driver);
2500                pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2501                symbol_put(iucv_if);
2502        } else
2503                unregister_netdevice_notifier(&afiucv_netdev_notifier);
2504        dev_remove_pack(&iucv_packet_type);
2505        sock_unregister(PF_IUCV);
2506        proto_unregister(&iucv_proto);
2507}
2508
2509module_init(afiucv_init);
2510module_exit(afiucv_exit);
2511
2512MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
2513MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
2514MODULE_VERSION(VERSION);
2515MODULE_LICENSE("GPL");
2516MODULE_ALIAS_NETPROTO(PF_IUCV);
2517
2518