linux/drivers/s390/net/netiucv.c
   1/*
   2 * IUCV network driver
   3 *
   4 * Copyright IBM Corp. 2001, 2009
   5 *
   6 * Author(s):
   7 *      Original netiucv driver:
   8 *              Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
   9 *      Sysfs integration and all bugs therein:
  10 *              Cornelia Huck (cornelia.huck@de.ibm.com)
  11 *      PM functions:
  12 *              Ursula Braun (ursula.braun@de.ibm.com)
  13 *
  14 * Documentation used:
  15 *  the source of the original IUCV driver by:
  16 *    Stefan Hegewald <hegewald@de.ibm.com>
  17 *    Hartmut Penner <hpenner@de.ibm.com>
  18 *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  19 *    Martin Schwidefsky (schwidefsky@de.ibm.com)
  20 *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
  21 *
  22 * This program is free software; you can redistribute it and/or modify
  23 * it under the terms of the GNU General Public License as published by
  24 * the Free Software Foundation; either version 2, or (at your option)
  25 * any later version.
  26 *
  27 * This program is distributed in the hope that it will be useful,
  28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  30 * GNU General Public License for more details.
  31 *
  32 * You should have received a copy of the GNU General Public License
  33 * along with this program; if not, write to the Free Software
  34 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  35 *
  36 */
  37
  38#define KMSG_COMPONENT "netiucv"
  39#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  40
  41#undef DEBUG
  42
  43#include <linux/module.h>
  44#include <linux/init.h>
  45#include <linux/kernel.h>
  46#include <linux/slab.h>
  47#include <linux/errno.h>
  48#include <linux/types.h>
  49#include <linux/interrupt.h>
  50#include <linux/timer.h>
  51#include <linux/bitops.h>
  52
  53#include <linux/signal.h>
  54#include <linux/string.h>
  55#include <linux/device.h>
  56
  57#include <linux/ip.h>
  58#include <linux/if_arp.h>
  59#include <linux/tcp.h>
  60#include <linux/skbuff.h>
  61#include <linux/ctype.h>
  62#include <net/dst.h>
  63
  64#include <asm/io.h>
  65#include <asm/uaccess.h>
  66#include <asm/ebcdic.h>
  67
  68#include <net/iucv/iucv.h>
  69#include "fsm.h"
  70
  71MODULE_AUTHOR
  72    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
  73MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
  74
  75/**
  76 * Debug Facility stuff
  77 */
  78#define IUCV_DBF_SETUP_NAME "iucv_setup"
  79#define IUCV_DBF_SETUP_LEN 64
  80#define IUCV_DBF_SETUP_PAGES 2
  81#define IUCV_DBF_SETUP_NR_AREAS 1
  82#define IUCV_DBF_SETUP_LEVEL 3
  83
  84#define IUCV_DBF_DATA_NAME "iucv_data"
  85#define IUCV_DBF_DATA_LEN 128
  86#define IUCV_DBF_DATA_PAGES 2
  87#define IUCV_DBF_DATA_NR_AREAS 1
  88#define IUCV_DBF_DATA_LEVEL 2
  89
  90#define IUCV_DBF_TRACE_NAME "iucv_trace"
  91#define IUCV_DBF_TRACE_LEN 16
  92#define IUCV_DBF_TRACE_PAGES 4
  93#define IUCV_DBF_TRACE_NR_AREAS 1
  94#define IUCV_DBF_TRACE_LEVEL 3
  95
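/*
 * Convenience wrappers around the s390 debug facility (debug_*() API):
 * IUCV_DBF_TEXT logs a fixed string, IUCV_DBF_HEX logs a hex dump, and
 * IUCV_DBF_TEXT_ (further below) formats into a per-CPU buffer first so
 * that low debug levels can be filtered out before the sprintf.
 */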
  96#define IUCV_DBF_TEXT(name,level,text) \
  97        do { \
  98                debug_text_event(iucv_dbf_##name,level,text); \
  99        } while (0)
 100
 101#define IUCV_DBF_HEX(name,level,addr,len) \
 102        do { \
 103                debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
 104        } while (0)
 105
 106DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
 107
 108/* Allow to sort out low debug levels early to avoid wasted sprints */
 109static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
 110{
 111        return (level <= dbf_grp->level);
 112}
 113
 114#define IUCV_DBF_TEXT_(name, level, text...) \
 115        do { \
 116                if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
 117                        char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
 118                        sprintf(__buf, text); \
 119                        debug_text_event(iucv_dbf_##name, level, __buf); \
 120                        put_cpu_var(iucv_dbf_txt_buf); \
 121                } \
 122        } while (0)
 123
 124#define IUCV_DBF_SPRINTF(name,level,text...) \
 125        do { \
  126                debug_sprintf_event(iucv_dbf_trace, level, text); \
 128        } while (0)
 129
 130/**
 131 * some more debug stuff
 132 */
 133#define PRINTK_HEADER " iucv: "       /* for debugging */
 134
 135/* dummy device to make sure netiucv_pm functions are called */
 136static struct device *netiucv_dev;
 137
 138static int netiucv_pm_prepare(struct device *);
 139static void netiucv_pm_complete(struct device *);
 140static int netiucv_pm_freeze(struct device *);
 141static int netiucv_pm_restore_thaw(struct device *);
 142
 143static const struct dev_pm_ops netiucv_pm_ops = {
 144        .prepare = netiucv_pm_prepare,
 145        .complete = netiucv_pm_complete,
 146        .freeze = netiucv_pm_freeze,
 147        .thaw = netiucv_pm_restore_thaw,
 148        .restore = netiucv_pm_restore_thaw,
 149};
 150
 151static struct device_driver netiucv_driver = {
 152        .owner = THIS_MODULE,
 153        .name = "netiucv",
 154        .bus  = &iucv_bus,
 155        .pm = &netiucv_pm_ops,
 156};
 157
 158static int netiucv_callback_connreq(struct iucv_path *,
 159                                    u8 ipvmid[8], u8 ipuser[16]);
 160static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
 161static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
 162static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
 163static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
 164static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
 165static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
 166
 167static struct iucv_handler netiucv_handler = {
 168        .path_pending     = netiucv_callback_connreq,
 169        .path_complete    = netiucv_callback_connack,
 170        .path_severed     = netiucv_callback_connrej,
 171        .path_quiesced    = netiucv_callback_connsusp,
 172        .path_resumed     = netiucv_callback_connres,
 173        .message_pending  = netiucv_callback_rx,
 174        .message_complete = netiucv_callback_txdone
 175};
 176
 177/**
 178 * Per connection profiling data
 179 */
 180struct connection_profile {
 181        unsigned long maxmulti;
 182        unsigned long maxcqueue;
 183        unsigned long doios_single;
 184        unsigned long doios_multi;
 185        unsigned long txlen;
 186        unsigned long tx_time;
 187        struct timespec send_stamp;
 188        unsigned long tx_pending;
 189        unsigned long tx_max_pending;
 190};
 191
 192/**
 193 * Representation of one iucv connection
 194 */
 195struct iucv_connection {
 196        struct list_head          list;
 197        struct iucv_path          *path;
 198        struct sk_buff            *rx_buff;
 199        struct sk_buff            *tx_buff;
 200        struct sk_buff_head       collect_queue;
 201        struct sk_buff_head       commit_queue;
 202        spinlock_t                collect_lock;
 203        int                       collect_len;
 204        int                       max_buffsize;
 205        fsm_timer                 timer;
 206        fsm_instance              *fsm;
 207        struct net_device         *netdev;
 208        struct connection_profile prof;
 209        char                      userid[9];
 210        char                      userdata[17];
 211};
 212
 213/**
 214 * Linked list of all connection structs.
 215 */
 216static LIST_HEAD(iucv_connection_list);
 217static DEFINE_RWLOCK(iucv_connection_rwlock);
 218
 219/**
 220 * Representation of event-data for the
 221 * connection state machine.
 222 */
 223struct iucv_event {
 224        struct iucv_connection *conn;
 225        void                   *data;
 226};
 227
 228/**
 229 * Private part of the network device structure
 230 */
 231struct netiucv_priv {
 232        struct net_device_stats stats;
 233        unsigned long           tbusy;
 234        fsm_instance            *fsm;
 235        struct iucv_connection  *conn;
 236        struct device           *dev;
 237        int                      pm_state;
 238};
 239
 240/**
 241 * Link level header for a packet.
 242 */
 243struct ll_header {
 244        u16 next;
 245};
 246
 247#define NETIUCV_HDRLEN           (sizeof(struct ll_header))
 248#define NETIUCV_BUFSIZE_MAX      65537
 249#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
 250#define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
 251#define NETIUCV_MTU_DEFAULT      9216
 252#define NETIUCV_QUEUELEN_DEFAULT 50
 253#define NETIUCV_TIMEOUT_5SEC     5000
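
/*
 * Each IUCV message holds one or more packets. Every packet is preceded
 * by a struct ll_header whose 'next' field contains the offset of the
 * following header, counted from the start of the message; a trailing
 * header with next == 0 terminates the chain (see netiucv_unpack_skb()
 * and netiucv_transmit_skb() below).
 */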
 254
 255/**
  256 * Helper functions for busy handling
  257 * of network devices.
 258 */
 259static inline void netiucv_clear_busy(struct net_device *dev)
 260{
 261        struct netiucv_priv *priv = netdev_priv(dev);
 262        clear_bit(0, &priv->tbusy);
 263        netif_wake_queue(dev);
 264}
 265
 266static inline int netiucv_test_and_set_busy(struct net_device *dev)
 267{
 268        struct netiucv_priv *priv = netdev_priv(dev);
 269        netif_stop_queue(dev);
 270        return test_and_set_bit(0, &priv->tbusy);
 271}
 272
 273static u8 iucvMagic_ascii[16] = {
 274        0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
 275        0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
 276};
 277
 278static u8 iucvMagic_ebcdic[16] = {
 279        0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
 280        0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
 281};
 282
 283/**
 284 * Convert an iucv userId to its printable
 285 * form (strip whitespace at end).
 286 *
  288 * @param name The iucv userId
  289 * @param len  Length of the userId
 289 * @returns The printable string (static data!!)
 290 */
 291static char *netiucv_printname(char *name, int len)
 292{
 293        static char tmp[17];
 294        char *p = tmp;
 295        memcpy(tmp, name, len);
 296        tmp[len] = '\0';
 297        while (*p && ((p - tmp) < len) && (!isspace(*p)))
 298                p++;
 299        *p = '\0';
 300        return tmp;
 301}
 302
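/**
 * Build a printable identification of a connection's peer: "userid" alone
 * when the default iucvMagic user data is set, otherwise "userid.userdata".
 *
 * @param conn The connection to describe
 *
 * @returns The printable string (static data!!)
 */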
 303static char *netiucv_printuser(struct iucv_connection *conn)
 304{
 305        static char tmp_uid[9];
 306        static char tmp_udat[17];
 307        static char buf[100];
 308
 309        if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
 310                tmp_uid[8] = '\0';
 311                tmp_udat[16] = '\0';
 312                memcpy(tmp_uid, conn->userid, 8);
 313                memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
 314                memcpy(tmp_udat, conn->userdata, 16);
 315                EBCASC(tmp_udat, 16);
 316                memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
 317                sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
 318                return buf;
 319        } else
 320                return netiucv_printname(conn->userid, 8);
 321}
 322
 323/**
 324 * States of the interface statemachine.
 325 */
 326enum dev_states {
 327        DEV_STATE_STOPPED,
 328        DEV_STATE_STARTWAIT,
 329        DEV_STATE_STOPWAIT,
 330        DEV_STATE_RUNNING,
 331        /**
  332         * MUST always be the last element!
 333         */
 334        NR_DEV_STATES
 335};
 336
 337static const char *dev_state_names[] = {
 338        "Stopped",
 339        "StartWait",
 340        "StopWait",
 341        "Running",
 342};
 343
 344/**
 345 * Events of the interface statemachine.
 346 */
 347enum dev_events {
 348        DEV_EVENT_START,
 349        DEV_EVENT_STOP,
 350        DEV_EVENT_CONUP,
 351        DEV_EVENT_CONDOWN,
 352        /**
  353         * MUST always be the last element!
 354         */
 355        NR_DEV_EVENTS
 356};
 357
 358static const char *dev_event_names[] = {
 359        "Start",
 360        "Stop",
 361        "Connection up",
 362        "Connection down",
 363};
 364
 365/**
 366 * Events of the connection statemachine
 367 */
 368enum conn_events {
 369        /**
  370         * Events representing callbacks from the
  371         * lowlevel iucv layer
 372         */
 373        CONN_EVENT_CONN_REQ,
 374        CONN_EVENT_CONN_ACK,
 375        CONN_EVENT_CONN_REJ,
 376        CONN_EVENT_CONN_SUS,
 377        CONN_EVENT_CONN_RES,
 378        CONN_EVENT_RX,
 379        CONN_EVENT_TXDONE,
 380
 381        /**
  382         * Events representing error return codes from
  383         * calls to the lowlevel iucv layer
 384         */
 385
 386        /**
  387         * Event representing timer expiry.
 388         */
 389        CONN_EVENT_TIMER,
 390
 391        /**
  392         * Events representing commands from upper levels.
 393         */
 394        CONN_EVENT_START,
 395        CONN_EVENT_STOP,
 396
 397        /**
  398         * MUST always be the last element!
 399         */
 400        NR_CONN_EVENTS,
 401};
 402
 403static const char *conn_event_names[] = {
 404        "Remote connection request",
 405        "Remote connection acknowledge",
 406        "Remote connection reject",
 407        "Connection suspended",
 408        "Connection resumed",
 409        "Data received",
 410        "Data sent",
 411
 412        "Timer",
 413
 414        "Start",
 415        "Stop",
 416};
 417
 418/**
 419 * States of the connection statemachine.
 420 */
 421enum conn_states {
 422        /**
 423         * Connection not assigned to any device,
 424         * initial state, invalid
 425         */
 426        CONN_STATE_INVALID,
 427
 428        /**
 429         * Userid assigned but not operating
 430         */
 431        CONN_STATE_STOPPED,
 432
 433        /**
 434         * Connection registered,
 435         * no connection request sent yet,
 436         * no connection request received
 437         */
 438        CONN_STATE_STARTWAIT,
 439
 440        /**
 441         * Connection registered and connection request sent,
 442         * no acknowledge and no connection request received yet.
 443         */
 444        CONN_STATE_SETUPWAIT,
 445
 446        /**
 447         * Connection up and running idle
 448         */
 449        CONN_STATE_IDLE,
 450
 451        /**
 452         * Data sent, awaiting CONN_EVENT_TXDONE
 453         */
 454        CONN_STATE_TX,
 455
 456        /**
 457         * Error during registration.
 458         */
 459        CONN_STATE_REGERR,
 460
 461        /**
  462         * Error during connection setup.
 463         */
 464        CONN_STATE_CONNERR,
 465
 466        /**
  467         * MUST always be the last element!
 468         */
 469        NR_CONN_STATES,
 470};
 471
 472static const char *conn_state_names[] = {
 473        "Invalid",
 474        "Stopped",
 475        "StartWait",
 476        "SetupWait",
 477        "Idle",
 478        "TX",
 480        "Registration error",
 481        "Connect error",
 482};
 483
 484
 485/**
 486 * Debug Facility Stuff
 487 */
 488static debug_info_t *iucv_dbf_setup = NULL;
 489static debug_info_t *iucv_dbf_data = NULL;
 490static debug_info_t *iucv_dbf_trace = NULL;
 491
 492DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
 493
 494static void iucv_unregister_dbf_views(void)
 495{
 496        if (iucv_dbf_setup)
 497                debug_unregister(iucv_dbf_setup);
 498        if (iucv_dbf_data)
 499                debug_unregister(iucv_dbf_data);
 500        if (iucv_dbf_trace)
 501                debug_unregister(iucv_dbf_trace);
 502}
 503static int iucv_register_dbf_views(void)
 504{
 505        iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
 506                                        IUCV_DBF_SETUP_PAGES,
 507                                        IUCV_DBF_SETUP_NR_AREAS,
 508                                        IUCV_DBF_SETUP_LEN);
 509        iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
 510                                       IUCV_DBF_DATA_PAGES,
 511                                       IUCV_DBF_DATA_NR_AREAS,
 512                                       IUCV_DBF_DATA_LEN);
 513        iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
 514                                        IUCV_DBF_TRACE_PAGES,
 515                                        IUCV_DBF_TRACE_NR_AREAS,
 516                                        IUCV_DBF_TRACE_LEN);
 517
 518        if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
 519            (iucv_dbf_trace == NULL)) {
 520                iucv_unregister_dbf_views();
 521                return -ENOMEM;
 522        }
 523        debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
 524        debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
 525
 526        debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
 527        debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
 528
 529        debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
 530        debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
 531
 532        return 0;
 533}
 534
 535/*
 536 * Callback-wrappers, called from lowlevel iucv layer.
 537 */
 538
 539static void netiucv_callback_rx(struct iucv_path *path,
 540                                struct iucv_message *msg)
 541{
 542        struct iucv_connection *conn = path->private;
 543        struct iucv_event ev;
 544
 545        ev.conn = conn;
 546        ev.data = msg;
 547        fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
 548}
 549
 550static void netiucv_callback_txdone(struct iucv_path *path,
 551                                    struct iucv_message *msg)
 552{
 553        struct iucv_connection *conn = path->private;
 554        struct iucv_event ev;
 555
 556        ev.conn = conn;
 557        ev.data = msg;
 558        fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
 559}
 560
 561static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
 562{
 563        struct iucv_connection *conn = path->private;
 564
 565        fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
 566}
 567
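/**
 * path_pending callback: a peer requests an IUCV connection.
 * Look for a registered connection that matches the requesting userid and
 * user data; if one is found, remember the path and hand the request to
 * the connection statemachine, otherwise return -EINVAL so the path is
 * refused.
 */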
 568static int netiucv_callback_connreq(struct iucv_path *path,
 569                                    u8 ipvmid[8], u8 ipuser[16])
 570{
 571        struct iucv_connection *conn = path->private;
 572        struct iucv_event ev;
 573        static char tmp_user[9];
 574        static char tmp_udat[17];
 575        int rc;
 576
 577        rc = -EINVAL;
 578        memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
 579        memcpy(tmp_udat, ipuser, 16);
 580        EBCASC(tmp_udat, 16);
 581        read_lock_bh(&iucv_connection_rwlock);
 582        list_for_each_entry(conn, &iucv_connection_list, list) {
 583                if (strncmp(ipvmid, conn->userid, 8) ||
 584                    strncmp(ipuser, conn->userdata, 16))
 585                        continue;
 586                /* Found a matching connection for this path. */
 587                conn->path = path;
 588                ev.conn = conn;
 589                ev.data = path;
 590                fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
 591                rc = 0;
 592        }
 593        IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
 594                       tmp_user, netiucv_printname(tmp_udat, 16));
 595        read_unlock_bh(&iucv_connection_rwlock);
 596        return rc;
 597}
 598
 599static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
 600{
 601        struct iucv_connection *conn = path->private;
 602
 603        fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
 604}
 605
 606static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
 607{
 608        struct iucv_connection *conn = path->private;
 609
 610        fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
 611}
 612
 613static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
 614{
 615        struct iucv_connection *conn = path->private;
 616
 617        fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
 618}
 619
 620/**
 621 * NOP action for statemachines
 622 */
 623static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
 624{
 625}
 626
 627/*
 628 * Actions of the connection statemachine
 629 */
 630
 631/**
 632 * netiucv_unpack_skb
 633 * @conn: The connection where this skb has been received.
 634 * @pskb: The received skb.
 635 *
 636 * Unpack a just received skb and hand it over to upper layers.
 637 * Helper function for conn_action_rx.
 638 */
 639static void netiucv_unpack_skb(struct iucv_connection *conn,
 640                               struct sk_buff *pskb)
 641{
 642        struct net_device     *dev = conn->netdev;
 643        struct netiucv_priv   *privptr = netdev_priv(dev);
 644        u16 offset = 0;
 645
 646        skb_put(pskb, NETIUCV_HDRLEN);
 647        pskb->dev = dev;
 648        pskb->ip_summed = CHECKSUM_NONE;
 649        pskb->protocol = ntohs(ETH_P_IP);
 650
 651        while (1) {
 652                struct sk_buff *skb;
 653                struct ll_header *header = (struct ll_header *) pskb->data;
 654
 655                if (!header->next)
 656                        break;
 657
 658                skb_pull(pskb, NETIUCV_HDRLEN);
 659                header->next -= offset;
 660                offset += header->next;
 661                header->next -= NETIUCV_HDRLEN;
 662                if (skb_tailroom(pskb) < header->next) {
 663                        IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
 664                                header->next, skb_tailroom(pskb));
 665                        return;
 666                }
 667                skb_put(pskb, header->next);
 668                skb_reset_mac_header(pskb);
 669                skb = dev_alloc_skb(pskb->len);
 670                if (!skb) {
 671                        IUCV_DBF_TEXT(data, 2,
 672                                "Out of memory in netiucv_unpack_skb\n");
 673                        privptr->stats.rx_dropped++;
 674                        return;
 675                }
 676                skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
 677                                          pskb->len);
 678                skb_reset_mac_header(skb);
 679                skb->dev = pskb->dev;
 680                skb->protocol = pskb->protocol;
  681                skb->ip_summed = CHECKSUM_UNNECESSARY;
 682                privptr->stats.rx_packets++;
 683                privptr->stats.rx_bytes += skb->len;
 684                /*
 685                 * Since receiving is always initiated from a tasklet (in iucv.c),
 686                 * we must use netif_rx_ni() instead of netif_rx()
 687                 */
 688                netif_rx_ni(skb);
 689                skb_pull(pskb, header->next);
 690                skb_put(pskb, NETIUCV_HDRLEN);
 691        }
 692}
 693
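/**
 * Handle CONN_EVENT_RX: reject messages for unlinked connections or
 * oversized messages, otherwise receive the data into rx_buff and unpack
 * the contained packets.
 */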
 694static void conn_action_rx(fsm_instance *fi, int event, void *arg)
 695{
 696        struct iucv_event *ev = arg;
 697        struct iucv_connection *conn = ev->conn;
 698        struct iucv_message *msg = ev->data;
 699        struct netiucv_priv *privptr = netdev_priv(conn->netdev);
 700        int rc;
 701
 702        IUCV_DBF_TEXT(trace, 4, __func__);
 703
 704        if (!conn->netdev) {
 705                iucv_message_reject(conn->path, msg);
 706                IUCV_DBF_TEXT(data, 2,
 707                              "Received data for unlinked connection\n");
 708                return;
 709        }
 710        if (msg->length > conn->max_buffsize) {
 711                iucv_message_reject(conn->path, msg);
 712                privptr->stats.rx_dropped++;
 713                IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
 714                               msg->length, conn->max_buffsize);
 715                return;
 716        }
 717        conn->rx_buff->data = conn->rx_buff->head;
 718        skb_reset_tail_pointer(conn->rx_buff);
 719        conn->rx_buff->len = 0;
 720        rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
 721                                  msg->length, NULL);
 722        if (rc || msg->length < 5) {
 723                privptr->stats.rx_errors++;
 724                IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
 725                return;
 726        }
 727        netiucv_unpack_skb(conn, conn->rx_buff);
 728}
 729
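/**
 * Handle CONN_EVENT_TXDONE: release the skb whose transmission just
 * completed, then drain the collect queue into tx_buff and send the
 * collected packets as one multi-packet IUCV message.
 */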
 730static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 731{
 732        struct iucv_event *ev = arg;
 733        struct iucv_connection *conn = ev->conn;
 734        struct iucv_message *msg = ev->data;
 735        struct iucv_message txmsg;
 736        struct netiucv_priv *privptr = NULL;
 737        u32 single_flag = msg->tag;
 738        u32 txbytes = 0;
 739        u32 txpackets = 0;
 740        u32 stat_maxcq = 0;
 741        struct sk_buff *skb;
 742        unsigned long saveflags;
 743        struct ll_header header;
 744        int rc;
 745
 746        IUCV_DBF_TEXT(trace, 4, __func__);
 747
 748        if (conn && conn->netdev)
 749                privptr = netdev_priv(conn->netdev);
 750        conn->prof.tx_pending--;
 751        if (single_flag) {
 752                if ((skb = skb_dequeue(&conn->commit_queue))) {
 753                        atomic_dec(&skb->users);
 754                        if (privptr) {
 755                                privptr->stats.tx_packets++;
 756                                privptr->stats.tx_bytes +=
 757                                        (skb->len - NETIUCV_HDRLEN
 758                                                  - NETIUCV_HDRLEN);
 759                        }
 760                        dev_kfree_skb_any(skb);
 761                }
 762        }
 763        conn->tx_buff->data = conn->tx_buff->head;
 764        skb_reset_tail_pointer(conn->tx_buff);
 765        conn->tx_buff->len = 0;
 766        spin_lock_irqsave(&conn->collect_lock, saveflags);
 767        while ((skb = skb_dequeue(&conn->collect_queue))) {
 768                header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
 769                memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
 770                       NETIUCV_HDRLEN);
 771                skb_copy_from_linear_data(skb,
 772                                          skb_put(conn->tx_buff, skb->len),
 773                                          skb->len);
 774                txbytes += skb->len;
 775                txpackets++;
 776                stat_maxcq++;
 777                atomic_dec(&skb->users);
 778                dev_kfree_skb_any(skb);
 779        }
 780        if (conn->collect_len > conn->prof.maxmulti)
 781                conn->prof.maxmulti = conn->collect_len;
 782        conn->collect_len = 0;
 783        spin_unlock_irqrestore(&conn->collect_lock, saveflags);
 784        if (conn->tx_buff->len == 0) {
 785                fsm_newstate(fi, CONN_STATE_IDLE);
 786                return;
 787        }
 788
 789        header.next = 0;
 790        memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
 791        conn->prof.send_stamp = current_kernel_time();
 792        txmsg.class = 0;
 793        txmsg.tag = 0;
 794        rc = iucv_message_send(conn->path, &txmsg, 0, 0,
 795                               conn->tx_buff->data, conn->tx_buff->len);
 796        conn->prof.doios_multi++;
 797        conn->prof.txlen += conn->tx_buff->len;
 798        conn->prof.tx_pending++;
 799        if (conn->prof.tx_pending > conn->prof.tx_max_pending)
 800                conn->prof.tx_max_pending = conn->prof.tx_pending;
 801        if (rc) {
 802                conn->prof.tx_pending--;
 803                fsm_newstate(fi, CONN_STATE_IDLE);
 804                if (privptr)
 805                        privptr->stats.tx_errors += txpackets;
 806                IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
 807        } else {
 808                if (privptr) {
 809                        privptr->stats.tx_packets += txpackets;
 810                        privptr->stats.tx_bytes += txbytes;
 811                }
 812                if (stat_maxcq > conn->prof.maxcqueue)
 813                        conn->prof.maxcqueue = stat_maxcq;
 814        }
 815}
 816
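/**
 * Accept an incoming path for this connection and, on success, tell the
 * interface statemachine that the connection is up.
 */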
 817static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
 818{
 819        struct iucv_event *ev = arg;
 820        struct iucv_connection *conn = ev->conn;
 821        struct iucv_path *path = ev->data;
 822        struct net_device *netdev = conn->netdev;
 823        struct netiucv_priv *privptr = netdev_priv(netdev);
 824        int rc;
 825
 826        IUCV_DBF_TEXT(trace, 3, __func__);
 827
 828        conn->path = path;
 829        path->msglim = NETIUCV_QUEUELEN_DEFAULT;
 830        path->flags = 0;
  831        rc = iucv_path_accept(path, &netiucv_handler, conn->userdata, conn);
 832        if (rc) {
 833                IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
 834                return;
 835        }
 836        fsm_newstate(fi, CONN_STATE_IDLE);
 837        netdev->tx_queue_len = conn->path->msglim;
 838        fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
 839}
 840
 841static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
 842{
 843        struct iucv_event *ev = arg;
 844        struct iucv_path *path = ev->data;
 845
 846        IUCV_DBF_TEXT(trace, 3, __func__);
 847        iucv_path_sever(path, NULL);
 848}
 849
 850static void conn_action_connack(fsm_instance *fi, int event, void *arg)
 851{
 852        struct iucv_connection *conn = arg;
 853        struct net_device *netdev = conn->netdev;
 854        struct netiucv_priv *privptr = netdev_priv(netdev);
 855
 856        IUCV_DBF_TEXT(trace, 3, __func__);
 857        fsm_deltimer(&conn->timer);
 858        fsm_newstate(fi, CONN_STATE_IDLE);
 859        netdev->tx_queue_len = conn->path->msglim;
 860        fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
 861}
 862
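/**
 * Connection setup timed out: sever the path and fall back to
 * CONN_STATE_STARTWAIT so a new attempt can be made.
 */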
 863static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
 864{
 865        struct iucv_connection *conn = arg;
 866
 867        IUCV_DBF_TEXT(trace, 3, __func__);
 868        fsm_deltimer(&conn->timer);
 869        iucv_path_sever(conn->path, conn->userdata);
 870        fsm_newstate(fi, CONN_STATE_STARTWAIT);
 871}
 872
 873static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
 874{
 875        struct iucv_connection *conn = arg;
 876        struct net_device *netdev = conn->netdev;
 877        struct netiucv_priv *privptr = netdev_priv(netdev);
 878
 879        IUCV_DBF_TEXT(trace, 3, __func__);
 880
 881        fsm_deltimer(&conn->timer);
 882        iucv_path_sever(conn->path, conn->userdata);
 883        dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
 884                               "connection\n", netiucv_printuser(conn));
 885        IUCV_DBF_TEXT(data, 2,
 886                      "conn_action_connsever: Remote dropped connection\n");
 887        fsm_newstate(fi, CONN_STATE_STARTWAIT);
 888        fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
 889}
 890
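/**
 * Handle CONN_EVENT_START: allocate an IUCV path and try to connect to
 * the configured peer. The IUCV return codes 11-15 are mapped to
 * operator messages; on failure the path is freed again.
 */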
 891static void conn_action_start(fsm_instance *fi, int event, void *arg)
 892{
 893        struct iucv_connection *conn = arg;
 894        struct net_device *netdev = conn->netdev;
 895        struct netiucv_priv *privptr = netdev_priv(netdev);
 896        int rc;
 897
 898        IUCV_DBF_TEXT(trace, 3, __func__);
 899
 900        fsm_newstate(fi, CONN_STATE_STARTWAIT);
 901
 902        /*
 903         * We must set the state before calling iucv_connect because the
 904         * callback handler could be called at any point after the connection
 905         * request is sent
 906         */
 907
 908        fsm_newstate(fi, CONN_STATE_SETUPWAIT);
 909        conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
 910        IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
 911                netdev->name, netiucv_printuser(conn));
 912
 913        rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
 914                               NULL, conn->userdata, conn);
 915        switch (rc) {
 916        case 0:
 917                netdev->tx_queue_len = conn->path->msglim;
 918                fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
 919                             CONN_EVENT_TIMER, conn);
 920                return;
 921        case 11:
 922                dev_warn(privptr->dev,
 923                        "The IUCV device failed to connect to z/VM guest %s\n",
 924                        netiucv_printname(conn->userid, 8));
 925                fsm_newstate(fi, CONN_STATE_STARTWAIT);
 926                break;
 927        case 12:
 928                dev_warn(privptr->dev,
 929                        "The IUCV device failed to connect to the peer on z/VM"
 930                        " guest %s\n", netiucv_printname(conn->userid, 8));
 931                fsm_newstate(fi, CONN_STATE_STARTWAIT);
 932                break;
 933        case 13:
 934                dev_err(privptr->dev,
 935                        "Connecting the IUCV device would exceed the maximum"
 936                        " number of IUCV connections\n");
 937                fsm_newstate(fi, CONN_STATE_CONNERR);
 938                break;
 939        case 14:
 940                dev_err(privptr->dev,
 941                        "z/VM guest %s has too many IUCV connections"
 942                        " to connect with the IUCV device\n",
 943                        netiucv_printname(conn->userid, 8));
 944                fsm_newstate(fi, CONN_STATE_CONNERR);
 945                break;
 946        case 15:
 947                dev_err(privptr->dev,
 948                        "The IUCV device cannot connect to a z/VM guest with no"
 949                        " IUCV authorization\n");
 950                fsm_newstate(fi, CONN_STATE_CONNERR);
 951                break;
 952        default:
 953                dev_err(privptr->dev,
 954                        "Connecting the IUCV device failed with error %d\n",
 955                        rc);
 956                fsm_newstate(fi, CONN_STATE_CONNERR);
 957                break;
 958        }
 959        IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
 960        kfree(conn->path);
 961        conn->path = NULL;
 962}
 963
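/**
 * Drop all skbs queued on @q, releasing the reference that was taken
 * when they were queued.
 */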
 964static void netiucv_purge_skb_queue(struct sk_buff_head *q)
 965{
 966        struct sk_buff *skb;
 967
 968        while ((skb = skb_dequeue(q))) {
 969                atomic_dec(&skb->users);
 970                dev_kfree_skb_any(skb);
 971        }
 972}
 973
 974static void conn_action_stop(fsm_instance *fi, int event, void *arg)
 975{
 976        struct iucv_event *ev = arg;
 977        struct iucv_connection *conn = ev->conn;
 978        struct net_device *netdev = conn->netdev;
 979        struct netiucv_priv *privptr = netdev_priv(netdev);
 980
 981        IUCV_DBF_TEXT(trace, 3, __func__);
 982
 983        fsm_deltimer(&conn->timer);
 984        fsm_newstate(fi, CONN_STATE_STOPPED);
 985        netiucv_purge_skb_queue(&conn->collect_queue);
 986        if (conn->path) {
 987                IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
 988                iucv_path_sever(conn->path, conn->userdata);
 989                kfree(conn->path);
 990                conn->path = NULL;
 991        }
 992        netiucv_purge_skb_queue(&conn->commit_queue);
 993        fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
 994}
 995
 996static void conn_action_inval(fsm_instance *fi, int event, void *arg)
 997{
 998        struct iucv_connection *conn = arg;
 999        struct net_device *netdev = conn->netdev;
1000
1001        IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
1002                netdev->name, conn->userid);
1003}
1004
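/*
 * Transition table of the connection statemachine: each entry maps a
 * (state, event) pair to the action to be executed.
 */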
1005static const fsm_node conn_fsm[] = {
1006        { CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
1007        { CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
1008
1009        { CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
1010        { CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
1011        { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
1012        { CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
1013        { CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
1014        { CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
1015        { CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
1016
1017        { CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
1018        { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1019        { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1020        { CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
1021        { CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
1022
1023        { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
1024        { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
1025
1026        { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
1027        { CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
1028        { CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
1029
1030        { CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
1031        { CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
1032
1033        { CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
1034        { CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
1035};
1036
 1037static const int CONN_FSM_LEN = ARRAY_SIZE(conn_fsm);
1038
1039
1040/*
1041 * Actions for interface - statemachine.
1042 */
1043
1044/**
1045 * dev_action_start
1046 * @fi: An instance of an interface statemachine.
 1047 * @event: The event that just happened.
 1048 * @arg: Generic pointer, cast from struct net_device * upon call.
1049 *
1050 * Startup connection by sending CONN_EVENT_START to it.
1051 */
1052static void dev_action_start(fsm_instance *fi, int event, void *arg)
1053{
1054        struct net_device   *dev = arg;
1055        struct netiucv_priv *privptr = netdev_priv(dev);
1056
1057        IUCV_DBF_TEXT(trace, 3, __func__);
1058
1059        fsm_newstate(fi, DEV_STATE_STARTWAIT);
1060        fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1061}
1062
1063/**
1064 * Shutdown connection by sending CONN_EVENT_STOP to it.
1065 *
1066 * @param fi    An instance of an interface statemachine.
 1067 * @param event The event that just happened.
 1068 * @param arg   Generic pointer, cast from struct net_device * upon call.
1069 */
1070static void
1071dev_action_stop(fsm_instance *fi, int event, void *arg)
1072{
1073        struct net_device   *dev = arg;
1074        struct netiucv_priv *privptr = netdev_priv(dev);
1075        struct iucv_event   ev;
1076
1077        IUCV_DBF_TEXT(trace, 3, __func__);
1078
1079        ev.conn = privptr->conn;
1080
1081        fsm_newstate(fi, DEV_STATE_STOPWAIT);
1082        fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1083}
1084
1085/**
1086 * Called from connection statemachine
1087 * when a connection is up and running.
1088 *
1089 * @param fi    An instance of an interface statemachine.
 1090 * @param event The event that just happened.
 1091 * @param arg   Generic pointer, cast from struct net_device * upon call.
1092 */
1093static void
1094dev_action_connup(fsm_instance *fi, int event, void *arg)
1095{
1096        struct net_device   *dev = arg;
1097        struct netiucv_priv *privptr = netdev_priv(dev);
1098
1099        IUCV_DBF_TEXT(trace, 3, __func__);
1100
1101        switch (fsm_getstate(fi)) {
1102                case DEV_STATE_STARTWAIT:
1103                        fsm_newstate(fi, DEV_STATE_RUNNING);
1104                        dev_info(privptr->dev,
1105                                "The IUCV device has been connected"
1106                                " successfully to %s\n",
1107                                netiucv_printuser(privptr->conn));
1108                        IUCV_DBF_TEXT(setup, 3,
1109                                "connection is up and running\n");
1110                        break;
1111                case DEV_STATE_STOPWAIT:
1112                        IUCV_DBF_TEXT(data, 2,
1113                                "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1114                        break;
1115        }
1116}
1117
1118/**
1119 * Called from connection statemachine
1120 * when a connection has been shutdown.
1121 *
1122 * @param fi    An instance of an interface statemachine.
 1123 * @param event The event that just happened.
 1124 * @param arg   Generic pointer, cast from struct net_device * upon call.
1125 */
1126static void
1127dev_action_conndown(fsm_instance *fi, int event, void *arg)
1128{
1129        IUCV_DBF_TEXT(trace, 3, __func__);
1130
1131        switch (fsm_getstate(fi)) {
1132                case DEV_STATE_RUNNING:
1133                        fsm_newstate(fi, DEV_STATE_STARTWAIT);
1134                        break;
1135                case DEV_STATE_STOPWAIT:
1136                        fsm_newstate(fi, DEV_STATE_STOPPED);
1137                        IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1138                        break;
1139        }
1140}
1141
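/*
 * Transition table of the interface statemachine.
 */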
1142static const fsm_node dev_fsm[] = {
1143        { DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
1144
1145        { DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
1146        { DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
1147
1148        { DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
1149        { DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
1150
1151        { DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
1152        { DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
1153        { DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
1154};
1155
 1156static const int DEV_FSM_LEN = ARRAY_SIZE(dev_fsm);
1157
1158/**
1159 * Transmit a packet.
1160 * This is a helper function for netiucv_tx().
1161 *
1162 * @param conn Connection to be used for sending.
1163 * @param skb Pointer to struct sk_buff of packet to send.
1164 *            The linklevel header has already been set up
1165 *            by netiucv_tx().
1166 *
 1167 * @return 0 on success, -ERRNO on failure.
1168 */
1169static int netiucv_transmit_skb(struct iucv_connection *conn,
1170                                struct sk_buff *skb)
1171{
1172        struct iucv_message msg;
1173        unsigned long saveflags;
1174        struct ll_header header;
1175        int rc;
1176
1177        if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1178                int l = skb->len + NETIUCV_HDRLEN;
1179
1180                spin_lock_irqsave(&conn->collect_lock, saveflags);
1181                if (conn->collect_len + l >
1182                    (conn->max_buffsize - NETIUCV_HDRLEN)) {
1183                        rc = -EBUSY;
1184                        IUCV_DBF_TEXT(data, 2,
1185                                      "EBUSY from netiucv_transmit_skb\n");
1186                } else {
1187                        atomic_inc(&skb->users);
1188                        skb_queue_tail(&conn->collect_queue, skb);
1189                        conn->collect_len += l;
1190                        rc = 0;
1191                }
1192                spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1193        } else {
1194                struct sk_buff *nskb = skb;
1195                /**
 1196                 * Copy the skb to a newly allocated skb in lowmem only if the
 1197                 * data is located above 2G in memory or the tailroom is < 2.
1198                 */
1199                unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1200                                    NETIUCV_HDRLEN)) >> 31;
1201                int copied = 0;
1202                if (hi || (skb_tailroom(skb) < 2)) {
1203                        nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1204                                         NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1205                        if (!nskb) {
1206                                IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1207                                rc = -ENOMEM;
1208                                return rc;
1209                        } else {
1210                                skb_reserve(nskb, NETIUCV_HDRLEN);
1211                                memcpy(skb_put(nskb, skb->len),
1212                                       skb->data, skb->len);
1213                        }
1214                        copied = 1;
1215                }
1216                /**
 1217                 * The skb is now below 2G and has enough room. Add the headers.
1218                 */
1219                header.next = nskb->len + NETIUCV_HDRLEN;
1220                memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1221                header.next = 0;
1222                memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header,  NETIUCV_HDRLEN);
1223
1224                fsm_newstate(conn->fsm, CONN_STATE_TX);
1225                conn->prof.send_stamp = current_kernel_time();
1226
1227                msg.tag = 1;
1228                msg.class = 0;
1229                rc = iucv_message_send(conn->path, &msg, 0, 0,
1230                                       nskb->data, nskb->len);
1231                conn->prof.doios_single++;
1232                conn->prof.txlen += skb->len;
1233                conn->prof.tx_pending++;
1234                if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1235                        conn->prof.tx_max_pending = conn->prof.tx_pending;
1236                if (rc) {
1237                        struct netiucv_priv *privptr;
1238                        fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1239                        conn->prof.tx_pending--;
1240                        privptr = netdev_priv(conn->netdev);
1241                        if (privptr)
1242                                privptr->stats.tx_errors++;
1243                        if (copied)
1244                                dev_kfree_skb(nskb);
1245                        else {
1246                                /**
1247                                 * Remove our headers. They get added
1248                                 * again on retransmit.
1249                                 */
1250                                skb_pull(skb, NETIUCV_HDRLEN);
1251                                skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1252                        }
1253                        IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1254                } else {
1255                        if (copied)
1256                                dev_kfree_skb(skb);
1257                        atomic_inc(&nskb->users);
1258                        skb_queue_tail(&conn->commit_queue, nskb);
1259                }
1260        }
1261
1262        return rc;
1263}
1264
1265/*
1266 * Interface API for upper network layers
1267 */
1268
1269/**
1270 * Open an interface.
1271 * Called from generic network layer when ifconfig up is run.
1272 *
1273 * @param dev Pointer to interface struct.
1274 *
1275 * @return 0 on success, -ERRNO on failure. (Never fails.)
1276 */
1277static int netiucv_open(struct net_device *dev)
1278{
1279        struct netiucv_priv *priv = netdev_priv(dev);
1280
1281        fsm_event(priv->fsm, DEV_EVENT_START, dev);
1282        return 0;
1283}
1284
1285/**
1286 * Close an interface.
1287 * Called from generic network layer when ifconfig down is run.
1288 *
1289 * @param dev Pointer to interface struct.
1290 *
1291 * @return 0 on success, -ERRNO on failure. (Never fails.)
1292 */
1293static int netiucv_close(struct net_device *dev)
1294{
1295        struct netiucv_priv *priv = netdev_priv(dev);
1296
1297        fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1298        return 0;
1299}
1300
1301static int netiucv_pm_prepare(struct device *dev)
1302{
1303        IUCV_DBF_TEXT(trace, 3, __func__);
1304        return 0;
1305}
1306
1307static void netiucv_pm_complete(struct device *dev)
1308{
1309        IUCV_DBF_TEXT(trace, 3, __func__);
1310        return;
1311}
1312
1313/**
1314 * netiucv_pm_freeze() - Freeze PM callback
1315 * @dev:        netiucv device
1316 *
1317 * close open netiucv interfaces
1318 */
1319static int netiucv_pm_freeze(struct device *dev)
1320{
1321        struct netiucv_priv *priv = dev_get_drvdata(dev);
1322        struct net_device *ndev = NULL;
1323        int rc = 0;
1324
1325        IUCV_DBF_TEXT(trace, 3, __func__);
1326        if (priv && priv->conn)
1327                ndev = priv->conn->netdev;
1328        if (!ndev)
1329                goto out;
1330        netif_device_detach(ndev);
1331        priv->pm_state = fsm_getstate(priv->fsm);
1332        rc = netiucv_close(ndev);
1333out:
1334        return rc;
1335}
1336
1337/**
1338 * netiucv_pm_restore_thaw() - Thaw and restore PM callback
1339 * @dev:        netiucv device
1340 *
1341 * re-open netiucv interfaces closed during freeze
1342 */
1343static int netiucv_pm_restore_thaw(struct device *dev)
1344{
1345        struct netiucv_priv *priv = dev_get_drvdata(dev);
1346        struct net_device *ndev = NULL;
1347        int rc = 0;
1348
1349        IUCV_DBF_TEXT(trace, 3, __func__);
1350        if (priv && priv->conn)
1351                ndev = priv->conn->netdev;
1352        if (!ndev)
1353                goto out;
1354        switch (priv->pm_state) {
1355        case DEV_STATE_RUNNING:
1356        case DEV_STATE_STARTWAIT:
1357                rc = netiucv_open(ndev);
1358                break;
1359        default:
1360                break;
1361        }
1362        netif_device_attach(ndev);
1363out:
1364        return rc;
1365}
1366
1367/**
1368 * Start transmission of a packet.
1369 * Called from generic network device layer.
1370 *
1371 * @param skb Pointer to buffer containing the packet.
1372 * @param dev Pointer to interface struct.
1373 *
 1374 * @return NETDEV_TX_OK if the packet was consumed, NETDEV_TX_BUSY if it was
 1375 *         rejected. Note: a rejected packet is retained and retried by the
 1376 *         generic network layer.
1377 */
1378static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1379{
1380        struct netiucv_priv *privptr = netdev_priv(dev);
1381        int rc;
1382
1383        IUCV_DBF_TEXT(trace, 4, __func__);
1384        /**
1385         * Some sanity checks ...
1386         */
1387        if (skb == NULL) {
1388                IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1389                privptr->stats.tx_dropped++;
1390                return NETDEV_TX_OK;
1391        }
1392        if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1393                IUCV_DBF_TEXT(data, 2,
1394                        "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1395                dev_kfree_skb(skb);
1396                privptr->stats.tx_dropped++;
1397                return NETDEV_TX_OK;
1398        }
1399
1400        /**
 1401         * If the connection is not running, drop the packet and
 1402         * account it as a carrier error.
1403         */
1404        if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1405                dev_kfree_skb(skb);
1406                privptr->stats.tx_dropped++;
1407                privptr->stats.tx_errors++;
1408                privptr->stats.tx_carrier_errors++;
1409                return NETDEV_TX_OK;
1410        }
1411
1412        if (netiucv_test_and_set_busy(dev)) {
1413                IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1414                return NETDEV_TX_BUSY;
1415        }
1416        dev->trans_start = jiffies;
1417        rc = netiucv_transmit_skb(privptr->conn, skb);
1418        netiucv_clear_busy(dev);
1419        return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1420}
1421
1422/**
1423 * netiucv_stats
1424 * @dev: Pointer to interface struct.
1425 *
1426 * Returns interface statistics of a device.
1427 *
1428 * Returns pointer to stats struct of this interface.
1429 */
1430static struct net_device_stats *netiucv_stats (struct net_device * dev)
1431{
1432        struct netiucv_priv *priv = netdev_priv(dev);
1433
1434        IUCV_DBF_TEXT(trace, 5, __func__);
1435        return &priv->stats;
1436}
1437
1438/**
1439 * netiucv_change_mtu
1440 * @dev: Pointer to interface struct.
1441 * @new_mtu: The new MTU to use for this interface.
1442 *
1443 * Sets MTU of an interface.
1444 *
1445 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1446 *         (valid range is 576 .. NETIUCV_MTU_MAX).
1447 */
1448static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1449{
1450        IUCV_DBF_TEXT(trace, 3, __func__);
1451        if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1452                IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1453                return -EINVAL;
1454        }
1455        dev->mtu = new_mtu;
1456        return 0;
1457}
1458
1459/*
1460 * attributes in sysfs
1461 */
1462
1463static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1464                         char *buf)
1465{
1466        struct netiucv_priv *priv = dev_get_drvdata(dev);
1467
1468        IUCV_DBF_TEXT(trace, 5, __func__);
1469        return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1470}
1471
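/**
 * Parse a value written to the "user" attribute: up to 8 characters of
 * z/VM userid, optionally followed by '.' and up to 16 characters of user
 * data. Both parts are folded to upper case and blank-padded; the user
 * data is converted to EBCDIC.
 */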
1472static int netiucv_check_user(const char *buf, size_t count, char *username,
1473                              char *userdata)
1474{
1475        const char *p;
1476        int i;
1477
1478        p = strchr(buf, '.');
1479        if ((p && ((count > 26) ||
1480                   ((p - buf) > 8) ||
1481                   (buf + count - p > 18))) ||
1482            (!p && (count > 9))) {
1483                IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1484                return -EINVAL;
1485        }
1486
1487        for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
1488                if (isalnum(*p) || *p == '$') {
1489                        username[i] = toupper(*p);
1490                        continue;
1491                }
1492                if (*p == '\n')
1493                        /* trailing lf, grr */
1494                        break;
1495                IUCV_DBF_TEXT_(setup, 2,
1496                               "conn_write: invalid character %02x\n", *p);
1497                return -EINVAL;
1498        }
1499        while (i < 8)
1500                username[i++] = ' ';
1501        username[8] = '\0';
1502
1503        if (*p == '.') {
1504                p++;
1505                for (i = 0; i < 16 && *p; i++, p++) {
1506                        if (*p == '\n')
1507                                break;
1508                        userdata[i] = toupper(*p);
1509                }
1510                while (i > 0 && i < 16)
1511                        userdata[i++] = ' ';
1512        } else
1513                memcpy(userdata, iucvMagic_ascii, 16);
1514        userdata[16] = '\0';
1515        ASCEBC(userdata, 16);
1516
1517        return 0;
1518}
1519
1520static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1521                          const char *buf, size_t count)
1522{
1523        struct netiucv_priv *priv = dev_get_drvdata(dev);
1524        struct net_device *ndev = priv->conn->netdev;
1525        char    username[9];
1526        char    userdata[17];
1527        int     rc;
1528        struct iucv_connection *cp;
1529
1530        IUCV_DBF_TEXT(trace, 3, __func__);
1531        rc = netiucv_check_user(buf, count, username, userdata);
1532        if (rc)
1533                return rc;
1534
1535        if (memcmp(username, priv->conn->userid, 9) &&
1536            (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1537                /* username changed while the interface is active. */
1538                IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1539                return -EPERM;
1540        }
1541        read_lock_bh(&iucv_connection_rwlock);
1542        list_for_each_entry(cp, &iucv_connection_list, list) {
1543                if (!strncmp(username, cp->userid, 9) &&
1544                   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
1545                        read_unlock_bh(&iucv_connection_rwlock);
1546                        IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
1547                                "already exists\n", netiucv_printuser(cp));
1548                        return -EEXIST;
1549                }
1550        }
1551        read_unlock_bh(&iucv_connection_rwlock);
1552        memcpy(priv->conn->userid, username, 9);
1553        memcpy(priv->conn->userdata, userdata, 17);
1554        return count;
1555}
1556
1557static DEVICE_ATTR(user, 0644, user_show, user_write);
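
/*
 * The "user" attribute re-targets an existing interface to a different
 * peer. user_write() refuses with -EPERM while the interface is up or
 * running, and with -EEXIST if another connection already uses the same
 * peer specification. With the conventional sysfs layout the file appears
 * as /sys/bus/iucv/devices/net<ifname>/user (path illustrative, e.g.
 * /sys/bus/iucv/devices/netiucv0/user).
 */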
1558
1559static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1560                            char *buf)
1561{
1562        struct netiucv_priv *priv = dev_get_drvdata(dev);
1563
1564        IUCV_DBF_TEXT(trace, 5, __func__);
1565        return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1566}
1567
1568static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1569                             const char *buf, size_t count)
1570{
1571        struct netiucv_priv *priv = dev_get_drvdata(dev);
1572        struct net_device *ndev = priv->conn->netdev;
1573        char         *e;
1574        int          bs1;
1575
1576        IUCV_DBF_TEXT(trace, 3, __func__);
1577        if (count >= 39)
1578                return -EINVAL;
1579
1580        bs1 = simple_strtoul(buf, &e, 0);
1581
1582        if (e && (!isspace(*e))) {
1583                IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
1584                        *e);
1585                return -EINVAL;
1586        }
1587        if (bs1 > NETIUCV_BUFSIZE_MAX) {
1588                IUCV_DBF_TEXT_(setup, 2,
1589                        "buffer_write: buffer size %d too large\n",
1590                        bs1);
1591                return -EINVAL;
1592        }
1593        if ((ndev->flags & IFF_RUNNING) &&
1594            (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1595                IUCV_DBF_TEXT_(setup, 2,
1596                        "buffer_write: buffer size %d too small\n",
1597                        bs1);
1598                return -EINVAL;
1599        }
1600        if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1601                IUCV_DBF_TEXT_(setup, 2,
1602                        "buffer_write: buffer size %d too small\n",
1603                        bs1);
1604                return -EINVAL;
1605        }
1606
1607        priv->conn->max_buffsize = bs1;
1608        if (!(ndev->flags & IFF_RUNNING))
1609                ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1610
1611        return count;
1612
1613}
1614
1615static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
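
/*
 * Constraints enforced by buffer_write(), summarized: the new size must
 * not exceed NETIUCV_BUFSIZE_MAX, must be at least 576 + 2 * NETIUCV_HDRLEN
 * (576 presumably reflecting the minimum IPv4 datagram size every host
 * must accept), and, while the interface is running, must be at least the
 * current MTU plus NETIUCV_HDRLEN + 2. When the interface is down, the MTU
 * is refitted to max_buffsize - 2 * NETIUCV_HDRLEN.
 */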
1616
1617static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1618                             char *buf)
1619{
1620        struct netiucv_priv *priv = dev_get_drvdata(dev);
1621
1622        IUCV_DBF_TEXT(trace, 5, __func__);
1623        return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1624}
1625
1626static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1627
1628static ssize_t conn_fsm_show (struct device *dev,
1629                              struct device_attribute *attr, char *buf)
1630{
1631        struct netiucv_priv *priv = dev_get_drvdata(dev);
1632
1633        IUCV_DBF_TEXT(trace, 5, __func__);
1634        return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1635}
1636
1637static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1638
1639static ssize_t maxmulti_show (struct device *dev,
1640                              struct device_attribute *attr, char *buf)
1641{
1642        struct netiucv_priv *priv = dev_get_drvdata(dev);
1643
1644        IUCV_DBF_TEXT(trace, 5, __func__);
1645        return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1646}
1647
1648static ssize_t maxmulti_write (struct device *dev,
1649                               struct device_attribute *attr,
1650                               const char *buf, size_t count)
1651{
1652        struct netiucv_priv *priv = dev_get_drvdata(dev);
1653
1654        IUCV_DBF_TEXT(trace, 4, __func__);
1655        priv->conn->prof.maxmulti = 0;
1656        return count;
1657}
1658
1659static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1660
1661static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1662                           char *buf)
1663{
1664        struct netiucv_priv *priv = dev_get_drvdata(dev);
1665
1666        IUCV_DBF_TEXT(trace, 5, __func__);
1667        return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1668}
1669
1670static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1671                            const char *buf, size_t count)
1672{
1673        struct netiucv_priv *priv = dev_get_drvdata(dev);
1674
1675        IUCV_DBF_TEXT(trace, 4, __func__);
1676        priv->conn->prof.maxcqueue = 0;
1677        return count;
1678}
1679
1680static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1681
1682static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1683                           char *buf)
1684{
1685        struct netiucv_priv *priv = dev_get_drvdata(dev);
1686
1687        IUCV_DBF_TEXT(trace, 5, __func__);
1688        return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1689}
1690
1691static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1692                            const char *buf, size_t count)
1693{
1694        struct netiucv_priv *priv = dev_get_drvdata(dev);
1695
1696        IUCV_DBF_TEXT(trace, 4, __func__);
1697        priv->conn->prof.doios_single = 0;
1698        return count;
1699}
1700
1701static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1702
1703static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1704                           char *buf)
1705{
1706        struct netiucv_priv *priv = dev_get_drvdata(dev);
1707
1708        IUCV_DBF_TEXT(trace, 5, __func__);
1709        return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1710}
1711
1712static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1713                            const char *buf, size_t count)
1714{
1715        struct netiucv_priv *priv = dev_get_drvdata(dev);
1716
1717        IUCV_DBF_TEXT(trace, 5, __func__);
1718        priv->conn->prof.doios_multi = 0;
1719        return count;
1720}
1721
1722static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1723
1724static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1725                           char *buf)
1726{
1727        struct netiucv_priv *priv = dev_get_drvdata(dev);
1728
1729        IUCV_DBF_TEXT(trace, 5, __func__);
1730        return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1731}
1732
1733static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1734                            const char *buf, size_t count)
1735{
1736        struct netiucv_priv *priv = dev_get_drvdata(dev);
1737
1738        IUCV_DBF_TEXT(trace, 4, __func__);
1739        priv->conn->prof.txlen = 0;
1740        return count;
1741}
1742
1743static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1744
1745static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1746                            char *buf)
1747{
1748        struct netiucv_priv *priv = dev_get_drvdata(dev);
1749
1750        IUCV_DBF_TEXT(trace, 5, __func__);
1751        return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1752}
1753
1754static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1755                             const char *buf, size_t count)
1756{
1757        struct netiucv_priv *priv = dev_get_drvdata(dev);
1758
1759        IUCV_DBF_TEXT(trace, 4, __func__);
1760        priv->conn->prof.tx_time = 0;
1761        return count;
1762}
1763
1764static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1765
1766static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1767                            char *buf)
1768{
1769        struct netiucv_priv *priv = dev_get_drvdata(dev);
1770
1771        IUCV_DBF_TEXT(trace, 5, __func__);
1772        return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1773}
1774
1775static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1776                             const char *buf, size_t count)
1777{
1778        struct netiucv_priv *priv = dev_get_drvdata(dev);
1779
1780        IUCV_DBF_TEXT(trace, 4, __func__);
1781        priv->conn->prof.tx_pending = 0;
1782        return count;
1783}
1784
1785static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1786
1787static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1788                            char *buf)
1789{
1790        struct netiucv_priv *priv = dev_get_drvdata(dev);
1791
1792        IUCV_DBF_TEXT(trace, 5, __func__);
1793        return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1794}
1795
1796static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1797                             const char *buf, size_t count)
1798{
1799        struct netiucv_priv *priv = dev_get_drvdata(dev);
1800
1801        IUCV_DBF_TEXT(trace, 4, __func__);
1802        priv->conn->prof.tx_max_pending = 0;
1803        return count;
1804}
1805
1806static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1807
1808static struct attribute *netiucv_attrs[] = {
1809        &dev_attr_buffer.attr,
1810        &dev_attr_user.attr,
1811        NULL,
1812};
1813
1814static struct attribute_group netiucv_attr_group = {
1815        .attrs = netiucv_attrs,
1816};
1817
1818static struct attribute *netiucv_stat_attrs[] = {
1819        &dev_attr_device_fsm_state.attr,
1820        &dev_attr_connection_fsm_state.attr,
1821        &dev_attr_max_tx_buffer_used.attr,
1822        &dev_attr_max_chained_skbs.attr,
1823        &dev_attr_tx_single_write_ops.attr,
1824        &dev_attr_tx_multi_write_ops.attr,
1825        &dev_attr_netto_bytes.attr,
1826        &dev_attr_max_tx_io_time.attr,
1827        &dev_attr_tx_pending.attr,
1828        &dev_attr_tx_max_pending.attr,
1829        NULL,
1830};
1831
1832static struct attribute_group netiucv_stat_attr_group = {
1833        .name  = "stats",
1834        .attrs = netiucv_stat_attrs,
1835};
1836
1837static const struct attribute_group *netiucv_attr_groups[] = {
1838        &netiucv_stat_attr_group,
1839        &netiucv_attr_group,
1840        NULL,
1841};
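
/*
 * Resulting per-connection sysfs layout (shown for an interface named
 * iucv0; the mount point and bus path follow the usual sysfs conventions
 * and are not taken from this file):
 *
 *   /sys/bus/iucv/devices/netiucv0/
 *   |-- user      (peer, "username" or "username.userdata")
 *   |-- buffer    (max_buffsize)
 *   `-- stats/    device_fsm_state, connection_fsm_state,
 *                 max_tx_buffer_used, max_chained_skbs,
 *                 tx_single_write_ops, tx_multi_write_ops, netto_bytes,
 *                 max_tx_io_time, tx_pending, tx_max_pending
 *
 * The two *_fsm_state files are read-only; writing any value to one of the
 * counter files resets that counter to zero.
 */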
1842
1843static int netiucv_register_device(struct net_device *ndev)
1844{
1845        struct netiucv_priv *priv = netdev_priv(ndev);
1846        struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1847        int ret;
1848
1849        IUCV_DBF_TEXT(trace, 3, __func__);
1850
1851        if (dev) {
1852                dev_set_name(dev, "net%s", ndev->name);
1853                dev->bus = &iucv_bus;
1854                dev->parent = iucv_root;
1855                dev->groups = netiucv_attr_groups;
1856                /*
1857                 * The release function could be called after the
1858                 * module has been unloaded. Its _only_ task is to
1859                 * free the struct. Therefore, we specify kfree()
1860                 * directly here. (Probably a little bit obfuscating,
1861                 * but legitimate.)
1862                 */
1863                dev->release = (void (*)(struct device *))kfree;
1864                dev->driver = &netiucv_driver;
1865        } else
1866                return -ENOMEM;
1867
1868        ret = device_register(dev);
1869        if (ret) {
1870                put_device(dev);
1871                return ret;
1872        }
1873        priv->dev = dev;
1874        dev_set_drvdata(dev, priv);
1875        return 0;
1876}
1877
1878static void netiucv_unregister_device(struct device *dev)
1879{
1880        IUCV_DBF_TEXT(trace, 3, __func__);
1881        device_unregister(dev);
1882}
1883
1884/**
1885 * Allocate and initialize a new connection structure.
1886 * Add it to the list of netiucv connections.
1887 */
1888static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1889                                                      char *username,
1890                                                      char *userdata)
1891{
1892        struct iucv_connection *conn;
1893
1894        conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1895        if (!conn)
1896                goto out;
1897        skb_queue_head_init(&conn->collect_queue);
1898        skb_queue_head_init(&conn->commit_queue);
1899        spin_lock_init(&conn->collect_lock);
1900        conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1901        conn->netdev = dev;
1902
1903        conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1904        if (!conn->rx_buff)
1905                goto out_conn;
1906        conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1907        if (!conn->tx_buff)
1908                goto out_rx;
1909        conn->fsm = init_fsm("netiucvconn", conn_state_names,
1910                             conn_event_names, NR_CONN_STATES,
1911                             NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1912                             GFP_KERNEL);
1913        if (!conn->fsm)
1914                goto out_tx;
1915
1916        fsm_settimer(conn->fsm, &conn->timer);
1917        fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1918
1919        if (userdata)
1920                memcpy(conn->userdata, userdata, 17);
1921        if (username) {
1922                memcpy(conn->userid, username, 9);
1923                fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1924        }
1925
1926        write_lock_bh(&iucv_connection_rwlock);
1927        list_add_tail(&conn->list, &iucv_connection_list);
1928        write_unlock_bh(&iucv_connection_rwlock);
1929        return conn;
1930
1931out_tx:
1932        kfree_skb(conn->tx_buff);
1933out_rx:
1934        kfree_skb(conn->rx_buff);
1935out_conn:
1936        kfree(conn);
1937out:
1938        return NULL;
1939}
1940
1941/**
1942 * Release a connection structure and remove it from the
1943 * list of netiucv connections.
1944 */
1945static void netiucv_remove_connection(struct iucv_connection *conn)
1946{
1947
1948        IUCV_DBF_TEXT(trace, 3, __func__);
1949        write_lock_bh(&iucv_connection_rwlock);
1950        list_del_init(&conn->list);
1951        write_unlock_bh(&iucv_connection_rwlock);
1952        fsm_deltimer(&conn->timer);
1953        netiucv_purge_skb_queue(&conn->collect_queue);
1954        if (conn->path) {
1955                iucv_path_sever(conn->path, conn->userdata);
1956                kfree(conn->path);
1957                conn->path = NULL;
1958        }
1959        netiucv_purge_skb_queue(&conn->commit_queue);
1960        kfree_fsm(conn->fsm);
1961        kfree_skb(conn->rx_buff);
1962        kfree_skb(conn->tx_buff);
1963}
1964
1965/**
1966 * Release everything of a net device.
1967 */
1968static void netiucv_free_netdevice(struct net_device *dev)
1969{
1970        struct netiucv_priv *privptr;
1971
1972        IUCV_DBF_TEXT(trace, 3, __func__);
1973
1974        if (!dev)
1975                return;
1976        privptr = netdev_priv(dev);
1977        if (privptr) {
1978                if (privptr->conn)
1979                        netiucv_remove_connection(privptr->conn);
1980                if (privptr->fsm)
1981                        kfree_fsm(privptr->fsm);
1982                privptr->conn = NULL; privptr->fsm = NULL;
1983                /* privptr gets freed by free_netdev() */
1984        }
1985        free_netdev(dev);
1986}
1987
1988/**
1989 * Initialize a net device. (Called from kernel in alloc_netdev())
1990 */
1991static const struct net_device_ops netiucv_netdev_ops = {
1992        .ndo_open               = netiucv_open,
1993        .ndo_stop               = netiucv_close,
1994        .ndo_get_stats          = netiucv_stats,
1995        .ndo_start_xmit         = netiucv_tx,
1996        .ndo_change_mtu         = netiucv_change_mtu,
1997};
1998
1999static void netiucv_setup_netdevice(struct net_device *dev)
2000{
2001        dev->mtu                 = NETIUCV_MTU_DEFAULT;
2002        dev->destructor          = netiucv_free_netdevice;
2003        dev->hard_header_len     = NETIUCV_HDRLEN;
2004        dev->addr_len            = 0;
2005        dev->type                = ARPHRD_SLIP;
2006        dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
2007        dev->flags               = IFF_POINTOPOINT | IFF_NOARP;
2008        dev->netdev_ops          = &netiucv_netdev_ops;
2009}
2010
2011/**
2012 * Allocate and initialize everything of a net device.
2013 */
2014static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
2015{
2016        struct netiucv_priv *privptr;
2017        struct net_device *dev;
2018
2019        dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
2020                           netiucv_setup_netdevice);
2021        if (!dev)
2022                return NULL;
2023        rtnl_lock();    /* on success, held until conn_write() unlocks */
2024        if (dev_alloc_name(dev, dev->name) < 0)
2025                goto out_netdev;
2026
2027        privptr = netdev_priv(dev);
2028        privptr->fsm = init_fsm("netiucvdev", dev_state_names,
2029                                dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2030                                dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2031        if (!privptr->fsm)
2032                goto out_netdev;
2033
2034        privptr->conn = netiucv_new_connection(dev, username, userdata);
2035        if (!privptr->conn) {
2036                IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
2037                goto out_fsm;
2038        }
2039        fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2040        return dev;
2041
2042out_fsm:
2043        kfree_fsm(privptr->fsm);
2044out_netdev:
2045        rtnl_unlock();
2046        free_netdev(dev);
2047        return NULL;
2048}
2049
2050static ssize_t conn_write(struct device_driver *drv,
2051                          const char *buf, size_t count)
2052{
2053        char username[9];
2054        char userdata[17];
2055        int rc;
2056        struct net_device *dev;
2057        struct netiucv_priv *priv;
2058        struct iucv_connection *cp;
2059
2060        IUCV_DBF_TEXT(trace, 3, __func__);
2061        rc = netiucv_check_user(buf, count, username, userdata);
2062        if (rc)
2063                return rc;
2064
2065        read_lock_bh(&iucv_connection_rwlock);
2066        list_for_each_entry(cp, &iucv_connection_list, list) {
2067                if (!strncmp(username, cp->userid, 9) &&
2068                    !strncmp(userdata, cp->userdata, 17)) {
2069                        read_unlock_bh(&iucv_connection_rwlock);
2070                        IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
2071                                "already exists\n", netiucv_printuser(cp));
2072                        return -EEXIST;
2073                }
2074        }
2075        read_unlock_bh(&iucv_connection_rwlock);
2076
2077        dev = netiucv_init_netdevice(username, userdata);
2078        if (!dev) {
2079                IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2080                return -ENODEV;
2081        }
2082
2083        rc = netiucv_register_device(dev);
2084        if (rc) {
2085                rtnl_unlock();
2086                IUCV_DBF_TEXT_(setup, 2,
2087                        "ret %d from netiucv_register_device\n", rc);
2088                goto out_free_ndev;
2089        }
2090
2091        /* sysfs magic */
2092        priv = netdev_priv(dev);
2093        SET_NETDEV_DEV(dev, priv->dev);
2094
2095        rc = register_netdevice(dev);
2096        rtnl_unlock();
2097        if (rc)
2098                goto out_unreg;
2099
2100        dev_info(priv->dev, "The IUCV interface to %s has been established "
2101                            "successfully\n",
2102                netiucv_printuser(priv->conn));
2103
2104        return count;
2105
2106out_unreg:
2107        netiucv_unregister_device(priv->dev);
2108out_free_ndev:
2109        netiucv_free_netdevice(dev);
2110        return rc;
2111}
2112
2113static DRIVER_ATTR(connection, 0200, NULL, conn_write);
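
/*
 * Interfaces are created by writing a peer specification to this driver
 * attribute. A minimal userspace sketch, assuming the conventional sysfs
 * path /sys/bus/iucv/drivers/netiucv/connection and an illustrative z/VM
 * peer named LNXPEER (a "username.userdata" form is also accepted, see
 * netiucv_check_user()):
 *
 *	int fd = open("/sys/bus/iucv/drivers/netiucv/connection", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "lnxpeer\n", 8);
 *		close(fd);
 *	}
 *
 * On success a new iucv%d interface is allocated and a matching
 * net<ifname> device appears on the iucv bus.
 */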
2114
2115static ssize_t remove_write (struct device_driver *drv,
2116                             const char *buf, size_t count)
2117{
2118        struct iucv_connection *cp;
2119        struct net_device *ndev;
2120        struct netiucv_priv *priv;
2121        struct device *dev;
2122        char name[IFNAMSIZ];
2123        const char *p;
2124        int i;
2125
2126        IUCV_DBF_TEXT(trace, 3, __func__);
2127
2128        if (count >= IFNAMSIZ)
2129                count = IFNAMSIZ - 1;
2130
2131        for (i = 0, p = buf; i < count && *p; i++, p++) {
2132                if (*p == '\n' || *p == ' ')
2133                        /* trailing lf or blank, grr */
2134                        break;
2135                name[i] = *p;
2136        }
2137        name[i] = '\0';
2138
2139        read_lock_bh(&iucv_connection_rwlock);
2140        list_for_each_entry(cp, &iucv_connection_list, list) {
2141                ndev = cp->netdev;
2142                priv = netdev_priv(ndev);
2143                dev = priv->dev;
2144                if (strncmp(name, ndev->name, count))
2145                        continue;
2146                read_unlock_bh(&iucv_connection_rwlock);
2147                if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2148                        dev_warn(dev, "The IUCV device is connected"
2149                                " to %s and cannot be removed\n",
2150                                priv->conn->userid);
2151                        IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2152                        return -EPERM;
2153                }
2154                unregister_netdev(ndev);
2155                netiucv_unregister_device(dev);
2156                return count;
2157        }
2158        read_unlock_bh(&iucv_connection_rwlock);
2159        IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2160        return -EINVAL;
2161}
2162
2163static DRIVER_ATTR(remove, 0200, NULL, remove_write);
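
/*
 * Removal is the mirror image: writing the interface name to this
 * attribute (conventionally /sys/bus/iucv/drivers/netiucv/remove) tears
 * down the netdevice and its sysfs device, e.g. write(fd, "iucv0\n", 6)
 * with fd opened on that file and iucv0 an illustrative interface name.
 * remove_write() returns -EPERM while the interface is still up or
 * running and -EINVAL for an unknown name.
 */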
2164
2165static struct attribute * netiucv_drv_attrs[] = {
2166        &driver_attr_connection.attr,
2167        &driver_attr_remove.attr,
2168        NULL,
2169};
2170
2171static struct attribute_group netiucv_drv_attr_group = {
2172        .attrs = netiucv_drv_attrs,
2173};
2174
2175static const struct attribute_group *netiucv_drv_attr_groups[] = {
2176        &netiucv_drv_attr_group,
2177        NULL,
2178};
2179
2180static void netiucv_banner(void)
2181{
2182        pr_info("driver initialized\n");
2183}
2184
2185static void __exit netiucv_exit(void)
2186{
2187        struct iucv_connection *cp;
2188        struct net_device *ndev;
2189        struct netiucv_priv *priv;
2190        struct device *dev;
2191
2192        IUCV_DBF_TEXT(trace, 3, __func__);
2193        while (!list_empty(&iucv_connection_list)) {
2194                cp = list_entry(iucv_connection_list.next,
2195                                struct iucv_connection, list);
2196                ndev = cp->netdev;
2197                priv = netdev_priv(ndev);
2198                dev = priv->dev;
2199
2200                unregister_netdev(ndev);
2201                netiucv_unregister_device(dev);
2202        }
2203
2204        device_unregister(netiucv_dev);
2205        driver_unregister(&netiucv_driver);
2206        iucv_unregister(&netiucv_handler, 1);
2207        iucv_unregister_dbf_views();
2208
2209        pr_info("driver unloaded\n");
2210        return;
2211}
2212
2213static int __init netiucv_init(void)
2214{
2215        int rc;
2216
2217        rc = iucv_register_dbf_views();
2218        if (rc)
2219                goto out;
2220        rc = iucv_register(&netiucv_handler, 1);
2221        if (rc)
2222                goto out_dbf;
2223        IUCV_DBF_TEXT(trace, 3, __func__);
2224        netiucv_driver.groups = netiucv_drv_attr_groups;
2225        rc = driver_register(&netiucv_driver);
2226        if (rc) {
2227                IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2228                goto out_iucv;
2229        }
2230        /* establish dummy device */
2231        netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2232        if (!netiucv_dev) {
2233                rc = -ENOMEM;
2234                goto out_driver;
2235        }
2236        dev_set_name(netiucv_dev, "netiucv");
2237        netiucv_dev->bus = &iucv_bus;
2238        netiucv_dev->parent = iucv_root;
2239        netiucv_dev->release = (void (*)(struct device *))kfree;
2240        netiucv_dev->driver = &netiucv_driver;
2241        rc = device_register(netiucv_dev);
2242        if (rc) {
2243                put_device(netiucv_dev);
2244                goto out_driver;
2245        }
2246        netiucv_banner();
2247        return rc;
2248
2249out_driver:
2250        driver_unregister(&netiucv_driver);
2251out_iucv:
2252        iucv_unregister(&netiucv_handler, 1);
2253out_dbf:
2254        iucv_unregister_dbf_views();
2255out:
2256        return rc;
2257}
2258
2259module_init(netiucv_init);
2260module_exit(netiucv_exit);
2261MODULE_LICENSE("GPL");
2262