linux/drivers/s390/net/netiucv.c
   1/*
   2 * IUCV network driver
   3 *
   4 * Copyright IBM Corp. 2001, 2009
   5 *
   6 * Author(s):
   7 *      Original netiucv driver:
   8 *              Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
   9 *      Sysfs integration and all bugs therein:
  10 *              Cornelia Huck (cornelia.huck@de.ibm.com)
  11 *      PM functions:
  12 *              Ursula Braun (ursula.braun@de.ibm.com)
  13 *
  14 * Documentation used:
  15 *  the source of the original IUCV driver by:
  16 *    Stefan Hegewald <hegewald@de.ibm.com>
  17 *    Hartmut Penner <hpenner@de.ibm.com>
  18 *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  19 *    Martin Schwidefsky (schwidefsky@de.ibm.com)
  20 *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
  21 *
  22 * This program is free software; you can redistribute it and/or modify
  23 * it under the terms of the GNU General Public License as published by
  24 * the Free Software Foundation; either version 2, or (at your option)
  25 * any later version.
  26 *
  27 * This program is distributed in the hope that it will be useful,
  28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  30 * GNU General Public License for more details.
  31 *
  32 * You should have received a copy of the GNU General Public License
  33 * along with this program; if not, write to the Free Software
  34 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  35 *
  36 */
  37
  38#define KMSG_COMPONENT "netiucv"
  39#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  40
  41#undef DEBUG
  42
  43#include <linux/module.h>
  44#include <linux/init.h>
  45#include <linux/kernel.h>
  46#include <linux/slab.h>
  47#include <linux/errno.h>
  48#include <linux/types.h>
  49#include <linux/interrupt.h>
  50#include <linux/timer.h>
  51#include <linux/bitops.h>
  52
  53#include <linux/signal.h>
  54#include <linux/string.h>
  55#include <linux/device.h>
  56
  57#include <linux/ip.h>
  58#include <linux/if_arp.h>
  59#include <linux/tcp.h>
  60#include <linux/skbuff.h>
  61#include <linux/ctype.h>
  62#include <net/dst.h>
  63
  64#include <asm/io.h>
  65#include <asm/uaccess.h>
  66#include <asm/ebcdic.h>
  67
  68#include <net/iucv/iucv.h>
  69#include "fsm.h"
  70
  71MODULE_AUTHOR
  72    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
  73MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
  74
  75/**
   76 * Debug facility (s390 DBF) definitions
  77 */
  78#define IUCV_DBF_SETUP_NAME "iucv_setup"
  79#define IUCV_DBF_SETUP_LEN 64
  80#define IUCV_DBF_SETUP_PAGES 2
  81#define IUCV_DBF_SETUP_NR_AREAS 1
  82#define IUCV_DBF_SETUP_LEVEL 3
  83
  84#define IUCV_DBF_DATA_NAME "iucv_data"
  85#define IUCV_DBF_DATA_LEN 128
  86#define IUCV_DBF_DATA_PAGES 2
  87#define IUCV_DBF_DATA_NR_AREAS 1
  88#define IUCV_DBF_DATA_LEVEL 2
  89
  90#define IUCV_DBF_TRACE_NAME "iucv_trace"
  91#define IUCV_DBF_TRACE_LEN 16
  92#define IUCV_DBF_TRACE_PAGES 4
  93#define IUCV_DBF_TRACE_NR_AREAS 1
  94#define IUCV_DBF_TRACE_LEVEL 3
  95
  96#define IUCV_DBF_TEXT(name,level,text) \
  97        do { \
  98                debug_text_event(iucv_dbf_##name,level,text); \
  99        } while (0)
 100
 101#define IUCV_DBF_HEX(name,level,addr,len) \
 102        do { \
 103                debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
 104        } while (0)
 105
 106DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
 107
 108#define IUCV_DBF_TEXT_(name, level, text...) \
 109        do { \
 110                if (debug_level_enabled(iucv_dbf_##name, level)) { \
 111                        char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
 112                        sprintf(__buf, text); \
 113                        debug_text_event(iucv_dbf_##name, level, __buf); \
 114                        put_cpu_var(iucv_dbf_txt_buf); \
 115                } \
 116        } while (0)
 117
 118#define IUCV_DBF_SPRINTF(name,level,text...) \
 119        do { \
 120                debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
 122        } while (0)
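/*
 * These wrappers are used throughout the driver, for example:
 *
 *	IUCV_DBF_TEXT(trace, 3, __func__);
 *	IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_send\n", rc);
 *
 * IUCV_DBF_TEXT_() formats into a per-CPU buffer first, since
 * debug_text_event() records a plain string only.
 */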
 123
 124/**
  125 * printk header used for debugging output
 126 */
 127#define PRINTK_HEADER " iucv: "       /* for debugging */
 128
 129/* dummy device to make sure netiucv_pm functions are called */
 130static struct device *netiucv_dev;
 131
 132static int netiucv_pm_prepare(struct device *);
 133static void netiucv_pm_complete(struct device *);
 134static int netiucv_pm_freeze(struct device *);
 135static int netiucv_pm_restore_thaw(struct device *);
 136
 137static const struct dev_pm_ops netiucv_pm_ops = {
 138        .prepare = netiucv_pm_prepare,
 139        .complete = netiucv_pm_complete,
 140        .freeze = netiucv_pm_freeze,
 141        .thaw = netiucv_pm_restore_thaw,
 142        .restore = netiucv_pm_restore_thaw,
 143};
 144
 145static struct device_driver netiucv_driver = {
 146        .owner = THIS_MODULE,
 147        .name = "netiucv",
 148        .bus  = &iucv_bus,
 149        .pm = &netiucv_pm_ops,
 150};
 151
 152static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
 153static void netiucv_callback_connack(struct iucv_path *, u8 *);
 154static void netiucv_callback_connrej(struct iucv_path *, u8 *);
 155static void netiucv_callback_connsusp(struct iucv_path *, u8 *);
 156static void netiucv_callback_connres(struct iucv_path *, u8 *);
 157static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
 158static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
 159
 160static struct iucv_handler netiucv_handler = {
 161        .path_pending     = netiucv_callback_connreq,
 162        .path_complete    = netiucv_callback_connack,
 163        .path_severed     = netiucv_callback_connrej,
 164        .path_quiesced    = netiucv_callback_connsusp,
 165        .path_resumed     = netiucv_callback_connres,
 166        .message_pending  = netiucv_callback_rx,
 167        .message_complete = netiucv_callback_txdone
 168};
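/*
 * Each callback above simply maps the IUCV notification onto an event
 * of the connection statemachine defined below:
 *
 *	path_pending     -> CONN_EVENT_CONN_REQ
 *	path_complete    -> CONN_EVENT_CONN_ACK
 *	path_severed     -> CONN_EVENT_CONN_REJ
 *	path_quiesced    -> CONN_EVENT_CONN_SUS
 *	path_resumed     -> CONN_EVENT_CONN_RES
 *	message_pending  -> CONN_EVENT_RX
 *	message_complete -> CONN_EVENT_TXDONE
 */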
 169
 170/**
 171 * Per connection profiling data
 172 */
 173struct connection_profile {
 174        unsigned long maxmulti;
 175        unsigned long maxcqueue;
 176        unsigned long doios_single;
 177        unsigned long doios_multi;
 178        unsigned long txlen;
 179        unsigned long tx_time;
 180        unsigned long send_stamp;
 181        unsigned long tx_pending;
 182        unsigned long tx_max_pending;
 183};
 184
 185/**
 186 * Representation of one iucv connection
 187 */
 188struct iucv_connection {
 189        struct list_head          list;
 190        struct iucv_path          *path;
 191        struct sk_buff            *rx_buff;
 192        struct sk_buff            *tx_buff;
 193        struct sk_buff_head       collect_queue;
 194        struct sk_buff_head       commit_queue;
 195        spinlock_t                collect_lock;
 196        int                       collect_len;
 197        int                       max_buffsize;
 198        fsm_timer                 timer;
 199        fsm_instance              *fsm;
 200        struct net_device         *netdev;
 201        struct connection_profile prof;
 202        char                      userid[9];
 203        char                      userdata[17];
 204};
 205
 206/**
 207 * Linked list of all connection structs.
 208 */
 209static LIST_HEAD(iucv_connection_list);
 210static DEFINE_RWLOCK(iucv_connection_rwlock);
 211
 212/**
 213 * Representation of event-data for the
 214 * connection state machine.
 215 */
 216struct iucv_event {
 217        struct iucv_connection *conn;
 218        void                   *data;
 219};
 220
 221/**
 222 * Private part of the network device structure
 223 */
 224struct netiucv_priv {
 225        struct net_device_stats stats;
 226        unsigned long           tbusy;
 227        fsm_instance            *fsm;
 228        struct iucv_connection  *conn;
 229        struct device           *dev;
 230        int                      pm_state;
 231};
 232
 233/**
 234 * Link level header for a packet.
 235 */
 236struct ll_header {
 237        u16 next;
 238};
 239
 240#define NETIUCV_HDRLEN           (sizeof(struct ll_header))
 241#define NETIUCV_BUFSIZE_MAX      65537
 242#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
 243#define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
 244#define NETIUCV_MTU_DEFAULT      9216
 245#define NETIUCV_QUEUELEN_DEFAULT 50
 246#define NETIUCV_TIMEOUT_5SEC     5000
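/*
 * Rough sketch of the transfer buffer layout produced by
 * netiucv_transmit_skb()/conn_action_txdone() and consumed by
 * netiucv_unpack_skb(): each packet is preceded by a 2 byte ll_header
 * whose 'next' field holds the offset (from the start of the buffer)
 * of the following ll_header; a header with next == 0 terminates the
 * buffer.  For two payloads of 10 and 20 bytes this gives:
 *
 *	offset  0: next = 12 | 10 bytes payload
 *	offset 12: next = 34 | 20 bytes payload
 *	offset 34: next = 0
 */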
 247
 248/**
  249 * Helper functions for busy handling
  250 * of network devices.
 251 */
 252static inline void netiucv_clear_busy(struct net_device *dev)
 253{
 254        struct netiucv_priv *priv = netdev_priv(dev);
 255        clear_bit(0, &priv->tbusy);
 256        netif_wake_queue(dev);
 257}
 258
 259static inline int netiucv_test_and_set_busy(struct net_device *dev)
 260{
 261        struct netiucv_priv *priv = netdev_priv(dev);
 262        netif_stop_queue(dev);
 263        return test_and_set_bit(0, &priv->tbusy);
 264}
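/*
 * netiucv_tx() brackets each transmission with these helpers: it
 * returns NETDEV_TX_BUSY if the busy bit was already set (the queue is
 * stopped in that case) and wakes the queue again via
 * netiucv_clear_busy() once netiucv_transmit_skb() has finished.
 */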
 265
 266static u8 iucvMagic_ascii[16] = {
 267        0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
 268        0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
 269};
 270
 271static u8 iucvMagic_ebcdic[16] = {
 272        0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
 273        0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
 274};
 275
 276/**
 277 * Convert an iucv userId to its printable
 278 * form (strip whitespace at end).
 279 *
  280 * @param name An iucv userId
  281 * @param len  Length of the userId
 282 * @returns The printable string (static data!!)
 283 */
 284static char *netiucv_printname(char *name, int len)
 285{
 286        static char tmp[17];
 287        char *p = tmp;
 288        memcpy(tmp, name, len);
 289        tmp[len] = '\0';
 290        while (*p && ((p - tmp) < len) && (!isspace(*p)))
 291                p++;
 292        *p = '\0';
 293        return tmp;
 294}
 295
 296static char *netiucv_printuser(struct iucv_connection *conn)
 297{
 298        static char tmp_uid[9];
 299        static char tmp_udat[17];
 300        static char buf[100];
 301
 302        if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
 303                tmp_uid[8] = '\0';
 304                tmp_udat[16] = '\0';
 305                memcpy(tmp_uid, conn->userid, 8);
 306                memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
 307                memcpy(tmp_udat, conn->userdata, 16);
 308                EBCASC(tmp_udat, 16);
 309                memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
 310                sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
 311                return buf;
 312        } else
 313                return netiucv_printname(conn->userid, 8);
 314}
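/*
 * Both helpers return pointers to static buffers and are therefore not
 * reentrant; they are used for message and sysfs output only.  A
 * connection to user ID "USER1" prints as "USER1" with the default
 * user data, or as, e.g., "USER1.MYDATA" when user data was set.
 */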
 315
 316/**
 317 * States of the interface statemachine.
 318 */
 319enum dev_states {
 320        DEV_STATE_STOPPED,
 321        DEV_STATE_STARTWAIT,
 322        DEV_STATE_STOPWAIT,
 323        DEV_STATE_RUNNING,
 324        /**
  325         * Must always be the last element!
 326         */
 327        NR_DEV_STATES
 328};
 329
 330static const char *dev_state_names[] = {
 331        "Stopped",
 332        "StartWait",
 333        "StopWait",
 334        "Running",
 335};
 336
 337/**
 338 * Events of the interface statemachine.
 339 */
 340enum dev_events {
 341        DEV_EVENT_START,
 342        DEV_EVENT_STOP,
 343        DEV_EVENT_CONUP,
 344        DEV_EVENT_CONDOWN,
 345        /**
  346         * Must always be the last element!
 347         */
 348        NR_DEV_EVENTS
 349};
 350
 351static const char *dev_event_names[] = {
 352        "Start",
 353        "Stop",
 354        "Connection up",
 355        "Connection down",
 356};
 357
 358/**
 359 * Events of the connection statemachine
 360 */
 361enum conn_events {
 362        /**
  363         * Events representing callbacks from
  364         * the lowlevel iucv layer
 365         */
 366        CONN_EVENT_CONN_REQ,
 367        CONN_EVENT_CONN_ACK,
 368        CONN_EVENT_CONN_REJ,
 369        CONN_EVENT_CONN_SUS,
 370        CONN_EVENT_CONN_RES,
 371        CONN_EVENT_RX,
 372        CONN_EVENT_TXDONE,
 373
 374        /**
  375         * Events representing error return codes from
  376         * calls to the lowlevel iucv layer
 377         */
 378
 379        /**
  380         * Event representing timer expiry.
 381         */
 382        CONN_EVENT_TIMER,
 383
 384        /**
  385         * Events representing commands from upper levels.
 386         */
 387        CONN_EVENT_START,
 388        CONN_EVENT_STOP,
 389
 390        /**
  391         * Must always be the last element!
 392         */
 393        NR_CONN_EVENTS,
 394};
 395
 396static const char *conn_event_names[] = {
 397        "Remote connection request",
 398        "Remote connection acknowledge",
 399        "Remote connection reject",
 400        "Connection suspended",
 401        "Connection resumed",
 402        "Data received",
 403        "Data sent",
 404
 405        "Timer",
 406
 407        "Start",
 408        "Stop",
 409};
 410
 411/**
 412 * States of the connection statemachine.
 413 */
 414enum conn_states {
 415        /**
 416         * Connection not assigned to any device,
 417         * initial state, invalid
 418         */
 419        CONN_STATE_INVALID,
 420
 421        /**
 422         * Userid assigned but not operating
 423         */
 424        CONN_STATE_STOPPED,
 425
 426        /**
 427         * Connection registered,
 428         * no connection request sent yet,
 429         * no connection request received
 430         */
 431        CONN_STATE_STARTWAIT,
 432
 433        /**
 434         * Connection registered and connection request sent,
 435         * no acknowledge and no connection request received yet.
 436         */
 437        CONN_STATE_SETUPWAIT,
 438
 439        /**
 440         * Connection up and running idle
 441         */
 442        CONN_STATE_IDLE,
 443
 444        /**
 445         * Data sent, awaiting CONN_EVENT_TXDONE
 446         */
 447        CONN_STATE_TX,
 448
 449        /**
 450         * Error during registration.
 451         */
 452        CONN_STATE_REGERR,
 453
 454        /**
  455         * Error during connection setup.
 456         */
 457        CONN_STATE_CONNERR,
 458
 459        /**
  460         * Must always be the last element!
 461         */
 462        NR_CONN_STATES,
 463};
 464
 465static const char *conn_state_names[] = {
 466        "Invalid",
 467        "Stopped",
 468        "StartWait",
 469        "SetupWait",
 470        "Idle",
 471        "TX",
  473        "Registration error",
  474        "Connect error",
 475};
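/*
 * The generic FSM code is expected to index this array by the current
 * state, so the entries must stay in the same order as enum
 * conn_states above (the same applies to dev_state_names,
 * dev_event_names and conn_event_names).
 */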
 476
 477
 478/**
 479 * Debug Facility Stuff
 480 */
 481static debug_info_t *iucv_dbf_setup = NULL;
 482static debug_info_t *iucv_dbf_data = NULL;
 483static debug_info_t *iucv_dbf_trace = NULL;
 484
 485DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
 486
 487static void iucv_unregister_dbf_views(void)
 488{
 489        debug_unregister(iucv_dbf_setup);
 490        debug_unregister(iucv_dbf_data);
 491        debug_unregister(iucv_dbf_trace);
 492}
 493static int iucv_register_dbf_views(void)
 494{
 495        iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
 496                                        IUCV_DBF_SETUP_PAGES,
 497                                        IUCV_DBF_SETUP_NR_AREAS,
 498                                        IUCV_DBF_SETUP_LEN);
 499        iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
 500                                       IUCV_DBF_DATA_PAGES,
 501                                       IUCV_DBF_DATA_NR_AREAS,
 502                                       IUCV_DBF_DATA_LEN);
 503        iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
 504                                        IUCV_DBF_TRACE_PAGES,
 505                                        IUCV_DBF_TRACE_NR_AREAS,
 506                                        IUCV_DBF_TRACE_LEN);
 507
 508        if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
 509            (iucv_dbf_trace == NULL)) {
 510                iucv_unregister_dbf_views();
 511                return -ENOMEM;
 512        }
 513        debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
 514        debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
 515
 516        debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
 517        debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
 518
 519        debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
 520        debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
 521
 522        return 0;
 523}
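/*
 * The three debug areas registered here normally appear under
 * /sys/kernel/debug/s390dbf/ as iucv_setup, iucv_data and iucv_trace,
 * each with a hex_ascii view and the default level set above.  This is
 * expected to run from the module init path before any IUCV_DBF_*
 * macro is used.
 */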
 524
 525/*
 526 * Callback-wrappers, called from lowlevel iucv layer.
 527 */
 528
 529static void netiucv_callback_rx(struct iucv_path *path,
 530                                struct iucv_message *msg)
 531{
 532        struct iucv_connection *conn = path->private;
 533        struct iucv_event ev;
 534
 535        ev.conn = conn;
 536        ev.data = msg;
 537        fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
 538}
 539
 540static void netiucv_callback_txdone(struct iucv_path *path,
 541                                    struct iucv_message *msg)
 542{
 543        struct iucv_connection *conn = path->private;
 544        struct iucv_event ev;
 545
 546        ev.conn = conn;
 547        ev.data = msg;
 548        fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
 549}
 550
 551static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
 552{
 553        struct iucv_connection *conn = path->private;
 554
 555        fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
 556}
 557
 558static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
 559                                    u8 *ipuser)
 560{
 561        struct iucv_connection *conn = path->private;
 562        struct iucv_event ev;
 563        static char tmp_user[9];
 564        static char tmp_udat[17];
 565        int rc;
 566
 567        rc = -EINVAL;
 568        memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
 569        memcpy(tmp_udat, ipuser, 16);
 570        EBCASC(tmp_udat, 16);
 571        read_lock_bh(&iucv_connection_rwlock);
 572        list_for_each_entry(conn, &iucv_connection_list, list) {
 573                if (strncmp(ipvmid, conn->userid, 8) ||
 574                    strncmp(ipuser, conn->userdata, 16))
 575                        continue;
 576                /* Found a matching connection for this path. */
 577                conn->path = path;
 578                ev.conn = conn;
 579                ev.data = path;
 580                fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
 581                rc = 0;
 582        }
 583        IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
 584                       tmp_user, netiucv_printname(tmp_udat, 16));
 585        read_unlock_bh(&iucv_connection_rwlock);
 586        return rc;
 587}
 588
 589static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
 590{
 591        struct iucv_connection *conn = path->private;
 592
 593        fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
 594}
 595
 596static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
 597{
 598        struct iucv_connection *conn = path->private;
 599
 600        fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
 601}
 602
 603static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
 604{
 605        struct iucv_connection *conn = path->private;
 606
 607        fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
 608}
 609
 610/**
 611 * NOP action for statemachines
 612 */
 613static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
 614{
 615}
 616
 617/*
 618 * Actions of the connection statemachine
 619 */
 620
 621/**
 622 * netiucv_unpack_skb
 623 * @conn: The connection where this skb has been received.
 624 * @pskb: The received skb.
 625 *
 626 * Unpack a just received skb and hand it over to upper layers.
 627 * Helper function for conn_action_rx.
 628 */
 629static void netiucv_unpack_skb(struct iucv_connection *conn,
 630                               struct sk_buff *pskb)
 631{
 632        struct net_device     *dev = conn->netdev;
 633        struct netiucv_priv   *privptr = netdev_priv(dev);
 634        u16 offset = 0;
 635
 636        skb_put(pskb, NETIUCV_HDRLEN);
 637        pskb->dev = dev;
 638        pskb->ip_summed = CHECKSUM_NONE;
 639        pskb->protocol = ntohs(ETH_P_IP);
 640
 641        while (1) {
 642                struct sk_buff *skb;
 643                struct ll_header *header = (struct ll_header *) pskb->data;
 644
 645                if (!header->next)
 646                        break;
 647
 648                skb_pull(pskb, NETIUCV_HDRLEN);
 649                header->next -= offset;
 650                offset += header->next;
 651                header->next -= NETIUCV_HDRLEN;
 652                if (skb_tailroom(pskb) < header->next) {
 653                        IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
 654                                header->next, skb_tailroom(pskb));
 655                        return;
 656                }
 657                skb_put(pskb, header->next);
 658                skb_reset_mac_header(pskb);
 659                skb = dev_alloc_skb(pskb->len);
 660                if (!skb) {
 661                        IUCV_DBF_TEXT(data, 2,
 662                                "Out of memory in netiucv_unpack_skb\n");
 663                        privptr->stats.rx_dropped++;
 664                        return;
 665                }
 666                skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
 667                                          pskb->len);
 668                skb_reset_mac_header(skb);
 669                skb->dev = pskb->dev;
 670                skb->protocol = pskb->protocol;
  671                skb->ip_summed = CHECKSUM_UNNECESSARY;
 672                privptr->stats.rx_packets++;
 673                privptr->stats.rx_bytes += skb->len;
 674                /*
 675                 * Since receiving is always initiated from a tasklet (in iucv.c),
 676                 * we must use netif_rx_ni() instead of netif_rx()
 677                 */
 678                netif_rx_ni(skb);
 679                skb_pull(pskb, header->next);
 680                skb_put(pskb, NETIUCV_HDRLEN);
 681        }
 682}
 683
 684static void conn_action_rx(fsm_instance *fi, int event, void *arg)
 685{
 686        struct iucv_event *ev = arg;
 687        struct iucv_connection *conn = ev->conn;
 688        struct iucv_message *msg = ev->data;
 689        struct netiucv_priv *privptr = netdev_priv(conn->netdev);
 690        int rc;
 691
 692        IUCV_DBF_TEXT(trace, 4, __func__);
 693
 694        if (!conn->netdev) {
 695                iucv_message_reject(conn->path, msg);
 696                IUCV_DBF_TEXT(data, 2,
 697                              "Received data for unlinked connection\n");
 698                return;
 699        }
 700        if (msg->length > conn->max_buffsize) {
 701                iucv_message_reject(conn->path, msg);
 702                privptr->stats.rx_dropped++;
 703                IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
 704                               msg->length, conn->max_buffsize);
 705                return;
 706        }
 707        conn->rx_buff->data = conn->rx_buff->head;
 708        skb_reset_tail_pointer(conn->rx_buff);
 709        conn->rx_buff->len = 0;
 710        rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
 711                                  msg->length, NULL);
 712        if (rc || msg->length < 5) {
 713                privptr->stats.rx_errors++;
 714                IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
 715                return;
 716        }
 717        netiucv_unpack_skb(conn, conn->rx_buff);
 718}
 719
 720static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 721{
 722        struct iucv_event *ev = arg;
 723        struct iucv_connection *conn = ev->conn;
 724        struct iucv_message *msg = ev->data;
 725        struct iucv_message txmsg;
 726        struct netiucv_priv *privptr = NULL;
 727        u32 single_flag = msg->tag;
 728        u32 txbytes = 0;
 729        u32 txpackets = 0;
 730        u32 stat_maxcq = 0;
 731        struct sk_buff *skb;
 732        unsigned long saveflags;
 733        struct ll_header header;
 734        int rc;
 735
 736        IUCV_DBF_TEXT(trace, 4, __func__);
 737
 738        if (!conn || !conn->netdev) {
 739                IUCV_DBF_TEXT(data, 2,
 740                              "Send confirmation for unlinked connection\n");
 741                return;
 742        }
 743        privptr = netdev_priv(conn->netdev);
 744        conn->prof.tx_pending--;
 745        if (single_flag) {
 746                if ((skb = skb_dequeue(&conn->commit_queue))) {
 747                        atomic_dec(&skb->users);
 748                        if (privptr) {
 749                                privptr->stats.tx_packets++;
 750                                privptr->stats.tx_bytes +=
 751                                        (skb->len - NETIUCV_HDRLEN
 752                                                  - NETIUCV_HDRLEN);
 753                        }
 754                        dev_kfree_skb_any(skb);
 755                }
 756        }
 757        conn->tx_buff->data = conn->tx_buff->head;
 758        skb_reset_tail_pointer(conn->tx_buff);
 759        conn->tx_buff->len = 0;
 760        spin_lock_irqsave(&conn->collect_lock, saveflags);
 761        while ((skb = skb_dequeue(&conn->collect_queue))) {
 762                header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
 763                memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
 764                       NETIUCV_HDRLEN);
 765                skb_copy_from_linear_data(skb,
 766                                          skb_put(conn->tx_buff, skb->len),
 767                                          skb->len);
 768                txbytes += skb->len;
 769                txpackets++;
 770                stat_maxcq++;
 771                atomic_dec(&skb->users);
 772                dev_kfree_skb_any(skb);
 773        }
 774        if (conn->collect_len > conn->prof.maxmulti)
 775                conn->prof.maxmulti = conn->collect_len;
 776        conn->collect_len = 0;
 777        spin_unlock_irqrestore(&conn->collect_lock, saveflags);
 778        if (conn->tx_buff->len == 0) {
 779                fsm_newstate(fi, CONN_STATE_IDLE);
 780                return;
 781        }
 782
 783        header.next = 0;
 784        memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
 785        conn->prof.send_stamp = jiffies;
 786        txmsg.class = 0;
 787        txmsg.tag = 0;
 788        rc = iucv_message_send(conn->path, &txmsg, 0, 0,
 789                               conn->tx_buff->data, conn->tx_buff->len);
 790        conn->prof.doios_multi++;
 791        conn->prof.txlen += conn->tx_buff->len;
 792        conn->prof.tx_pending++;
 793        if (conn->prof.tx_pending > conn->prof.tx_max_pending)
 794                conn->prof.tx_max_pending = conn->prof.tx_pending;
 795        if (rc) {
 796                conn->prof.tx_pending--;
 797                fsm_newstate(fi, CONN_STATE_IDLE);
 798                if (privptr)
 799                        privptr->stats.tx_errors += txpackets;
 800                IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
 801        } else {
 802                if (privptr) {
 803                        privptr->stats.tx_packets += txpackets;
 804                        privptr->stats.tx_bytes += txbytes;
 805                }
 806                if (stat_maxcq > conn->prof.maxcqueue)
 807                        conn->prof.maxcqueue = stat_maxcq;
 808        }
 809}
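/*
 * Transmit completion handling in a nutshell: skbs that were queued on
 * collect_queue while a send was in flight are drained here, packed
 * back to back into tx_buff (each prefixed with an ll_header) and sent
 * as a single IUCV message.  doios_multi counts such combined sends,
 * maxmulti records the largest amount of data ever collected.
 */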
 810
 811static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
 812{
 813        struct iucv_event *ev = arg;
 814        struct iucv_connection *conn = ev->conn;
 815        struct iucv_path *path = ev->data;
 816        struct net_device *netdev = conn->netdev;
 817        struct netiucv_priv *privptr = netdev_priv(netdev);
 818        int rc;
 819
 820        IUCV_DBF_TEXT(trace, 3, __func__);
 821
 822        conn->path = path;
 823        path->msglim = NETIUCV_QUEUELEN_DEFAULT;
 824        path->flags = 0;
 825        rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
 826        if (rc) {
 827                IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
 828                return;
 829        }
 830        fsm_newstate(fi, CONN_STATE_IDLE);
 831        netdev->tx_queue_len = conn->path->msglim;
 832        fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
 833}
 834
 835static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
 836{
 837        struct iucv_event *ev = arg;
 838        struct iucv_path *path = ev->data;
 839
 840        IUCV_DBF_TEXT(trace, 3, __func__);
 841        iucv_path_sever(path, NULL);
 842}
 843
 844static void conn_action_connack(fsm_instance *fi, int event, void *arg)
 845{
 846        struct iucv_connection *conn = arg;
 847        struct net_device *netdev = conn->netdev;
 848        struct netiucv_priv *privptr = netdev_priv(netdev);
 849
 850        IUCV_DBF_TEXT(trace, 3, __func__);
 851        fsm_deltimer(&conn->timer);
 852        fsm_newstate(fi, CONN_STATE_IDLE);
 853        netdev->tx_queue_len = conn->path->msglim;
 854        fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
 855}
 856
 857static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
 858{
 859        struct iucv_connection *conn = arg;
 860
 861        IUCV_DBF_TEXT(trace, 3, __func__);
 862        fsm_deltimer(&conn->timer);
 863        iucv_path_sever(conn->path, conn->userdata);
 864        fsm_newstate(fi, CONN_STATE_STARTWAIT);
 865}
 866
 867static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
 868{
 869        struct iucv_connection *conn = arg;
 870        struct net_device *netdev = conn->netdev;
 871        struct netiucv_priv *privptr = netdev_priv(netdev);
 872
 873        IUCV_DBF_TEXT(trace, 3, __func__);
 874
 875        fsm_deltimer(&conn->timer);
 876        iucv_path_sever(conn->path, conn->userdata);
 877        dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
 878                               "connection\n", netiucv_printuser(conn));
 879        IUCV_DBF_TEXT(data, 2,
 880                      "conn_action_connsever: Remote dropped connection\n");
 881        fsm_newstate(fi, CONN_STATE_STARTWAIT);
 882        fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
 883}
 884
 885static void conn_action_start(fsm_instance *fi, int event, void *arg)
 886{
 887        struct iucv_connection *conn = arg;
 888        struct net_device *netdev = conn->netdev;
 889        struct netiucv_priv *privptr = netdev_priv(netdev);
 890        int rc;
 891
 892        IUCV_DBF_TEXT(trace, 3, __func__);
 893
 894        fsm_newstate(fi, CONN_STATE_STARTWAIT);
 895
 896        /*
 897         * We must set the state before calling iucv_connect because the
 898         * callback handler could be called at any point after the connection
 899         * request is sent
 900         */
 901
 902        fsm_newstate(fi, CONN_STATE_SETUPWAIT);
 903        conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
 904        IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
 905                netdev->name, netiucv_printuser(conn));
 906
 907        rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
 908                               NULL, conn->userdata, conn);
 909        switch (rc) {
 910        case 0:
 911                netdev->tx_queue_len = conn->path->msglim;
 912                fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
 913                             CONN_EVENT_TIMER, conn);
 914                return;
 915        case 11:
 916                dev_warn(privptr->dev,
 917                        "The IUCV device failed to connect to z/VM guest %s\n",
 918                        netiucv_printname(conn->userid, 8));
 919                fsm_newstate(fi, CONN_STATE_STARTWAIT);
 920                break;
 921        case 12:
 922                dev_warn(privptr->dev,
 923                        "The IUCV device failed to connect to the peer on z/VM"
 924                        " guest %s\n", netiucv_printname(conn->userid, 8));
 925                fsm_newstate(fi, CONN_STATE_STARTWAIT);
 926                break;
 927        case 13:
 928                dev_err(privptr->dev,
 929                        "Connecting the IUCV device would exceed the maximum"
 930                        " number of IUCV connections\n");
 931                fsm_newstate(fi, CONN_STATE_CONNERR);
 932                break;
 933        case 14:
 934                dev_err(privptr->dev,
 935                        "z/VM guest %s has too many IUCV connections"
 936                        " to connect with the IUCV device\n",
 937                        netiucv_printname(conn->userid, 8));
 938                fsm_newstate(fi, CONN_STATE_CONNERR);
 939                break;
 940        case 15:
 941                dev_err(privptr->dev,
 942                        "The IUCV device cannot connect to a z/VM guest with no"
 943                        " IUCV authorization\n");
 944                fsm_newstate(fi, CONN_STATE_CONNERR);
 945                break;
 946        default:
 947                dev_err(privptr->dev,
 948                        "Connecting the IUCV device failed with error %d\n",
 949                        rc);
 950                fsm_newstate(fi, CONN_STATE_CONNERR);
 951                break;
 952        }
 953        IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
 954        kfree(conn->path);
 955        conn->path = NULL;
 956}
 957
 958static void netiucv_purge_skb_queue(struct sk_buff_head *q)
 959{
 960        struct sk_buff *skb;
 961
 962        while ((skb = skb_dequeue(q))) {
 963                atomic_dec(&skb->users);
 964                dev_kfree_skb_any(skb);
 965        }
 966}
 967
 968static void conn_action_stop(fsm_instance *fi, int event, void *arg)
 969{
 970        struct iucv_event *ev = arg;
 971        struct iucv_connection *conn = ev->conn;
 972        struct net_device *netdev = conn->netdev;
 973        struct netiucv_priv *privptr = netdev_priv(netdev);
 974
 975        IUCV_DBF_TEXT(trace, 3, __func__);
 976
 977        fsm_deltimer(&conn->timer);
 978        fsm_newstate(fi, CONN_STATE_STOPPED);
 979        netiucv_purge_skb_queue(&conn->collect_queue);
 980        if (conn->path) {
 981                IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
 982                iucv_path_sever(conn->path, conn->userdata);
 983                kfree(conn->path);
 984                conn->path = NULL;
 985        }
 986        netiucv_purge_skb_queue(&conn->commit_queue);
 987        fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
 988}
 989
 990static void conn_action_inval(fsm_instance *fi, int event, void *arg)
 991{
 992        struct iucv_connection *conn = arg;
 993        struct net_device *netdev = conn->netdev;
 994
 995        IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
 996                netdev->name, conn->userid);
 997}
 998
 999static const fsm_node conn_fsm[] = {
1000        { CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
1001        { CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
1002
1003        { CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
1004        { CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
1005        { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
1006        { CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
1007        { CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
1008        { CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
1009        { CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
1010
1011        { CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
1012        { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1013        { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1014        { CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
1015        { CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
1016
1017        { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
1018        { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
1019
1020        { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
1021        { CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
1022        { CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
1023
1024        { CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
1025        { CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
1026
1027        { CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
1028        { CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
1029};
1030
1031static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
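/*
 * This table, together with CONN_FSM_LEN, is handed to the generic FSM
 * code (fsm.h/fsm.c), presumably via init_fsm(), when a connection is
 * created later in this file; only the (state, event) pairs listed
 * here trigger an action.
 */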
1032
1033
1034/*
1035 * Actions for interface - statemachine.
1036 */
1037
1038/**
1039 * dev_action_start
1040 * @fi: An instance of an interface statemachine.
 1041 * @event: The event that just happened.
 1042 * @arg: Generic pointer, cast from struct net_device * upon call.
1043 *
1044 * Startup connection by sending CONN_EVENT_START to it.
1045 */
1046static void dev_action_start(fsm_instance *fi, int event, void *arg)
1047{
1048        struct net_device   *dev = arg;
1049        struct netiucv_priv *privptr = netdev_priv(dev);
1050
1051        IUCV_DBF_TEXT(trace, 3, __func__);
1052
1053        fsm_newstate(fi, DEV_STATE_STARTWAIT);
1054        fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1055}
1056
1057/**
1058 * Shutdown connection by sending CONN_EVENT_STOP to it.
1059 *
1060 * @param fi    An instance of an interface statemachine.
 1061 * @param event The event that just happened.
 1062 * @param arg   Generic pointer, cast from struct net_device * upon call.
1063 */
1064static void
1065dev_action_stop(fsm_instance *fi, int event, void *arg)
1066{
1067        struct net_device   *dev = arg;
1068        struct netiucv_priv *privptr = netdev_priv(dev);
1069        struct iucv_event   ev;
1070
1071        IUCV_DBF_TEXT(trace, 3, __func__);
1072
1073        ev.conn = privptr->conn;
1074
1075        fsm_newstate(fi, DEV_STATE_STOPWAIT);
1076        fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1077}
1078
1079/**
1080 * Called from connection statemachine
1081 * when a connection is up and running.
1082 *
1083 * @param fi    An instance of an interface statemachine.
 1084 * @param event The event that just happened.
 1085 * @param arg   Generic pointer, cast from struct net_device * upon call.
1086 */
1087static void
1088dev_action_connup(fsm_instance *fi, int event, void *arg)
1089{
1090        struct net_device   *dev = arg;
1091        struct netiucv_priv *privptr = netdev_priv(dev);
1092
1093        IUCV_DBF_TEXT(trace, 3, __func__);
1094
1095        switch (fsm_getstate(fi)) {
1096                case DEV_STATE_STARTWAIT:
1097                        fsm_newstate(fi, DEV_STATE_RUNNING);
1098                        dev_info(privptr->dev,
1099                                "The IUCV device has been connected"
1100                                " successfully to %s\n",
1101                                netiucv_printuser(privptr->conn));
1102                        IUCV_DBF_TEXT(setup, 3,
1103                                "connection is up and running\n");
1104                        break;
1105                case DEV_STATE_STOPWAIT:
1106                        IUCV_DBF_TEXT(data, 2,
1107                                "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1108                        break;
1109        }
1110}
1111
1112/**
1113 * Called from connection statemachine
1114 * when a connection has been shutdown.
1115 *
1116 * @param fi    An instance of an interface statemachine.
 1117 * @param event The event that just happened.
 1118 * @param arg   Generic pointer, cast from struct net_device * upon call.
1119 */
1120static void
1121dev_action_conndown(fsm_instance *fi, int event, void *arg)
1122{
1123        IUCV_DBF_TEXT(trace, 3, __func__);
1124
1125        switch (fsm_getstate(fi)) {
1126                case DEV_STATE_RUNNING:
1127                        fsm_newstate(fi, DEV_STATE_STARTWAIT);
1128                        break;
1129                case DEV_STATE_STOPWAIT:
1130                        fsm_newstate(fi, DEV_STATE_STOPPED);
1131                        IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1132                        break;
1133        }
1134}
1135
1136static const fsm_node dev_fsm[] = {
1137        { DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
1138
1139        { DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
1140        { DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
1141
1142        { DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
1143        { DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
1144
1145        { DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
1146        { DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
1147        { DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
1148};
1149
1150static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1151
1152/**
1153 * Transmit a packet.
1154 * This is a helper function for netiucv_tx().
1155 *
1156 * @param conn Connection to be used for sending.
1157 * @param skb Pointer to struct sk_buff of packet to send.
 1158 *            netiucv_tx() has already checked that there is
 1159 *            enough headroom for the link level header.
1160 *
 1161 * @return 0 on success, -ERRNO on failure.
1162 */
1163static int netiucv_transmit_skb(struct iucv_connection *conn,
1164                                struct sk_buff *skb)
1165{
1166        struct iucv_message msg;
1167        unsigned long saveflags;
1168        struct ll_header header;
1169        int rc;
1170
1171        if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1172                int l = skb->len + NETIUCV_HDRLEN;
1173
1174                spin_lock_irqsave(&conn->collect_lock, saveflags);
1175                if (conn->collect_len + l >
1176                    (conn->max_buffsize - NETIUCV_HDRLEN)) {
1177                        rc = -EBUSY;
1178                        IUCV_DBF_TEXT(data, 2,
1179                                      "EBUSY from netiucv_transmit_skb\n");
1180                } else {
1181                        atomic_inc(&skb->users);
1182                        skb_queue_tail(&conn->collect_queue, skb);
1183                        conn->collect_len += l;
1184                        rc = 0;
1185                }
1186                spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1187        } else {
1188                struct sk_buff *nskb = skb;
1189                /**
 1190                 * Copy the skb to a newly allocated skb in lowmem only if the
1191                 * data is located above 2G in memory or tailroom is < 2.
1192                 */
1193                unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1194                                    NETIUCV_HDRLEN)) >> 31;
1195                int copied = 0;
1196                if (hi || (skb_tailroom(skb) < 2)) {
1197                        nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1198                                         NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1199                        if (!nskb) {
1200                                IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1201                                rc = -ENOMEM;
1202                                return rc;
1203                        } else {
1204                                skb_reserve(nskb, NETIUCV_HDRLEN);
1205                                memcpy(skb_put(nskb, skb->len),
1206                                       skb->data, skb->len);
1207                        }
1208                        copied = 1;
1209                }
1210                /**
1211                 * skb now is below 2G and has enough room. Add headers.
1212                 */
1213                header.next = nskb->len + NETIUCV_HDRLEN;
1214                memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1215                header.next = 0;
1216                memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header,  NETIUCV_HDRLEN);
1217
1218                fsm_newstate(conn->fsm, CONN_STATE_TX);
1219                conn->prof.send_stamp = jiffies;
1220
1221                msg.tag = 1;
1222                msg.class = 0;
1223                rc = iucv_message_send(conn->path, &msg, 0, 0,
1224                                       nskb->data, nskb->len);
1225                conn->prof.doios_single++;
1226                conn->prof.txlen += skb->len;
1227                conn->prof.tx_pending++;
1228                if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1229                        conn->prof.tx_max_pending = conn->prof.tx_pending;
1230                if (rc) {
1231                        struct netiucv_priv *privptr;
1232                        fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1233                        conn->prof.tx_pending--;
1234                        privptr = netdev_priv(conn->netdev);
1235                        if (privptr)
1236                                privptr->stats.tx_errors++;
1237                        if (copied)
1238                                dev_kfree_skb(nskb);
1239                        else {
1240                                /**
1241                                 * Remove our headers. They get added
1242                                 * again on retransmit.
1243                                 */
1244                                skb_pull(skb, NETIUCV_HDRLEN);
1245                                skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1246                        }
1247                        IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1248                } else {
1249                        if (copied)
1250                                dev_kfree_skb(skb);
1251                        atomic_inc(&nskb->users);
1252                        skb_queue_tail(&conn->commit_queue, nskb);
1253                }
1254        }
1255
1256        return rc;
1257}
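/*
 * Two details of the transmit path above: while the connection is not
 * idle, skbs are merely queued on collect_queue and sent later from
 * conn_action_txdone(); and before a direct send the data may be
 * copied into a GFP_DMA skb, because the IUCV buffer apparently has to
 * be 31-bit addressable (below 2 GB).
 */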
1258
1259/*
1260 * Interface API for upper network layers
1261 */
1262
1263/**
1264 * Open an interface.
1265 * Called from generic network layer when ifconfig up is run.
1266 *
1267 * @param dev Pointer to interface struct.
1268 *
1269 * @return 0 on success, -ERRNO on failure. (Never fails.)
1270 */
1271static int netiucv_open(struct net_device *dev)
1272{
1273        struct netiucv_priv *priv = netdev_priv(dev);
1274
1275        fsm_event(priv->fsm, DEV_EVENT_START, dev);
1276        return 0;
1277}
1278
1279/**
1280 * Close an interface.
1281 * Called from generic network layer when ifconfig down is run.
1282 *
1283 * @param dev Pointer to interface struct.
1284 *
1285 * @return 0 on success, -ERRNO on failure. (Never fails.)
1286 */
1287static int netiucv_close(struct net_device *dev)
1288{
1289        struct netiucv_priv *priv = netdev_priv(dev);
1290
1291        fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1292        return 0;
1293}
1294
1295static int netiucv_pm_prepare(struct device *dev)
1296{
1297        IUCV_DBF_TEXT(trace, 3, __func__);
1298        return 0;
1299}
1300
1301static void netiucv_pm_complete(struct device *dev)
1302{
1303        IUCV_DBF_TEXT(trace, 3, __func__);
1304        return;
1305}
1306
1307/**
1308 * netiucv_pm_freeze() - Freeze PM callback
1309 * @dev:        netiucv device
1310 *
1311 * close open netiucv interfaces
1312 */
1313static int netiucv_pm_freeze(struct device *dev)
1314{
1315        struct netiucv_priv *priv = dev_get_drvdata(dev);
1316        struct net_device *ndev = NULL;
1317        int rc = 0;
1318
1319        IUCV_DBF_TEXT(trace, 3, __func__);
1320        if (priv && priv->conn)
1321                ndev = priv->conn->netdev;
1322        if (!ndev)
1323                goto out;
1324        netif_device_detach(ndev);
1325        priv->pm_state = fsm_getstate(priv->fsm);
1326        rc = netiucv_close(ndev);
1327out:
1328        return rc;
1329}
1330
1331/**
1332 * netiucv_pm_restore_thaw() - Thaw and restore PM callback
1333 * @dev:        netiucv device
1334 *
1335 * re-open netiucv interfaces closed during freeze
1336 */
1337static int netiucv_pm_restore_thaw(struct device *dev)
1338{
1339        struct netiucv_priv *priv = dev_get_drvdata(dev);
1340        struct net_device *ndev = NULL;
1341        int rc = 0;
1342
1343        IUCV_DBF_TEXT(trace, 3, __func__);
1344        if (priv && priv->conn)
1345                ndev = priv->conn->netdev;
1346        if (!ndev)
1347                goto out;
1348        switch (priv->pm_state) {
1349        case DEV_STATE_RUNNING:
1350        case DEV_STATE_STARTWAIT:
1351                rc = netiucv_open(ndev);
1352                break;
1353        default:
1354                break;
1355        }
1356        netif_device_attach(ndev);
1357out:
1358        return rc;
1359}
1360
1361/**
1362 * Start transmission of a packet.
1363 * Called from generic network device layer.
1364 *
1365 * @param skb Pointer to buffer containing the packet.
1366 * @param dev Pointer to interface struct.
1367 *
 1368 * @return NETDEV_TX_OK if the packet was consumed, NETDEV_TX_BUSY otherwise.
 1369 *         Note: If we return NETDEV_TX_BUSY, the packet is not freed
 1370 *               but will be retried by the generic network layer.
1371 */
1372static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1373{
1374        struct netiucv_priv *privptr = netdev_priv(dev);
1375        int rc;
1376
1377        IUCV_DBF_TEXT(trace, 4, __func__);
1378        /**
1379         * Some sanity checks ...
1380         */
1381        if (skb == NULL) {
1382                IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1383                privptr->stats.tx_dropped++;
1384                return NETDEV_TX_OK;
1385        }
1386        if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1387                IUCV_DBF_TEXT(data, 2,
1388                        "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1389                dev_kfree_skb(skb);
1390                privptr->stats.tx_dropped++;
1391                return NETDEV_TX_OK;
1392        }
1393
1394        /**
 1395         * If the connection is not running,
 1396         * throw away the packet.
1397         */
1398        if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1399                dev_kfree_skb(skb);
1400                privptr->stats.tx_dropped++;
1401                privptr->stats.tx_errors++;
1402                privptr->stats.tx_carrier_errors++;
1403                return NETDEV_TX_OK;
1404        }
1405
1406        if (netiucv_test_and_set_busy(dev)) {
1407                IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1408                return NETDEV_TX_BUSY;
1409        }
1410        dev->trans_start = jiffies;
1411        rc = netiucv_transmit_skb(privptr->conn, skb);
1412        netiucv_clear_busy(dev);
1413        return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1414}
1415
1416/**
1417 * netiucv_stats
1418 * @dev: Pointer to interface struct.
1419 *
1420 * Returns interface statistics of a device.
1421 *
1422 * Returns pointer to stats struct of this interface.
1423 */
1424static struct net_device_stats *netiucv_stats (struct net_device * dev)
1425{
1426        struct netiucv_priv *priv = netdev_priv(dev);
1427
1428        IUCV_DBF_TEXT(trace, 5, __func__);
1429        return &priv->stats;
1430}
1431
1432/**
1433 * netiucv_change_mtu
1434 * @dev: Pointer to interface struct.
1435 * @new_mtu: The new MTU to use for this interface.
1436 *
1437 * Sets MTU of an interface.
1438 *
1439 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1440 *         (valid range is 576 .. NETIUCV_MTU_MAX).
1441 */
1442static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1443{
1444        IUCV_DBF_TEXT(trace, 3, __func__);
1445        if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1446                IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1447                return -EINVAL;
1448        }
1449        dev->mtu = new_mtu;
1450        return 0;
1451}
1452
1453/*
1454 * attributes in sysfs
1455 */
1456
1457static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1458                         char *buf)
1459{
1460        struct netiucv_priv *priv = dev_get_drvdata(dev);
1461
1462        IUCV_DBF_TEXT(trace, 5, __func__);
1463        return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1464}
1465
1466static int netiucv_check_user(const char *buf, size_t count, char *username,
1467                              char *userdata)
1468{
1469        const char *p;
1470        int i;
1471
1472        p = strchr(buf, '.');
1473        if ((p && ((count > 26) ||
1474                   ((p - buf) > 8) ||
1475                   (buf + count - p > 18))) ||
1476            (!p && (count > 9))) {
1477                IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1478                return -EINVAL;
1479        }
1480
1481        for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
1482                if (isalnum(*p) || *p == '$') {
1483                        username[i] = toupper(*p);
1484                        continue;
1485                }
1486                if (*p == '\n')
1487                        /* trailing lf, grr */
1488                        break;
1489                IUCV_DBF_TEXT_(setup, 2,
1490                               "conn_write: invalid character %02x\n", *p);
1491                return -EINVAL;
1492        }
1493        while (i < 8)
1494                username[i++] = ' ';
1495        username[8] = '\0';
1496
1497        if (*p == '.') {
1498                p++;
1499                for (i = 0; i < 16 && *p; i++, p++) {
1500                        if (*p == '\n')
1501                                break;
1502                        userdata[i] = toupper(*p);
1503                }
1504                while (i > 0 && i < 16)
1505                        userdata[i++] = ' ';
1506        } else
1507                memcpy(userdata, iucvMagic_ascii, 16);
1508        userdata[16] = '\0';
1509        ASCEBC(userdata, 16);
1510
1511        return 0;
1512}
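/*
 * The sysfs 'user' attribute below therefore accepts "USERID" or
 * "USERID.userdata": up to 8 alphanumeric or '$' characters for the
 * z/VM user ID, optionally followed by a dot and up to 16 characters
 * of user data.  Both parts are blank padded and upper cased; the user
 * data is additionally converted to EBCDIC before it is stored in the
 * connection.
 */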
1513
1514static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1515                          const char *buf, size_t count)
1516{
1517        struct netiucv_priv *priv = dev_get_drvdata(dev);
1518        struct net_device *ndev = priv->conn->netdev;
1519        char    username[9];
1520        char    userdata[17];
1521        int     rc;
1522        struct iucv_connection *cp;
1523
1524        IUCV_DBF_TEXT(trace, 3, __func__);
1525        rc = netiucv_check_user(buf, count, username, userdata);
1526        if (rc)
1527                return rc;
1528
1529        if (memcmp(username, priv->conn->userid, 9) &&
1530            (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1531                /* username changed while the interface is active. */
1532                IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1533                return -EPERM;
1534        }
1535        read_lock_bh(&iucv_connection_rwlock);
1536        list_for_each_entry(cp, &iucv_connection_list, list) {
1537                if (!strncmp(username, cp->userid, 9) &&
1538                   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
1539                        read_unlock_bh(&iucv_connection_rwlock);
1540                        IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
1541                                "already exists\n", netiucv_printuser(cp));
1542                        return -EEXIST;
1543                }
1544        }
1545        read_unlock_bh(&iucv_connection_rwlock);
1546        memcpy(priv->conn->userid, username, 9);
1547        memcpy(priv->conn->userdata, userdata, 17);
1548        return count;
1549}
1550
1551static DEVICE_ATTR(user, 0644, user_show, user_write);
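
/*
 * Minimal usage sketch for the "user" attribute, assuming the per-interface
 * device appears on the iucv bus under the name "net<ifname>" as set in
 * netiucv_register_device() below (the path is an assumption, not verified
 * here):
 *
 *      echo "otherlnx.routing1" > /sys/bus/iucv/devices/netiucv0/user
 *      cat /sys/bus/iucv/devices/netiucv0/user
 *
 * Writes that change the peer are rejected with -EPERM while the interface
 * is up, and with -EEXIST if another interface already uses the same peer.
 */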
1552
1553static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1554                            char *buf)
1555{
1556        struct netiucv_priv *priv = dev_get_drvdata(dev);
1557
1558        IUCV_DBF_TEXT(trace, 5, __func__);
1559        return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1560}
1561
1562static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1563                             const char *buf, size_t count)
1564{
1565        struct netiucv_priv *priv = dev_get_drvdata(dev);
1566        struct net_device *ndev = priv->conn->netdev;
1567        char         *e;
1568        int          bs1;
1569
1570        IUCV_DBF_TEXT(trace, 3, __func__);
1571        if (count >= 39)
1572                return -EINVAL;
1573
1574        bs1 = simple_strtoul(buf, &e, 0);
1575
1576        if (e && (!isspace(*e))) {
1577                IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
1578                        *e);
1579                return -EINVAL;
1580        }
1581        if (bs1 > NETIUCV_BUFSIZE_MAX) {
1582                IUCV_DBF_TEXT_(setup, 2,
1583                        "buffer_write: buffer size %d too large\n",
1584                        bs1);
1585                return -EINVAL;
1586        }
1587        if ((ndev->flags & IFF_RUNNING) &&
1588            (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1589                IUCV_DBF_TEXT_(setup, 2,
1590                        "buffer_write: buffer size %d too small\n",
1591                        bs1);
1592                return -EINVAL;
1593        }
1594        if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1595                IUCV_DBF_TEXT_(setup, 2,
1596                        "buffer_write: buffer size %d too small\n",
1597                        bs1);
1598                return -EINVAL;
1599        }
1600
1601        priv->conn->max_buffsize = bs1;
1602        if (!(ndev->flags & IFF_RUNNING))
1603                ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1604
1605        return count;
1606
1607}
1608
1609static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
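
/*
 * The checks above bound the "buffer" attribute (values taken from the
 * code): the new size must be at least 576 + 2 * NETIUCV_HDRLEN bytes, at
 * most NETIUCV_BUFSIZE_MAX, and, while the interface is running, large
 * enough for the current MTU plus header.  When the interface is down, the
 * MTU is re-derived from the buffer size.  Hypothetical example (same path
 * assumption as in the "user" sketch above):
 *
 *      echo 16384 > /sys/bus/iucv/devices/netiucv0/buffer
 */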
1610
1611static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1612                             char *buf)
1613{
1614        struct netiucv_priv *priv = dev_get_drvdata(dev);
1615
1616        IUCV_DBF_TEXT(trace, 5, __func__);
1617        return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1618}
1619
1620static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1621
1622static ssize_t conn_fsm_show (struct device *dev,
1623                              struct device_attribute *attr, char *buf)
1624{
1625        struct netiucv_priv *priv = dev_get_drvdata(dev);
1626
1627        IUCV_DBF_TEXT(trace, 5, __func__);
1628        return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1629}
1630
1631static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1632
1633static ssize_t maxmulti_show (struct device *dev,
1634                              struct device_attribute *attr, char *buf)
1635{
1636        struct netiucv_priv *priv = dev_get_drvdata(dev);
1637
1638        IUCV_DBF_TEXT(trace, 5, __func__);
1639        return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1640}
1641
1642static ssize_t maxmulti_write (struct device *dev,
1643                               struct device_attribute *attr,
1644                               const char *buf, size_t count)
1645{
1646        struct netiucv_priv *priv = dev_get_drvdata(dev);
1647
1648        IUCV_DBF_TEXT(trace, 4, __func__);
1649        priv->conn->prof.maxmulti = 0;
1650        return count;
1651}
1652
1653static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1654
1655static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1656                           char *buf)
1657{
1658        struct netiucv_priv *priv = dev_get_drvdata(dev);
1659
1660        IUCV_DBF_TEXT(trace, 5, __func__);
1661        return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1662}
1663
1664static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1665                            const char *buf, size_t count)
1666{
1667        struct netiucv_priv *priv = dev_get_drvdata(dev);
1668
1669        IUCV_DBF_TEXT(trace, 4, __func__);
1670        priv->conn->prof.maxcqueue = 0;
1671        return count;
1672}
1673
1674static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1675
1676static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1677                           char *buf)
1678{
1679        struct netiucv_priv *priv = dev_get_drvdata(dev);
1680
1681        IUCV_DBF_TEXT(trace, 5, __func__);
1682        return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1683}
1684
1685static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1686                            const char *buf, size_t count)
1687{
1688        struct netiucv_priv *priv = dev_get_drvdata(dev);
1689
1690        IUCV_DBF_TEXT(trace, 4, __func__);
1691        priv->conn->prof.doios_single = 0;
1692        return count;
1693}
1694
1695static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1696
1697static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1698                           char *buf)
1699{
1700        struct netiucv_priv *priv = dev_get_drvdata(dev);
1701
1702        IUCV_DBF_TEXT(trace, 5, __func__);
1703        return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1704}
1705
1706static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1707                            const char *buf, size_t count)
1708{
1709        struct netiucv_priv *priv = dev_get_drvdata(dev);
1710
1711        IUCV_DBF_TEXT(trace, 5, __func__);
1712        priv->conn->prof.doios_multi = 0;
1713        return count;
1714}
1715
1716static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1717
1718static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1719                           char *buf)
1720{
1721        struct netiucv_priv *priv = dev_get_drvdata(dev);
1722
1723        IUCV_DBF_TEXT(trace, 5, __func__);
1724        return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1725}
1726
1727static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1728                            const char *buf, size_t count)
1729{
1730        struct netiucv_priv *priv = dev_get_drvdata(dev);
1731
1732        IUCV_DBF_TEXT(trace, 4, __func__);
1733        priv->conn->prof.txlen = 0;
1734        return count;
1735}
1736
1737static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1738
1739static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1740                            char *buf)
1741{
1742        struct netiucv_priv *priv = dev_get_drvdata(dev);
1743
1744        IUCV_DBF_TEXT(trace, 5, __func__);
1745        return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1746}
1747
1748static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1749                             const char *buf, size_t count)
1750{
1751        struct netiucv_priv *priv = dev_get_drvdata(dev);
1752
1753        IUCV_DBF_TEXT(trace, 4, __func__);
1754        priv->conn->prof.tx_time = 0;
1755        return count;
1756}
1757
1758static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1759
1760static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1761                            char *buf)
1762{
1763        struct netiucv_priv *priv = dev_get_drvdata(dev);
1764
1765        IUCV_DBF_TEXT(trace, 5, __func__);
1766        return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1767}
1768
1769static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1770                             const char *buf, size_t count)
1771{
1772        struct netiucv_priv *priv = dev_get_drvdata(dev);
1773
1774        IUCV_DBF_TEXT(trace, 4, __func__);
1775        priv->conn->prof.tx_pending = 0;
1776        return count;
1777}
1778
1779static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1780
1781static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1782                            char *buf)
1783{
1784        struct netiucv_priv *priv = dev_get_drvdata(dev);
1785
1786        IUCV_DBF_TEXT(trace, 5, __func__);
1787        return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1788}
1789
1790static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1791                             const char *buf, size_t count)
1792{
1793        struct netiucv_priv *priv = dev_get_drvdata(dev);
1794
1795        IUCV_DBF_TEXT(trace, 4, __func__);
1796        priv->conn->prof.tx_max_pending = 0;
1797        return count;
1798}
1799
1800static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1801
1802static struct attribute *netiucv_attrs[] = {
1803        &dev_attr_buffer.attr,
1804        &dev_attr_user.attr,
1805        NULL,
1806};
1807
1808static struct attribute_group netiucv_attr_group = {
1809        .attrs = netiucv_attrs,
1810};
1811
1812static struct attribute *netiucv_stat_attrs[] = {
1813        &dev_attr_device_fsm_state.attr,
1814        &dev_attr_connection_fsm_state.attr,
1815        &dev_attr_max_tx_buffer_used.attr,
1816        &dev_attr_max_chained_skbs.attr,
1817        &dev_attr_tx_single_write_ops.attr,
1818        &dev_attr_tx_multi_write_ops.attr,
1819        &dev_attr_netto_bytes.attr,
1820        &dev_attr_max_tx_io_time.attr,
1821        &dev_attr_tx_pending.attr,
1822        &dev_attr_tx_max_pending.attr,
1823        NULL,
1824};
1825
1826static struct attribute_group netiucv_stat_attr_group = {
1827        .name  = "stats",
1828        .attrs = netiucv_stat_attrs,
1829};
1830
1831static const struct attribute_group *netiucv_attr_groups[] = {
1832        &netiucv_stat_attr_group,
1833        &netiucv_attr_group,
1834        NULL,
1835};
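
/*
 * Together these groups give every netiucv device a "user" and "buffer"
 * attribute plus a "stats" subdirectory.  The stats counters can be read,
 * and any write resets the corresponding counter to zero, e.g.
 * (hypothetical path, see the sketches above):
 *
 *      cat /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 *      echo 0 > /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 */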
1836
1837static int netiucv_register_device(struct net_device *ndev)
1838{
1839        struct netiucv_priv *priv = netdev_priv(ndev);
1840        struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1841        int ret;
1842
1843        IUCV_DBF_TEXT(trace, 3, __func__);
1844
1845        if (dev) {
1846                dev_set_name(dev, "net%s", ndev->name);
1847                dev->bus = &iucv_bus;
1848                dev->parent = iucv_root;
1849                dev->groups = netiucv_attr_groups;
1850                /*
1851                 * The release function could be called after the
1852                 * module has been unloaded. Its _only_ task is to
1853                 * free the struct. Therefore, we specify kfree()
1854                 * directly here. (Probably a little bit obfuscating
1855                 * but legitimate ...).
1856                 */
1857                dev->release = (void (*)(struct device *))kfree;
1858                dev->driver = &netiucv_driver;
1859        } else
1860                return -ENOMEM;
1861
1862        ret = device_register(dev);
1863        if (ret) {
1864                put_device(dev);
1865                return ret;
1866        }
1867        priv->dev = dev;
1868        dev_set_drvdata(dev, priv);
1869        return 0;
1870}
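
/*
 * A more conventional, equivalent way to express the dev->release
 * assignment above would be a named callback; sketch only, not used here:
 *
 *      static void netiucv_dev_release(struct device *dev)
 *      {
 *              kfree(dev);
 *      }
 *
 *      ...
 *      dev->release = netiucv_dev_release;
 *
 * Casting kfree() directly saves the helper at the cost of readability.
 */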
1871
1872static void netiucv_unregister_device(struct device *dev)
1873{
1874        IUCV_DBF_TEXT(trace, 3, __func__);
1875        device_unregister(dev);
1876}
1877
1878/**
1879 * Allocate and initialize a new connection structure.
1880 * Add it to the list of netiucv connections.
1881 */
1882static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1883                                                      char *username,
1884                                                      char *userdata)
1885{
1886        struct iucv_connection *conn;
1887
1888        conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1889        if (!conn)
1890                goto out;
1891        skb_queue_head_init(&conn->collect_queue);
1892        skb_queue_head_init(&conn->commit_queue);
1893        spin_lock_init(&conn->collect_lock);
1894        conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1895        conn->netdev = dev;
1896
1897        conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1898        if (!conn->rx_buff)
1899                goto out_conn;
1900        conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1901        if (!conn->tx_buff)
1902                goto out_rx;
1903        conn->fsm = init_fsm("netiucvconn", conn_state_names,
1904                             conn_event_names, NR_CONN_STATES,
1905                             NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1906                             GFP_KERNEL);
1907        if (!conn->fsm)
1908                goto out_tx;
1909
1910        fsm_settimer(conn->fsm, &conn->timer);
1911        fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1912
1913        if (userdata)
1914                memcpy(conn->userdata, userdata, 17);
1915        if (username) {
1916                memcpy(conn->userid, username, 9);
1917                fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1918        }
1919
1920        write_lock_bh(&iucv_connection_rwlock);
1921        list_add_tail(&conn->list, &iucv_connection_list);
1922        write_unlock_bh(&iucv_connection_rwlock);
1923        return conn;
1924
1925out_tx:
1926        kfree_skb(conn->tx_buff);
1927out_rx:
1928        kfree_skb(conn->rx_buff);
1929out_conn:
1930        kfree(conn);
1931out:
1932        return NULL;
1933}
1934
1935/**
1936 * Release a connection structure and remove it from the
1937 * list of netiucv connections.
1938 */
1939static void netiucv_remove_connection(struct iucv_connection *conn)
1940{
1941
1942        IUCV_DBF_TEXT(trace, 3, __func__);
1943        write_lock_bh(&iucv_connection_rwlock);
1944        list_del_init(&conn->list);
1945        write_unlock_bh(&iucv_connection_rwlock);
1946        fsm_deltimer(&conn->timer);
1947        netiucv_purge_skb_queue(&conn->collect_queue);
1948        if (conn->path) {
1949                iucv_path_sever(conn->path, conn->userdata);
1950                kfree(conn->path);
1951                conn->path = NULL;
1952        }
1953        netiucv_purge_skb_queue(&conn->commit_queue);
1954        kfree_fsm(conn->fsm);
1955        kfree_skb(conn->rx_buff);
1956        kfree_skb(conn->tx_buff);
1957}
1958
1959/**
1960 * Release a net device and all of its associated resources.
1961 */
1962static void netiucv_free_netdevice(struct net_device *dev)
1963{
1964        struct netiucv_priv *privptr;
1965
1966        IUCV_DBF_TEXT(trace, 3, __func__);
1967
1968        if (!dev)
1969                return;
1970        privptr = netdev_priv(dev);
1971        if (privptr) {
1972                if (privptr->conn)
1973                        netiucv_remove_connection(privptr->conn);
1974                if (privptr->fsm)
1975                        kfree_fsm(privptr->fsm);
1976                privptr->conn = NULL; privptr->fsm = NULL;
1977                /* privptr gets freed by free_netdev() */
1978        }
1979        free_netdev(dev);
1980}
1981
1982/**
1983 * Initialize a net device. (Called from kernel in alloc_netdev())
1984 */
1985static const struct net_device_ops netiucv_netdev_ops = {
1986        .ndo_open               = netiucv_open,
1987        .ndo_stop               = netiucv_close,
1988        .ndo_get_stats          = netiucv_stats,
1989        .ndo_start_xmit         = netiucv_tx,
1990        .ndo_change_mtu         = netiucv_change_mtu,
1991};
1992
1993static void netiucv_setup_netdevice(struct net_device *dev)
1994{
1995        dev->mtu                 = NETIUCV_MTU_DEFAULT;
1996        dev->destructor          = netiucv_free_netdevice;
1997        dev->hard_header_len     = NETIUCV_HDRLEN;
1998        dev->addr_len            = 0;
1999        dev->type                = ARPHRD_SLIP;
2000        dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
2001        dev->flags               = IFF_POINTOPOINT | IFF_NOARP;
2002        dev->netdev_ops          = &netiucv_netdev_ops;
2003}
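
/*
 * The settings above create a point-to-point, ARP-less interface of type
 * ARPHRD_SLIP with a default MTU of NETIUCV_MTU_DEFAULT.  A hypothetical
 * configuration from user space could therefore look like:
 *
 *      ip addr add 10.0.0.1 peer 10.0.0.2 dev iucv0
 *      ip link set iucv0 up
 *
 * (interface name and addresses are examples only).
 */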
2004
2005/**
2006 * Allocate and initialize a net device together with its connection.
2007 */
2008static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
2009{
2010        struct netiucv_priv *privptr;
2011        struct net_device *dev;
2012
2013        dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
2014                           NET_NAME_UNKNOWN, netiucv_setup_netdevice);
2015        if (!dev)
2016                return NULL;
2017        rtnl_lock();
2018        if (dev_alloc_name(dev, dev->name) < 0)
2019                goto out_netdev;
2020
2021        privptr = netdev_priv(dev);
2022        privptr->fsm = init_fsm("netiucvdev", dev_state_names,
2023                                dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2024                                dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2025        if (!privptr->fsm)
2026                goto out_netdev;
2027
2028        privptr->conn = netiucv_new_connection(dev, username, userdata);
2029        if (!privptr->conn) {
2030                IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
2031                goto out_fsm;
2032        }
2033        fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2034        return dev;
2035
2036out_fsm:
2037        kfree_fsm(privptr->fsm);
2038out_netdev:
2039        rtnl_unlock();
2040        free_netdev(dev);
2041        return NULL;
2042}
2043
2044static ssize_t conn_write(struct device_driver *drv,
2045                          const char *buf, size_t count)
2046{
2047        char username[9];
2048        char userdata[17];
2049        int rc;
2050        struct net_device *dev;
2051        struct netiucv_priv *priv;
2052        struct iucv_connection *cp;
2053
2054        IUCV_DBF_TEXT(trace, 3, __func__);
2055        rc = netiucv_check_user(buf, count, username, userdata);
2056        if (rc)
2057                return rc;
2058
2059        read_lock_bh(&iucv_connection_rwlock);
2060        list_for_each_entry(cp, &iucv_connection_list, list) {
2061                if (!strncmp(username, cp->userid, 9) &&
2062                    !strncmp(userdata, cp->userdata, 17)) {
2063                        read_unlock_bh(&iucv_connection_rwlock);
2064                        IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
2065                                "already exists\n", netiucv_printuser(cp));
2066                        return -EEXIST;
2067                }
2068        }
2069        read_unlock_bh(&iucv_connection_rwlock);
2070
2071        dev = netiucv_init_netdevice(username, userdata);
2072        if (!dev) {
2073                IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2074                return -ENODEV;
2075        }
2076
2077        rc = netiucv_register_device(dev);
2078        if (rc) {
2079                rtnl_unlock();
2080                IUCV_DBF_TEXT_(setup, 2,
2081                        "ret %d from netiucv_register_device\n", rc);
2082                goto out_free_ndev;
2083        }
2084
2085        /* sysfs magic */
2086        priv = netdev_priv(dev);
2087        SET_NETDEV_DEV(dev, priv->dev);
2088
2089        rc = register_netdevice(dev);
2090        rtnl_unlock();
2091        if (rc)
2092                goto out_unreg;
2093
2094        dev_info(priv->dev, "The IUCV interface to %s has been established "
2095                            "successfully\n",
2096                netiucv_printuser(priv->conn));
2097
2098        return count;
2099
2100out_unreg:
2101        netiucv_unregister_device(priv->dev);
2102out_free_ndev:
2103        netiucv_free_netdevice(dev);
2104        return rc;
2105}
2106
2107static DRIVER_ATTR(connection, 0200, NULL, conn_write);
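
/*
 * Writing a peer specification (see netiucv_check_user() above) to the
 * driver's "connection" attribute is what creates a new iucv<n> interface
 * in the first place.  Minimal sketch, assuming the driver is registered as
 * "netiucv" on the iucv bus (path not verified here):
 *
 *      echo "lnxpeer" > /sys/bus/iucv/drivers/netiucv/connection
 *
 * The write fails with -EEXIST if a connection to that peer already exists
 * and with -ENODEV if the net device cannot be set up.
 */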
2108
2109static ssize_t remove_write (struct device_driver *drv,
2110                             const char *buf, size_t count)
2111{
2112        struct iucv_connection *cp;
2113        struct net_device *ndev;
2114        struct netiucv_priv *priv;
2115        struct device *dev;
2116        char name[IFNAMSIZ];
2117        const char *p;
2118        int i;
2119
2120        IUCV_DBF_TEXT(trace, 3, __func__);
2121
2122        if (count >= IFNAMSIZ)
2123                count = IFNAMSIZ - 1;
2124
2125        for (i = 0, p = buf; i < count && *p; i++, p++) {
2126                if (*p == '\n' || *p == ' ')
2127                        /* trailing lf, grr */
2128                        break;
2129                name[i] = *p;
2130        }
2131        name[i] = '\0';
2132
2133        read_lock_bh(&iucv_connection_rwlock);
2134        list_for_each_entry(cp, &iucv_connection_list, list) {
2135                ndev = cp->netdev;
2136                priv = netdev_priv(ndev);
2137                dev = priv->dev;
2138                if (strncmp(name, ndev->name, count))
2139                        continue;
2140                read_unlock_bh(&iucv_connection_rwlock);
2141                if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2142                        dev_warn(dev, "The IUCV device is connected"
2143                                " to %s and cannot be removed\n",
2144                                priv->conn->userid);
2145                        IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2146                        return -EPERM;
2147                }
2148                unregister_netdev(ndev);
2149                netiucv_unregister_device(dev);
2150                return count;
2151        }
2152        read_unlock_bh(&iucv_connection_rwlock);
2153        IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2154        return -EINVAL;
2155}
2156
2157static DRIVER_ATTR(remove, 0200, NULL, remove_write);
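
/*
 * The counterpart to "connection": writing an interface name to "remove"
 * tears the interface down again, provided it is not up.  Hypothetical
 * example (same path assumption as above):
 *
 *      ip link set iucv0 down
 *      echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
 *
 * -EPERM is returned while the interface is still up, -EINVAL if no
 * interface of that name exists.
 */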
2158
2159static struct attribute * netiucv_drv_attrs[] = {
2160        &driver_attr_connection.attr,
2161        &driver_attr_remove.attr,
2162        NULL,
2163};
2164
2165static struct attribute_group netiucv_drv_attr_group = {
2166        .attrs = netiucv_drv_attrs,
2167};
2168
2169static const struct attribute_group *netiucv_drv_attr_groups[] = {
2170        &netiucv_drv_attr_group,
2171        NULL,
2172};
2173
2174static void netiucv_banner(void)
2175{
2176        pr_info("driver initialized\n");
2177}
2178
2179static void __exit netiucv_exit(void)
2180{
2181        struct iucv_connection *cp;
2182        struct net_device *ndev;
2183        struct netiucv_priv *priv;
2184        struct device *dev;
2185
2186        IUCV_DBF_TEXT(trace, 3, __func__);
2187        while (!list_empty(&iucv_connection_list)) {
2188                cp = list_entry(iucv_connection_list.next,
2189                                struct iucv_connection, list);
2190                ndev = cp->netdev;
2191                priv = netdev_priv(ndev);
2192                dev = priv->dev;
2193
2194                unregister_netdev(ndev);
2195                netiucv_unregister_device(dev);
2196        }
2197
2198        device_unregister(netiucv_dev);
2199        driver_unregister(&netiucv_driver);
2200        iucv_unregister(&netiucv_handler, 1);
2201        iucv_unregister_dbf_views();
2202
2203        pr_info("driver unloaded\n");
2204        return;
2205}
2206
2207static int __init netiucv_init(void)
2208{
2209        int rc;
2210
2211        rc = iucv_register_dbf_views();
2212        if (rc)
2213                goto out;
2214        rc = iucv_register(&netiucv_handler, 1);
2215        if (rc)
2216                goto out_dbf;
2217        IUCV_DBF_TEXT(trace, 3, __func__);
2218        netiucv_driver.groups = netiucv_drv_attr_groups;
2219        rc = driver_register(&netiucv_driver);
2220        if (rc) {
2221                IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2222                goto out_iucv;
2223        }
2224        /* establish dummy device */
2225        netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2226        if (!netiucv_dev) {
2227                rc = -ENOMEM;
2228                goto out_driver;
2229        }
2230        dev_set_name(netiucv_dev, "netiucv");
2231        netiucv_dev->bus = &iucv_bus;
2232        netiucv_dev->parent = iucv_root;
2233        netiucv_dev->release = (void (*)(struct device *))kfree;
2234        netiucv_dev->driver = &netiucv_driver;
2235        rc = device_register(netiucv_dev);
2236        if (rc) {
2237                put_device(netiucv_dev);
2238                goto out_driver;
2239        }
2240        netiucv_banner();
2241        return rc;
2242
2243out_driver:
2244        driver_unregister(&netiucv_driver);
2245out_iucv:
2246        iucv_unregister(&netiucv_handler, 1);
2247out_dbf:
2248        iucv_unregister_dbf_views();
2249out:
2250        return rc;
2251}
2252
2253module_init(netiucv_init);
2254module_exit(netiucv_exit);
2255MODULE_LICENSE("GPL");
2256