linux/drivers/net/plip/plip.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
   3/* PLIP: A parallel port "network" driver for Linux. */
   4/* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
   5/*
   6 * Authors:     Donald Becker <becker@scyld.com>
   7 *              Tommy Thorn <thorn@daimi.aau.dk>
   8 *              Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
   9 *              Alan Cox <gw4pts@gw4pts.ampr.org>
  10 *              Peter Bauer <100136.3530@compuserve.com>
  11 *              Niibe Yutaka <gniibe@mri.co.jp>
  12 *              Nimrod Zimerman <zimerman@mailandnews.com>
  13 *
  14 * Enhancements:
  15 *              Modularization and ifreq/ifmap support by Alan Cox.
  16 *              Rewritten by Niibe Yutaka.
  17 *              parport-sharing awareness code by Philip Blundell.
  18 *              SMP locking by Niibe Yutaka.
  19 *              Support for parallel ports with no IRQ (poll mode),
  20 *              Modifications to use the parallel port API
  21 *              by Nimrod Zimerman.
  22 *
  23 * Fixes:
  24 *              Niibe Yutaka
  25 *                - Module initialization.
  26 *                - MTU fix.
  27 *                - Make sure other end is OK, before sending a packet.
  28 *                - Fix immediate timer problem.
  29 *
  30 *              Al Viro
  31 *                - Changed {enable,disable}_irq handling to make it work
  32 *                  with new ("stack") semantics.
  33 */
  34
  35/*
  36 * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
  37 * inspired by Russ Nelson's parallel port packet driver.
  38 *
  39 * NOTE:
   40 *     Tanabe Hiroyasu changed the protocol, and that version shipped
   41 *     in Linux v1.0.  Because of the need to communicate with DOS
   42 *     machines running the Crynwr packet driver, Peter Bauer changed
   43 *     the protocol back to the original.
   44 *
   45 *     This version follows the original PLIP protocol, so it cannot
   46 *     talk to the PLIP of Linux v1.0.
  47 */
  48
  49/*
   50 *     To use with a DOS box, turn on the ARP switch:
  51 *      # ifconfig plip[0-2] arp
  52 */
  53static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
  54
  55/*
  56  Sources:
  57        Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
  58        "parallel.asm" parallel port packet driver.
  59
  60  The "Crynwr" parallel port standard specifies the following protocol:
  61    Trigger by sending nibble '0x8' (this causes interrupt on other end)
  62    count-low octet
  63    count-high octet
  64    ... data octets
  65    checksum octet
  66  Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
  67                        <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
  68
  69  The packet is encapsulated as if it were ethernet.
  70
  71  The cable used is a de facto standard parallel null cable -- sold as
  72  a "LapLink" cable by various places.  You'll need a 12-conductor cable to
  73  make one yourself.  The wiring is:
  74    SLCTIN      17 - 17
  75    GROUND      25 - 25
  76    D0->ERROR   2 - 15          15 - 2
  77    D1->SLCT    3 - 13          13 - 3
  78    D2->PAPOUT  4 - 12          12 - 4
  79    D3->ACK     5 - 10          10 - 5
  80    D4->BUSY    6 - 11          11 - 6
   81  Do not connect the other pins.  They are:
  82    D5,D6,D7 are 7,8,9
  83    STROBE is 1, FEED is 14, INIT is 16
  84    extra grounds are 18,19,20,21,22,23,24
  85*/
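     /*
      * An illustrative walkthrough of the per-octet exchange above (an added
      * example, not part of the original protocol text): to send the octet
      * 0xA5, the low nibble goes out as 0x10 + (0xA5 & 0x0F) = 0x15 (D0-D3
      * carry 0x5 with the D4 line raised), and the high nibble as
      * 0x00 + ((0xA5 >> 4) & 0x0F) = 0x0A (D4 lowered).  plip_send() and
      * plip_receive() below implement this handshake one byte at a time.
      */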
  86
  87#include <linux/compat.h>
  88#include <linux/module.h>
  89#include <linux/kernel.h>
  90#include <linux/types.h>
  91#include <linux/fcntl.h>
  92#include <linux/interrupt.h>
  93#include <linux/string.h>
  94#include <linux/slab.h>
  95#include <linux/if_ether.h>
  96#include <linux/in.h>
  97#include <linux/errno.h>
  98#include <linux/delay.h>
  99#include <linux/init.h>
 100#include <linux/netdevice.h>
 101#include <linux/etherdevice.h>
 102#include <linux/inetdevice.h>
 103#include <linux/skbuff.h>
 104#include <linux/if_plip.h>
 105#include <linux/workqueue.h>
 106#include <linux/spinlock.h>
 107#include <linux/completion.h>
 108#include <linux/parport.h>
 109#include <linux/bitops.h>
 110
 111#include <net/neighbour.h>
 112
 113#include <asm/irq.h>
 114#include <asm/byteorder.h>
 115
 116/* Maximum number of devices to support. */
 117#define PLIP_MAX  8
 118
  119/* Use 0 for production, 1 for verification, 2 or higher for debugging */
 120#ifndef NET_DEBUG
 121#define NET_DEBUG 1
 122#endif
 123static const unsigned int net_debug = NET_DEBUG;
 124
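     /* dev->irq == -1 means the attached parallel port has no interrupt line;
        the driver then runs in poll mode (see plip_timer_bh), so there is no
        IRQ to enable or disable. */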
 125#define ENABLE(irq)  if (irq != -1) enable_irq(irq)
 126#define DISABLE(irq) if (irq != -1) disable_irq(irq)
 127
  128/* In microseconds */
 129#define PLIP_DELAY_UNIT            1
 130
 131/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
 132#define PLIP_TRIGGER_WAIT        500
 133
 134/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
 135#define PLIP_NIBBLE_WAIT        3000
 136
 137/* Bottom halves */
 138static void plip_kick_bh(struct work_struct *work);
 139static void plip_bh(struct work_struct *work);
 140static void plip_timer_bh(struct work_struct *work);
 141
 142/* Interrupt handler */
 143static void plip_interrupt(void *dev_id);
 144
 145/* Functions for DEV methods */
 146static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
 147static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
 148                            unsigned short type, const void *daddr,
 149                            const void *saddr, unsigned len);
 150static int plip_hard_header_cache(const struct neighbour *neigh,
 151                                  struct hh_cache *hh, __be16 type);
 152static int plip_open(struct net_device *dev);
 153static int plip_close(struct net_device *dev);
 154static int plip_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
 155                               void __user *data, int cmd);
 156static int plip_preempt(void *handle);
 157static void plip_wakeup(void *handle);
 158
 159enum plip_connection_state {
 160        PLIP_CN_NONE=0,
 161        PLIP_CN_RECEIVE,
 162        PLIP_CN_SEND,
 163        PLIP_CN_CLOSING,
 164        PLIP_CN_ERROR
 165};
 166
 167enum plip_packet_state {
 168        PLIP_PK_DONE=0,
 169        PLIP_PK_TRIGGER,
 170        PLIP_PK_LENGTH_LSB,
 171        PLIP_PK_LENGTH_MSB,
 172        PLIP_PK_DATA,
 173        PLIP_PK_CHECKSUM
 174};
 175
 176enum plip_nibble_state {
 177        PLIP_NB_BEGIN,
 178        PLIP_NB_1,
 179        PLIP_NB_2,
 180};
 181
 182struct plip_local {
 183        enum plip_packet_state state;
 184        enum plip_nibble_state nibble;
 185        union {
 186                struct {
 187#if defined(__LITTLE_ENDIAN)
 188                        unsigned char lsb;
 189                        unsigned char msb;
 190#elif defined(__BIG_ENDIAN)
 191                        unsigned char msb;
 192                        unsigned char lsb;
 193#else
 194#error  "Please fix the endianness defines in <asm/byteorder.h>"
 195#endif
 196                } b;
 197                unsigned short h;
 198        } length;
 199        unsigned short byte;
 200        unsigned char  checksum;
 201        unsigned char  data;
 202        struct sk_buff *skb;
 203};
 204
 205struct net_local {
 206        struct net_device *dev;
 207        struct work_struct immediate;
 208        struct delayed_work deferred;
 209        struct delayed_work timer;
 210        struct plip_local snd_data;
 211        struct plip_local rcv_data;
 212        struct pardevice *pardev;
 213        unsigned long  trigger;
 214        unsigned long  nibble;
 215        enum plip_connection_state connection;
 216        unsigned short timeout_count;
 217        int is_deferred;
 218        int port_owner;
 219        int should_relinquish;
 220        spinlock_t lock;
 221        atomic_t kill_timer;
 222        struct completion killed_timer_cmp;
 223};
 224
 225static inline void enable_parport_interrupts (struct net_device *dev)
 226{
 227        if (dev->irq != -1)
 228        {
 229                struct parport *port =
 230                   ((struct net_local *)netdev_priv(dev))->pardev->port;
 231                port->ops->enable_irq (port);
 232        }
 233}
 234
 235static inline void disable_parport_interrupts (struct net_device *dev)
 236{
 237        if (dev->irq != -1)
 238        {
 239                struct parport *port =
 240                   ((struct net_local *)netdev_priv(dev))->pardev->port;
 241                port->ops->disable_irq (port);
 242        }
 243}
 244
 245static inline void write_data (struct net_device *dev, unsigned char data)
 246{
 247        struct parport *port =
 248           ((struct net_local *)netdev_priv(dev))->pardev->port;
 249
 250        port->ops->write_data (port, data);
 251}
 252
 253static inline unsigned char read_status (struct net_device *dev)
 254{
 255        struct parport *port =
 256           ((struct net_local *)netdev_priv(dev))->pardev->port;
 257
 258        return port->ops->read_status (port);
 259}
 260
 261static const struct header_ops plip_header_ops = {
 262        .create = plip_hard_header,
 263        .cache  = plip_hard_header_cache,
 264};
 265
 266static const struct net_device_ops plip_netdev_ops = {
 267        .ndo_open                = plip_open,
 268        .ndo_stop                = plip_close,
 269        .ndo_start_xmit          = plip_tx_packet,
 270        .ndo_siocdevprivate      = plip_siocdevprivate,
 271        .ndo_set_mac_address     = eth_mac_addr,
 272        .ndo_validate_addr       = eth_validate_addr,
 273};
 274
 275/* Entry point of PLIP driver.
 276   Probe the hardware, and register/initialize the driver.
 277
 278   PLIP is rather weird, because of the way it interacts with the parport
  279   system.  It is _not_ initialised from Space.c.  Instead, the parport
  280   layer calls plip_attach() for each available port; that function makes
  281   up a "struct net_device" and then calls us here.
 282
 283   */
 284static void
 285plip_init_netdev(struct net_device *dev)
 286{
 287        struct net_local *nl = netdev_priv(dev);
 288
 289        /* Then, override parts of it */
 290        dev->tx_queue_len        = 10;
 291        dev->flags               = IFF_POINTOPOINT|IFF_NOARP;
 292        memset(dev->dev_addr, 0xfc, ETH_ALEN);
 293
 294        dev->netdev_ops          = &plip_netdev_ops;
 295        dev->header_ops          = &plip_header_ops;
 296
 297
 298        nl->port_owner = 0;
 299
 300        /* Initialize constants */
 301        nl->trigger     = PLIP_TRIGGER_WAIT;
 302        nl->nibble      = PLIP_NIBBLE_WAIT;
 303
 304        /* Initialize task queue structures */
 305        INIT_WORK(&nl->immediate, plip_bh);
 306        INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
 307
 308        if (dev->irq == -1)
 309                INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
 310
 311        spin_lock_init(&nl->lock);
 312}
 313
 314/* Bottom half handler for the delayed request.
  315   This routine runs as delayed work (nl->deferred).
 316   Request `plip_bh' to be invoked. */
 317static void
 318plip_kick_bh(struct work_struct *work)
 319{
 320        struct net_local *nl =
 321                container_of(work, struct net_local, deferred.work);
 322
 323        if (nl->is_deferred)
 324                schedule_work(&nl->immediate);
 325}
 326
 327/* Forward declarations of internal routines */
 328static int plip_none(struct net_device *, struct net_local *,
 329                     struct plip_local *, struct plip_local *);
 330static int plip_receive_packet(struct net_device *, struct net_local *,
 331                               struct plip_local *, struct plip_local *);
 332static int plip_send_packet(struct net_device *, struct net_local *,
 333                            struct plip_local *, struct plip_local *);
 334static int plip_connection_close(struct net_device *, struct net_local *,
 335                                 struct plip_local *, struct plip_local *);
 336static int plip_error(struct net_device *, struct net_local *,
 337                      struct plip_local *, struct plip_local *);
 338static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
 339                                 struct plip_local *snd,
 340                                 struct plip_local *rcv,
 341                                 int error);
 342
 343#define OK        0
 344#define TIMEOUT   1
 345#define ERROR     2
 346#define HS_TIMEOUT      3
 347
 348typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
 349                         struct plip_local *snd, struct plip_local *rcv);
 350
 351static const plip_func connection_state_table[] =
 352{
 353        plip_none,
 354        plip_receive_packet,
 355        plip_send_packet,
 356        plip_connection_close,
 357        plip_error
 358};
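     /* The table above is indexed by enum plip_connection_state (see plip_bh),
        so its entries must stay in PLIP_CN_NONE..PLIP_CN_ERROR order. */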
 359
 360/* Bottom half handler of PLIP. */
 361static void
 362plip_bh(struct work_struct *work)
 363{
 364        struct net_local *nl = container_of(work, struct net_local, immediate);
 365        struct plip_local *snd = &nl->snd_data;
 366        struct plip_local *rcv = &nl->rcv_data;
 367        plip_func f;
 368        int r;
 369
 370        nl->is_deferred = 0;
 371        f = connection_state_table[nl->connection];
 372        if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
 373            (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
 374                nl->is_deferred = 1;
 375                schedule_delayed_work(&nl->deferred, 1);
 376        }
 377}
 378
 379static void
 380plip_timer_bh(struct work_struct *work)
 381{
 382        struct net_local *nl =
 383                container_of(work, struct net_local, timer.work);
 384
 385        if (!(atomic_read (&nl->kill_timer))) {
 386                plip_interrupt (nl->dev);
 387
 388                schedule_delayed_work(&nl->timer, 1);
 389        }
 390        else {
 391                complete(&nl->killed_timer_cmp);
 392        }
 393}
 394
 395static int
 396plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
 397                      struct plip_local *snd, struct plip_local *rcv,
 398                      int error)
 399{
 400        unsigned char c0;
 401        /*
 402         * This is tricky. If we got here from the beginning of send (either
 403         * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
 404         * already disabled. With the old variant of {enable,disable}_irq()
 405         * extra disable_irq() was a no-op. Now it became mortal - it's
 406         * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
 407         * that is). So we have to treat HS_TIMEOUT and ERROR from send
 408         * in a special way.
 409         */
 410
 411        spin_lock_irq(&nl->lock);
 412        if (nl->connection == PLIP_CN_SEND) {
 413
 414                if (error != ERROR) { /* Timeout */
 415                        nl->timeout_count++;
 416                        if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
 417                            nl->timeout_count <= 3) {
 418                                spin_unlock_irq(&nl->lock);
 419                                /* Try again later */
 420                                return TIMEOUT;
 421                        }
 422                        c0 = read_status(dev);
 423                        printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
 424                               dev->name, snd->state, c0);
 425                } else
 426                        error = HS_TIMEOUT;
 427                dev->stats.tx_errors++;
 428                dev->stats.tx_aborted_errors++;
 429        } else if (nl->connection == PLIP_CN_RECEIVE) {
 430                if (rcv->state == PLIP_PK_TRIGGER) {
 431                        /* Transmission was interrupted. */
 432                        spin_unlock_irq(&nl->lock);
 433                        return OK;
 434                }
 435                if (error != ERROR) { /* Timeout */
 436                        if (++nl->timeout_count <= 3) {
 437                                spin_unlock_irq(&nl->lock);
 438                                /* Try again later */
 439                                return TIMEOUT;
 440                        }
 441                        c0 = read_status(dev);
 442                        printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
 443                               dev->name, rcv->state, c0);
 444                }
 445                dev->stats.rx_dropped++;
 446        }
 447        rcv->state = PLIP_PK_DONE;
 448        if (rcv->skb) {
 449                kfree_skb(rcv->skb);
 450                rcv->skb = NULL;
 451        }
 452        snd->state = PLIP_PK_DONE;
 453        if (snd->skb) {
 454                dev_kfree_skb(snd->skb);
 455                snd->skb = NULL;
 456        }
 457        spin_unlock_irq(&nl->lock);
 458        if (error == HS_TIMEOUT) {
 459                DISABLE(dev->irq);
 460                synchronize_irq(dev->irq);
 461        }
 462        disable_parport_interrupts (dev);
 463        netif_stop_queue (dev);
 464        nl->connection = PLIP_CN_ERROR;
 465        write_data (dev, 0x00);
 466
 467        return TIMEOUT;
 468}
 469
 470static int
 471plip_none(struct net_device *dev, struct net_local *nl,
 472          struct plip_local *snd, struct plip_local *rcv)
 473{
 474        return OK;
 475}
 476
  477/* PLIP_RECEIVE --- receive a byte (two nibbles)
 478   Returns OK on success, TIMEOUT on timeout */
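     /* Added note, derived from the cable map at the top of this file: the
        peer's D0-D3 appear on our status bits 3-6 (ERROR, SLCT, PAPOUT, ACK)
        and its D4 on bit 7 (BUSY, which the hardware reports inverted).  So
        "(c0 >> 3) & 0x0f" below recovers the low nibble, "(c0 << 1) & 0xf0"
        the high nibble, and the toggling 0x80 bit is the per-nibble
        handshake. */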
 479static inline int
 480plip_receive(unsigned short nibble_timeout, struct net_device *dev,
 481             enum plip_nibble_state *ns_p, unsigned char *data_p)
 482{
 483        unsigned char c0, c1;
 484        unsigned int cx;
 485
 486        switch (*ns_p) {
 487        case PLIP_NB_BEGIN:
 488                cx = nibble_timeout;
 489                while (1) {
 490                        c0 = read_status(dev);
 491                        udelay(PLIP_DELAY_UNIT);
 492                        if ((c0 & 0x80) == 0) {
 493                                c1 = read_status(dev);
 494                                if (c0 == c1)
 495                                        break;
 496                        }
 497                        if (--cx == 0)
 498                                return TIMEOUT;
 499                }
 500                *data_p = (c0 >> 3) & 0x0f;
 501                write_data (dev, 0x10); /* send ACK */
 502                *ns_p = PLIP_NB_1;
 503                fallthrough;
 504
 505        case PLIP_NB_1:
 506                cx = nibble_timeout;
 507                while (1) {
 508                        c0 = read_status(dev);
 509                        udelay(PLIP_DELAY_UNIT);
 510                        if (c0 & 0x80) {
 511                                c1 = read_status(dev);
 512                                if (c0 == c1)
 513                                        break;
 514                        }
 515                        if (--cx == 0)
 516                                return TIMEOUT;
 517                }
 518                *data_p |= (c0 << 1) & 0xf0;
 519                write_data (dev, 0x00); /* send ACK */
 520                *ns_p = PLIP_NB_BEGIN;
 521                break;
 522        case PLIP_NB_2:
 523                break;
 524        }
 525        return OK;
 526}
 527
 528/*
 529 *      Determine the packet's protocol ID. The rule here is that we
 530 *      assume 802.3 if the type field is short enough to be a length.
 531 *      This is normal practice and works for any 'now in use' protocol.
 532 *
  533 *      PLIP is ethernet-ish, but the daddr might not be valid if unicast.
  534 *      PLIP fortunately has no bus architecture (it's point-to-point).
 535 *
 536 *      We can't fix the daddr thing as that quirk (more bug) is embedded
 537 *      in far too many old systems not all even running Linux.
 538 */
 539
 540static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
 541{
 542        struct ethhdr *eth;
 543        unsigned char *rawp;
 544
 545        skb_reset_mac_header(skb);
 546        skb_pull(skb,dev->hard_header_len);
 547        eth = eth_hdr(skb);
 548
 549        if(is_multicast_ether_addr(eth->h_dest))
 550        {
 551                if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
 552                        skb->pkt_type=PACKET_BROADCAST;
 553                else
 554                        skb->pkt_type=PACKET_MULTICAST;
 555        }
 556
 557        /*
 558         *      This ALLMULTI check should be redundant by 1.4
 559         *      so don't forget to remove it.
 560         */
 561
 562        if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
 563                return eth->h_proto;
 564
 565        rawp = skb->data;
 566
 567        /*
 568         *      This is a magic hack to spot IPX packets. Older Novell breaks
 569         *      the protocol design and runs IPX over 802.3 without an 802.2 LLC
 570         *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
 571         *      won't work for fault tolerant netware but does for the rest.
 572         */
 573        if (*(unsigned short *)rawp == 0xFFFF)
 574                return htons(ETH_P_802_3);
 575
 576        /*
 577         *      Real 802.2 LLC
 578         */
 579        return htons(ETH_P_802_2);
 580}
 581
 582/* PLIP_RECEIVE_PACKET --- receive a packet */
 583static int
 584plip_receive_packet(struct net_device *dev, struct net_local *nl,
 585                    struct plip_local *snd, struct plip_local *rcv)
 586{
 587        unsigned short nibble_timeout = nl->nibble;
 588        unsigned char *lbuf;
 589
 590        switch (rcv->state) {
 591        case PLIP_PK_TRIGGER:
 592                DISABLE(dev->irq);
 593                /* Don't need to synchronize irq, as we can safely ignore it */
 594                disable_parport_interrupts (dev);
 595                write_data (dev, 0x01); /* send ACK */
 596                if (net_debug > 2)
 597                        printk(KERN_DEBUG "%s: receive start\n", dev->name);
 598                rcv->state = PLIP_PK_LENGTH_LSB;
 599                rcv->nibble = PLIP_NB_BEGIN;
 600                fallthrough;
 601
 602        case PLIP_PK_LENGTH_LSB:
 603                if (snd->state != PLIP_PK_DONE) {
 604                        if (plip_receive(nl->trigger, dev,
 605                                         &rcv->nibble, &rcv->length.b.lsb)) {
  606                                /* collision; the tx queue is already stopped */
 607                                rcv->state = PLIP_PK_DONE;
 608                                nl->is_deferred = 1;
 609                                nl->connection = PLIP_CN_SEND;
 610                                schedule_delayed_work(&nl->deferred, 1);
 611                                enable_parport_interrupts (dev);
 612                                ENABLE(dev->irq);
 613                                return OK;
 614                        }
 615                } else {
 616                        if (plip_receive(nibble_timeout, dev,
 617                                         &rcv->nibble, &rcv->length.b.lsb))
 618                                return TIMEOUT;
 619                }
 620                rcv->state = PLIP_PK_LENGTH_MSB;
 621                fallthrough;
 622
 623        case PLIP_PK_LENGTH_MSB:
 624                if (plip_receive(nibble_timeout, dev,
 625                                 &rcv->nibble, &rcv->length.b.msb))
 626                        return TIMEOUT;
 627                if (rcv->length.h > dev->mtu + dev->hard_header_len ||
 628                    rcv->length.h < 8) {
 629                        printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
 630                        return ERROR;
 631                }
 632                /* Malloc up new buffer. */
 633                rcv->skb = dev_alloc_skb(rcv->length.h + 2);
 634                if (rcv->skb == NULL) {
 635                        printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
 636                        return ERROR;
 637                }
 638                skb_reserve(rcv->skb, 2);       /* Align IP on 16 byte boundaries */
 639                skb_put(rcv->skb,rcv->length.h);
 640                rcv->skb->dev = dev;
 641                rcv->state = PLIP_PK_DATA;
 642                rcv->byte = 0;
 643                rcv->checksum = 0;
 644                fallthrough;
 645
 646        case PLIP_PK_DATA:
 647                lbuf = rcv->skb->data;
 648                do {
 649                        if (plip_receive(nibble_timeout, dev,
 650                                         &rcv->nibble, &lbuf[rcv->byte]))
 651                                return TIMEOUT;
 652                } while (++rcv->byte < rcv->length.h);
 653                do {
 654                        rcv->checksum += lbuf[--rcv->byte];
 655                } while (rcv->byte);
 656                rcv->state = PLIP_PK_CHECKSUM;
 657                fallthrough;
 658
 659        case PLIP_PK_CHECKSUM:
 660                if (plip_receive(nibble_timeout, dev,
 661                                 &rcv->nibble, &rcv->data))
 662                        return TIMEOUT;
 663                if (rcv->data != rcv->checksum) {
 664                        dev->stats.rx_crc_errors++;
 665                        if (net_debug)
 666                                printk(KERN_DEBUG "%s: checksum error\n", dev->name);
 667                        return ERROR;
 668                }
 669                rcv->state = PLIP_PK_DONE;
 670                fallthrough;
 671
 672        case PLIP_PK_DONE:
  673                /* Inform the upper layer of the arrival of a packet. */
 674                rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
 675                netif_rx_ni(rcv->skb);
 676                dev->stats.rx_bytes += rcv->length.h;
 677                dev->stats.rx_packets++;
 678                rcv->skb = NULL;
 679                if (net_debug > 2)
 680                        printk(KERN_DEBUG "%s: receive end\n", dev->name);
 681
 682                /* Close the connection. */
 683                write_data (dev, 0x00);
 684                spin_lock_irq(&nl->lock);
 685                if (snd->state != PLIP_PK_DONE) {
 686                        nl->connection = PLIP_CN_SEND;
 687                        spin_unlock_irq(&nl->lock);
 688                        schedule_work(&nl->immediate);
 689                        enable_parport_interrupts (dev);
 690                        ENABLE(dev->irq);
 691                        return OK;
 692                } else {
 693                        nl->connection = PLIP_CN_NONE;
 694                        spin_unlock_irq(&nl->lock);
 695                        enable_parport_interrupts (dev);
 696                        ENABLE(dev->irq);
 697                        return OK;
 698                }
 699        }
 700        return OK;
 701}
 702
 703/* PLIP_SEND --- send a byte (two nibbles)
  704   Returns OK on success, TIMEOUT on timeout */
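     /* Added note: this is the sender's side of the handshake described at
        plip_receive() -- present the nibble on D0-D3, flip D4 to mark it
        valid (raised for the low nibble, lowered for the high one), then
        wait for the peer's acknowledgement to appear on status bit 0x80. */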
 705static inline int
 706plip_send(unsigned short nibble_timeout, struct net_device *dev,
 707          enum plip_nibble_state *ns_p, unsigned char data)
 708{
 709        unsigned char c0;
 710        unsigned int cx;
 711
 712        switch (*ns_p) {
 713        case PLIP_NB_BEGIN:
 714                write_data (dev, data & 0x0f);
 715                *ns_p = PLIP_NB_1;
 716                fallthrough;
 717
 718        case PLIP_NB_1:
 719                write_data (dev, 0x10 | (data & 0x0f));
 720                cx = nibble_timeout;
 721                while (1) {
 722                        c0 = read_status(dev);
 723                        if ((c0 & 0x80) == 0)
 724                                break;
 725                        if (--cx == 0)
 726                                return TIMEOUT;
 727                        udelay(PLIP_DELAY_UNIT);
 728                }
 729                write_data (dev, 0x10 | (data >> 4));
 730                *ns_p = PLIP_NB_2;
 731                fallthrough;
 732
 733        case PLIP_NB_2:
 734                write_data (dev, (data >> 4));
 735                cx = nibble_timeout;
 736                while (1) {
 737                        c0 = read_status(dev);
 738                        if (c0 & 0x80)
 739                                break;
 740                        if (--cx == 0)
 741                                return TIMEOUT;
 742                        udelay(PLIP_DELAY_UNIT);
 743                }
 744                *ns_p = PLIP_NB_BEGIN;
 745                return OK;
 746        }
 747        return OK;
 748}
 749
 750/* PLIP_SEND_PACKET --- send a packet */
 751static int
 752plip_send_packet(struct net_device *dev, struct net_local *nl,
 753                 struct plip_local *snd, struct plip_local *rcv)
 754{
 755        unsigned short nibble_timeout = nl->nibble;
 756        unsigned char *lbuf;
 757        unsigned char c0;
 758        unsigned int cx;
 759
 760        if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
 761                printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
 762                snd->state = PLIP_PK_DONE;
 763                snd->skb = NULL;
 764                return ERROR;
 765        }
 766
 767        switch (snd->state) {
 768        case PLIP_PK_TRIGGER:
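                     /* Added note: with the cable wiring above, an idle peer
                        (all of its data lines low) reads back as status 0x80
                        here -- bits 3-6 clear, inverted BUSY bit set.
                        Anything else means the other end is not ready. */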
 769                if ((read_status(dev) & 0xf8) != 0x80)
 770                        return HS_TIMEOUT;
 771
 772                /* Trigger remote rx interrupt. */
 773                write_data (dev, 0x08);
 774                cx = nl->trigger;
 775                while (1) {
 776                        udelay(PLIP_DELAY_UNIT);
 777                        spin_lock_irq(&nl->lock);
 778                        if (nl->connection == PLIP_CN_RECEIVE) {
 779                                spin_unlock_irq(&nl->lock);
 780                                /* Interrupted. */
 781                                dev->stats.collisions++;
 782                                return OK;
 783                        }
 784                        c0 = read_status(dev);
 785                        if (c0 & 0x08) {
 786                                spin_unlock_irq(&nl->lock);
 787                                DISABLE(dev->irq);
 788                                synchronize_irq(dev->irq);
 789                                if (nl->connection == PLIP_CN_RECEIVE) {
  790                                        /* Interrupted.  We do need to
  791                                           re-enable the IRQ here: the
  792                                           current {enable,disable}_irq
  793                                           variant *counts* calls, so the
  794                                           earlier DISABLE must be
  795                                           balanced.  -- AV */
 796                                        ENABLE(dev->irq);
 797                                        dev->stats.collisions++;
 798                                        return OK;
 799                                }
 800                                disable_parport_interrupts (dev);
 801                                if (net_debug > 2)
 802                                        printk(KERN_DEBUG "%s: send start\n", dev->name);
 803                                snd->state = PLIP_PK_LENGTH_LSB;
 804                                snd->nibble = PLIP_NB_BEGIN;
 805                                nl->timeout_count = 0;
 806                                break;
 807                        }
 808                        spin_unlock_irq(&nl->lock);
 809                        if (--cx == 0) {
 810                                write_data (dev, 0x00);
 811                                return HS_TIMEOUT;
 812                        }
 813                }
 814                break;
 815
 816        case PLIP_PK_LENGTH_LSB:
 817                if (plip_send(nibble_timeout, dev,
 818                              &snd->nibble, snd->length.b.lsb))
 819                        return TIMEOUT;
 820                snd->state = PLIP_PK_LENGTH_MSB;
 821                fallthrough;
 822
 823        case PLIP_PK_LENGTH_MSB:
 824                if (plip_send(nibble_timeout, dev,
 825                              &snd->nibble, snd->length.b.msb))
 826                        return TIMEOUT;
 827                snd->state = PLIP_PK_DATA;
 828                snd->byte = 0;
 829                snd->checksum = 0;
 830                fallthrough;
 831
 832        case PLIP_PK_DATA:
 833                do {
 834                        if (plip_send(nibble_timeout, dev,
 835                                      &snd->nibble, lbuf[snd->byte]))
 836                                return TIMEOUT;
 837                } while (++snd->byte < snd->length.h);
 838                do {
 839                        snd->checksum += lbuf[--snd->byte];
 840                } while (snd->byte);
 841                snd->state = PLIP_PK_CHECKSUM;
 842                fallthrough;
 843
 844        case PLIP_PK_CHECKSUM:
 845                if (plip_send(nibble_timeout, dev,
 846                              &snd->nibble, snd->checksum))
 847                        return TIMEOUT;
 848
 849                dev->stats.tx_bytes += snd->skb->len;
 850                dev_kfree_skb(snd->skb);
 851                dev->stats.tx_packets++;
 852                snd->state = PLIP_PK_DONE;
 853                fallthrough;
 854
 855        case PLIP_PK_DONE:
 856                /* Close the connection */
 857                write_data (dev, 0x00);
 858                snd->skb = NULL;
 859                if (net_debug > 2)
 860                        printk(KERN_DEBUG "%s: send end\n", dev->name);
 861                nl->connection = PLIP_CN_CLOSING;
 862                nl->is_deferred = 1;
 863                schedule_delayed_work(&nl->deferred, 1);
 864                enable_parport_interrupts (dev);
 865                ENABLE(dev->irq);
 866                return OK;
 867        }
 868        return OK;
 869}
 870
 871static int
 872plip_connection_close(struct net_device *dev, struct net_local *nl,
 873                      struct plip_local *snd, struct plip_local *rcv)
 874{
 875        spin_lock_irq(&nl->lock);
 876        if (nl->connection == PLIP_CN_CLOSING) {
 877                nl->connection = PLIP_CN_NONE;
 878                netif_wake_queue (dev);
 879        }
 880        spin_unlock_irq(&nl->lock);
 881        if (nl->should_relinquish) {
 882                nl->should_relinquish = nl->port_owner = 0;
 883                parport_release(nl->pardev);
 884        }
 885        return OK;
 886}
 887
 888/* PLIP_ERROR --- wait till other end settled */
 889static int
 890plip_error(struct net_device *dev, struct net_local *nl,
 891           struct plip_local *snd, struct plip_local *rcv)
 892{
 893        unsigned char status;
 894
 895        status = read_status(dev);
 896        if ((status & 0xf8) == 0x80) {
 897                if (net_debug > 2)
 898                        printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
 899                nl->connection = PLIP_CN_NONE;
 900                nl->should_relinquish = 0;
 901                netif_start_queue (dev);
 902                enable_parport_interrupts (dev);
 903                ENABLE(dev->irq);
 904                netif_wake_queue (dev);
 905        } else {
 906                nl->is_deferred = 1;
 907                schedule_delayed_work(&nl->deferred, 1);
 908        }
 909
 910        return OK;
 911}
 912
 913/* Handle the parallel port interrupts. */
 914static void
 915plip_interrupt(void *dev_id)
 916{
 917        struct net_device *dev = dev_id;
 918        struct net_local *nl;
 919        struct plip_local *rcv;
 920        unsigned char c0;
 921        unsigned long flags;
 922
 923        nl = netdev_priv(dev);
 924        rcv = &nl->rcv_data;
 925
 926        spin_lock_irqsave (&nl->lock, flags);
 927
 928        c0 = read_status(dev);
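             /* Added note: a trigger from the peer is the nibble 0x08 on its
                data lines, which reads here as ACK (0x40) plus the inverted
                BUSY bit (0x80), i.e. 0xc0.  Anything else is spurious. */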
 929        if ((c0 & 0xf8) != 0xc0) {
 930                if ((dev->irq != -1) && (net_debug > 1))
 931                        printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
 932                spin_unlock_irqrestore (&nl->lock, flags);
 933                return;
 934        }
 935
 936        if (net_debug > 3)
 937                printk(KERN_DEBUG "%s: interrupt.\n", dev->name);
 938
 939        switch (nl->connection) {
 940        case PLIP_CN_CLOSING:
 941                netif_wake_queue (dev);
 942                fallthrough;
 943        case PLIP_CN_NONE:
 944        case PLIP_CN_SEND:
 945                rcv->state = PLIP_PK_TRIGGER;
 946                nl->connection = PLIP_CN_RECEIVE;
 947                nl->timeout_count = 0;
 948                schedule_work(&nl->immediate);
 949                break;
 950
 951        case PLIP_CN_RECEIVE:
  952                /* May occur because there is a race condition
  953                   around testing and setting the connection state.
 954                   Ignore this interrupt. */
 955                break;
 956
 957        case PLIP_CN_ERROR:
 958                printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
 959                break;
 960        }
 961
 962        spin_unlock_irqrestore(&nl->lock, flags);
 963}
 964
 965static netdev_tx_t
 966plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
 967{
 968        struct net_local *nl = netdev_priv(dev);
 969        struct plip_local *snd = &nl->snd_data;
 970
 971        if (netif_queue_stopped(dev))
 972                return NETDEV_TX_BUSY;
 973
 974        /* We may need to grab the bus */
 975        if (!nl->port_owner) {
 976                if (parport_claim(nl->pardev))
 977                        return NETDEV_TX_BUSY;
 978                nl->port_owner = 1;
 979        }
 980
 981        netif_stop_queue (dev);
 982
 983        if (skb->len > dev->mtu + dev->hard_header_len) {
 984                printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
 985                netif_start_queue (dev);
 986                return NETDEV_TX_BUSY;
 987        }
 988
 989        if (net_debug > 2)
 990                printk(KERN_DEBUG "%s: send request\n", dev->name);
 991
 992        spin_lock_irq(&nl->lock);
 993        snd->skb = skb;
 994        snd->length.h = skb->len;
 995        snd->state = PLIP_PK_TRIGGER;
 996        if (nl->connection == PLIP_CN_NONE) {
 997                nl->connection = PLIP_CN_SEND;
 998                nl->timeout_count = 0;
 999        }
1000        schedule_work(&nl->immediate);
1001        spin_unlock_irq(&nl->lock);
1002
1003        return NETDEV_TX_OK;
1004}
1005
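     /* PLIP has no real MAC addresses, so plip_rewrite_address() fakes them:
        the destination becomes fc:fc followed by the four bytes of the
        interface's IPv4 address (ifa_address), while the source is taken from
        dev->dev_addr, which plip_open() fills in the same fc:fc:<IPv4> form. */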
1006static void
1007plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
1008{
1009        const struct in_device *in_dev;
1010
1011        rcu_read_lock();
1012        in_dev = __in_dev_get_rcu(dev);
1013        if (in_dev) {
1014                /* Any address will do - we take the first */
1015                const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
1016                if (ifa) {
1017                        memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
1018                        memset(eth->h_dest, 0xfc, 2);
1019                        memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1020                }
1021        }
1022        rcu_read_unlock();
1023}
1024
1025static int
1026plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1027                 unsigned short type, const void *daddr,
1028                 const void *saddr, unsigned len)
1029{
1030        int ret;
1031
1032        ret = eth_header(skb, dev, type, daddr, saddr, len);
1033        if (ret >= 0)
1034                plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1035
1036        return ret;
1037}
1038
1039static int plip_hard_header_cache(const struct neighbour *neigh,
1040                                  struct hh_cache *hh, __be16 type)
1041{
1042        int ret;
1043
1044        ret = eth_header_cache(neigh, hh, type);
1045        if (ret == 0) {
1046                struct ethhdr *eth;
1047
1048                eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1049                                       HH_DATA_OFF(sizeof(*eth)));
1050                plip_rewrite_address (neigh->dev, eth);
1051        }
1052
1053        return ret;
1054}
1055
1056/* Open/initialize the board.  This is called (in the current kernel)
1057   sometime after booting when the 'ifconfig' program is run.
1058
1059   This routine gets exclusive access to the parallel port by allocating
1060   its IRQ line.
1061 */
1062static int
1063plip_open(struct net_device *dev)
1064{
1065        struct net_local *nl = netdev_priv(dev);
1066        struct in_device *in_dev;
1067
1068        /* Grab the port */
1069        if (!nl->port_owner) {
1070                if (parport_claim(nl->pardev)) return -EAGAIN;
1071                nl->port_owner = 1;
1072        }
1073
1074        nl->should_relinquish = 0;
1075
1076        /* Clear the data port. */
1077        write_data (dev, 0x00);
1078
1079        /* Enable rx interrupt. */
1080        enable_parport_interrupts (dev);
1081        if (dev->irq == -1)
1082        {
1083                atomic_set (&nl->kill_timer, 0);
1084                schedule_delayed_work(&nl->timer, 1);
1085        }
1086
1087        /* Initialize the state machine. */
1088        nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
1089        nl->rcv_data.skb = nl->snd_data.skb = NULL;
1090        nl->connection = PLIP_CN_NONE;
1091        nl->is_deferred = 0;
1092
1093        /* Fill in the MAC-level header.
1094           We used to abuse dev->broadcast to store the point-to-point
1095           MAC address, but we no longer do it. Instead, we fetch the
1096           interface address whenever it is needed, which is cheap enough
1097           because we use the hh_cache. Actually, abusing dev->broadcast
1098           didn't work, because when using plip_open the point-to-point
1099           address isn't yet known.
1100           PLIP doesn't have a real MAC address, but we need it to be
1101           DOS compatible, and to properly support taps (otherwise,
1102           when the device address isn't identical to the address of a
1103           received frame, the kernel incorrectly drops it).             */
1104
1105        in_dev=__in_dev_get_rtnl(dev);
1106        if (in_dev) {
1107                /* Any address will do - we take the first. We already
1108                   have the first two bytes filled with 0xfc, from
 1109                   plip_init_netdev(). */
1110                const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
1111                if (ifa != NULL) {
1112                        memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
1113                }
1114        }
1115
1116        netif_start_queue (dev);
1117
1118        return 0;
1119}
1120
1121/* The inverse routine to plip_open (). */
1122static int
1123plip_close(struct net_device *dev)
1124{
1125        struct net_local *nl = netdev_priv(dev);
1126        struct plip_local *snd = &nl->snd_data;
1127        struct plip_local *rcv = &nl->rcv_data;
1128
1129        netif_stop_queue (dev);
1130        DISABLE(dev->irq);
1131        synchronize_irq(dev->irq);
1132
1133        if (dev->irq == -1)
1134        {
1135                init_completion(&nl->killed_timer_cmp);
1136                atomic_set (&nl->kill_timer, 1);
1137                wait_for_completion(&nl->killed_timer_cmp);
1138        }
1139
1140#ifdef NOTDEF
1141        outb(0x00, PAR_DATA(dev));
1142#endif
1143        nl->is_deferred = 0;
1144        nl->connection = PLIP_CN_NONE;
1145        if (nl->port_owner) {
1146                parport_release(nl->pardev);
1147                nl->port_owner = 0;
1148        }
1149
1150        snd->state = PLIP_PK_DONE;
1151        if (snd->skb) {
1152                dev_kfree_skb(snd->skb);
1153                snd->skb = NULL;
1154        }
1155        rcv->state = PLIP_PK_DONE;
1156        if (rcv->skb) {
1157                kfree_skb(rcv->skb);
1158                rcv->skb = NULL;
1159        }
1160
1161#ifdef NOTDEF
1162        /* Reset. */
1163        outb(0x00, PAR_CONTROL(dev));
1164#endif
1165        return 0;
1166}
1167
1168static int
1169plip_preempt(void *handle)
1170{
1171        struct net_device *dev = (struct net_device *)handle;
1172        struct net_local *nl = netdev_priv(dev);
1173
1174        /* Stand our ground if a datagram is on the wire */
1175        if (nl->connection != PLIP_CN_NONE) {
1176                nl->should_relinquish = 1;
1177                return 1;
1178        }
1179
1180        nl->port_owner = 0;     /* Remember that we released the bus */
1181        return 0;
1182}
1183
1184static void
1185plip_wakeup(void *handle)
1186{
1187        struct net_device *dev = (struct net_device *)handle;
1188        struct net_local *nl = netdev_priv(dev);
1189
1190        if (nl->port_owner) {
1191                /* Why are we being woken up? */
1192                printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1193                if (!parport_claim(nl->pardev))
1194                        /* bus_owner is already set (but why?) */
1195                        printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1196                else
1197                        return;
1198        }
1199
1200        if (!(dev->flags & IFF_UP))
1201                /* Don't need the port when the interface is down */
1202                return;
1203
1204        if (!parport_claim(nl->pardev)) {
1205                nl->port_owner = 1;
1206                /* Clear the data port. */
1207                write_data (dev, 0x00);
1208        }
1209}
1210
1211static int
1212plip_siocdevprivate(struct net_device *dev, struct ifreq *rq,
1213                    void __user *data, int cmd)
1214{
1215        struct net_local *nl = netdev_priv(dev);
1216        struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1217
1218        if (cmd != SIOCDEVPLIP)
1219                return -EOPNOTSUPP;
1220
1221        if (in_compat_syscall())
1222                return -EOPNOTSUPP;
1223
1224        switch(pc->pcmd) {
1225        case PLIP_GET_TIMEOUT:
1226                pc->trigger = nl->trigger;
1227                pc->nibble  = nl->nibble;
1228                break;
1229        case PLIP_SET_TIMEOUT:
1230                if(!capable(CAP_NET_ADMIN))
1231                        return -EPERM;
1232                nl->trigger = pc->trigger;
1233                nl->nibble  = pc->nibble;
1234                break;
1235        default:
1236                return -EOPNOTSUPP;
1237        }
1238        return 0;
1239}
1240
1241static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1242static int timid;
1243
1244module_param_array(parport, int, NULL, 0);
1245module_param(timid, int, 0);
1246MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
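     /* Added for clarity: "timid" makes plip skip parallel ports that already
        have other devices registered (see the !port->devices test in
        plip_attach() below). */
     MODULE_PARM_DESC(timid, "Only attach to parallel ports with no other devices");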
1247
1248static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1249
1250static inline int
1251plip_searchfor(int list[], int a)
1252{
1253        int i;
1254        for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1255                if (list[i] == a) return 1;
1256        }
1257        return 0;
1258}
1259
1260/* plip_attach() is called (by the parport code) when a port is
1261 * available to use. */
1262static void plip_attach (struct parport *port)
1263{
1264        static int unit;
1265        struct net_device *dev;
1266        struct net_local *nl;
1267        char name[IFNAMSIZ];
1268        struct pardev_cb plip_cb;
1269
1270        if ((parport[0] == -1 && (!timid || !port->devices)) ||
1271            plip_searchfor(parport, port->number)) {
1272                if (unit == PLIP_MAX) {
1273                        printk(KERN_ERR "plip: too many devices\n");
1274                        return;
1275                }
1276
1277                sprintf(name, "plip%d", unit);
1278                dev = alloc_etherdev(sizeof(struct net_local));
1279                if (!dev)
1280                        return;
1281
1282                strcpy(dev->name, name);
1283
1284                dev->irq = port->irq;
1285                dev->base_addr = port->base;
1286                if (port->irq == -1) {
 1287                        printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode, "
 1288                                 "which is fairly inefficient!\n", port->name);
1289                }
1290
1291                nl = netdev_priv(dev);
1292                nl->dev = dev;
1293
1294                memset(&plip_cb, 0, sizeof(plip_cb));
1295                plip_cb.private = dev;
1296                plip_cb.preempt = plip_preempt;
1297                plip_cb.wakeup = plip_wakeup;
1298                plip_cb.irq_func = plip_interrupt;
1299
1300                nl->pardev = parport_register_dev_model(port, dev->name,
1301                                                        &plip_cb, unit);
1302
1303                if (!nl->pardev) {
1304                        printk(KERN_ERR "%s: parport_register failed\n", name);
1305                        goto err_free_dev;
1306                }
1307
1308                plip_init_netdev(dev);
1309
1310                if (register_netdev(dev)) {
1311                        printk(KERN_ERR "%s: network register failed\n", name);
1312                        goto err_parport_unregister;
1313                }
1314
1315                printk(KERN_INFO "%s", version);
1316                if (dev->irq != -1)
1317                        printk(KERN_INFO "%s: Parallel port at %#3lx, "
1318                                         "using IRQ %d.\n",
1319                                         dev->name, dev->base_addr, dev->irq);
1320                else
1321                        printk(KERN_INFO "%s: Parallel port at %#3lx, "
1322                                         "not using IRQ.\n",
1323                                         dev->name, dev->base_addr);
1324                dev_plip[unit++] = dev;
1325        }
1326        return;
1327
1328err_parport_unregister:
1329        parport_unregister_device(nl->pardev);
1330err_free_dev:
1331        free_netdev(dev);
1332}
1333
1334/* plip_detach() is called (by the parport code) when a port is
1335 * no longer available to use. */
1336static void plip_detach (struct parport *port)
1337{
1338        /* Nothing to do */
1339}
1340
1341static int plip_probe(struct pardevice *par_dev)
1342{
1343        struct device_driver *drv = par_dev->dev.driver;
1344        int len = strlen(drv->name);
1345
1346        if (strncmp(par_dev->name, drv->name, len))
1347                return -ENODEV;
1348
1349        return 0;
1350}
1351
1352static struct parport_driver plip_driver = {
1353        .name           = "plip",
1354        .probe          = plip_probe,
1355        .match_port     = plip_attach,
1356        .detach         = plip_detach,
1357        .devmodel       = true,
1358};
1359
1360static void __exit plip_cleanup_module (void)
1361{
1362        struct net_device *dev;
1363        int i;
1364
1365        for (i=0; i < PLIP_MAX; i++) {
1366                if ((dev = dev_plip[i])) {
1367                        struct net_local *nl = netdev_priv(dev);
1368                        unregister_netdev(dev);
1369                        if (nl->port_owner)
1370                                parport_release(nl->pardev);
1371                        parport_unregister_device(nl->pardev);
1372                        free_netdev(dev);
1373                        dev_plip[i] = NULL;
1374                }
1375        }
1376
1377        parport_unregister_driver(&plip_driver);
1378}
1379
1380#ifndef MODULE
1381
1382static int parport_ptr;
1383
1384static int __init plip_setup(char *str)
1385{
1386        int ints[4];
1387
1388        str = get_options(str, ARRAY_SIZE(ints), ints);
1389
1390        /* Ugh. */
1391        if (!strncmp(str, "parport", 7)) {
1392                int n = simple_strtoul(str+7, NULL, 10);
1393                if (parport_ptr < PLIP_MAX)
1394                        parport[parport_ptr++] = n;
1395                else
1396                        printk(KERN_INFO "plip: too many ports, %s ignored.\n",
1397                               str);
1398        } else if (!strcmp(str, "timid")) {
1399                timid = 1;
1400        } else {
1401                if (ints[0] == 0 || ints[1] == 0) {
1402                        /* disable driver on "plip=" or "plip=0" */
1403                        parport[0] = -2;
1404                } else {
1405                        printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
1406                               ints[1]);
1407                }
1408        }
1409        return 1;
1410}
1411
1412__setup("plip=", plip_setup);
1413
1414#endif /* !MODULE */
1415
1416static int __init plip_init (void)
1417{
1418        if (parport[0] == -2)
1419                return 0;
1420
1421        if (parport[0] != -1 && timid) {
1422                printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1423                timid = 0;
1424        }
1425
1426        if (parport_register_driver (&plip_driver)) {
1427                printk (KERN_WARNING "plip: couldn't register driver\n");
1428                return 1;
1429        }
1430
1431        return 0;
1432}
1433
1434module_init(plip_init);
1435module_exit(plip_cleanup_module);
1436MODULE_LICENSE("GPL");
1437