linux/drivers/net/irda/sir_dev.c
/*********************************************************************
 *
 *      sir_dev.c:      irda sir network device
 *
 *      Copyright (c) 2002 Martin Diehl
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License as
 *      published by the Free Software Foundation; either version 2 of
 *      the License, or (at your option) any later version.
 *
 ********************************************************************/

#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "sir-dev.h"


static struct workqueue_struct *irda_sir_wq;

/* STATE MACHINE */

/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration
 */

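/* Return convention (as implemented below): 0 once transmission has fully
 * completed, a positive number of msecs the caller should wait before
 * rescheduling us, or a negative errno on an undefined substate.
 */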
static int sirdev_tx_complete_fsm(struct sir_dev *dev)
{
        struct sir_fsm *fsm = &dev->fsm;
        unsigned next_state, delay;
        unsigned bytes_left;

        do {
                next_state = fsm->substate;     /* default: stay in current substate */
                delay = 0;

                switch (fsm->substate) {

                case SIRDEV_STATE_WAIT_XMIT:
                        if (dev->drv->chars_in_buffer)
                                bytes_left = dev->drv->chars_in_buffer(dev);
                        else
                                bytes_left = 0;
                        if (!bytes_left) {
                                next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
                                break;
                        }

                        if (dev->speed > 115200)
                                delay = (bytes_left*8*10000) / (dev->speed/100);
                        else if (dev->speed > 0)
                                delay = (bytes_left*10*10000) / (dev->speed/100);
                        else
                                delay = 0;
                        /* expected delay (usec) until remaining bytes are sent */
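                        /* Worked example (illustrative figures, not from the
                         * original source): at 9600 baud a SIR byte occupies
                         * 10 bits on the wire, so 16 bytes left give
                         * 16*10*10000 / (9600/100) = 16666 usec - well above
                         * the 100 usec busy-wait limit below, so we would
                         * sleep for (16666+999)/1000 = 17 msec instead.
                         */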
                        if (delay < 100) {
                                udelay(delay);
                                delay = 0;
                                break;
                        }
                        /* sleep some longer delay (msec) */
                        delay = (delay+999) / 1000;
                        break;

                case SIRDEV_STATE_WAIT_UNTIL_SENT:
                        /* block until the underlying hardware buffers are empty */
                        if (dev->drv->wait_until_sent)
                                dev->drv->wait_until_sent(dev);
                        next_state = SIRDEV_STATE_TX_DONE;
                        break;

                case SIRDEV_STATE_TX_DONE:
                        return 0;

                default:
                        net_err_ratelimited("%s - undefined state\n", __func__);
                        return -EINVAL;
                }
                fsm->substate = next_state;
        } while (delay == 0);
        return delay;
}

/*
 * Function sirdev_config_fsm
 *
 * State machine to handle the configuration of the device (and attached dongle, if any).
 * This handler is scheduled for execution in kIrDAd context, so we can sleep;
 * however, kIrDAd is shared by all sir_dev devices, so we had better not sleep
 * there too long. Instead, for longer delays we start a timer to reschedule us later.
 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
 * Both must be unlocked/restarted on completion - but only on final exit.
 */
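/* Sketch of the state flow for a speed change request, as implemented by the
 * cases below: SET_SPEED -> DONGLE_CHECK, then either DONGLE_RESET ->
 * DONGLE_SPEED -> PORT_SPEED (dongle attached) or PORT_SPEED directly
 * (no dongle), and finally DONE -> COMPLETE.
 */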

static void sirdev_config_fsm(struct work_struct *work)
{
        struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
        struct sir_fsm *fsm = &dev->fsm;
        int next_state;
        int ret = -1;
        unsigned delay;

        pr_debug("%s(), <%ld>\n", __func__, jiffies);

        do {
                pr_debug("%s - state=0x%04x / substate=0x%04x\n",
                         __func__, fsm->state, fsm->substate);

                next_state = fsm->state;
                delay = 0;

                switch (fsm->state) {

                case SIRDEV_STATE_DONGLE_OPEN:
                        if (dev->dongle_drv != NULL) {
                                ret = sirdev_put_dongle(dev);
                                if (ret) {
                                        fsm->result = -EINVAL;
                                        next_state = SIRDEV_STATE_ERROR;
                                        break;
                                }
                        }

                        /* Initialize dongle */
                        ret = sirdev_get_dongle(dev, fsm->param);
                        if (ret) {
                                fsm->result = ret;
                                next_state = SIRDEV_STATE_ERROR;
                                break;
                        }

                        /* Dongles are powered through the modem control lines which
                         * were just set during open. Before resetting, let's wait for
                         * the power to stabilize. This is what some dongle drivers did
                         * in open before, while others didn't - should be safe anyway.
                         */

                        delay = 50;
                        fsm->substate = SIRDEV_STATE_DONGLE_RESET;
                        next_state = SIRDEV_STATE_DONGLE_RESET;

                        fsm->param = 9600;

                        break;

                case SIRDEV_STATE_DONGLE_CLOSE:
                        /* shouldn't we just treat this as success? */
                        if (dev->dongle_drv == NULL) {
                                fsm->result = -EINVAL;
                                next_state = SIRDEV_STATE_ERROR;
                                break;
                        }

                        ret = sirdev_put_dongle(dev);
                        if (ret) {
                                fsm->result = ret;
                                next_state = SIRDEV_STATE_ERROR;
                                break;
                        }
                        next_state = SIRDEV_STATE_DONE;
                        break;

                case SIRDEV_STATE_SET_DTR_RTS:
                        ret = sirdev_set_dtr_rts(dev,
                                (fsm->param&0x02) ? TRUE : FALSE,
                                (fsm->param&0x01) ? TRUE : FALSE);
                        next_state = SIRDEV_STATE_DONE;
                        break;

                case SIRDEV_STATE_SET_SPEED:
                        fsm->substate = SIRDEV_STATE_WAIT_XMIT;
                        next_state = SIRDEV_STATE_DONGLE_CHECK;
                        break;

                case SIRDEV_STATE_DONGLE_CHECK:
                        ret = sirdev_tx_complete_fsm(dev);
                        if (ret < 0) {
                                fsm->result = ret;
                                next_state = SIRDEV_STATE_ERROR;
                                break;
                        }
                        if ((delay = ret) != 0)
                                break;

                        if (dev->dongle_drv) {
                                fsm->substate = SIRDEV_STATE_DONGLE_RESET;
                                next_state = SIRDEV_STATE_DONGLE_RESET;
                        }
                        else {
                                dev->speed = fsm->param;
                                next_state = SIRDEV_STATE_PORT_SPEED;
                        }
                        break;

                case SIRDEV_STATE_DONGLE_RESET:
                        if (dev->dongle_drv->reset) {
                                ret = dev->dongle_drv->reset(dev);
                                if (ret < 0) {
                                        fsm->result = ret;
                                        next_state = SIRDEV_STATE_ERROR;
                                        break;
                                }
                        }
                        else
                                ret = 0;
                        if ((delay = ret) == 0) {
                                /* set serial port according to dongle default speed */
                                if (dev->drv->set_speed)
                                        dev->drv->set_speed(dev, dev->speed);
                                fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
                                next_state = SIRDEV_STATE_DONGLE_SPEED;
                        }
                        break;

                case SIRDEV_STATE_DONGLE_SPEED:
                        if (dev->dongle_drv->set_speed) {
                                ret = dev->dongle_drv->set_speed(dev, fsm->param);
                                if (ret < 0) {
                                        fsm->result = ret;
                                        next_state = SIRDEV_STATE_ERROR;
                                        break;
                                }
                        }
                        else
                                ret = 0;
                        if ((delay = ret) == 0)
                                next_state = SIRDEV_STATE_PORT_SPEED;
                        break;

                case SIRDEV_STATE_PORT_SPEED:
                        /* Finally we are ready to change the serial port speed */
                        if (dev->drv->set_speed)
                                dev->drv->set_speed(dev, dev->speed);
                        dev->new_speed = 0;
                        next_state = SIRDEV_STATE_DONE;
                        break;

                case SIRDEV_STATE_DONE:
                        /* Signal network layer so it can send more frames */
                        netif_wake_queue(dev->netdev);
                        next_state = SIRDEV_STATE_COMPLETE;
                        break;

                default:
                        net_err_ratelimited("%s - undefined state\n", __func__);
                        fsm->result = -EINVAL;
                        /* fall thru */

                case SIRDEV_STATE_ERROR:
                        net_err_ratelimited("%s - error: %d\n",
                                            __func__, fsm->result);

#if 0   /* don't enable this before we have netdev->tx_timeout to recover */
                        netif_stop_queue(dev->netdev);
#else
                        netif_wake_queue(dev->netdev);
#endif
                        /* fall thru */

                case SIRDEV_STATE_COMPLETE:
                        /* config change finished, so we are not busy any longer */
                        sirdev_enable_rx(dev);
                        up(&fsm->sem);
                        return;
                }
                fsm->state = next_state;
        } while (!delay);

        queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
}

/* schedule some device configuration task for execution by kIrDAd
 * on behalf of the above state machine.
 * can be called from process or interrupt/tasklet context.
 */
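/* Return convention (as implemented below): 0 when the request has been
 * queued, -EWOULDBLOCK when the state machine is busy and the caller must
 * not sleep, -ESTALE when the instance has already been marked dead.
 */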

int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
{
        struct sir_fsm *fsm = &dev->fsm;

        pr_debug("%s - state=0x%04x / param=%u\n", __func__,
                 initial_state, param);

        if (down_trylock(&fsm->sem)) {
                if (in_interrupt() || in_atomic() || irqs_disabled()) {
                        pr_debug("%s(), state machine busy!\n", __func__);
                        return -EWOULDBLOCK;
                } else
                        down(&fsm->sem);
        }

        if (fsm->state == SIRDEV_STATE_DEAD) {
                /* race with sirdev_close should never happen */
                net_err_ratelimited("%s(), stale instance!\n", __func__);
                up(&fsm->sem);
                return -ESTALE;         /* or better EPIPE? */
        }

        netif_stop_queue(dev->netdev);
        atomic_set(&dev->enable_rx, 0);

        fsm->state = initial_state;
        fsm->param = param;
        fsm->result = 0;

        INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
        queue_delayed_work(irda_sir_wq, &fsm->work, 0);
        return 0;
}


/***************************************************************************/

void sirdev_enable_rx(struct sir_dev *dev)
{
        if (unlikely(atomic_read(&dev->enable_rx)))
                return;

        /* flush rx-buffer - should also help in case of problems with echo cancellation */
        dev->rx_buff.data = dev->rx_buff.head;
        dev->rx_buff.len = 0;
        dev->rx_buff.in_frame = FALSE;
        dev->rx_buff.state = OUTSIDE_FRAME;
        atomic_set(&dev->enable_rx, 1);
}

static int sirdev_is_receiving(struct sir_dev *dev)
{
        if (!atomic_read(&dev->enable_rx))
                return 0;

        return dev->rx_buff.state != OUTSIDE_FRAME;
}

int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
        int err;

        pr_debug("%s : requesting dongle %d.\n", __func__, type);

        err = sirdev_schedule_dongle_open(dev, type);
        if (unlikely(err))
                return err;
        down(&dev->fsm.sem);            /* block until config change completed */
        err = dev->fsm.result;
        up(&dev->fsm.sem);
        return err;
}
EXPORT_SYMBOL(sirdev_set_dongle);

/* used by dongle drivers for dongle programming */

int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
{
        unsigned long flags;
        int ret;

        if (unlikely(len > dev->tx_buff.truesize))
                return -ENOSPC;

        spin_lock_irqsave(&dev->tx_lock, flags);        /* serialize with other tx operations */
        while (dev->tx_buff.len > 0) {                  /* wait until tx idle */
                spin_unlock_irqrestore(&dev->tx_lock, flags);
                msleep(10);
                spin_lock_irqsave(&dev->tx_lock, flags);
        }

        dev->tx_buff.data = dev->tx_buff.head;
        memcpy(dev->tx_buff.data, buf, len);
        dev->tx_buff.len = len;

        ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
        if (ret > 0) {
                pr_debug("%s(), raw-tx started\n", __func__);

                dev->tx_buff.data += ret;
                dev->tx_buff.len -= ret;
                dev->raw_tx = 1;
                ret = len;              /* all data is going to be sent */
        }
        spin_unlock_irqrestore(&dev->tx_lock, flags);
        return ret;
}
EXPORT_SYMBOL(sirdev_raw_write);

/* seems some dongle drivers may need this */

int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
{
        int count;

        if (atomic_read(&dev->enable_rx))
                return -EIO;            /* fail if we expect irda-frames */

        count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;

        if (count > 0) {
                memcpy(buf, dev->rx_buff.data, count);
                dev->rx_buff.data += count;
                dev->rx_buff.len -= count;
        }

        /* remaining stuff gets flushed when re-enabling normal rx */

        return count;
}
EXPORT_SYMBOL(sirdev_raw_read);
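
#if 0   /* illustrative only: a minimal sketch of how a (hypothetical) dongle
         * driver might combine the raw I/O helpers above during dongle
         * programming. my_dongle_query() and the command bytes do not exist
         * anywhere; they merely stand in for a real dongle protocol. Note
         * that rx stays disabled while the config fsm runs dongle code, so
         * sirdev_receive() buffers raw bytes for sirdev_raw_read().
         */
static int my_dongle_query(struct sir_dev *dev)
{
        static const char cmd[] = { 0x01, 0x02 };       /* made-up command */
        char status;

        if (sirdev_raw_write(dev, cmd, sizeof(cmd)) != sizeof(cmd))
                return -EIO;
        msleep(20);             /* give the dongle time to answer */
        if (sirdev_raw_read(dev, &status, 1) != 1)
                return -EIO;
        return 0;
}
#endif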

int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
        int ret = -ENXIO;
        if (dev->drv->set_dtr_rts)
                ret = dev->drv->set_dtr_rts(dev, dtr, rts);
        return ret;
}
EXPORT_SYMBOL(sirdev_set_dtr_rts);

/**********************************************************************/

/* called from client driver - likely with bh-context - to indicate
 * it made some progress with transmission. Hence we send the next
 * chunk, if any, or complete the skb otherwise
 */

void sirdev_write_complete(struct sir_dev *dev)
{
        unsigned long flags;
        struct sk_buff *skb;
        int actual = 0;
        int err;

        spin_lock_irqsave(&dev->tx_lock, flags);

        pr_debug("%s() - dev->tx_buff.len = %d\n",
                 __func__, dev->tx_buff.len);

        if (likely(dev->tx_buff.len > 0)) {
                /* Write data left in transmit buffer */
                actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

                if (likely(actual > 0)) {
                        dev->tx_buff.data += actual;
                        dev->tx_buff.len  -= actual;
                }
                else if (unlikely(actual < 0)) {
                        /* could be dropped later when we have tx_timeout to recover */
                        net_err_ratelimited("%s: drv->do_write failed (%d)\n",
                                            __func__, actual);
                        if ((skb = dev->tx_skb) != NULL) {
                                dev->tx_skb = NULL;
                                dev_kfree_skb_any(skb);
                                dev->netdev->stats.tx_errors++;
                                dev->netdev->stats.tx_dropped++;
                        }
                        dev->tx_buff.len = 0;
                }
                if (dev->tx_buff.len > 0)
                        goto done;      /* more data to send later */
        }

        if (unlikely(dev->raw_tx != 0)) {
                /* in raw mode we are just done now after the buffer was sent
                 * completely. Since this was requested by some dongle driver
                 * running under the control of the irda-thread we must take
                 * care here not to re-enable the queue. The queue will be
                 * restarted when the irda-thread has completed the request.
                 */

                pr_debug("%s(), raw-tx done\n", __func__);
                dev->raw_tx = 0;
                goto done;      /* no post-frame handling in raw mode */
        }

        /* We have now finished sending this skb: update the statistics and
         * free the skb. Finally we check for and trigger a pending speed
         * change, if any; if there is none, we switch back to rx mode and
         * wake the queue for further packets.
         * Note that the scheduled speed request blocks until the lower
         * client driver and the corresponding hardware have really finished
         * sending all data (e.g. the xmit fifo is drained), and only then is
         * the speed change finally performed and the queue re-activated.
         */

        pr_debug("%s(), finished with frame!\n", __func__);

        if ((skb = dev->tx_skb) != NULL) {
                dev->tx_skb = NULL;
                dev->netdev->stats.tx_packets++;
                dev->netdev->stats.tx_bytes += skb->len;
                dev_kfree_skb_any(skb);
        }

        if (unlikely(dev->new_speed > 0)) {
                pr_debug("%s(), Changing speed!\n", __func__);
                err = sirdev_schedule_speed(dev, dev->new_speed);
                if (unlikely(err)) {
                        /* should never happen
                         * forget the speed change and hope the stack recovers
                         */
                        net_err_ratelimited("%s - schedule speed change failed: %d\n",
                                            __func__, err);
                        netif_wake_queue(dev->netdev);
                }
                /* else: success
                 *      speed change in progress now
                 *      on completion dev->new_speed gets cleared,
                 *      rx-reenabled and the queue restarted
                 */
        }
        else {
                sirdev_enable_rx(dev);
                netif_wake_queue(dev->netdev);
        }

done:
        spin_unlock_irqrestore(&dev->tx_lock, flags);
}
EXPORT_SYMBOL(sirdev_write_complete);

/* called from client driver - likely with bh-context - to give us
 * some more received bytes. We put them into the rx-buffer,
 * normally unwrapping and building LAP-skb's (unless rx disabled)
 */
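/* Return convention (as implemented below): -1 when we are not ready for
 * reception yet, 0 otherwise - including the lower-level error case (cp ==
 * NULL), where we only account a dropped frame and flag the media busy.
 */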

int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
        if (!dev || !dev->netdev) {
                net_warn_ratelimited("%s(), not ready yet!\n", __func__);
                return -1;
        }

        if (!dev->irlap) {
                net_warn_ratelimited("%s - too early: %p / %zd!\n",
                                     __func__, cp, count);
                return -1;
        }

        if (cp == NULL) {
                /* error already at lower level receive
                 * just update stats and set media busy
                 */
                irda_device_set_media_busy(dev->netdev, TRUE);
                dev->netdev->stats.rx_dropped++;
                pr_debug("%s; rx-drop: %zd\n", __func__, count);
                return 0;
        }

        /* Read the characters into the buffer */
        if (likely(atomic_read(&dev->enable_rx))) {
                while (count--)
                        /* Unwrap and destuff one byte */
                        async_unwrap_char(dev->netdev, &dev->netdev->stats,
                                          &dev->rx_buff, *cp++);
        } else {
                while (count--) {
                        /* rx not enabled: save the raw bytes and never
                         * trigger any netif_rx. The received bytes are flushed
                         * later when we re-enable rx but might be read meanwhile
                         * by the dongle driver.
                         */
                        dev->rx_buff.data[dev->rx_buff.len++] = *cp++;

                        /* What should we do when the buffer is full? */
                        if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
                                dev->rx_buff.len = 0;
                }
        }

        return 0;
}
EXPORT_SYMBOL(sirdev_receive);

/**********************************************************************/

/* callbacks from network layer */

static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
                                    struct net_device *ndev)
{
        struct sir_dev *dev = netdev_priv(ndev);
        unsigned long flags;
        int actual = 0;
        int err;
        s32 speed;

        IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);

        netif_stop_queue(ndev);

        pr_debug("%s(), skb->len = %d\n", __func__, skb->len);

        speed = irda_get_next_speed(skb);
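        /* Note (an inference from the code below, not an original comment):
         * irda_get_next_speed() yields the speed the stack wants after this
         * skb, or -1 for no change. A zero-length skb carries nothing but
         * such a speed request and is handled immediately here, while for a
         * data-carrying skb the change is parked in dev->new_speed and
         * triggered from sirdev_write_complete() after transmission.
         */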
        if ((speed != dev->speed) && (speed != -1)) {
                if (!skb->len) {
                        err = sirdev_schedule_speed(dev, speed);
                        if (unlikely(err == -EWOULDBLOCK)) {
                                /* Failed to initiate the speed change; the fsm
                                 * is most likely still busy (which should
                                 * rarely happen). We refuse to accept the skb
                                 * and return with the queue stopped so the
                                 * network layer will retry after the fsm
                                 * completes and wakes the queue.
                                 */
                                return NETDEV_TX_BUSY;
                        }
                        else if (unlikely(err)) {
                                /* other fatal error - forget the speed change and
                                 * hope the stack will recover somehow
                                 */
                                netif_start_queue(ndev);
                        }
                        /* else: success
                         *      speed change in progress now
                         *      on completion the queue gets restarted
                         */

                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                } else
                        dev->new_speed = speed;
        }

        /* Init tx buffer */
        dev->tx_buff.data = dev->tx_buff.head;

        /* Check problems */
        if (spin_is_locked(&dev->tx_lock)) {
                pr_debug("%s(), write not completed\n", __func__);
        }

        /* serialize with write completion */
        spin_lock_irqsave(&dev->tx_lock, flags);

        /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
        dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);

        /* transmission will start now - disable receive.
         * if we are just in the middle of an incoming frame,
         * treat it as collision. probably it would be a good idea to
         * reset the rx_buff to OUTSIDE_FRAME in this case too?
         */
        atomic_set(&dev->enable_rx, 0);
        if (unlikely(sirdev_is_receiving(dev)))
                dev->netdev->stats.collisions++;

        actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

        if (likely(actual > 0)) {
                dev->tx_skb = skb;
                dev->tx_buff.data += actual;
                dev->tx_buff.len -= actual;
        }
        else if (unlikely(actual < 0)) {
                /* could be dropped later when we have tx_timeout to recover */
                net_err_ratelimited("%s: drv->do_write failed (%d)\n",
                                    __func__, actual);
                dev_kfree_skb_any(skb);
                dev->netdev->stats.tx_errors++;
                dev->netdev->stats.tx_dropped++;
                netif_wake_queue(ndev);
        }
        spin_unlock_irqrestore(&dev->tx_lock, flags);

        return NETDEV_TX_OK;
}

/* called from network layer with rtnl hold */

static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
        struct if_irda_req *irq = (struct if_irda_req *) rq;
        struct sir_dev *dev = netdev_priv(ndev);
        int ret = 0;

        IRDA_ASSERT(dev != NULL, return -1;);

        pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);

        switch (cmd) {
        case SIOCSBANDWIDTH: /* Set bandwidth */
                if (!capable(CAP_NET_ADMIN))
                        ret = -EPERM;
                else
                        ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
                /* cannot sleep here for completion
                 * we are called from network layer with rtnl hold
                 */
                break;

        case SIOCSDONGLE: /* Set dongle */
                if (!capable(CAP_NET_ADMIN))
                        ret = -EPERM;
                else
                        ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
                /* cannot sleep here for completion
                 * we are called from network layer with rtnl hold
                 */
                break;

        case SIOCSMEDIABUSY: /* Set media busy */
                if (!capable(CAP_NET_ADMIN))
                        ret = -EPERM;
                else
                        irda_device_set_media_busy(dev->netdev, TRUE);
                break;

        case SIOCGRECEIVING: /* Check if we are receiving right now */
                irq->ifr_receiving = sirdev_is_receiving(dev);
                break;

        case SIOCSDTRRTS:
                if (!capable(CAP_NET_ADMIN))
                        ret = -EPERM;
                else
                        ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
                /* cannot sleep here for completion
                 * we are called from network layer with rtnl hold
                 */
                break;

        case SIOCSMODE:
#if 0
                if (!capable(CAP_NET_ADMIN))
                        ret = -EPERM;
                else
                        ret = sirdev_schedule_mode(dev, irq->ifr_mode);
                /* cannot sleep here for completion
                 * we are called from network layer with rtnl hold
                 */
                break;
#endif
        default:
                ret = -EOPNOTSUPP;
        }

        return ret;
}

/* ----------------------------------------------------------------------------- */

#define SIRBUF_ALLOCSIZE 4269   /* worst case size of a wrapped IrLAP frame */
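/* Plausible arithmetic behind this constant (an inference, not stated in the
 * source): 2 address/control bytes + 2048 payload bytes + 2 FCS bytes = 2052
 * bytes, each potentially escape-stuffed to two bytes (4104), plus 163 XBOFs,
 * one BOF and one EOF: 163 + 1 + 4104 + 1 = 4269.
 */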

static int sirdev_alloc_buffers(struct sir_dev *dev)
{
        dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
        dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;

        /* Bootstrap ZeroCopy Rx */
        dev->rx_buff.skb = __netdev_alloc_skb(dev->netdev, dev->rx_buff.truesize,
                                              GFP_KERNEL);
        if (dev->rx_buff.skb == NULL)
                return -ENOMEM;
        skb_reserve(dev->rx_buff.skb, 1);
        dev->rx_buff.head = dev->rx_buff.skb->data;

        dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
        if (dev->tx_buff.head == NULL) {
                kfree_skb(dev->rx_buff.skb);
                dev->rx_buff.skb = NULL;
                dev->rx_buff.head = NULL;
                return -ENOMEM;
        }

        dev->tx_buff.data = dev->tx_buff.head;
        dev->rx_buff.data = dev->rx_buff.head;
        dev->tx_buff.len = 0;
        dev->rx_buff.len = 0;

        dev->rx_buff.in_frame = FALSE;
        dev->rx_buff.state = OUTSIDE_FRAME;
        return 0;
}

static void sirdev_free_buffers(struct sir_dev *dev)
{
        kfree_skb(dev->rx_buff.skb);
        kfree(dev->tx_buff.head);
        dev->rx_buff.head = dev->tx_buff.head = NULL;
        dev->rx_buff.skb = NULL;
}

static int sirdev_open(struct net_device *ndev)
{
        struct sir_dev *dev = netdev_priv(ndev);
        const struct sir_driver *drv = dev->drv;

        if (!drv)
                return -ENODEV;

        /* increase the reference count of the driver module before doing serious stuff */
        if (!try_module_get(drv->owner))
                return -ESTALE;

        if (sirdev_alloc_buffers(dev))
                goto errout_dec;

        if (!dev->drv->start_dev || dev->drv->start_dev(dev))
                goto errout_free;

        sirdev_enable_rx(dev);
        dev->raw_tx = 0;

        netif_start_queue(ndev);
        dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
        if (!dev->irlap)
                goto errout_stop;

        netif_wake_queue(ndev);

        pr_debug("%s - done, speed = %d\n", __func__, dev->speed);

        return 0;

errout_stop:
        atomic_set(&dev->enable_rx, 0);
        if (dev->drv->stop_dev)
                dev->drv->stop_dev(dev);
errout_free:
        sirdev_free_buffers(dev);
errout_dec:
        module_put(drv->owner);
        return -EAGAIN;
}

static int sirdev_close(struct net_device *ndev)
{
        struct sir_dev *dev = netdev_priv(ndev);
        const struct sir_driver *drv;

/* pr_debug("%s\n", __func__); */

        netif_stop_queue(ndev);

        down(&dev->fsm.sem);            /* block on pending config completion */

        atomic_set(&dev->enable_rx, 0);

        if (unlikely(!dev->irlap))
                goto out;
        irlap_close(dev->irlap);
        dev->irlap = NULL;

        drv = dev->drv;
        if (unlikely(!drv || !dev->priv))
                goto out;

        if (drv->stop_dev)
                drv->stop_dev(dev);

        sirdev_free_buffers(dev);
        module_put(drv->owner);

out:
        dev->speed = 0;
        up(&dev->fsm.sem);
        return 0;
}

static const struct net_device_ops sirdev_ops = {
        .ndo_start_xmit = sirdev_hard_xmit,
        .ndo_open       = sirdev_open,
        .ndo_stop       = sirdev_close,
        .ndo_do_ioctl   = sirdev_ioctl,
};

/* ----------------------------------------------------------------------------- */

struct sir_dev *sirdev_get_instance(const struct sir_driver *drv, const char *name)
{
        struct net_device *ndev;
        struct sir_dev *dev;

        pr_debug("%s - %s\n", __func__, name);

        /* instead of adding tests to protect against drv->do_write==NULL
         * at several places we refuse to create a sir_dev instance for
         * drivers which don't implement do_write.
         */
        if (!drv || !drv->do_write)
                return NULL;

        /*
         *  Allocate new instance of the device
         */
        ndev = alloc_irdadev(sizeof(*dev));
        if (ndev == NULL) {
                net_err_ratelimited("%s - Can't allocate memory for IrDA control block!\n",
                                    __func__);
                goto out;
        }
        dev = netdev_priv(ndev);

        irda_init_max_qos_capabilies(&dev->qos);
        dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
        dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
        irda_qos_bits_to_value(&dev->qos);

        strncpy(dev->hwname, name, sizeof(dev->hwname)-1);

        atomic_set(&dev->enable_rx, 0);
        dev->tx_skb = NULL;

        spin_lock_init(&dev->tx_lock);
        sema_init(&dev->fsm.sem, 1);

        dev->drv = drv;
        dev->netdev = ndev;

        /* Override the network functions we need to use */
        ndev->netdev_ops = &sirdev_ops;

        if (register_netdev(ndev)) {
                net_err_ratelimited("%s(), register_netdev() failed!\n",
                                    __func__);
                goto out_freenetdev;
        }

        return dev;

out_freenetdev:
        free_netdev(ndev);
out:
        return NULL;
}
EXPORT_SYMBOL(sirdev_get_instance);
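
#if 0   /* illustrative only: roughly how a client (port) driver would bind
         * itself to the sir_dev core. my_probe() and the my_* callbacks are
         * hypothetical, and qos_mtt_bits is an arbitrary example value; the
         * struct sir_driver members shown are just the ones referenced
         * elsewhere in this file.
         */
static struct sir_driver my_driver = {
        .owner          = THIS_MODULE,
        .do_write       = my_do_write,          /* mandatory, see above */
        .start_dev      = my_start_dev,
        .stop_dev       = my_stop_dev,
        .set_speed      = my_set_speed,
        .set_dtr_rts    = my_set_dtr_rts,
        .qos_mtt_bits   = 0x07,
};

static int my_probe(void)
{
        struct sir_dev *dev = sirdev_get_instance(&my_driver, "my_port");

        if (!dev)
                return -ENOMEM;
        /* stash dev for later, set dev->priv, etc. */
        return 0;
}
#endif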

int sirdev_put_instance(struct sir_dev *dev)
{
        int err = 0;

        pr_debug("%s\n", __func__);

        atomic_set(&dev->enable_rx, 0);

        netif_carrier_off(dev->netdev);
        netif_device_detach(dev->netdev);

        if (dev->dongle_drv)
                err = sirdev_schedule_dongle_close(dev);
        if (err)
                net_err_ratelimited("%s - error %d\n", __func__, err);

        sirdev_close(dev->netdev);

        down(&dev->fsm.sem);
        dev->fsm.state = SIRDEV_STATE_DEAD;     /* mark stale */
        dev->dongle_drv = NULL;
        dev->priv = NULL;
        up(&dev->fsm.sem);

        /* Remove netdevice */
        unregister_netdev(dev->netdev);

        free_netdev(dev->netdev);

        return 0;
}
EXPORT_SYMBOL(sirdev_put_instance);

static int __init sir_wq_init(void)
{
        irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
        if (!irda_sir_wq)
                return -ENOMEM;
        return 0;
}

static void __exit sir_wq_exit(void)
{
        destroy_workqueue(irda_sir_wq);
}

module_init(sir_wq_init);
module_exit(sir_wq_exit);

MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
MODULE_DESCRIPTION("IrDA SIR core");
MODULE_LICENSE("GPL");