/*********************************************************************
 *
 *      sir_dev.c:      irda sir network device
 *
 *      Copyright (c) 2002 Martin Diehl
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License as
 *      published by the Free Software Foundation; either version 2 of
 *      the License, or (at your option) any later version.
 *
 ********************************************************************/

#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "sir-dev.h"


static struct workqueue_struct *irda_sir_wq;

/* STATE MACHINE */

/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration
 */

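/* Returns 0 when transmission has completed, a positive delay in msec
 * after which we want to be rescheduled, or a negative errno.
 */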
static int sirdev_tx_complete_fsm(struct sir_dev *dev)
{
        struct sir_fsm *fsm = &dev->fsm;
        unsigned next_state, delay;
        unsigned bytes_left;

        do {
                next_state = fsm->substate;     /* default: stay in current substate */
                delay = 0;

                switch (fsm->substate) {

                case SIRDEV_STATE_WAIT_XMIT:
                        if (dev->drv->chars_in_buffer)
                                bytes_left = dev->drv->chars_in_buffer(dev);
                        else
                                bytes_left = 0;
                        if (!bytes_left) {
                                next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
                                break;
                        }

                        if (dev->speed > 115200)
                                delay = (bytes_left*8*10000) / (dev->speed/100);
                        else if (dev->speed > 0)
                                delay = (bytes_left*10*10000) / (dev->speed/100);
                        else
                                delay = 0;
                        /* expected delay (usec) until remaining bytes are sent */
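                        /* A worked example, assuming 10 bits per byte on
                         * the wire at SIR rates (start + 8 data + stop):
                         * 64 bytes pending at 115200 baud gives
                         * 64*10*10000 / (115200/100) = 5555 usec, i.e.
                         * about bytes_left * bits_per_byte * 10^6 / speed.
                         * Above 115200 only 8 bits per byte are counted,
                         * since MIR/FIR framing has no start/stop bits.
                         */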
                        if (delay < 100) {
                                udelay(delay);
                                delay = 0;
                                break;
                        }
                        /* sleep some longer delay (msec) */
                        delay = (delay+999) / 1000;
                        break;

                case SIRDEV_STATE_WAIT_UNTIL_SENT:
                        /* block until the underlying hardware buffers are empty */
                        if (dev->drv->wait_until_sent)
                                dev->drv->wait_until_sent(dev);
                        next_state = SIRDEV_STATE_TX_DONE;
                        break;

                case SIRDEV_STATE_TX_DONE:
                        return 0;

                default:
                        IRDA_ERROR("%s - undefined state\n", __func__);
                        return -EINVAL;
                }
                fsm->substate = next_state;
        } while (delay == 0);
        return delay;
}

/*
 * Function sirdev_config_fsm
 *
 * State machine to handle the configuration of the device (and attached
 * dongle, if any). This handler is scheduled for execution in kIrDAd
 * context, so we may sleep. However, kIrDAd is shared by all sir_dev
 * devices, so we had better not sleep there too long. Instead, for longer
 * delays we start a timer to reschedule us later.
 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
 * Both must be unlocked/restarted on completion - but only on final exit.
 */

static void sirdev_config_fsm(struct work_struct *work)
{
        struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
        struct sir_fsm *fsm = &dev->fsm;
        int next_state;
        int ret = -1;
        unsigned delay;

        IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);

        do {
                IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
                        __func__, fsm->state, fsm->substate);

                next_state = fsm->state;
                delay = 0;

                switch (fsm->state) {

                case SIRDEV_STATE_DONGLE_OPEN:
                        if (dev->dongle_drv != NULL) {
                                ret = sirdev_put_dongle(dev);
                                if (ret) {
                                        fsm->result = -EINVAL;
                                        next_state = SIRDEV_STATE_ERROR;
                                        break;
                                }
                        }

                        /* Initialize dongle */
                        ret = sirdev_get_dongle(dev, fsm->param);
                        if (ret) {
                                fsm->result = ret;
                                next_state = SIRDEV_STATE_ERROR;
                                break;
                        }

                        /* Dongles are powered through the modem control lines which
                         * were just set during open. Before resetting, let's wait for
                         * the power to stabilize. This is what some dongle drivers did
                         * in open before, while others didn't - should be safe anyway.
                         */

                        delay = 50;
                        fsm->substate = SIRDEV_STATE_DONGLE_RESET;
                        next_state = SIRDEV_STATE_DONGLE_RESET;

                        fsm->param = 9600;

                        break;

                case SIRDEV_STATE_DONGLE_CLOSE:
                        /* shouldn't we just treat this as success? */
                        if (dev->dongle_drv == NULL) {
                                fsm->result = -EINVAL;
                                next_state = SIRDEV_STATE_ERROR;
                                break;
                        }

                        ret = sirdev_put_dongle(dev);
                        if (ret) {
                                fsm->result = ret;
                                next_state = SIRDEV_STATE_ERROR;
                                break;
                        }
                        next_state = SIRDEV_STATE_DONE;
                        break;

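                /* fsm->param packs the two modem control lines: bit 1 = DTR,
                 * bit 0 = RTS - presumably packed that way by the
                 * sirdev_schedule_dtr_rts() wrapper used in sirdev_ioctl() below.
                 */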
                case SIRDEV_STATE_SET_DTR_RTS:
                        ret = sirdev_set_dtr_rts(dev,
                                (fsm->param & 0x02) ? TRUE : FALSE,
                                (fsm->param & 0x01) ? TRUE : FALSE);
                        next_state = SIRDEV_STATE_DONE;
                        break;

                case SIRDEV_STATE_SET_SPEED:
                        fsm->substate = SIRDEV_STATE_WAIT_XMIT;
                        next_state = SIRDEV_STATE_DONGLE_CHECK;
                        break;

                case SIRDEV_STATE_DONGLE_CHECK:
                        ret = sirdev_tx_complete_fsm(dev);
                        if (ret < 0) {
                                fsm->result = ret;
                                next_state = SIRDEV_STATE_ERROR;
                                break;
                        }
                        if ((delay = ret) != 0)
                                break;

                        if (dev->dongle_drv) {
                                fsm->substate = SIRDEV_STATE_DONGLE_RESET;
                                next_state = SIRDEV_STATE_DONGLE_RESET;
                        } else {
                                dev->speed = fsm->param;
                                next_state = SIRDEV_STATE_PORT_SPEED;
                        }
                        break;

                case SIRDEV_STATE_DONGLE_RESET:
                        if (dev->dongle_drv->reset) {
                                ret = dev->dongle_drv->reset(dev);
                                if (ret < 0) {
                                        fsm->result = ret;
                                        next_state = SIRDEV_STATE_ERROR;
                                        break;
                                }
                        } else
                                ret = 0;
                        if ((delay = ret) == 0) {
                                /* set serial port according to dongle default speed */
                                if (dev->drv->set_speed)
                                        dev->drv->set_speed(dev, dev->speed);
                                fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
                                next_state = SIRDEV_STATE_DONGLE_SPEED;
                        }
                        break;

                case SIRDEV_STATE_DONGLE_SPEED:
                        if (dev->dongle_drv->set_speed) {
                                ret = dev->dongle_drv->set_speed(dev, fsm->param);
                                if (ret < 0) {
                                        fsm->result = ret;
                                        next_state = SIRDEV_STATE_ERROR;
                                        break;
                                }
                        } else
                                ret = 0;
                        if ((delay = ret) == 0)
                                next_state = SIRDEV_STATE_PORT_SPEED;
                        break;

                case SIRDEV_STATE_PORT_SPEED:
                        /* Finally we are ready to change the serial port speed */
                        if (dev->drv->set_speed)
                                dev->drv->set_speed(dev, dev->speed);
                        dev->new_speed = 0;
                        next_state = SIRDEV_STATE_DONE;
                        break;

                case SIRDEV_STATE_DONE:
                        /* Signal network layer so it can send more frames */
                        netif_wake_queue(dev->netdev);
                        next_state = SIRDEV_STATE_COMPLETE;
                        break;

                default:
                        IRDA_ERROR("%s - undefined state\n", __func__);
                        fsm->result = -EINVAL;
                        /* fall through */

                case SIRDEV_STATE_ERROR:
                        IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);

#if 0   /* don't enable this before we have netdev->tx_timeout to recover */
                        netif_stop_queue(dev->netdev);
#else
                        netif_wake_queue(dev->netdev);
#endif
                        /* fall through */

                case SIRDEV_STATE_COMPLETE:
                        /* config change finished, so we are not busy any longer */
                        sirdev_enable_rx(dev);
                        up(&fsm->sem);
                        return;
                }
                fsm->state = next_state;
        } while (!delay);

        queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
}

/* Schedule some device configuration task for execution by kIrDAd
 * on behalf of the above state machine.
 * Can be called from process or interrupt/tasklet context.
 */

int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
{
        struct sir_fsm *fsm = &dev->fsm;

        IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__,
                        initial_state, param);

        if (down_trylock(&fsm->sem)) {
                if (in_interrupt() || in_atomic() || irqs_disabled()) {
                        IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__);
                        return -EWOULDBLOCK;
                } else
                        down(&fsm->sem);
        }

        if (fsm->state == SIRDEV_STATE_DEAD) {
                /* race with sirdev_close should never happen */
                IRDA_ERROR("%s(), instance stale!\n", __func__);
                up(&fsm->sem);
                return -ESTALE;         /* or better EPIPE? */
        }

        netif_stop_queue(dev->netdev);
        atomic_set(&dev->enable_rx, 0);

        fsm->state = initial_state;
        fsm->param = param;
        fsm->result = 0;

        INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
        queue_delayed_work(irda_sir_wq, &fsm->work, 0);
        return 0;
}
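
/* sir-dev.h presumably provides thin wrappers around this entry point
 * for the individual request types, along the lines of:
 *
 *      #define sirdev_schedule_speed(dev, speed) \
 *              sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, speed)
 *
 * which is how sirdev_write_complete() and sirdev_ioctl() below kick off
 * configuration changes.
 */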


/***************************************************************************/

void sirdev_enable_rx(struct sir_dev *dev)
{
        if (unlikely(atomic_read(&dev->enable_rx)))
                return;

        /* flush rx-buffer - should also help in case of problems with echo cancellation */
        dev->rx_buff.data = dev->rx_buff.head;
        dev->rx_buff.len = 0;
        dev->rx_buff.in_frame = FALSE;
        dev->rx_buff.state = OUTSIDE_FRAME;
        atomic_set(&dev->enable_rx, 1);
}

static int sirdev_is_receiving(struct sir_dev *dev)
{
        if (!atomic_read(&dev->enable_rx))
                return 0;

        return dev->rx_buff.state != OUTSIDE_FRAME;
}

int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
        int err;

        IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);

        err = sirdev_schedule_dongle_open(dev, type);
        if (unlikely(err))
                return err;
        down(&dev->fsm.sem);            /* block until config change completed */
        err = dev->fsm.result;
        up(&dev->fsm.sem);
        return err;
}
EXPORT_SYMBOL(sirdev_set_dongle);

/* used by dongle drivers for dongle programming */

int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
{
        unsigned long flags;
        int ret;

        if (unlikely(len > dev->tx_buff.truesize))
                return -ENOSPC;

        spin_lock_irqsave(&dev->tx_lock, flags);        /* serialize with other tx operations */
        while (dev->tx_buff.len > 0) {                  /* wait until tx idle */
                spin_unlock_irqrestore(&dev->tx_lock, flags);
                msleep(10);
                spin_lock_irqsave(&dev->tx_lock, flags);
        }

        dev->tx_buff.data = dev->tx_buff.head;
        memcpy(dev->tx_buff.data, buf, len);
        dev->tx_buff.len = len;

        ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
        if (ret > 0) {
                IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);

                dev->tx_buff.data += ret;
                dev->tx_buff.len -= ret;
                dev->raw_tx = 1;
                ret = len;              /* all data is going to be sent */
        }
        spin_unlock_irqrestore(&dev->tx_lock, flags);
        return ret;
}
EXPORT_SYMBOL(sirdev_raw_write);

/* seems some dongle drivers may need this */

int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
{
        int count;

        if (atomic_read(&dev->enable_rx))
                return -EIO;            /* fail if we expect irda-frames */

        count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;

        if (count > 0) {
                memcpy(buf, dev->rx_buff.data, count);
                dev->rx_buff.data += count;
                dev->rx_buff.len -= count;
        }

        /* remaining stuff gets flushed when re-enabling normal rx */

        return count;
}
EXPORT_SYMBOL(sirdev_raw_read);
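
/* A hypothetical dongle-reset sketch showing the intended raw-mode usage
 * (it runs in fsm context, i.e. rx is disabled and the queue is stopped;
 * the command byte and timing below are made up for illustration):
 *
 *      static int foo_dongle_reset(struct sir_dev *dev)
 *      {
 *              static const char cmd = 0x55;
 *              char status;
 *
 *              if (sirdev_raw_write(dev, &cmd, 1) != 1)
 *                      return -EIO;
 *              msleep(20);
 *              if (sirdev_raw_read(dev, &status, 1) != 1)
 *                      return -EIO;
 *              return status == 0 ? 0 : -EIO;
 *      }
 */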

int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
        int ret = -ENXIO;
        if (dev->drv->set_dtr_rts)
                ret = dev->drv->set_dtr_rts(dev, dtr, rts);
        return ret;
}
EXPORT_SYMBOL(sirdev_set_dtr_rts);

/**********************************************************************/

/* called from client driver - likely with bh-context - to indicate
 * it made some progress with transmission. Hence we send the next
 * chunk, if any, or complete the skb otherwise
 */

void sirdev_write_complete(struct sir_dev *dev)
{
        unsigned long flags;
        struct sk_buff *skb;
        int actual = 0;
        int err;

        spin_lock_irqsave(&dev->tx_lock, flags);

        IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
                   __func__, dev->tx_buff.len);

        if (likely(dev->tx_buff.len > 0)) {
                /* Write data left in transmit buffer */
                actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

                if (likely(actual > 0)) {
                        dev->tx_buff.data += actual;
                        dev->tx_buff.len  -= actual;
                } else if (unlikely(actual < 0)) {
                        /* could be dropped later when we have tx_timeout to recover */
                        IRDA_ERROR("%s: drv->do_write failed (%d)\n",
                                   __func__, actual);
                        if ((skb = dev->tx_skb) != NULL) {
                                dev->tx_skb = NULL;
                                dev_kfree_skb_any(skb);
                                dev->netdev->stats.tx_errors++;
                                dev->netdev->stats.tx_dropped++;
                        }
                        dev->tx_buff.len = 0;
                }
                if (dev->tx_buff.len > 0)
                        goto done;      /* more data to send later */
        }

        if (unlikely(dev->raw_tx != 0)) {
                /* in raw mode we are just done now after the buffer was sent
                 * completely. Since this was requested by some dongle driver
                 * running under the control of the irda-thread we must take
                 * care here not to re-enable the queue. The queue will be
                 * restarted when the irda-thread has completed the request.
                 */

                IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
                dev->raw_tx = 0;
                goto done;      /* no post-frame handling in raw mode */
        }

        /* We have now finished sending this skb.
         * Update statistics and free the skb.
         * Finally we check and trigger a pending speed change, if any.
         * If not, we switch to rx mode and wake the queue for further
         * packets.
         * Note that the scheduled speed request blocks until the lower
         * client driver and the corresponding hardware have really
         * finished sending all data (e.g. the xmit fifo is drained)
         * before the speed change gets finally done and the queue
         * re-activated.
         */

        IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);

        if ((skb = dev->tx_skb) != NULL) {
                dev->tx_skb = NULL;
                dev->netdev->stats.tx_packets++;
                dev->netdev->stats.tx_bytes += skb->len;
                dev_kfree_skb_any(skb);
        }

        if (unlikely(dev->new_speed > 0)) {
                IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
                err = sirdev_schedule_speed(dev, dev->new_speed);
                if (unlikely(err)) {
                        /* should never happen
                         * forget the speed change and hope the stack recovers
                         */
                        IRDA_ERROR("%s - schedule speed change failed: %d\n",
                                   __func__, err);
                        netif_wake_queue(dev->netdev);
                }
                /* else: success
                 *      speed change in progress now
                 *      on completion dev->new_speed gets cleared,
                 *      rx re-enabled and the queue restarted
                 */
        } else {
                sirdev_enable_rx(dev);
                netif_wake_queue(dev->netdev);
        }

done:
        spin_unlock_irqrestore(&dev->tx_lock, flags);
}
EXPORT_SYMBOL(sirdev_write_complete);

/* called from client driver - likely with bh-context - to give us
 * some more received bytes. We put them into the rx-buffer,
 * normally unwrapping and building LAP-skbs (unless rx disabled)
 */

int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
        if (!dev || !dev->netdev) {
                IRDA_WARNING("%s(), not ready yet!\n", __func__);
                return -1;
        }

        if (!dev->irlap) {
                IRDA_WARNING("%s - too early: %p / %zd!\n",
                             __func__, cp, count);
                return -1;
        }

        if (cp == NULL) {
                /* the lower level already reported a receive error -
                 * just update stats and set media busy
                 */
                irda_device_set_media_busy(dev->netdev, TRUE);
                dev->netdev->stats.rx_dropped++;
                IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
                return 0;
        }

        /* Read the characters into the buffer */
        if (likely(atomic_read(&dev->enable_rx))) {
                while (count--)
                        /* Unwrap and destuff one byte */
                        async_unwrap_char(dev->netdev, &dev->netdev->stats,
                                          &dev->rx_buff, *cp++);
        } else {
                while (count--) {
                        /* rx not enabled: save the raw bytes and never
                         * trigger any netif_rx. The received bytes are flushed
                         * later when we re-enable rx but might be read meanwhile
                         * by the dongle driver.
                         */
                        dev->rx_buff.data[dev->rx_buff.len++] = *cp++;

                        /* What should we do when the buffer is full? */
                        if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
                                dev->rx_buff.len = 0;
                }
        }

        return 0;
}
EXPORT_SYMBOL(sirdev_receive);

/**********************************************************************/

/* callbacks from network layer */

static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
                                    struct net_device *ndev)
{
        struct sir_dev *dev = netdev_priv(ndev);
        unsigned long flags;
        int actual = 0;
        int err;
        s32 speed;

        IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);

        netif_stop_queue(ndev);

        IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);

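        /* The irda stack attaches a requested speed to each skb; a
         * zero-length skb carries nothing but the speed request, so it
         * is consumed below without transmitting anything.
         */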
        speed = irda_get_next_speed(skb);
        if ((speed != dev->speed) && (speed != -1)) {
                if (!skb->len) {
                        err = sirdev_schedule_speed(dev, speed);
                        if (unlikely(err == -EWOULDBLOCK)) {
                                /* Failed to initiate the speed change, most
                                 * likely because the fsm is still busy (which
                                 * should be rare).
                                 * We refuse to accept the skb and return with
                                 * the queue stopped so the network layer will
                                 * retry after the fsm completes and wakes the
                                 * queue.
                                 */
                                return NETDEV_TX_BUSY;
                        } else if (unlikely(err)) {
                                /* other fatal error - forget the speed change and
                                 * hope the stack will recover somehow
                                 */
                                netif_start_queue(ndev);
                        }
                        /* else: success
                         *      speed change in progress now
                         *      on completion the queue gets restarted
                         */

                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                } else
                        dev->new_speed = speed;
        }

        /* Init tx buffer */
        dev->tx_buff.data = dev->tx_buff.head;

        /* Check problems */
        if (spin_is_locked(&dev->tx_lock)) {
                IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
        }

        /* serialize with write completion */
        spin_lock_irqsave(&dev->tx_lock, flags);

        /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
        dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);

        /* Transmission will start now - disable receive.
         * If we are just in the middle of an incoming frame,
         * treat it as a collision. It's probably a good idea to
         * reset the rx_buff to OUTSIDE_FRAME in this case too?
         */
        atomic_set(&dev->enable_rx, 0);
        if (unlikely(sirdev_is_receiving(dev)))
                dev->netdev->stats.collisions++;

        actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

        if (likely(actual > 0)) {
                dev->tx_skb = skb;
                dev->tx_buff.data += actual;
                dev->tx_buff.len -= actual;
        } else if (unlikely(actual < 0)) {
                /* could be dropped later when we have tx_timeout to recover */
                IRDA_ERROR("%s: drv->do_write failed (%d)\n",
                           __func__, actual);
                dev_kfree_skb_any(skb);
                dev->netdev->stats.tx_errors++;
                dev->netdev->stats.tx_dropped++;
                netif_wake_queue(ndev);
        }
        spin_unlock_irqrestore(&dev->tx_lock, flags);

        return NETDEV_TX_OK;
}

/* called from network layer with rtnl held */

static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
        struct if_irda_req *irq = (struct if_irda_req *) rq;
        struct sir_dev *dev = netdev_priv(ndev);
        int ret = 0;

        IRDA_ASSERT(dev != NULL, return -1;);

        IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);

        switch (cmd) {
        case SIOCSBANDWIDTH: /* Set bandwidth */
                if (!capable(CAP_NET_ADMIN))
                        ret = -EPERM;
                else
                        ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
                /* cannot sleep here for completion
                 * we are called from network layer with rtnl held
                 */
                break;

        case SIOCSDONGLE: /* Set dongle */
                if (!capable(CAP_NET_ADMIN))
                        ret = -EPERM;
                else
                        ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
                /* cannot sleep here for completion
                 * we are called from network layer with rtnl held
                 */
                break;

        case SIOCSMEDIABUSY: /* Set media busy */
                if (!capable(CAP_NET_ADMIN))
                        ret = -EPERM;
                else
                        irda_device_set_media_busy(dev->netdev, TRUE);
                break;

        case SIOCGRECEIVING: /* Check if we are receiving right now */
                irq->ifr_receiving = sirdev_is_receiving(dev);
                break;

        case SIOCSDTRRTS:
                if (!capable(CAP_NET_ADMIN))
                        ret = -EPERM;
                else
                        ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
                /* cannot sleep here for completion
                 * we are called from network layer with rtnl held
                 */
                break;

        case SIOCSMODE:
#if 0
                if (!capable(CAP_NET_ADMIN))
                        ret = -EPERM;
                else
                        ret = sirdev_schedule_mode(dev, irq->ifr_mode);
                /* cannot sleep here for completion
                 * we are called from network layer with rtnl held
                 */
                break;
#endif
        default:
                ret = -EOPNOTSUPP;
        }

        return ret;
}

/* ----------------------------------------------------------------------------- */

#define SIRBUF_ALLOCSIZE 4269   /* worst case size of a wrapped IrLAP frame */
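/* The value presumably covers a maximal 2048-byte LAP payload, worst case
 * doubled by async byte stuffing, plus XBOFs, BOF, FCS and EOF overhead -
 * the exact breakdown is not documented here.
 */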

static int sirdev_alloc_buffers(struct sir_dev *dev)
{
        dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
        dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;

        /* Bootstrap ZeroCopy Rx */
        dev->rx_buff.skb = __netdev_alloc_skb(dev->netdev, dev->rx_buff.truesize,
                                              GFP_KERNEL);
        if (dev->rx_buff.skb == NULL)
                return -ENOMEM;
        skb_reserve(dev->rx_buff.skb, 1);
        dev->rx_buff.head = dev->rx_buff.skb->data;

        dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
        if (dev->tx_buff.head == NULL) {
                kfree_skb(dev->rx_buff.skb);
                dev->rx_buff.skb = NULL;
                dev->rx_buff.head = NULL;
                return -ENOMEM;
        }

        dev->tx_buff.data = dev->tx_buff.head;
        dev->rx_buff.data = dev->rx_buff.head;
        dev->tx_buff.len = 0;
        dev->rx_buff.len = 0;

        dev->rx_buff.in_frame = FALSE;
        dev->rx_buff.state = OUTSIDE_FRAME;
        return 0;
}

static void sirdev_free_buffers(struct sir_dev *dev)
{
        kfree_skb(dev->rx_buff.skb);
        kfree(dev->tx_buff.head);
        dev->rx_buff.head = dev->tx_buff.head = NULL;
        dev->rx_buff.skb = NULL;
}

static int sirdev_open(struct net_device *ndev)
{
        struct sir_dev *dev = netdev_priv(ndev);
        const struct sir_driver *drv = dev->drv;

        if (!drv)
                return -ENODEV;

        /* increase the reference count of the driver module before doing serious stuff */
        if (!try_module_get(drv->owner))
                return -ESTALE;

        IRDA_DEBUG(2, "%s()\n", __func__);

        if (sirdev_alloc_buffers(dev))
                goto errout_dec;

        if (!dev->drv->start_dev || dev->drv->start_dev(dev))
                goto errout_free;

        sirdev_enable_rx(dev);
        dev->raw_tx = 0;

        netif_start_queue(ndev);
        dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
        if (!dev->irlap)
                goto errout_stop;

        netif_wake_queue(ndev);

        IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);

        return 0;

errout_stop:
        atomic_set(&dev->enable_rx, 0);
        if (dev->drv->stop_dev)
                dev->drv->stop_dev(dev);
errout_free:
        sirdev_free_buffers(dev);
errout_dec:
        module_put(drv->owner);
        return -EAGAIN;
}

static int sirdev_close(struct net_device *ndev)
{
        struct sir_dev *dev = netdev_priv(ndev);
        const struct sir_driver *drv;

//      IRDA_DEBUG(0, "%s\n", __func__);

        netif_stop_queue(ndev);

        down(&dev->fsm.sem);            /* block on pending config completion */

        atomic_set(&dev->enable_rx, 0);

        if (unlikely(!dev->irlap))
                goto out;
        irlap_close(dev->irlap);
        dev->irlap = NULL;

        drv = dev->drv;
        if (unlikely(!drv || !dev->priv))
                goto out;

        if (drv->stop_dev)
                drv->stop_dev(dev);

        sirdev_free_buffers(dev);
        module_put(drv->owner);

out:
        dev->speed = 0;
        up(&dev->fsm.sem);
        return 0;
}

static const struct net_device_ops sirdev_ops = {
        .ndo_start_xmit = sirdev_hard_xmit,
        .ndo_open       = sirdev_open,
        .ndo_stop       = sirdev_close,
        .ndo_do_ioctl   = sirdev_ioctl,
};
/* ----------------------------------------------------------------------------- */

struct sir_dev *sirdev_get_instance(const struct sir_driver *drv, const char *name)
{
        struct net_device *ndev;
        struct sir_dev *dev;

        IRDA_DEBUG(0, "%s - %s\n", __func__, name);

        /* instead of adding tests to protect against drv->do_write==NULL
         * at several places we refuse to create a sir_dev instance for
         * drivers which don't implement do_write.
         */
        if (!drv || !drv->do_write)
                return NULL;

        /*
         *  Allocate new instance of the device
         */
        ndev = alloc_irdadev(sizeof(*dev));
        if (ndev == NULL) {
                IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
                goto out;
        }
        dev = netdev_priv(ndev);

        irda_init_max_qos_capabilies(&dev->qos);
        dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
        dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
        irda_qos_bits_to_value(&dev->qos);

        strncpy(dev->hwname, name, sizeof(dev->hwname)-1);

        atomic_set(&dev->enable_rx, 0);
        dev->tx_skb = NULL;

        spin_lock_init(&dev->tx_lock);
        sema_init(&dev->fsm.sem, 1);

        dev->drv = drv;
        dev->netdev = ndev;

        /* Override the network functions we need to use */
        ndev->netdev_ops = &sirdev_ops;

        if (register_netdev(ndev)) {
                IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
                goto out_freenetdev;
        }

        return dev;

out_freenetdev:
        free_netdev(ndev);
out:
        return NULL;
}
EXPORT_SYMBOL(sirdev_get_instance);
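
/* A minimal client-driver sketch (hypothetical driver "foo"; only
 * callbacks actually dereferenced in this file are shown - do_write is
 * the sole mandatory one, and the qos_mtt_bits value is made up):
 *
 *      static struct sir_driver foo_sir_driver = {
 *              .owner          = THIS_MODULE,
 *              .qos_mtt_bits   = 0x07,
 *              .do_write       = foo_do_write,
 *              .start_dev      = foo_start_dev,
 *              .stop_dev       = foo_stop_dev,
 *              .set_speed      = foo_set_speed,
 *      };
 *
 *      dev = sirdev_get_instance(&foo_sir_driver, "foo");
 *      ...
 *      sirdev_put_instance(dev);
 */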

int sirdev_put_instance(struct sir_dev *dev)
{
        int err = 0;

        IRDA_DEBUG(0, "%s\n", __func__);

        atomic_set(&dev->enable_rx, 0);

        netif_carrier_off(dev->netdev);
        netif_device_detach(dev->netdev);

        if (dev->dongle_drv)
                err = sirdev_schedule_dongle_close(dev);
        if (err)
                IRDA_ERROR("%s - error %d\n", __func__, err);

        sirdev_close(dev->netdev);

        down(&dev->fsm.sem);
        dev->fsm.state = SIRDEV_STATE_DEAD;     /* mark stale */
        dev->dongle_drv = NULL;
        dev->priv = NULL;
        up(&dev->fsm.sem);

        /* Remove netdevice */
        unregister_netdev(dev->netdev);

        free_netdev(dev->netdev);

        return 0;
}
EXPORT_SYMBOL(sirdev_put_instance);

static int __init sir_wq_init(void)
{
        irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
        if (!irda_sir_wq)
                return -ENOMEM;
        return 0;
}

static void __exit sir_wq_exit(void)
{
        destroy_workqueue(irda_sir_wq);
}

module_init(sir_wq_init);
module_exit(sir_wq_exit);

MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
MODULE_DESCRIPTION("IrDA SIR core");
MODULE_LICENSE("GPL");