linux/drivers/net/irda/bfin_sir.c
/*
 * Blackfin Infra-red Driver
 *
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 *
 */
#include "bfin_sir.h"

#ifdef CONFIG_SIR_BFIN_DMA
#define DMA_SIR_RX_XCNT        10
#define DMA_SIR_RX_YCNT        (PAGE_SIZE / DMA_SIR_RX_XCNT)
#define DMA_SIR_RX_FLUSH_JIFS  (HZ * 4 / 250)
#endif

#if ANOMALY_05000447
static int max_rate = 57600;
#else
static int max_rate = 115200;
#endif

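/*
 * Wait out the IrDA minimum turnaround time (mtt, in microseconds)
 * before the half-duplex link flips from RX to TX.  The delay is
 * clamped to at least 10 ms and rounded up to whole scheduler ticks.
 */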
static void turnaround_delay(unsigned long last_jif, int mtt)
{
        long ticks;

        mtt = mtt < 10000 ? 10000 : mtt;
        ticks = 1 + mtt / (USEC_PER_SEC / HZ);
        schedule_timeout_uninterruptible(ticks);
}

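/*
 * Pull the MMIO base, IRQ, and RX/TX DMA channel numbers out of the
 * platform resources.  Note the convention: the DMA resource carries
 * the RX channel in ->start and the TX channel in ->end.
 *
 * A board file feeding this parser might look roughly like the sketch
 * below (the register range, IRQ, and channel names are illustrative
 * only, not taken from a real board file):
 *
 *   static struct resource bfin_sir0_resources[] = {
 *           { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM },
 *           { .start = IRQ_UART0_RX, .flags = IORESOURCE_IRQ },
 *           { .start = CH_UART0_RX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA },
 *   };
 */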
static void __devinit bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev)
{
        int i;
        struct resource *res;

        for (i = 0; i < pdev->num_resources; i++) {
                res = &pdev->resource[i];
                switch (res->flags) {
                case IORESOURCE_MEM:
                        sp->membase   = (void __iomem *)res->start;
                        break;
                case IORESOURCE_IRQ:
                        sp->irq = res->start;
                        break;
                case IORESOURCE_DMA:
                        sp->rx_dma_channel = res->start;
                        sp->tx_dma_channel = res->end;
                        break;
                default:
                        break;
                }
        }

        sp->clk = get_sclk();
#ifdef CONFIG_SIR_BFIN_DMA
        sp->tx_done        = 1;
        init_timer(&(sp->rx_dma_timer));
#endif
}

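/* Drain the transmitter: busy-wait until the transmit holding register
 * empties, then disable TX.
 */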
static void bfin_sir_stop_tx(struct bfin_sir_port *port)
{
#ifdef CONFIG_SIR_BFIN_DMA
        disable_dma(port->tx_dma_channel);
#endif

        while (!(SIR_UART_GET_LSR(port) & THRE))
                cpu_relax();

        SIR_UART_STOP_TX(port);
}

static void bfin_sir_enable_tx(struct bfin_sir_port *port)
{
        SIR_UART_ENABLE_TX(port);
}

static void bfin_sir_stop_rx(struct bfin_sir_port *port)
{
        SIR_UART_STOP_RX(port);
}

static void bfin_sir_enable_rx(struct bfin_sir_port *port)
{
        SIR_UART_ENABLE_RX(port);
}

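/*
 * Program the UART divisor for a new baud rate.  Only the standard SIR
 * rates (9600-115200) are accepted.  The divisor follows the usual
 * 16x-oversampling formula, rounded to the nearest integer:
 *
 *   quot = (clk + 8 * baud) / (16 * baud)
 *
 * e.g. assuming a 100 MHz system clock (an illustrative value) and
 * 115200 baud: quot = (100000000 + 921600) / 1843200 = 54.
 * Parts with anomaly 05000230 need the divisor reduced by one.
 */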
static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
{
        int ret = -EINVAL;
        unsigned int quot;
        unsigned short val, lsr, lcr;
        static int utime;
        int count = 10;

        lcr = WLS(8);

        switch (speed) {
        case 9600:
        case 19200:
        case 38400:
        case 57600:
        case 115200:

                quot = (port->clk + (8 * speed)) / (16 * speed)
                                                - ANOMALY_05000230;

                do {
                        udelay(utime);
                        lsr = SIR_UART_GET_LSR(port);
                } while (!(lsr & TEMT) && count--);

                /* The microseconds needed to transmit one bit */
                utime = 1000000 / speed + 1;

                /* Clear UCEN bit to reset the UART state machine
                 * and control registers
                 */
                val = SIR_UART_GET_GCTL(port);
                val &= ~UCEN;
                SIR_UART_PUT_GCTL(port, val);

                /* Set DLAB in LCR to access the divisor latch DLL/DLH */
                SIR_UART_SET_DLAB(port);
                SSYNC();

                SIR_UART_PUT_DLL(port, quot & 0xFF);
                SIR_UART_PUT_DLH(port, (quot >> 8) & 0xFF);
                SSYNC();

                /* Clear DLAB in LCR */
                SIR_UART_CLEAR_DLAB(port);
                SSYNC();

                SIR_UART_PUT_LCR(port, lcr);

                val = SIR_UART_GET_GCTL(port);
                val |= UCEN;
                SIR_UART_PUT_GCTL(port, val);

                ret = 0;
                break;
        default:
                printk(KERN_WARNING "bfin_sir: Invalid speed %d\n", speed);
                break;
        }

        val = SIR_UART_GET_GCTL(port);
        /* If RPOLC is not set, we cannot catch the receive interrupt.
         * This depends on the HW layout and the IR transceiver.
         */
        val |= IREN | RPOLC;
        SIR_UART_PUT_GCTL(port, val);
        return ret;
}

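/* A frame is in flight if the RX interrupt is enabled and the async
 * unwrap state machine is not sitting outside a frame.
 */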
static int bfin_sir_is_receiving(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;

        if (!(SIR_UART_GET_IER(port) & ERBFI))
                return 0;
        return self->rx_buff.state != OUTSIDE_FRAME;
}

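/*
 * PIO mode: one byte moves per UART interrupt.  TX feeds the wrapped
 * frame out of tx_buff a character at a time; RX pushes each received
 * character through the IrDA async unwrapper.
 */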
#ifdef CONFIG_SIR_BFIN_PIO
static void bfin_sir_tx_chars(struct net_device *dev)
{
        unsigned int chr;
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;

        if (self->tx_buff.len != 0) {
                chr = *(self->tx_buff.data);
                SIR_UART_PUT_CHAR(port, chr);
                self->tx_buff.data++;
                self->tx_buff.len--;
        } else {
                self->stats.tx_packets++;
                self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head;
                if (self->newspeed) {
                        bfin_sir_set_speed(port, self->newspeed);
                        self->speed = self->newspeed;
                        self->newspeed = 0;
                }
                bfin_sir_stop_tx(port);
                bfin_sir_enable_rx(port);
                /* I'm hungry! */
                netif_wake_queue(dev);
        }
}

static void bfin_sir_rx_chars(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;
        unsigned char ch;

        SIR_UART_CLEAR_LSR(port);
        ch = SIR_UART_GET_CHAR(port);
        async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
        dev->last_rx = jiffies;
}

static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;

        spin_lock(&self->lock);
        while ((SIR_UART_GET_LSR(port) & DR))
                bfin_sir_rx_chars(dev);
        spin_unlock(&self->lock);

        return IRQ_HANDLED;
}

static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;

        spin_lock(&self->lock);
        if (SIR_UART_GET_LSR(port) & THRE)
                bfin_sir_tx_chars(dev);
        spin_unlock(&self->lock);

        return IRQ_HANDLED;
}
#endif /* CONFIG_SIR_BFIN_PIO */

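/*
 * DMA mode: TX streams the whole wrapped frame in a single linear DMA
 * transfer.  RX runs a free-running 2D autobuffer, DMA_SIR_RX_XCNT
 * bytes per row, drained on each row-completion interrupt and by a
 * flush timer that mops up partially filled rows.
 */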
#ifdef CONFIG_SIR_BFIN_DMA
static void bfin_sir_dma_tx_chars(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;

        if (!port->tx_done)
                return;
        port->tx_done = 0;

        if (self->tx_buff.len == 0) {
                self->stats.tx_packets++;
                if (self->newspeed) {
                        bfin_sir_set_speed(port, self->newspeed);
                        self->speed = self->newspeed;
                        self->newspeed = 0;
                }
                bfin_sir_enable_rx(port);
                port->tx_done = 1;
                netif_wake_queue(dev);
                return;
        }

        blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data),
                (unsigned long)(self->tx_buff.data+self->tx_buff.len));
        set_dma_config(port->tx_dma_channel,
                set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
                        INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_8,
                        DMA_SYNC_RESTART));
        set_dma_start_addr(port->tx_dma_channel,
                (unsigned long)(self->tx_buff.data));
        set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);
        set_dma_x_modify(port->tx_dma_channel, 1);
        enable_dma(port->tx_dma_channel);
}

static irqreturn_t bfin_sir_dma_tx_int(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;

        spin_lock(&self->lock);
        if (!(get_dma_curr_irqstat(port->tx_dma_channel) & DMA_RUN)) {
                clear_dma_irqstat(port->tx_dma_channel);
                bfin_sir_stop_tx(port);

                self->stats.tx_packets++;
                self->stats.tx_bytes += self->tx_buff.len;
                self->tx_buff.len = 0;
                if (self->newspeed) {
                        bfin_sir_set_speed(port, self->newspeed);
                        self->speed = self->newspeed;
                        self->newspeed = 0;
                }
                bfin_sir_enable_rx(port);
                /* I'm hungry! */
                netif_wake_queue(dev);
                port->tx_done = 1;
        }
        spin_unlock(&self->lock);

        return IRQ_HANDLED;
}

static void bfin_sir_dma_rx_chars(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;
        int i;

        SIR_UART_CLEAR_LSR(port);

        for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++)
                async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]);
}

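/* Flush timer handler: pick up bytes sitting in a partially filled DMA
 * row that has not yet raised a row-completion interrupt.
 */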
void bfin_sir_rx_dma_timeout(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;
        int x_pos, pos;
        unsigned long flags;

        spin_lock_irqsave(&self->lock, flags);
        x_pos = DMA_SIR_RX_XCNT - get_dma_curr_xcount(port->rx_dma_channel);
        if (x_pos == DMA_SIR_RX_XCNT)
                x_pos = 0;

        pos = port->rx_dma_nrows * DMA_SIR_RX_XCNT + x_pos;

        if (pos > port->rx_dma_buf.tail) {
                port->rx_dma_buf.tail = pos;
                bfin_sir_dma_rx_chars(dev);
                port->rx_dma_buf.head = port->rx_dma_buf.tail;
        }
        spin_unlock_irqrestore(&self->lock, flags);
}

static irqreturn_t bfin_sir_dma_rx_int(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;
        unsigned short irqstat;

        spin_lock(&self->lock);

        port->rx_dma_nrows++;
        port->rx_dma_buf.tail = DMA_SIR_RX_XCNT * port->rx_dma_nrows;
        bfin_sir_dma_rx_chars(dev);
        if (port->rx_dma_nrows >= DMA_SIR_RX_YCNT) {
                port->rx_dma_nrows = 0;
                port->rx_dma_buf.tail = 0;
        }
        port->rx_dma_buf.head = port->rx_dma_buf.tail;

        irqstat = get_dma_curr_irqstat(port->rx_dma_channel);
        clear_dma_irqstat(port->rx_dma_channel);
        spin_unlock(&self->lock);

        mod_timer(&port->rx_dma_timer, jiffies + DMA_SIR_RX_FLUSH_JIFS);
        return IRQ_HANDLED;
}
#endif /* CONFIG_SIR_BFIN_DMA */

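/*
 * Bring the port up: claim both DMA channels, then either arm the RX
 * autobuffer and DMA callbacks (DMA build) or request the RX and TX
 * UART interrupts (PIO build).
 */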
static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
{
#ifdef CONFIG_SIR_BFIN_DMA
        dma_addr_t dma_handle;
#endif /* CONFIG_SIR_BFIN_DMA */

        if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) {
                dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n");
                return -EBUSY;
        }

        if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) {
                dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n");
                free_dma(port->rx_dma_channel);
                return -EBUSY;
        }

#ifdef CONFIG_SIR_BFIN_DMA

        set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
        set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);

        port->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
        port->rx_dma_buf.head = 0;
        port->rx_dma_buf.tail = 0;
        port->rx_dma_nrows = 0;

        set_dma_config(port->rx_dma_channel,
                                set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
                                                                        INTR_ON_ROW, DIMENSION_2D,
                                                                        DATA_SIZE_8, DMA_SYNC_RESTART));
        set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT);
        set_dma_x_modify(port->rx_dma_channel, 1);
        set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT);
        set_dma_y_modify(port->rx_dma_channel, 1);
        set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
        enable_dma(port->rx_dma_channel);

        port->rx_dma_timer.data = (unsigned long)(dev);
        port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;

#else

        if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) {
                dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
                return -EBUSY;
        }

        if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) {
                dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
                free_irq(port->irq, dev);
                return -EBUSY;
        }
#endif

        return 0;
}

static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev)
{
        unsigned short val;

        bfin_sir_stop_rx(port);
        SIR_UART_DISABLE_INTS(port);

        val = SIR_UART_GET_GCTL(port);
        val &= ~(UCEN | IREN | RPOLC);
        SIR_UART_PUT_GCTL(port, val);

#ifdef CONFIG_SIR_BFIN_DMA
        disable_dma(port->tx_dma_channel);
        disable_dma(port->rx_dma_channel);
        del_timer(&(port->rx_dma_timer));
        dma_free_coherent(NULL, PAGE_SIZE, port->rx_dma_buf.buf, 0);
#else
        free_irq(port->irq+1, dev);
        free_irq(port->irq, dev);
#endif
        free_dma(port->tx_dma_channel);
        free_dma(port->rx_dma_channel);
}

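/* Power management: tear the port down on suspend and rebuild it, back
 * at the default 9600 baud, on resume.
 */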
#ifdef CONFIG_PM
static int bfin_sir_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct bfin_sir_port *sir_port;
        struct net_device *dev;
        struct bfin_sir_self *self;

        sir_port = platform_get_drvdata(pdev);
        if (!sir_port)
                return 0;

        dev = sir_port->dev;
        self = netdev_priv(dev);
        if (self->open) {
                flush_work(&self->work);
                bfin_sir_shutdown(self->sir_port, dev);
                netif_device_detach(dev);
        }

        return 0;
}

static int bfin_sir_resume(struct platform_device *pdev)
{
        struct bfin_sir_port *sir_port;
        struct net_device *dev;
        struct bfin_sir_self *self;
        struct bfin_sir_port *port;

        sir_port = platform_get_drvdata(pdev);
        if (!sir_port)
                return 0;

        dev = sir_port->dev;
        self = netdev_priv(dev);
        port = self->sir_port;
        if (self->open) {
                if (self->newspeed) {
                        self->speed = self->newspeed;
                        self->newspeed = 0;
                }
                bfin_sir_startup(port, dev);
                bfin_sir_set_speed(port, 9600);
                bfin_sir_enable_rx(port);
                netif_device_attach(dev);
        }
        return 0;
}
#else
#define bfin_sir_suspend   NULL
#define bfin_sir_resume    NULL
#endif

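/*
 * Deferred transmit path: wait out the turnaround time while the peer
 * may still be sending, then pulse IREN/RPOLC to reset the IR receive
 * logic before the DMA or PIO transmit actually starts.
 */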
static void bfin_sir_send_work(struct work_struct *work)
{
        struct bfin_sir_self  *self = container_of(work, struct bfin_sir_self, work);
        struct net_device *dev = self->sir_port->dev;
        struct bfin_sir_port *port = self->sir_port;
        unsigned short val;
        int tx_cnt = 10;

        while (bfin_sir_is_receiving(dev) && --tx_cnt)
                turnaround_delay(dev->last_rx, self->mtt);

        bfin_sir_stop_rx(port);

        /* To avoid losing the RX interrupt, reset the IR function
         * before sending data.  Setting the speed would also work,
         * since that resets the whole UART.
         */
        val = SIR_UART_GET_GCTL(port);
        val &= ~(IREN | RPOLC);
        SIR_UART_PUT_GCTL(port, val);
        SSYNC();
        val |= IREN | RPOLC;
        SIR_UART_PUT_GCTL(port, val);
        SSYNC();
        /* bfin_sir_set_speed(port, self->speed); */

#ifdef CONFIG_SIR_BFIN_DMA
        bfin_sir_dma_tx_chars(dev);
#endif
        bfin_sir_enable_tx(port);
        dev->trans_start = jiffies;
}

static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        int speed = irda_get_next_speed(skb);

        netif_stop_queue(dev);

        self->mtt = irda_get_mtt(skb);

        if (speed != self->speed && speed != -1)
                self->newspeed = speed;

        self->tx_buff.data = self->tx_buff.head;
        if (skb->len == 0)
                self->tx_buff.len = 0;
        else
                self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize);

        schedule_work(&self->work);
        dev_kfree_skb(skb);

        return 0;
}

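/* IrDA ioctls: SIOCSBANDWIDTH changes the link speed, SIOCSMEDIABUSY
 * flags the media busy to the IrDA stack, and SIOCGRECEIVING reports
 * whether a frame is currently being received.
 */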
static int bfin_sir_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
        struct if_irda_req *rq = (struct if_irda_req *)ifreq;
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;
        int ret = 0;

        switch (cmd) {
        case SIOCSBANDWIDTH:
                if (capable(CAP_NET_ADMIN)) {
                        if (self->open) {
                                ret = bfin_sir_set_speed(port, rq->ifr_baudrate);
                                bfin_sir_enable_rx(port);
                        } else {
                                dev_warn(&dev->dev, "SIOCSBANDWIDTH: !netif_running\n");
                                ret = 0;
                        }
                }
                break;

        case SIOCSMEDIABUSY:
                ret = -EPERM;
                if (capable(CAP_NET_ADMIN)) {
                        irda_device_set_media_busy(dev, TRUE);
                        ret = 0;
                }
                break;

        case SIOCGRECEIVING:
                rq->ifr_receiving = bfin_sir_is_receiving(dev);
                break;

        default:
                ret = -EOPNOTSUPP;
                break;
        }

        return ret;
}

static struct net_device_stats *bfin_sir_stats(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);

        return &self->stats;
}

static int bfin_sir_open(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;
        int err;

        self->newspeed = 0;
        self->speed = 9600;

        spin_lock_init(&self->lock);

        err = bfin_sir_startup(port, dev);
        if (err)
                goto err_startup;

        bfin_sir_set_speed(port, 9600);

        self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
        if (!self->irlap) {
                err = -ENOMEM;
                goto err_irlap;
        }

        INIT_WORK(&self->work, bfin_sir_send_work);

        /*
         * Now enable the interrupt then start the queue
         */
        self->open = 1;
        bfin_sir_enable_rx(port);

        netif_start_queue(dev);

        return 0;

err_irlap:
        self->open = 0;
        bfin_sir_shutdown(port, dev);
err_startup:
        return err;
}

static int bfin_sir_stop(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);

        flush_work(&self->work);
        bfin_sir_shutdown(self->sir_port, dev);

        if (self->rxskb) {
                dev_kfree_skb(self->rxskb);
                self->rxskb = NULL;
        }

        /* Stop IrLAP */
        if (self->irlap) {
                irlap_close(self->irlap);
                self->irlap = NULL;
        }

        netif_stop_queue(dev);
        self->open = 0;

        return 0;
}

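/* Allocate one IrDA I/O buffer and reset its wrap/unwrap state. */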
static int bfin_sir_init_iobuf(iobuff_t *io, int size)
{
        io->head = kmalloc(size, GFP_KERNEL);
        if (!io->head)
                return -ENOMEM;
        io->truesize = size;
        io->in_frame = FALSE;
        io->state    = OUTSIDE_FRAME;
        io->data     = io->head;
        return 0;
}

static const struct net_device_ops bfin_sir_ndo = {
        .ndo_open               = bfin_sir_open,
        .ndo_stop               = bfin_sir_stop,
        .ndo_start_xmit         = bfin_sir_hard_xmit,
        .ndo_do_ioctl           = bfin_sir_ioctl,
        .ndo_get_stats          = bfin_sir_stats,
};

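/*
 * Probe: claim the UART pins for this port (per[], presumably from
 * bfin_sir.h, is indexed by pdev->id and carries the id in its fourth
 * element as a sanity check), set up the port and I/O buffers, cap the
 * advertised QoS baud rates at max_rate, and register the netdev.
 */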
static int __devinit bfin_sir_probe(struct platform_device *pdev)
{
        struct net_device *dev;
        struct bfin_sir_self *self;
        unsigned int baudrate_mask;
        struct bfin_sir_port *sir_port;
        int err;

        if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(per) &&
                                per[pdev->id][3] == pdev->id) {
                err = peripheral_request_list(per[pdev->id], DRIVER_NAME);
                if (err)
                        return err;
        } else {
                dev_err(&pdev->dev, "Invalid pdev id, please check board file\n");
                return -ENODEV;
        }

        err = -ENOMEM;
        sir_port = kmalloc(sizeof(*sir_port), GFP_KERNEL);
        if (!sir_port)
                goto err_mem_0;

        bfin_sir_init_ports(sir_port, pdev);

        dev = alloc_irdadev(sizeof(*self));
        if (!dev)
                goto err_mem_1;

        self = netdev_priv(dev);
        self->dev = &pdev->dev;
        self->sir_port = sir_port;
        sir_port->dev = dev;

        err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU);
        if (err)
                goto err_mem_2;
        err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME);
        if (err)
                goto err_mem_3;

        dev->netdev_ops = &bfin_sir_ndo;
        dev->irq = sir_port->irq;

        irda_init_max_qos_capabilies(&self->qos);

        baudrate_mask = IR_9600;

        switch (max_rate) {
        case 115200:
                baudrate_mask |= IR_115200;
                /* fall through */
        case 57600:
                baudrate_mask |= IR_57600;
                /* fall through */
        case 38400:
                baudrate_mask |= IR_38400;
                /* fall through */
        case 19200:
                baudrate_mask |= IR_19200;
                /* fall through */
        case 9600:
                break;
        default:
                dev_warn(&pdev->dev, "Invalid maximum baud rate, using 9600\n");
        }

        self->qos.baud_rate.bits &= baudrate_mask;

        self->qos.min_turn_time.bits = 1; /* 10 ms or more */

        irda_qos_bits_to_value(&self->qos);

        err = register_netdev(dev);

        if (err) {
                kfree(self->tx_buff.head);
err_mem_3:
                kfree(self->rx_buff.head);
err_mem_2:
                free_netdev(dev);
err_mem_1:
                kfree(sir_port);
err_mem_0:
                peripheral_free_list(per[pdev->id]);
        } else
                platform_set_drvdata(pdev, sir_port);

        return err;
}

static int __devexit bfin_sir_remove(struct platform_device *pdev)
{
        struct bfin_sir_port *sir_port;
        struct net_device *dev = NULL;
        struct bfin_sir_self *self;

        sir_port = platform_get_drvdata(pdev);
        if (!sir_port)
                return 0;
        dev = sir_port->dev;
        self = netdev_priv(dev);
        unregister_netdev(dev);
        kfree(self->tx_buff.head);
        kfree(self->rx_buff.head);
        free_netdev(dev);
        kfree(sir_port);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver bfin_ir_driver = {
        .probe   = bfin_sir_probe,
        .remove  = __devexit_p(bfin_sir_remove),
        .suspend = bfin_sir_suspend,
        .resume  = bfin_sir_resume,
        .driver  = {
                .name = DRIVER_NAME,
        },
};

static int __init bfin_sir_init(void)
{
        return platform_driver_register(&bfin_ir_driver);
}

static void __exit bfin_sir_exit(void)
{
        platform_driver_unregister(&bfin_ir_driver);
}

module_init(bfin_sir_init);
module_exit(bfin_sir_exit);

module_param(max_rate, int, 0);
MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");

MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>");
MODULE_DESCRIPTION("Blackfin IrDA driver");
MODULE_LICENSE("GPL");