linux/drivers/net/irda/bfin_sir.c
/*
 * Blackfin Infra-red Driver
 *
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 *
 */
#include "bfin_sir.h"

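/*
 * RX DMA geometry: each DMA row holds DMA_SIR_RX_XCNT (10) bytes and the
 * buffer holds DMA_SIR_RX_YCNT rows, i.e. a one-page ring.  The flush
 * timer fires every HZ * 4 / 250 jiffies (16 ms) to drain rows that only
 * fill up partially.
 */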
#ifdef CONFIG_SIR_BFIN_DMA
#define DMA_SIR_RX_XCNT        10
#define DMA_SIR_RX_YCNT        (PAGE_SIZE / DMA_SIR_RX_XCNT)
#define DMA_SIR_RX_FLUSH_JIFS  (HZ * 4 / 250)
#endif

#if ANOMALY_05000447
static int max_rate = 57600;
#else
static int max_rate = 115200;
#endif

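/*
 * Honour the IrDA minimum turnaround time (mtt, in microseconds) before
 * transmitting: clamp to at least 10 ms and sleep for the equivalent
 * number of jiffies, rounded up by one tick.
 */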
static void turnaround_delay(int mtt)
{
        long ticks;

        mtt = mtt < 10000 ? 10000 : mtt;
        ticks = 1 + mtt / (USEC_PER_SEC / HZ);
        schedule_timeout_uninterruptible(ticks);
}

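/*
 * Pick up the MMIO base, IRQ and DMA channels from the platform
 * resources.  The DMA resource packs both channels into one entry
 * (start = RX, end = TX); the switch on res->flags assumes the board
 * file sets exactly one type bit per resource.
 */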
static void bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev)
{
        int i;
        struct resource *res;

        for (i = 0; i < pdev->num_resources; i++) {
                res = &pdev->resource[i];
                switch (res->flags) {
                case IORESOURCE_MEM:
                        sp->membase   = (void __iomem *)res->start;
                        break;
                case IORESOURCE_IRQ:
                        sp->irq = res->start;
                        break;
                case IORESOURCE_DMA:
                        sp->rx_dma_channel = res->start;
                        sp->tx_dma_channel = res->end;
                        break;
                default:
                        break;
                }
        }

        sp->clk = get_sclk();
#ifdef CONFIG_SIR_BFIN_DMA
        sp->tx_done        = 1;
        init_timer(&(sp->rx_dma_timer));
#endif
}

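/*
 * Busy-wait until the transmit holding register is empty so the byte in
 * flight drains out, then mask the TX interrupt.
 */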
static void bfin_sir_stop_tx(struct bfin_sir_port *port)
{
#ifdef CONFIG_SIR_BFIN_DMA
        disable_dma(port->tx_dma_channel);
#endif

        while (!(UART_GET_LSR(port) & THRE))
                cpu_relax();

        UART_CLEAR_IER(port, ETBEI);
}

static void bfin_sir_enable_tx(struct bfin_sir_port *port)
{
        UART_SET_IER(port, ETBEI);
}

static void bfin_sir_stop_rx(struct bfin_sir_port *port)
{
        UART_CLEAR_IER(port, ERBFI);
}

static void bfin_sir_enable_rx(struct bfin_sir_port *port)
{
        UART_SET_IER(port, ERBFI);
}

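/*
 * Program a new baud rate: wait for the transmitter to drain (TEMT),
 * disable the UART state machine (UCEN), load the divisor latch via
 * DLAB with quot = clk / (16 * speed) rounded to nearest, then re-enable
 * the UART and force IrDA mode with inverted RX polarity.
 */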
static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
{
        int ret = -EINVAL;
        unsigned int quot;
        unsigned short val, lsr, lcr;
        static int utime;
        int count = 10;

        lcr = WLS(8);

        switch (speed) {
        case 9600:
        case 19200:
        case 38400:
        case 57600:
        case 115200:

                /*
                 * IRDA is not affected by anomaly 05000230, so there is no
                 * need to tweak the divisor like the UART driver (which would
                 * slightly speed up the baud rate on us).
                 */
                quot = (port->clk + (8 * speed)) / (16 * speed);

                do {
                        udelay(utime);
                        lsr = UART_GET_LSR(port);
                } while (!(lsr & TEMT) && count--);

                /* Microseconds needed to transmit one bit */
                utime = 1000000 / speed + 1;

                /* Clear UCEN bit to reset the UART state machine
                 * and control registers
                 */
                val = UART_GET_GCTL(port);
                val &= ~UCEN;
                UART_PUT_GCTL(port, val);

                /* Set DLAB in LCR to access the divisor latches DLL/DLH */
                UART_SET_DLAB(port);
                SSYNC();

                UART_PUT_DLL(port, quot & 0xFF);
                UART_PUT_DLH(port, (quot >> 8) & 0xFF);
                SSYNC();

                /* Clear DLAB in LCR */
                UART_CLEAR_DLAB(port);
                SSYNC();

                UART_PUT_LCR(port, lcr);

                val = UART_GET_GCTL(port);
                val |= UCEN;
                UART_PUT_GCTL(port, val);

                ret = 0;
                break;
        default:
                printk(KERN_WARNING "bfin_sir: Invalid speed %d\n", speed);
                break;
        }

        val = UART_GET_GCTL(port);
        /* Without RPOLC set we cannot catch the receive interrupt.
         * This depends on the HW layout and the IR transceiver.
         */
        val |= UMOD_IRDA | RPOLC;
        UART_PUT_GCTL(port, val);
        return ret;
}

static int bfin_sir_is_receiving(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;

        if (!(UART_GET_IER(port) & ERBFI))
                return 0;
        return self->rx_buff.state != OUTSIDE_FRAME;
}

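/*
 * PIO mode: feed the UART one byte per TX-ready interrupt.  Once the
 * wrapped frame is out, apply any pending speed change, hand the line
 * back to the receiver and wake the queue.
 */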
#ifdef CONFIG_SIR_BFIN_PIO
static void bfin_sir_tx_chars(struct net_device *dev)
{
        unsigned int chr;
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;

        if (self->tx_buff.len != 0) {
                chr = *(self->tx_buff.data);
                UART_PUT_CHAR(port, chr);
                self->tx_buff.data++;
                self->tx_buff.len--;
        } else {
                self->stats.tx_packets++;
                self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head;
                if (self->newspeed) {
                        bfin_sir_set_speed(port, self->newspeed);
                        self->speed = self->newspeed;
                        self->newspeed = 0;
                }
                bfin_sir_stop_tx(port);
                bfin_sir_enable_rx(port);
                /* I'm hungry! */
                netif_wake_queue(dev);
        }
}

static void bfin_sir_rx_chars(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;
        unsigned char ch;

        UART_CLEAR_LSR(port);
        ch = UART_GET_CHAR(port);
        async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
}

static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;

        spin_lock(&self->lock);
        while ((UART_GET_LSR(port) & DR))
                bfin_sir_rx_chars(dev);
        spin_unlock(&self->lock);

        return IRQ_HANDLED;
}

static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;

        spin_lock(&self->lock);
        if (UART_GET_LSR(port) & THRE)
                bfin_sir_tx_chars(dev);
        spin_unlock(&self->lock);

        return IRQ_HANDLED;
}
#endif /* CONFIG_SIR_BFIN_PIO */

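/*
 * DMA mode TX: flush the wrapped frame out of the data cache (the DMA
 * engine reads memory directly) and program a one-shot, linear, 8-bit
 * transfer that interrupts on completion.  An empty tx_buff means a
 * speed-change-only request.
 */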
#ifdef CONFIG_SIR_BFIN_DMA
static void bfin_sir_dma_tx_chars(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;

        if (!port->tx_done)
                return;
        port->tx_done = 0;

        if (self->tx_buff.len == 0) {
                self->stats.tx_packets++;
                if (self->newspeed) {
                        bfin_sir_set_speed(port, self->newspeed);
                        self->speed = self->newspeed;
                        self->newspeed = 0;
                }
                bfin_sir_enable_rx(port);
                port->tx_done = 1;
                netif_wake_queue(dev);
                return;
        }

        blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data),
                (unsigned long)(self->tx_buff.data+self->tx_buff.len));
        set_dma_config(port->tx_dma_channel,
                set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
                        INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_8,
                        DMA_SYNC_RESTART));
        set_dma_start_addr(port->tx_dma_channel,
                (unsigned long)(self->tx_buff.data));
        set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);
        set_dma_x_modify(port->tx_dma_channel, 1);
        enable_dma(port->tx_dma_channel);
}

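/*
 * TX DMA completion: once DMA_RUN clears, the frame has been handed to
 * the UART.  Ack the DMA interrupt, drain the transmitter, account the
 * packet, apply any pending speed change and give the line back to RX.
 */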
static irqreturn_t bfin_sir_dma_tx_int(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;

        spin_lock(&self->lock);
        if (!(get_dma_curr_irqstat(port->tx_dma_channel) & DMA_RUN)) {
                clear_dma_irqstat(port->tx_dma_channel);
                bfin_sir_stop_tx(port);

                self->stats.tx_packets++;
                self->stats.tx_bytes += self->tx_buff.len;
                self->tx_buff.len = 0;
                if (self->newspeed) {
                        bfin_sir_set_speed(port, self->newspeed);
                        self->speed = self->newspeed;
                        self->newspeed = 0;
                }
                bfin_sir_enable_rx(port);
                /* I'm hungry! */
                netif_wake_queue(dev);
                port->tx_done = 1;
        }
        spin_unlock(&self->lock);

        return IRQ_HANDLED;
}

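/* Push bytes [head, tail) of the RX DMA ring through the SIR unwrapper. */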
static void bfin_sir_dma_rx_chars(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;
        int i;

        UART_CLEAR_LSR(port);

        for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++)
                async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]);
}

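/*
 * Flush-timer handler: the row interrupt only fires when a whole
 * DMA_SIR_RX_XCNT-byte row completes, so work out how far the engine has
 * written into the current row and consume any bytes that arrived since
 * the last pass.
 */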
void bfin_sir_rx_dma_timeout(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;
        int x_pos, pos;
        unsigned long flags;

        spin_lock_irqsave(&self->lock, flags);
        x_pos = DMA_SIR_RX_XCNT - get_dma_curr_xcount(port->rx_dma_channel);
        if (x_pos == DMA_SIR_RX_XCNT)
                x_pos = 0;

        pos = port->rx_dma_nrows * DMA_SIR_RX_XCNT + x_pos;

        if (pos > port->rx_dma_buf.tail) {
                port->rx_dma_buf.tail = pos;
                bfin_sir_dma_rx_chars(dev);
                port->rx_dma_buf.head = port->rx_dma_buf.tail;
        }
        spin_unlock_irqrestore(&self->lock, flags);
}

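/*
 * Row-complete interrupt (INTR_ON_ROW): advance the tail by one full
 * row, unwrap the new bytes, wrap the ring once all DMA_SIR_RX_YCNT rows
 * are used (the autobuffered DMA restarts at the buffer base), and
 * re-arm the flush timer for partial rows.
 */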
static irqreturn_t bfin_sir_dma_rx_int(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;
        unsigned short irqstat;

        spin_lock(&self->lock);

        port->rx_dma_nrows++;
        port->rx_dma_buf.tail = DMA_SIR_RX_XCNT * port->rx_dma_nrows;
        bfin_sir_dma_rx_chars(dev);
        if (port->rx_dma_nrows >= DMA_SIR_RX_YCNT) {
                port->rx_dma_nrows = 0;
                port->rx_dma_buf.tail = 0;
        }
        port->rx_dma_buf.head = port->rx_dma_buf.tail;

        irqstat = get_dma_curr_irqstat(port->rx_dma_channel);
        clear_dma_irqstat(port->rx_dma_channel);
        spin_unlock(&self->lock);

        mod_timer(&port->rx_dma_timer, jiffies + DMA_SIR_RX_FLUSH_JIFS);
        return IRQ_HANDLED;
}
#endif /* CONFIG_SIR_BFIN_DMA */

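/*
 * Bring the port up.  The UART DMA channels are claimed in both modes,
 * presumably so the regular serial driver cannot grab them.  In DMA mode
 * a one-page coherent ring is set up as a 2D autobuffer (XCNT bytes per
 * row, YCNT rows, interrupt per row) together with the flush timer; in
 * PIO mode the RX and TX interrupts (two consecutive IRQ numbers) are
 * requested instead.
 */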
static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
{
#ifdef CONFIG_SIR_BFIN_DMA
        dma_addr_t dma_handle;
#endif /* CONFIG_SIR_BFIN_DMA */

        if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) {
                dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n");
                return -EBUSY;
        }

        if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) {
                dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n");
                free_dma(port->rx_dma_channel);
                return -EBUSY;
        }

#ifdef CONFIG_SIR_BFIN_DMA

        set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
        set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);

        port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE,
                                                  &dma_handle, GFP_DMA);
        port->rx_dma_buf.head = 0;
        port->rx_dma_buf.tail = 0;
        port->rx_dma_nrows = 0;

        set_dma_config(port->rx_dma_channel,
                       set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
                                           INTR_ON_ROW, DIMENSION_2D,
                                           DATA_SIZE_8, DMA_SYNC_RESTART));
        set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT);
        set_dma_x_modify(port->rx_dma_channel, 1);
        set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT);
        set_dma_y_modify(port->rx_dma_channel, 1);
        set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
        enable_dma(port->rx_dma_channel);

        port->rx_dma_timer.data = (unsigned long)(dev);
        port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;

#else

        if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) {
                dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
                return -EBUSY;
        }

        if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) {
                dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
                free_irq(port->irq, dev);
                return -EBUSY;
        }
#endif

        return 0;
}

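/*
 * Tear the port down: mask RX, take the UART out of IrDA mode and
 * release the DMA/IRQ resources acquired in bfin_sir_startup().
 */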
static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev)
{
        unsigned short val;

        bfin_sir_stop_rx(port);

        val = UART_GET_GCTL(port);
        val &= ~(UCEN | UMOD_MASK | RPOLC);
        UART_PUT_GCTL(port, val);

#ifdef CONFIG_SIR_BFIN_DMA
        disable_dma(port->tx_dma_channel);
        disable_dma(port->rx_dma_channel);
        del_timer(&(port->rx_dma_timer));
        dma_free_coherent(NULL, PAGE_SIZE, port->rx_dma_buf.buf, 0);
#else
        free_irq(port->irq+1, dev);
        free_irq(port->irq, dev);
#endif
        free_dma(port->tx_dma_channel);
        free_dma(port->rx_dma_channel);
}

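/*
 * Power management: suspend flushes any pending TX work and shuts the
 * port down; resume reinitialises it at 9600 baud (the IrDA discovery
 * rate) and lets IrLAP negotiate the speed again.
 */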
#ifdef CONFIG_PM
static int bfin_sir_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct bfin_sir_port *sir_port;
        struct net_device *dev;
        struct bfin_sir_self *self;

        sir_port = platform_get_drvdata(pdev);
        if (!sir_port)
                return 0;

        dev = sir_port->dev;
        self = netdev_priv(dev);
        if (self->open) {
                flush_work(&self->work);
                bfin_sir_shutdown(self->sir_port, dev);
                netif_device_detach(dev);
        }

        return 0;
}

static int bfin_sir_resume(struct platform_device *pdev)
{
        struct bfin_sir_port *sir_port;
        struct net_device *dev;
        struct bfin_sir_self *self;
        struct bfin_sir_port *port;

        sir_port = platform_get_drvdata(pdev);
        if (!sir_port)
                return 0;

        dev = sir_port->dev;
        self = netdev_priv(dev);
        port = self->sir_port;
        if (self->open) {
                if (self->newspeed) {
                        self->speed = self->newspeed;
                        self->newspeed = 0;
                }
                bfin_sir_startup(port, dev);
                bfin_sir_set_speed(port, 9600);
                bfin_sir_enable_rx(port);
                netif_device_attach(dev);
        }
        return 0;
}
#else
#define bfin_sir_suspend   NULL
#define bfin_sir_resume    NULL
#endif

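/*
 * Deferred transmit, run from the shared workqueue so it may sleep: wait
 * (bounded by tx_cnt retries) for the far end to stop transmitting while
 * honouring the minimum turnaround time, then kick the transfer.
 */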
static void bfin_sir_send_work(struct work_struct *work)
{
        struct bfin_sir_self  *self = container_of(work, struct bfin_sir_self, work);
        struct net_device *dev = self->sir_port->dev;
        struct bfin_sir_port *port = self->sir_port;
        unsigned short val;
        int tx_cnt = 10;

        while (bfin_sir_is_receiving(dev) && --tx_cnt)
                turnaround_delay(self->mtt);

        bfin_sir_stop_rx(port);

        /* To avoid losing the RX interrupt, we reset the IR function
         * before sending data.  Setting the speed would also work, as
         * that resets the whole UART.
         */
        val = UART_GET_GCTL(port);
        val &= ~(UMOD_MASK | RPOLC);
        UART_PUT_GCTL(port, val);
        SSYNC();
        val |= UMOD_IRDA | RPOLC;
        UART_PUT_GCTL(port, val);
        SSYNC();
        /* bfin_sir_set_speed(port, self->speed); */

#ifdef CONFIG_SIR_BFIN_DMA
        bfin_sir_dma_tx_chars(dev);
#endif
        bfin_sir_enable_tx(port);
        netif_trans_update(dev);
}

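/*
 * .ndo_start_xmit: SIR is half duplex and the turnaround delay sleeps,
 * so wrap the skb into tx_buff here and defer the actual transmission to
 * the work item above.
 */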
static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        int speed = irda_get_next_speed(skb);

        netif_stop_queue(dev);

        self->mtt = irda_get_mtt(skb);

        if (speed != self->speed && speed != -1)
                self->newspeed = speed;

        self->tx_buff.data = self->tx_buff.head;
        if (skb->len == 0)
                self->tx_buff.len = 0;
        else
                self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize);

        schedule_work(&self->work);
        dev_kfree_skb(skb);

        return 0;
}

static int bfin_sir_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
        struct if_irda_req *rq = (struct if_irda_req *)ifreq;
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;
        int ret = 0;

        switch (cmd) {
        case SIOCSBANDWIDTH:
                if (capable(CAP_NET_ADMIN)) {
                        if (self->open) {
                                ret = bfin_sir_set_speed(port, rq->ifr_baudrate);
                                bfin_sir_enable_rx(port);
                        } else {
                                dev_warn(&dev->dev, "SIOCSBANDWIDTH: !netif_running\n");
                                ret = 0;
                        }
                }
                break;

        case SIOCSMEDIABUSY:
                ret = -EPERM;
                if (capable(CAP_NET_ADMIN)) {
                        irda_device_set_media_busy(dev, TRUE);
                        ret = 0;
                }
                break;

        case SIOCGRECEIVING:
                rq->ifr_receiving = bfin_sir_is_receiving(dev);
                break;

        default:
                ret = -EOPNOTSUPP;
                break;
        }

        return ret;
}

static struct net_device_stats *bfin_sir_stats(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);

        return &self->stats;
}

static int bfin_sir_open(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);
        struct bfin_sir_port *port = self->sir_port;
        int err;

        self->newspeed = 0;
        self->speed = 9600;

        spin_lock_init(&self->lock);

        err = bfin_sir_startup(port, dev);
        if (err)
                goto err_startup;

        bfin_sir_set_speed(port, 9600);

        self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
        if (!self->irlap) {
                err = -ENOMEM;
                goto err_irlap;
        }

        INIT_WORK(&self->work, bfin_sir_send_work);

        /*
         * Now enable the interrupt then start the queue
         */
        self->open = 1;
        bfin_sir_enable_rx(port);

        netif_start_queue(dev);

        return 0;

err_irlap:
        self->open = 0;
        bfin_sir_shutdown(port, dev);
err_startup:
        return err;
}

static int bfin_sir_stop(struct net_device *dev)
{
        struct bfin_sir_self *self = netdev_priv(dev);

        flush_work(&self->work);
        bfin_sir_shutdown(self->sir_port, dev);

        if (self->rxskb) {
                dev_kfree_skb(self->rxskb);
                self->rxskb = NULL;
        }

        /* Stop IrLAP */
        if (self->irlap) {
                irlap_close(self->irlap);
                self->irlap = NULL;
        }

        netif_stop_queue(dev);
        self->open = 0;

        return 0;
}

static int bfin_sir_init_iobuf(iobuff_t *io, int size)
{
        io->head = kmalloc(size, GFP_KERNEL);
        if (!io->head)
                return -ENOMEM;
        io->truesize = size;
        io->in_frame = FALSE;
        io->state    = OUTSIDE_FRAME;
        io->data     = io->head;
        return 0;
}

static const struct net_device_ops bfin_sir_ndo = {
        .ndo_open               = bfin_sir_open,
        .ndo_stop               = bfin_sir_stop,
        .ndo_start_xmit         = bfin_sir_hard_xmit,
        .ndo_do_ioctl           = bfin_sir_ioctl,
        .ndo_get_stats          = bfin_sir_stats,
};

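/*
 * Probe: validate pdev->id against the per[] pin table (whose fourth
 * element stores the expected port id), claim the peripheral pins, then
 * allocate the port, the IrDA netdev and the RX/TX wrap buffers.  The
 * baud-rate switch falls through on purpose to accumulate every rate up
 * to max_rate.  Note the error labels sit inside the if (err) branch
 * after register_netdev(), so earlier failures jump into that unwind
 * cascade.
 */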
static int bfin_sir_probe(struct platform_device *pdev)
{
        struct net_device *dev;
        struct bfin_sir_self *self;
        unsigned int baudrate_mask;
        struct bfin_sir_port *sir_port;
        int err;

        if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(per) &&
            per[pdev->id][3] == pdev->id) {
                err = peripheral_request_list(per[pdev->id], DRIVER_NAME);
                if (err)
                        return err;
        } else {
                dev_err(&pdev->dev, "Invalid pdev id, please check board file\n");
                return -ENODEV;
        }

        err = -ENOMEM;
        sir_port = kmalloc(sizeof(*sir_port), GFP_KERNEL);
        if (!sir_port)
                goto err_mem_0;

        bfin_sir_init_ports(sir_port, pdev);

        dev = alloc_irdadev(sizeof(*self));
        if (!dev)
                goto err_mem_1;

        self = netdev_priv(dev);
        self->dev = &pdev->dev;
        self->sir_port = sir_port;
        sir_port->dev = dev;

        err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU);
        if (err)
                goto err_mem_2;
        err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME);
        if (err)
                goto err_mem_3;

        dev->netdev_ops = &bfin_sir_ndo;
        dev->irq = sir_port->irq;

        irda_init_max_qos_capabilies(&self->qos);

        baudrate_mask = IR_9600;

        switch (max_rate) {
        case 115200:
                baudrate_mask |= IR_115200;
                /* fall through */
        case 57600:
                baudrate_mask |= IR_57600;
                /* fall through */
        case 38400:
                baudrate_mask |= IR_38400;
                /* fall through */
        case 19200:
                baudrate_mask |= IR_19200;
                /* fall through */
        case 9600:
                break;
        default:
                dev_warn(&pdev->dev, "Invalid maximum baud rate, using 9600\n");
        }

        self->qos.baud_rate.bits &= baudrate_mask;

        self->qos.min_turn_time.bits = 1; /* 10 ms or more */

        irda_qos_bits_to_value(&self->qos);

        err = register_netdev(dev);

        if (err) {
                kfree(self->tx_buff.head);
err_mem_3:
                kfree(self->rx_buff.head);
err_mem_2:
                free_netdev(dev);
err_mem_1:
                kfree(sir_port);
err_mem_0:
                peripheral_free_list(per[pdev->id]);
        } else
                platform_set_drvdata(pdev, sir_port);

        return err;
}

static int bfin_sir_remove(struct platform_device *pdev)
{
        struct bfin_sir_port *sir_port;
        struct net_device *dev = NULL;
        struct bfin_sir_self *self;

        sir_port = platform_get_drvdata(pdev);
        if (!sir_port)
                return 0;
        dev = sir_port->dev;
        self = netdev_priv(dev);
        unregister_netdev(dev);
        kfree(self->tx_buff.head);
        kfree(self->rx_buff.head);
        free_netdev(dev);
        kfree(sir_port);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver bfin_ir_driver = {
        .probe   = bfin_sir_probe,
        .remove  = bfin_sir_remove,
        .suspend = bfin_sir_suspend,
        .resume  = bfin_sir_resume,
        .driver  = {
                .name = DRIVER_NAME,
        },
};

module_platform_driver(bfin_ir_driver);

module_param(max_rate, int, 0);
MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");

MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>");
MODULE_DESCRIPTION("Blackfin IrDA driver");
MODULE_LICENSE("GPL");