linux/drivers/spi/spi-bfin-sport.c
/*
 * SPI bus via the Blackfin SPORT peripheral
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Copyright 2009-2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>

#include <asm/portmux.h>
#include <asm/bfin5xx_spi.h>
#include <asm/blackfin.h>
#include <asm/bfin_sport.h>
#include <asm/cacheflush.h>

#define DRV_NAME        "bfin-sport-spi"
#define DRV_DESC        "SPI bus via the Blackfin SPORT"

MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:bfin-sport-spi");

enum bfin_sport_spi_state {
        START_STATE,
        RUNNING_STATE,
        DONE_STATE,
        ERROR_STATE,
};

struct bfin_sport_spi_master_data;

struct bfin_sport_transfer_ops {
        void (*write) (struct bfin_sport_spi_master_data *);
        void (*read) (struct bfin_sport_spi_master_data *);
        void (*duplex) (struct bfin_sport_spi_master_data *);
};

struct bfin_sport_spi_master_data {
        /* Driver model hookup */
        struct device *dev;

        /* SPI framework hookup */
        struct spi_master *master;

        /* Regs base of SPI controller */
        struct sport_register __iomem *regs;
        int err_irq;

        /* Pin request list */
        u16 *pin_req;

        /* Driver message queue */
        struct workqueue_struct *workqueue;
        struct work_struct pump_messages;
        spinlock_t lock;
        struct list_head queue;
        int busy;
        bool run;

        /* Message Transfer pump */
        struct tasklet_struct pump_transfers;

        /* Current message transfer state info */
        enum bfin_sport_spi_state state;
        struct spi_message *cur_msg;
        struct spi_transfer *cur_transfer;
        struct bfin_sport_spi_slave_data *cur_chip;
        union {
                void *tx;
                u8 *tx8;
                u16 *tx16;
        };
        void *tx_end;
        union {
                void *rx;
                u8 *rx8;
                u16 *rx16;
        };
        void *rx_end;

        int cs_change;
        struct bfin_sport_transfer_ops *ops;
};

struct bfin_sport_spi_slave_data {
        u16 ctl_reg;
        u16 baud;
        u16 cs_chg_udelay;      /* Some devices require > 255usec delay */
        u32 cs_gpio;
        u16 idle_tx_val;
        struct bfin_sport_transfer_ops *ops;
};

static void
bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data)
{
        bfin_write_or(&drv_data->regs->tcr1, TSPEN);
        bfin_write_or(&drv_data->regs->rcr1, TSPEN);
        SSYNC();
}

static void
bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data)
{
        bfin_write_and(&drv_data->regs->tcr1, ~TSPEN);
        bfin_write_and(&drv_data->regs->rcr1, ~TSPEN);
        SSYNC();
}

/* Calculate the serial clock divider (written to TCLKDIV) for the requested speed in Hz */
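/*
 * A quick sketch of the math (assuming, for example, SCLK = 100 MHz):
 * asking for 5 MHz gives div = 100 MHz / (2 * 5 MHz) - 1 = 9, and the
 * SPORT then runs at SCLK / (2 * (div + 1)) = 5 MHz.  When the integer
 * division would leave the clock above the requested rate, div is bumped
 * by one so the bus does not run faster than speed_hz.
 */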
static u16
bfin_sport_hz_to_spi_baud(u32 speed_hz)
{
        u_long clk, sclk = get_sclk();
        int div = (sclk / (2 * speed_hz)) - 1;

        if (div < 0)
                div = 0;

        clk = sclk / (2 * (div + 1));

        if (clk > speed_hz)
                div++;

        return div;
}

/* Chip select operation functions for cs_change flag */
static void
bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip)
{
        gpio_direction_output(chip->cs_gpio, 0);
}

static void
bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip)
{
        gpio_direction_output(chip->cs_gpio, 1);
        /* Move delay here for consistency */
        if (chip->cs_chg_udelay)
                udelay(chip->cs_chg_udelay);
}

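/*
 * Busy-wait until the receive register signals new data (RXNE), giving up
 * after roughly one second (HZ jiffies) so a wedged bus cannot hang the CPU
 * forever.
 */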
static void
bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data)
{
        unsigned long timeout = jiffies + HZ;
        while (!(bfin_read(&drv_data->regs->stat) & RXNE)) {
                if (!time_before(jiffies, timeout))
                        break;
        }
}

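/*
 * PIO transfer helpers.  The SPORT shifts in one word for every word it
 * shifts out, so even a transmit-only transfer must drain the receive
 * register, and a receive-only transfer must feed the chip's idle_tx_val
 * pattern to generate the clocks.
 */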
static void
bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data)
{
        u16 dummy;

        while (drv_data->tx < drv_data->tx_end) {
                bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
                bfin_sport_spi_stat_poll_complete(drv_data);
                dummy = bfin_read(&drv_data->regs->rx16);
        }
}

static void
bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data)
{
        u16 tx_val = drv_data->cur_chip->idle_tx_val;

        while (drv_data->rx < drv_data->rx_end) {
                bfin_write(&drv_data->regs->tx16, tx_val);
                bfin_sport_spi_stat_poll_complete(drv_data);
                *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
        }
}

static void
bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data)
{
        while (drv_data->rx < drv_data->rx_end) {
                bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
                bfin_sport_spi_stat_poll_complete(drv_data);
                *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
        }
}

static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = {
        .write  = bfin_sport_spi_u8_writer,
        .read   = bfin_sport_spi_u8_reader,
        .duplex = bfin_sport_spi_u8_duplex,
};

static void
bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data)
{
        u16 dummy;

        while (drv_data->tx < drv_data->tx_end) {
                bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
                bfin_sport_spi_stat_poll_complete(drv_data);
                dummy = bfin_read(&drv_data->regs->rx16);
        }
}

static void
bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data)
{
        u16 tx_val = drv_data->cur_chip->idle_tx_val;

        while (drv_data->rx < drv_data->rx_end) {
                bfin_write(&drv_data->regs->tx16, tx_val);
                bfin_sport_spi_stat_poll_complete(drv_data);
                *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
        }
}

static void
bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data)
{
        while (drv_data->rx < drv_data->rx_end) {
                bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
                bfin_sport_spi_stat_poll_complete(drv_data);
                *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
        }
}

static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = {
        .write  = bfin_sport_spi_u16_writer,
        .read   = bfin_sport_spi_u16_reader,
        .duplex = bfin_sport_spi_u16_duplex,
};

/* stop the controller and re-configure the current chip */
static void
bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
{
        struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;

        bfin_sport_spi_disable(drv_data);
        dev_dbg(drv_data->dev, "restoring spi ctl state\n");

        bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
        bfin_write(&drv_data->regs->tclkdiv, chip->baud);
        SSYNC();

        bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
        SSYNC();

        bfin_sport_spi_cs_active(chip);
}

/* test if there are more transfers to be done */
static enum bfin_sport_spi_state
bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data)
{
        struct spi_message *msg = drv_data->cur_msg;
        struct spi_transfer *trans = drv_data->cur_transfer;

        /* Move to next transfer */
        if (trans->transfer_list.next != &msg->transfers) {
                drv_data->cur_transfer =
                    list_entry(trans->transfer_list.next,
                               struct spi_transfer, transfer_list);
                return RUNNING_STATE;
        }

        return DONE_STATE;
}

/*
 * caller already set message->status;
 * dma and pio irqs are blocked, so give the finished message back
 */
static void
bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data)
{
        struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
        unsigned long flags;
        struct spi_message *msg;

        spin_lock_irqsave(&drv_data->lock, flags);
        msg = drv_data->cur_msg;
        drv_data->state = START_STATE;
        drv_data->cur_msg = NULL;
        drv_data->cur_transfer = NULL;
        drv_data->cur_chip = NULL;
        queue_work(drv_data->workqueue, &drv_data->pump_messages);
        spin_unlock_irqrestore(&drv_data->lock, flags);

        if (!drv_data->cs_change)
                bfin_sport_spi_cs_deactive(chip);

        if (msg->complete)
                msg->complete(msg->context);
}

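/*
 * Error interrupt: collect any transmit/receive over- or underflow bits,
 * write the value back to clear the sticky status flags, log the failure,
 * and shut the SPORT down.
 */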
static irqreturn_t
sport_err_handler(int irq, void *dev_id)
{
        struct bfin_sport_spi_master_data *drv_data = dev_id;
        u16 status;

        dev_dbg(drv_data->dev, "%s enter\n", __func__);
        status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF);

        if (status) {
                bfin_write(&drv_data->regs->stat, status);
                SSYNC();

                bfin_sport_spi_disable(drv_data);
                dev_err(drv_data->dev, "status error:%s%s%s%s\n",
                        status & TOVF ? " TOVF" : "",
                        status & TUVF ? " TUVF" : "",
                        status & ROVF ? " ROVF" : "",
                        status & RUVF ? " RUVF" : "");
        }

        return IRQ_HANDLED;
}

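/*
 * Tasklet body: handle exactly one spi_transfer per invocation.  It programs
 * the clock divider and word size, runs the PIO loops, updates the message
 * state, and then reschedules itself until the message reaches DONE_STATE or
 * ERROR_STATE (both of which end in giveback).
 */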
static void
bfin_sport_spi_pump_transfers(unsigned long data)
{
        struct bfin_sport_spi_master_data *drv_data = (void *)data;
        struct spi_message *message = NULL;
        struct spi_transfer *transfer = NULL;
        struct spi_transfer *previous = NULL;
        struct bfin_sport_spi_slave_data *chip = NULL;
        unsigned int bits_per_word;
        u32 tranf_success = 1;
        u32 transfer_speed;
        u8 full_duplex = 0;

        /* Get current state information */
        message = drv_data->cur_msg;
        transfer = drv_data->cur_transfer;
        chip = drv_data->cur_chip;

        if (transfer->speed_hz)
                transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
        else
                transfer_speed = chip->baud;
        bfin_write(&drv_data->regs->tclkdiv, transfer_speed);
        SSYNC();

        /*
         * if the msg is in error or done, report it back via the complete() callback
         */

        /* Handle abort */
        if (drv_data->state == ERROR_STATE) {
                dev_dbg(drv_data->dev, "transfer: we've hit an error\n");
                message->status = -EIO;
                bfin_sport_spi_giveback(drv_data);
                return;
        }

        /* Handle end of message */
        if (drv_data->state == DONE_STATE) {
                dev_dbg(drv_data->dev, "transfer: all done!\n");
                message->status = 0;
                bfin_sport_spi_giveback(drv_data);
                return;
        }

        /* Delay if requested at end of transfer */
        if (drv_data->state == RUNNING_STATE) {
                dev_dbg(drv_data->dev, "transfer: still running ...\n");
                previous = list_entry(transfer->transfer_list.prev,
                                      struct spi_transfer, transfer_list);
                if (previous->delay_usecs)
                        udelay(previous->delay_usecs);
        }

        if (transfer->len == 0) {
                /* Move to next transfer of this msg */
                drv_data->state = bfin_sport_spi_next_transfer(drv_data);
                /* Schedule next transfer tasklet and bail out */
                tasklet_schedule(&drv_data->pump_transfers);
                return;
        }

        if (transfer->tx_buf != NULL) {
                drv_data->tx = (void *)transfer->tx_buf;
                drv_data->tx_end = drv_data->tx + transfer->len;
                dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n",
                        transfer->tx_buf, drv_data->tx_end);
        } else
                drv_data->tx = NULL;

        if (transfer->rx_buf != NULL) {
                full_duplex = transfer->tx_buf != NULL;
                drv_data->rx = transfer->rx_buf;
                drv_data->rx_end = drv_data->rx + transfer->len;
                dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n",
                        transfer->rx_buf, drv_data->rx_end);
        } else
                drv_data->rx = NULL;

        drv_data->cs_change = transfer->cs_change;

        /* Bits per word setup */
        bits_per_word = transfer->bits_per_word;
        if (bits_per_word == 16)
                drv_data->ops = &bfin_sport_transfer_ops_u16;
        else
                drv_data->ops = &bfin_sport_transfer_ops_u8;
        bfin_write(&drv_data->regs->tcr2, bits_per_word - 1);
        bfin_write(&drv_data->regs->tfsdiv, bits_per_word - 1);
        bfin_write(&drv_data->regs->rcr2, bits_per_word - 1);

        drv_data->state = RUNNING_STATE;

        if (drv_data->cs_change)
                bfin_sport_spi_cs_active(chip);

        dev_dbg(drv_data->dev,
                "now pumping a transfer: width is %d, len is %d\n",
                bits_per_word, transfer->len);

        /* PIO mode write then read */
        dev_dbg(drv_data->dev, "doing IO transfer\n");

        bfin_sport_spi_enable(drv_data);
        if (full_duplex) {
                /* full duplex mode */
                BUG_ON((drv_data->tx_end - drv_data->tx) !=
                       (drv_data->rx_end - drv_data->rx));
                drv_data->ops->duplex(drv_data);

                if (drv_data->tx != drv_data->tx_end)
                        tranf_success = 0;
        } else if (drv_data->tx != NULL) {
                /* write only half duplex */

                drv_data->ops->write(drv_data);

                if (drv_data->tx != drv_data->tx_end)
                        tranf_success = 0;
        } else if (drv_data->rx != NULL) {
                /* read only half duplex */

                drv_data->ops->read(drv_data);
                if (drv_data->rx != drv_data->rx_end)
                        tranf_success = 0;
        }
        bfin_sport_spi_disable(drv_data);

        if (!tranf_success) {
                dev_dbg(drv_data->dev, "IO transfer error!\n");
                drv_data->state = ERROR_STATE;
        } else {
                /* Update total bytes transferred */
                message->actual_length += transfer->len;
                /* Move to next transfer of this msg */
                drv_data->state = bfin_sport_spi_next_transfer(drv_data);
                if (drv_data->cs_change)
                        bfin_sport_spi_cs_deactive(chip);
        }

        /* Schedule next transfer tasklet */
        tasklet_schedule(&drv_data->pump_transfers);
}

/* pop a msg from the queue and kick off the real transfer */
static void
bfin_sport_spi_pump_messages(struct work_struct *work)
{
        struct bfin_sport_spi_master_data *drv_data;
        unsigned long flags;
        struct spi_message *next_msg;

        drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages);

        /* Lock queue and check for queue work */
        spin_lock_irqsave(&drv_data->lock, flags);
        if (list_empty(&drv_data->queue) || !drv_data->run) {
                /* pumper kicked off but no work to do */
                drv_data->busy = 0;
                spin_unlock_irqrestore(&drv_data->lock, flags);
                return;
        }

        /* Make sure we are not already running a message */
        if (drv_data->cur_msg) {
                spin_unlock_irqrestore(&drv_data->lock, flags);
                return;
        }

        /* Extract head of queue */
        next_msg = list_entry(drv_data->queue.next,
                struct spi_message, queue);

        drv_data->cur_msg = next_msg;

        /* Set up the SPORT using the per-chip configuration */
        drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

        list_del_init(&drv_data->cur_msg->queue);

        /* Initialize message state */
        drv_data->cur_msg->state = START_STATE;
        drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
                                            struct spi_transfer, transfer_list);
        bfin_sport_spi_restore_state(drv_data);
        dev_dbg(drv_data->dev, "got a message to pump, "
                "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n",
                drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio,
                drv_data->cur_chip->ctl_reg);

        dev_dbg(drv_data->dev,
                "the first transfer len is %d\n",
                drv_data->cur_transfer->len);

        /* Mark as busy and launch transfers */
        tasklet_schedule(&drv_data->pump_transfers);

        drv_data->busy = 1;
        spin_unlock_irqrestore(&drv_data->lock, flags);
}

/*
 * got a msg to transfer, queue it in drv_data->queue
 * and kick off the message pumper
 */
static int
bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
        struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
        unsigned long flags;

        spin_lock_irqsave(&drv_data->lock, flags);

        if (!drv_data->run) {
                spin_unlock_irqrestore(&drv_data->lock, flags);
                return -ESHUTDOWN;
        }

        msg->actual_length = 0;
        msg->status = -EINPROGRESS;
        msg->state = START_STATE;

        dev_dbg(&spi->dev, "adding a msg in transfer()\n");
        list_add_tail(&msg->queue, &drv_data->queue);

        if (drv_data->run && !drv_data->busy)
                queue_work(drv_data->workqueue, &drv_data->pump_messages);

        spin_unlock_irqrestore(&drv_data->lock, flags);

        return 0;
}

/* Called every time common spi devices change state */
static int
bfin_sport_spi_setup(struct spi_device *spi)
{
        struct bfin_sport_spi_slave_data *chip, *first = NULL;
        int ret;

        /* Only alloc (or use chip_info) on first setup */
        chip = spi_get_ctldata(spi);
        if (chip == NULL) {
                struct bfin5xx_spi_chip *chip_info;

                chip = first = kzalloc(sizeof(*chip), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;

                /* platform chip_info isn't required */
                chip_info = spi->controller_data;
                if (chip_info) {
                        /*
                         * DITFS and TDTYPE are the only things we don't set,
                         * and they probably shouldn't be changed by people.
                         */
                        if (chip_info->ctl_reg || chip_info->enable_dma) {
                                ret = -EINVAL;
                                dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields\n");
                                goto error;
                        }
                        chip->cs_chg_udelay = chip_info->cs_chg_udelay;
                        chip->idle_tx_val = chip_info->idle_tx_val;
                }
        }

        /* Translate the common SPI framework settings into our registers;
         * the following configuration is the same for tx and rx.
         */

        if (spi->mode & SPI_CPHA)
                chip->ctl_reg &= ~TCKFE;
        else
                chip->ctl_reg |= TCKFE;

        if (spi->mode & SPI_LSB_FIRST)
                chip->ctl_reg |= TLSBIT;
        else
                chip->ctl_reg &= ~TLSBIT;

        /* Sport in master mode */
        chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS;

        chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz);

        chip->cs_gpio = spi->chip_select;
        ret = gpio_request(chip->cs_gpio, spi->modalias);
        if (ret)
                goto error;

        dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n",
                        spi->modalias, spi->bits_per_word);
        dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n",
                        chip->ctl_reg, spi->chip_select);

        spi_set_ctldata(spi, chip);

        bfin_sport_spi_cs_deactive(chip);

        return ret;

 error:
        kfree(first);
        return ret;
}

/*
 * callback for the spi framework.
 * clean up driver-specific data
 */
static void
bfin_sport_spi_cleanup(struct spi_device *spi)
{
        struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi);

        if (!chip)
                return;

        gpio_free(chip->cs_gpio);

        kfree(chip);
}

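/*
 * Queue plumbing: messages are dispatched from a dedicated single-threaded
 * workqueue, and the per-transfer work is driven by the pump_transfers
 * tasklet set up here.
 */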
static int
bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data)
{
        INIT_LIST_HEAD(&drv_data->queue);
        spin_lock_init(&drv_data->lock);

        drv_data->run = false;
        drv_data->busy = 0;

        /* init transfer tasklet */
        tasklet_init(&drv_data->pump_transfers,
                     bfin_sport_spi_pump_transfers, (unsigned long)drv_data);

        /* init messages workqueue */
        INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages);
        drv_data->workqueue =
            create_singlethread_workqueue(dev_name(drv_data->master->dev.parent));
        if (drv_data->workqueue == NULL)
                return -EBUSY;

        return 0;
}

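/* Mark the queue as running and kick the message pump; -EBUSY if it already is. */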
static int
bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data)
{
        unsigned long flags;

        spin_lock_irqsave(&drv_data->lock, flags);

        if (drv_data->run || drv_data->busy) {
                spin_unlock_irqrestore(&drv_data->lock, flags);
                return -EBUSY;
        }

        drv_data->run = true;
        drv_data->cur_msg = NULL;
        drv_data->cur_transfer = NULL;
        drv_data->cur_chip = NULL;
        spin_unlock_irqrestore(&drv_data->lock, flags);

        queue_work(drv_data->workqueue, &drv_data->pump_messages);

        return 0;
}

static inline int
bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data)
{
        unsigned long flags;
        unsigned limit = 500;
        int status = 0;

        spin_lock_irqsave(&drv_data->lock, flags);

        /*
         * This is a bit lame, but is optimized for the common execution path.
         * A wait_queue on the drv_data->busy could be used, but then the common
         * execution path (pump_messages) would be required to call wake_up or
         * friends on every SPI message. Do this instead.
         */
        drv_data->run = false;
        while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
                spin_unlock_irqrestore(&drv_data->lock, flags);
                msleep(10);
                spin_lock_irqsave(&drv_data->lock, flags);
        }

        if (!list_empty(&drv_data->queue) || drv_data->busy)
                status = -EBUSY;

        spin_unlock_irqrestore(&drv_data->lock, flags);

        return status;
}

static inline int
bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
{
        int status;

        status = bfin_sport_spi_stop_queue(drv_data);
        if (status)
                return status;

        destroy_workqueue(drv_data->workqueue);

        return 0;
}

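/*
 * Probe: allocate the SPI master, map the SPORT registers, bring up the
 * message queue, grab the error IRQ and peripheral pins, and finally
 * register with the SPI core.  The error path below unwinds in reverse
 * order.
 */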
static int bfin_sport_spi_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct bfin5xx_spi_master *platform_info;
        struct spi_master *master;
        struct resource *res, *ires;
        struct bfin_sport_spi_master_data *drv_data;
        int status;

        platform_info = dev_get_platdata(dev);
        if (!platform_info) {
                dev_err(dev, "missing platform data\n");
                return -ENODEV;
        }

        /* Allocate master with space for drv_data */
        master = spi_alloc_master(dev, sizeof(*drv_data));
        if (!master) {
                dev_err(dev, "cannot alloc spi_master\n");
                return -ENOMEM;
        }

        drv_data = spi_master_get_devdata(master);
        drv_data->master = master;
        drv_data->dev = dev;
        drv_data->pin_req = platform_info->pin_req;

        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
        master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
        master->bus_num = pdev->id;
        master->num_chipselect = platform_info->num_chipselect;
        master->cleanup = bfin_sport_spi_cleanup;
        master->setup = bfin_sport_spi_setup;
        master->transfer = bfin_sport_spi_transfer;

        /* Find and map our resources */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(dev, "cannot get IORESOURCE_MEM\n");
                status = -ENOENT;
                goto out_error_get_res;
        }

        drv_data->regs = ioremap(res->start, resource_size(res));
        if (drv_data->regs == NULL) {
                dev_err(dev, "cannot map registers\n");
                status = -ENXIO;
                goto out_error_ioremap;
        }

        ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!ires) {
                dev_err(dev, "cannot get IORESOURCE_IRQ\n");
                status = -ENODEV;
                goto out_error_get_ires;
        }
        drv_data->err_irq = ires->start;

        /* Initialize and start the queue */
        status = bfin_sport_spi_init_queue(drv_data);
        if (status) {
                dev_err(dev, "problem initializing queue\n");
                goto out_error_queue_alloc;
        }

        status = bfin_sport_spi_start_queue(drv_data);
        if (status) {
                dev_err(dev, "problem starting queue\n");
                goto out_error_queue_alloc;
        }

        status = request_irq(drv_data->err_irq, sport_err_handler,
                0, "sport_spi_err", drv_data);
        if (status) {
                dev_err(dev, "unable to request sport err irq\n");
                goto out_error_irq;
        }

        status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
        if (status) {
                dev_err(dev, "requesting peripherals failed\n");
                goto out_error_peripheral;
        }

        /* Register with the SPI framework */
        platform_set_drvdata(pdev, drv_data);
        status = spi_register_master(master);
        if (status) {
                dev_err(dev, "problem registering spi master\n");
                goto out_error_master;
        }

        dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs);
        return 0;

 out_error_master:
        peripheral_free_list(drv_data->pin_req);
 out_error_peripheral:
        free_irq(drv_data->err_irq, drv_data);
 out_error_irq:
 out_error_queue_alloc:
        bfin_sport_spi_destroy_queue(drv_data);
 out_error_get_ires:
        iounmap(drv_data->regs);
 out_error_ioremap:
 out_error_get_res:
        spi_master_put(master);

        return status;
}

/* stop hardware and remove the driver */
static int bfin_sport_spi_remove(struct platform_device *pdev)
{
        struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
        int status = 0;

        if (!drv_data)
                return 0;

        /* Remove the queue */
        status = bfin_sport_spi_destroy_queue(drv_data);
        if (status)
                return status;

        /* Disable the SPORT at the peripheral and SoC level */
        bfin_sport_spi_disable(drv_data);

        /* Disconnect from the SPI framework */
        spi_unregister_master(drv_data->master);

        /* Release the error IRQ, pins, and register mapping */
        free_irq(drv_data->err_irq, drv_data);
        peripheral_free_list(drv_data->pin_req);
        iounmap(drv_data->regs);

        return 0;
}

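/*
 * Power management: suspend drains the message queue and stops the SPORT;
 * resume re-enables the hardware and restarts the queue.
 */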
#ifdef CONFIG_PM_SLEEP
static int bfin_sport_spi_suspend(struct device *dev)
{
        struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev);
        int status;

        status = bfin_sport_spi_stop_queue(drv_data);
        if (status)
                return status;

        /* stop hardware */
        bfin_sport_spi_disable(drv_data);

        return status;
}

static int bfin_sport_spi_resume(struct device *dev)
{
        struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev);
        int status;

        /* Enable the SPI interface */
        bfin_sport_spi_enable(drv_data);

        /* Start the queue running */
        status = bfin_sport_spi_start_queue(drv_data);
        if (status)
                dev_err(drv_data->dev, "problem resuming queue\n");

        return status;
}

static SIMPLE_DEV_PM_OPS(bfin_sport_spi_pm_ops, bfin_sport_spi_suspend,
                        bfin_sport_spi_resume);

#define BFIN_SPORT_SPI_PM_OPS           (&bfin_sport_spi_pm_ops)
#else
#define BFIN_SPORT_SPI_PM_OPS           NULL
#endif

static struct platform_driver bfin_sport_spi_driver = {
        .driver = {
                .name   = DRV_NAME,
                .pm     = BFIN_SPORT_SPI_PM_OPS,
        },
        .probe   = bfin_sport_spi_probe,
        .remove  = bfin_sport_spi_remove,
};
module_platform_driver(bfin_sport_spi_driver);
