linux/drivers/spi/spi-ep93xx.c
/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (C) 2010-2011 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

#include <linux/platform_data/dma-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>

#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)
#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8

/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @wait: wait here until given transfer is completed
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 */
struct ep93xx_spi {
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	struct completion		wait;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};

/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @ops: private chip operations
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	struct ep93xx_spi_chip_ops	*ops;
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)
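/*
 * For example, a transfer using 8 bits per word maps to DSS = 7 and a 16-bit
 * transfer to DSS = 15, since the SSP encodes the frame size as the data size
 * minus one.
 */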

static void ep93xx_spi_write_u8(const struct ep93xx_spi *espi,
				u16 reg, u8 value)
{
	writeb(value, espi->regs_base + reg);
}

static u8 ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return readb(spi->regs_base + reg);
}

static void ep93xx_spi_write_u16(const struct ep93xx_spi *espi,
				 u16 reg, u16 value)
{
	writew(value, espi->regs_base + reg);
}

static u16 ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return readw(spi->regs_base + reg);
}

static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
{
	u8 regval;
	int err;

	err = clk_enable(espi->clk);
	if (err)
		return err;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	return 0;
}

static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	clk_disable(espi->clk);
}

static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @rate: desired SPI output clock rate
 * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
 * @div_scr: pointer to return the scr divider
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
				    u32 rate, u8 *div_cpsr, u8 *div_scr)
{
	struct spi_master *master = platform_get_drvdata(espi->pdev);
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that the requested rate is within the range supported by
	 * the controller. Note that the minimum value is already checked in
	 * ep93xx_spi_transfer_one_message().
	 */
	rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);

	/*
	 * Calculate the divisors so that we get a speed according to the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be an even number starting from 2, scr can be any number
	 * between 0 and 255.
	 */
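	/*
	 * Illustrative example (assuming, say, a 14.7456 MHz SSP clock and a
	 * requested rate of 1 MHz): the search below starts at cpsr = 2 and
	 * scans scr upwards; scr = 6 gives 14745600 / (2 * 7) = 1053257 Hz,
	 * which is still above the request, while scr = 7 gives
	 * 14745600 / (2 * 8) = 921600 Hz, the first rate not exceeding it,
	 * so cpsr = 2, scr = 7 is returned.
	 */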
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				*div_scr = (u8)scr;
				*div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}

static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
	int value = (spi->mode & SPI_CS_HIGH) ? control : !control;

	if (chip->ops && chip->ops->cs_control)
		chip->ops->cs_control(spi, value);
}

/**
 * ep93xx_spi_setup() - setup an SPI device
 * @spi: SPI device to setup
 *
 * This function sets up SPI device mode, speed etc. Can be called multiple
 * times for a single device. Returns %0 in case of success, negative error in
 * case of failure. When this function returns success, the device is
 * deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (!chip) {
		dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
			spi->modalias);

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->spi = spi;
		chip->ops = spi->controller_data;

		if (chip->ops && chip->ops->setup) {
			int ret = chip->ops->setup(spi);
			if (ret) {
				kfree(chip);
				return ret;
			}
		}

		spi_set_ctldata(spi, chip);
	}

	ep93xx_spi_cs_control(spi, false);
	return 0;
}

/**
 * ep93xx_spi_cleanup() - cleans up master controller specific state
 * @spi: SPI device to cleanup
 *
 * This function releases master controller specific state for given @spi
 * device.
 */
static void ep93xx_spi_cleanup(struct spi_device *spi)
{
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (chip) {
		if (chip->ops && chip->ops->cleanup)
			chip->ops->cleanup(spi);
		spi_set_ctldata(spi, NULL);
		kfree(chip);
	}
}

/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
 * @espi: ep93xx SPI controller struct
 * @chip: chip specific settings
 * @speed_hz: transfer speed
 * @bits_per_word: transfer bits_per_word
 */
static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
				 const struct ep93xx_spi_chip *chip,
				 u32 speed_hz, u8 bits_per_word)
{
	u8 dss = bits_per_word_to_dss(bits_per_word);
	u8 div_cpsr = 0;
	u8 div_scr = 0;
	u16 cr0;
	int err;

	err = ep93xx_spi_calc_divisors(espi, speed_hz, &div_cpsr, &div_scr);
	if (err)
		return err;

	cr0 = div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= dss;

	dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		chip->spi->mode, div_cpsr, div_scr, dss);
	dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);

	ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr);
	ep93xx_spi_write_u16(espi, SSPCR0, cr0);

	return 0;
}

static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (t->bits_per_word > 8) {
		u16 tx_val = 0;

		/* espi->tx counts bytes, so convert it to a u16 index */
		if (t->tx_buf)
			tx_val = ((u16 *)t->tx_buf)[espi->tx / 2];
		ep93xx_spi_write_u16(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	} else {
		u8 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u8 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u8(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	}
}

static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (t->bits_per_word > 8) {
		u16 rx_val;

		rx_val = ep93xx_spi_read_u16(espi, SSPDR);
		/* espi->rx counts bytes, so convert it to a u16 index */
		if (t->rx_buf)
			((u16 *)t->rx_buf)[espi->rx / 2] = rx_val;
		espi->rx += sizeof(rx_val);
	} else {
		u8 rx_val;

		rx_val = ep93xx_spi_read_u8(espi, SSPDR);
		if (t->rx_buf)
			((u8 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	}
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers the next bytes (or half-words) to/from the RX/TX
 * FIFOs. If called repeatedly, it eventually completes the whole transfer.
 * Returns %-EINPROGRESS if the current transfer is not yet completed,
 * otherwise %0.
 *
 * When this function returns, the RX FIFO should be empty and the TX FIFO
 * should be full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}

static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
{
	/*
	 * Now everything is set up for the current transfer. We prime the TX
	 * FIFO, enable interrupts, and wait for the transfer to complete.
	 */
	if (ep93xx_spi_read_write(espi)) {
		ep93xx_spi_enable_interrupts(espi);
		wait_for_completion(&espi->wait);
	}
}

/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
{
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	if (t->bits_per_word > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */

	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
		dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}

/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function finishes with the DMA transfer. After this, the DMA buffer is
 * unmapped.
 */
static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
				  enum dma_transfer_direction dir)
{
	struct dma_chan *chan;
	struct sg_table *sgt;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		sgt = &espi->rx_sgt;
	} else {
		chan = espi->dma_tx;
		sgt = &espi->tx_sgt;
	}

	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}

static void ep93xx_spi_dma_callback(void *callback_param)
{
	complete(callback_param);
}

static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
	if (IS_ERR(rxd)) {
		dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		msg->status = PTR_ERR(rxd);
		return;
	}

	txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
	if (IS_ERR(txd)) {
		ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		msg->status = PTR_ERR(txd);
		return;
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = &espi->wait;

	/* Now submit both descriptors and wait while they finish */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	wait_for_completion(&espi->wait);

	ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
	ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
}

/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
	int err;

	msg->state = t;

	err = ep93xx_spi_chip_setup(espi, chip, t->speed_hz, t->bits_per_word);
	if (err) {
		dev_err(&espi->pdev->dev,
			"failed to setup chip for transfer\n");
		msg->status = err;
		return;
	}

	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point in setting up DMA for transfers which fit into
	 * the FIFO and can be completed with a single interrupt. In these
	 * cases we use PIO and don't bother with DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case the protocol driver is asking us to drop
			 * the chipselect briefly, we let the scheduler handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}
}

/*
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case of
 * failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle the FIFO level. This way we don't have to
	 * check the TX FIFO status using the %SSPSR_TNF bit, which may cause
	 * RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Assert the chipselect.
	 */
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}

static int ep93xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	espi->current_msg = msg;
	ep93xx_spi_process_message(espi, msg);
	espi->current_msg = NULL;

	spi_finalize_current_message(master);

	return 0;
}

static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got a ROR (receive overrun) interrupt we know that something
	 * is wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * The interrupt is either RX (RIS) or TX (TIS). In both cases
		 * we simply execute the next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In the normal case there is still some processing
			 * left for the current transfer. Let's wait for the
			 * next interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}

static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	chan->private = filter_param;
	return true;
}

static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}

static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}

static int ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int irq;
	int error;

	info = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resources\n");
		return -EBUSY;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		return -ENODEV;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master)
		return -ENOMEM;

	master->setup = ep93xx_spi_setup;
	master->transfer_one_message = ep93xx_spi_transfer_one_message;
	master->cleanup = ep93xx_spi_cleanup;
	master->bus_num = pdev->id;
	master->num_chipselect = info->num_chipselect;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	init_completion(&espi->wait);

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
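	/*
	 * These limits follow from the divisor formula used in
	 * ep93xx_spi_calc_divisors(): the fastest setting is cpsr = 2 with
	 * scr = 0 (i.e. rate / 2) and the slowest is cpsr = 254 with
	 * scr = 255 (i.e. rate / (254 * 256)).
	 */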
	master->max_speed_hz = clk_get_rate(espi->clk) / 2;
	master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
	espi->pdev = pdev;

	espi->sspdr_phys = res->start + SSPDR;

	espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(espi->regs_base)) {
		error = PTR_ERR(espi->regs_base);
		goto fail_release_master;
	}

	error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
				0, "ep93xx-spi", espi);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_release_master;
	}

	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	/* make sure that the hardware is disabled */
	ep93xx_spi_write_u8(espi, SSPCR1, 0);

	error = devm_spi_register_master(&pdev->dev, master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_dma;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, irq);

	return 0;

fail_free_dma:
	ep93xx_spi_release_dma(espi);
fail_release_master:
	spi_master_put(master);

	return error;
}

static int ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	ep93xx_spi_release_dma(espi);

	return 0;
}

static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");