linux/drivers/spi/spi-img-spfi.c
/*
 * IMG SPFI controller driver
 *
 * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

#define SPFI_DEVICE_PARAMETER(x)                (0x00 + 0x4 * (x))
#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT      24
#define SPFI_DEVICE_PARAMETER_BITCLK_MASK       0xff
#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT     16
#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK      0xff
#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT      8
#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK       0xff
#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT     0
#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK      0xff

#define SPFI_CONTROL                            0x14
#define SPFI_CONTROL_CONTINUE                   BIT(12)
#define SPFI_CONTROL_SOFT_RESET                 BIT(11)
#define SPFI_CONTROL_SEND_DMA                   BIT(10)
#define SPFI_CONTROL_GET_DMA                    BIT(9)
#define SPFI_CONTROL_SE                         BIT(8)
#define SPFI_CONTROL_TMODE_SHIFT                5
#define SPFI_CONTROL_TMODE_MASK                 0x7
#define SPFI_CONTROL_TMODE_SINGLE               0
#define SPFI_CONTROL_TMODE_DUAL                 1
#define SPFI_CONTROL_TMODE_QUAD                 2
#define SPFI_CONTROL_SPFI_EN                    BIT(0)

#define SPFI_TRANSACTION                        0x18
#define SPFI_TRANSACTION_TSIZE_SHIFT            16
#define SPFI_TRANSACTION_TSIZE_MASK             0xffff

#define SPFI_PORT_STATE                         0x1c
#define SPFI_PORT_STATE_DEV_SEL_SHIFT           20
#define SPFI_PORT_STATE_DEV_SEL_MASK            0x7
#define SPFI_PORT_STATE_CK_POL(x)               BIT(19 - (x))
#define SPFI_PORT_STATE_CK_PHASE(x)             BIT(14 - (x))

#define SPFI_TX_32BIT_VALID_DATA                0x20
#define SPFI_TX_8BIT_VALID_DATA                 0x24
#define SPFI_RX_32BIT_VALID_DATA                0x28
#define SPFI_RX_8BIT_VALID_DATA                 0x2c

#define SPFI_INTERRUPT_STATUS                   0x30
#define SPFI_INTERRUPT_ENABLE                   0x34
#define SPFI_INTERRUPT_CLEAR                    0x38
#define SPFI_INTERRUPT_IACCESS                  BIT(12)
#define SPFI_INTERRUPT_GDEX8BIT                 BIT(11)
#define SPFI_INTERRUPT_ALLDONETRIG              BIT(9)
#define SPFI_INTERRUPT_GDFUL                    BIT(8)
#define SPFI_INTERRUPT_GDHF                     BIT(7)
#define SPFI_INTERRUPT_GDEX32BIT                BIT(6)
#define SPFI_INTERRUPT_GDTRIG                   BIT(5)
#define SPFI_INTERRUPT_SDFUL                    BIT(3)
#define SPFI_INTERRUPT_SDHF                     BIT(2)
#define SPFI_INTERRUPT_SDE                      BIT(1)
#define SPFI_INTERRUPT_SDTRIG                   BIT(0)

/*
 * There are four parallel FIFOs of 16 bytes each.  The word buffer
 * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
 * effective FIFO size of 64 bytes.  The byte buffer (*_8BIT_VALID_DATA)
 * accesses only a single FIFO, resulting in an effective FIFO size of
 * 16 bytes.
 */
#define SPFI_32BIT_FIFO_SIZE                    64
#define SPFI_8BIT_FIFO_SIZE                     16

struct img_spfi {
        struct device *dev;
        struct spi_master *master;
        spinlock_t lock;

        void __iomem *regs;
        phys_addr_t phys;
        int irq;
        struct clk *spfi_clk;
        struct clk *sys_clk;

        struct dma_chan *rx_ch;
        struct dma_chan *tx_ch;
        bool tx_dma_busy;
        bool rx_dma_busy;
};

struct img_spfi_device_data {
        bool gpio_requested;
};

static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
{
        return readl(spfi->regs + reg);
}

static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
{
        writel(val, spfi->regs + reg);
}

static inline void spfi_start(struct img_spfi *spfi)
{
        u32 val;

        val = spfi_readl(spfi, SPFI_CONTROL);
        val |= SPFI_CONTROL_SPFI_EN;
        spfi_writel(spfi, val, SPFI_CONTROL);
}

static inline void spfi_reset(struct img_spfi *spfi)
{
        spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
        spfi_writel(spfi, 0, SPFI_CONTROL);
}

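/*
 * Poll for the ALLDONETRIG status bit, which the hardware sets once the
 * programmed transaction has fully completed, for up to 50ms.  On timeout
 * the controller is soft-reset so a stuck transaction does not poison the
 * next one.
 */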
static int spfi_wait_all_done(struct img_spfi *spfi)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(50);

        while (time_before(jiffies, timeout)) {
                u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);

                if (status & SPFI_INTERRUPT_ALLDONETRIG) {
                        spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG,
                                    SPFI_INTERRUPT_CLEAR);
                        return 0;
                }
                cpu_relax();
        }

        dev_err(spfi->dev, "Timed out waiting for transaction to complete\n");
        spfi_reset(spfi);

        return -ETIMEDOUT;
}

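/*
 * PIO FIFO helpers: each pushes to the send FIFO (or pops from the get FIFO)
 * until the send FIFO reports full (or the get FIFO reports no more data),
 * and returns the number of bytes actually moved.  The 32-bit variants use
 * the word buffer and so transfer whole 4-byte words; the 8-bit variants use
 * the byte buffer for the remaining tail bytes.
 */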
static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
                                     unsigned int max)
{
        unsigned int count = 0;
        u32 status;

        while (count < max / 4) {
                spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
                status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
                if (status & SPFI_INTERRUPT_SDFUL)
                        break;
                spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
                count++;
        }

        return count * 4;
}

static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
                                    unsigned int max)
{
        unsigned int count = 0;
        u32 status;

        while (count < max) {
                spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
                status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
                if (status & SPFI_INTERRUPT_SDFUL)
                        break;
                spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
                count++;
        }

        return count;
}

static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
                                    unsigned int max)
{
        unsigned int count = 0;
        u32 status;

        while (count < max / 4) {
                spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
                            SPFI_INTERRUPT_CLEAR);
                status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
                if (!(status & SPFI_INTERRUPT_GDEX32BIT))
                        break;
                buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
                count++;
        }

        return count * 4;
}

static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
                                   unsigned int max)
{
        unsigned int count = 0;
        u32 status;

        while (count < max) {
                spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
                            SPFI_INTERRUPT_CLEAR);
                status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
                if (!(status & SPFI_INTERRUPT_GDEX8BIT))
                        break;
                buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
                count++;
        }

        return count;
}

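/*
 * Polled transfer path.  The send and get FIFOs are serviced in a loop until
 * all bytes have been moved, with an overall timeout derived from the
 * transfer length and clock rate plus 100ms of slack.  Returns 0 once the
 * ALLDONETRIG completion has also been observed.
 */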
static int img_spfi_start_pio(struct spi_master *master,
                              struct spi_device *spi,
                              struct spi_transfer *xfer)
{
        struct img_spfi *spfi = spi_master_get_devdata(spi->master);
        unsigned int tx_bytes = 0, rx_bytes = 0;
        const void *tx_buf = xfer->tx_buf;
        void *rx_buf = xfer->rx_buf;
        unsigned long timeout;
        int ret;

        if (tx_buf)
                tx_bytes = xfer->len;
        if (rx_buf)
                rx_bytes = xfer->len;

        spfi_start(spfi);

        timeout = jiffies +
                msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
        while ((tx_bytes > 0 || rx_bytes > 0) &&
               time_before(jiffies, timeout)) {
                unsigned int tx_count, rx_count;

                if (tx_bytes >= 4)
                        tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
                else
                        tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);

                if (rx_bytes >= 4)
                        rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
                else
                        rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);

                tx_buf += tx_count;
                rx_buf += rx_count;
                tx_bytes -= tx_count;
                rx_bytes -= rx_count;

                cpu_relax();
        }

        if (rx_bytes > 0 || tx_bytes > 0) {
                dev_err(spfi->dev, "PIO transfer timed out\n");
                return -ETIMEDOUT;
        }

        ret = spfi_wait_all_done(spfi);
        if (ret < 0)
                return ret;

        return 0;
}

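/*
 * DMA completion callbacks.  A transfer may use an RX channel, a TX channel,
 * or both.  Each callback waits for the controller's all-done status and
 * clears its direction's busy flag; whichever direction finishes last
 * finalizes the current SPI transfer.
 */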
static void img_spfi_dma_rx_cb(void *data)
{
        struct img_spfi *spfi = data;
        unsigned long flags;

        spfi_wait_all_done(spfi);

        spin_lock_irqsave(&spfi->lock, flags);
        spfi->rx_dma_busy = false;
        if (!spfi->tx_dma_busy)
                spi_finalize_current_transfer(spfi->master);
        spin_unlock_irqrestore(&spfi->lock, flags);
}

static void img_spfi_dma_tx_cb(void *data)
{
        struct img_spfi *spfi = data;
        unsigned long flags;

        spfi_wait_all_done(spfi);

        spin_lock_irqsave(&spfi->lock, flags);
        spfi->tx_dma_busy = false;
        if (!spfi->rx_dma_busy)
                spi_finalize_current_transfer(spfi->master);
        spin_unlock_irqrestore(&spfi->lock, flags);
}

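/*
 * DMA transfer path.  Transfers that are a multiple of 4 bytes are serviced
 * through the 32-bit FIFO access registers, everything else through the
 * 8-bit ones.  The RX descriptor is submitted before the controller is
 * started so that no received data is lost; the TX descriptor is submitted
 * afterwards.  Returns 1 to tell the SPI core the transfer will complete
 * asynchronously, via spi_finalize_current_transfer() from the callbacks.
 */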
static int img_spfi_start_dma(struct spi_master *master,
                              struct spi_device *spi,
                              struct spi_transfer *xfer)
{
        struct img_spfi *spfi = spi_master_get_devdata(spi->master);
        struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
        struct dma_slave_config rxconf, txconf;

        spfi->rx_dma_busy = false;
        spfi->tx_dma_busy = false;

        if (xfer->rx_buf) {
                rxconf.direction = DMA_DEV_TO_MEM;
                if (xfer->len % 4 == 0) {
                        rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
                        rxconf.src_addr_width = 4;
                        rxconf.src_maxburst = 4;
                } else {
                        rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
                        rxconf.src_addr_width = 1;
                        rxconf.src_maxburst = 4;
                }
                dmaengine_slave_config(spfi->rx_ch, &rxconf);

                rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
                                                 xfer->rx_sg.nents,
                                                 DMA_DEV_TO_MEM,
                                                 DMA_PREP_INTERRUPT);
                if (!rxdesc)
                        goto stop_dma;

                rxdesc->callback = img_spfi_dma_rx_cb;
                rxdesc->callback_param = spfi;
        }

        if (xfer->tx_buf) {
                txconf.direction = DMA_MEM_TO_DEV;
                if (xfer->len % 4 == 0) {
                        txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
                        txconf.dst_addr_width = 4;
                        txconf.dst_maxburst = 4;
                } else {
                        txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
                        txconf.dst_addr_width = 1;
                        txconf.dst_maxburst = 4;
                }
                dmaengine_slave_config(spfi->tx_ch, &txconf);

                txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
                                                 xfer->tx_sg.nents,
                                                 DMA_MEM_TO_DEV,
                                                 DMA_PREP_INTERRUPT);
                if (!txdesc)
                        goto stop_dma;

                txdesc->callback = img_spfi_dma_tx_cb;
                txdesc->callback_param = spfi;
        }

        if (xfer->rx_buf) {
                spfi->rx_dma_busy = true;
                dmaengine_submit(rxdesc);
                dma_async_issue_pending(spfi->rx_ch);
        }

        spfi_start(spfi);

        if (xfer->tx_buf) {
                spfi->tx_dma_busy = true;
                dmaengine_submit(txdesc);
                dma_async_issue_pending(spfi->tx_ch);
        }

        return 1;

stop_dma:
        dmaengine_terminate_all(spfi->rx_ch);
        dmaengine_terminate_all(spfi->tx_ch);
        return -EIO;
}

static void img_spfi_handle_err(struct spi_master *master,
                                struct spi_message *msg)
{
        struct img_spfi *spfi = spi_master_get_devdata(master);
        unsigned long flags;

        /*
         * Stop all DMA and reset the controller if the previous transaction
         * timed out and never completed its DMA.
         */
        spin_lock_irqsave(&spfi->lock, flags);
        if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
                spfi->tx_dma_busy = false;
                spfi->rx_dma_busy = false;

                dmaengine_terminate_all(spfi->tx_ch);
                dmaengine_terminate_all(spfi->rx_ch);
        }
        spin_unlock_irqrestore(&spfi->lock, flags);
}

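/*
 * Program the clock polarity and phase bits for this message's chip select
 * in the PORT_STATE register before the message is handled.
 */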
static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
{
        struct img_spfi *spfi = spi_master_get_devdata(master);
        u32 val;

        val = spfi_readl(spfi, SPFI_PORT_STATE);
        if (msg->spi->mode & SPI_CPHA)
                val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
        else
                val &= ~SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
        if (msg->spi->mode & SPI_CPOL)
                val |= SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
        else
                val &= ~SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
        spfi_writel(spfi, val, SPFI_PORT_STATE);

        return 0;
}

static int img_spfi_unprepare(struct spi_master *master,
                              struct spi_message *msg)
{
        struct img_spfi *spfi = spi_master_get_devdata(master);

        spfi_reset(spfi);

        return 0;
}

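/*
 * Per-device setup: the chip-select line is driven as a GPIO.  On the first
 * call the GPIO is requested and parked in the deasserted state (which
 * depends on SPI_CS_HIGH); on later calls only the output level is
 * reprogrammed in case the mode has changed.
 */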
static int img_spfi_setup(struct spi_device *spi)
{
        int ret = -EINVAL;
        struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);

        if (!spfi_data) {
                spfi_data = kzalloc(sizeof(*spfi_data), GFP_KERNEL);
                if (!spfi_data)
                        return -ENOMEM;
                spfi_data->gpio_requested = false;
                spi_set_ctldata(spi, spfi_data);
        }
        if (!spfi_data->gpio_requested) {
                ret = gpio_request_one(spi->cs_gpio,
                                       (spi->mode & SPI_CS_HIGH) ?
                                       GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
                                       dev_name(&spi->dev));
                if (ret)
                        dev_err(&spi->dev, "can't request chipselect gpio %d\n",
                                spi->cs_gpio);
                else
                        spfi_data->gpio_requested = true;
        } else {
                if (gpio_is_valid(spi->cs_gpio)) {
                        int mode = ((spi->mode & SPI_CS_HIGH) ?
                                    GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH);

                        ret = gpio_direction_output(spi->cs_gpio, mode);
                        if (ret)
                                dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n",
                                        spi->cs_gpio, ret);
                }
        }
        return ret;
}

static void img_spfi_cleanup(struct spi_device *spi)
{
        struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);

        if (spfi_data) {
                if (spfi_data->gpio_requested)
                        gpio_free(spi->cs_gpio);
                kfree(spfi_data);
                spi_set_ctldata(spi, NULL);
        }
}

static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
                            struct spi_transfer *xfer)
{
        struct img_spfi *spfi = spi_master_get_devdata(spi->master);
        u32 val, div;

        /*
         * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
         * power of 2 up to 128
         */
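        /*
         * Worked example (illustrative figures, not from the hardware
         * documentation): with a 40 MHz spfi_clk and a requested 5 MHz,
         * div = DIV_ROUND_UP(40 MHz, 5 MHz) = 8, so BITCLK = 512 / 8 = 64
         * and the resulting bit clock is 40 MHz * 64 / 512 = 5 MHz.
         */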
        div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz);
        div = clamp(512 / (1 << get_count_order(div)), 1, 128);

        val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
        val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
                 SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
        val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
        spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));

        spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
                    SPFI_TRANSACTION);

        val = spfi_readl(spfi, SPFI_CONTROL);
        val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
        if (xfer->tx_buf)
                val |= SPFI_CONTROL_SEND_DMA;
        if (xfer->rx_buf)
                val |= SPFI_CONTROL_GET_DMA;
        val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
        if (xfer->tx_nbits == SPI_NBITS_DUAL &&
            xfer->rx_nbits == SPI_NBITS_DUAL)
                val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
        else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
                 xfer->rx_nbits == SPI_NBITS_QUAD)
                val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
        val |= SPFI_CONTROL_SE;
        spfi_writel(spfi, val, SPFI_CONTROL);
}

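/*
 * Handle one transfer: reject anything longer than the 16-bit TSIZE field
 * can describe, program the controller for this transfer, then hand off to
 * the DMA path when the SPI core reports the transfer can be mapped for DMA,
 * or to the polled PIO path otherwise.
 */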
static int img_spfi_transfer_one(struct spi_master *master,
                                 struct spi_device *spi,
                                 struct spi_transfer *xfer)
{
        struct img_spfi *spfi = spi_master_get_devdata(spi->master);
        int ret;

        if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
                dev_err(spfi->dev,
                        "Transfer length (%d) is greater than the max supported (%d)",
                        xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
                return -EINVAL;
        }

        img_spfi_config(master, spi, xfer);
        if (master->can_dma && master->can_dma(master, spi, xfer))
                ret = img_spfi_start_dma(master, spi, xfer);
        else
                ret = img_spfi_start_pio(master, spi, xfer);

        return ret;
}

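/*
 * DMA is only worth the setup cost when the transfer does not fit entirely
 * in the 64-byte combined FIFO; shorter transfers are done by PIO.
 */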
static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
                             struct spi_transfer *xfer)
{
        if (xfer->len > SPFI_32BIT_FIFO_SIZE)
                return true;
        return false;
}

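/*
 * Interrupt handler.  Only the illegal-access (IACCESS) interrupt is enabled
 * at probe time; FIFO and completion events are polled instead.
 */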
static irqreturn_t img_spfi_irq(int irq, void *dev_id)
{
        struct img_spfi *spfi = (struct img_spfi *)dev_id;
        u32 status;

        status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
        if (status & SPFI_INTERRUPT_IACCESS) {
                spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
                dev_err(spfi->dev, "Illegal access interrupt");
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static int img_spfi_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct img_spfi *spfi;
        struct resource *res;
        int ret;
        u32 max_speed_hz;

        master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
        if (!master)
                return -ENOMEM;
        platform_set_drvdata(pdev, master);

        spfi = spi_master_get_devdata(master);
        spfi->dev = &pdev->dev;
        spfi->master = master;
        spin_lock_init(&spfi->lock);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        spfi->regs = devm_ioremap_resource(spfi->dev, res);
        if (IS_ERR(spfi->regs)) {
                ret = PTR_ERR(spfi->regs);
                goto put_spi;
        }
        spfi->phys = res->start;

        spfi->irq = platform_get_irq(pdev, 0);
        if (spfi->irq < 0) {
                ret = spfi->irq;
                goto put_spi;
        }
        ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
                               IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
        if (ret)
                goto put_spi;

        spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
        if (IS_ERR(spfi->sys_clk)) {
                ret = PTR_ERR(spfi->sys_clk);
                goto put_spi;
        }
        spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
        if (IS_ERR(spfi->spfi_clk)) {
                ret = PTR_ERR(spfi->spfi_clk);
                goto put_spi;
        }

        ret = clk_prepare_enable(spfi->sys_clk);
        if (ret)
                goto put_spi;
        ret = clk_prepare_enable(spfi->spfi_clk);
        if (ret)
                goto disable_pclk;

        spfi_reset(spfi);
        /*
         * Only enable the error (IACCESS) interrupt.  In PIO mode we'll
         * poll the status of the FIFOs.
         */
        spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

        master->auto_runtime_pm = true;
        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
        if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
                master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
        master->dev.of_node = pdev->dev.of_node;
        master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
        master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
        master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;

        /*
         * The maximum speed supported by the SPFI is the lower of 1/4 of
         * the SPFI clock and the "spfi-max-frequency" value defined in the
         * device tree.  If the device tree does not define a value, assume
         * the maximum supported speed is 1/4 of the SPFI clock.
         */
        if (!of_property_read_u32(spfi->dev->of_node, "spfi-max-frequency",
                                  &max_speed_hz)) {
                if (master->max_speed_hz > max_speed_hz)
                        master->max_speed_hz = max_speed_hz;
        }

        master->setup = img_spfi_setup;
        master->cleanup = img_spfi_cleanup;
        master->transfer_one = img_spfi_transfer_one;
        master->prepare_message = img_spfi_prepare;
        master->unprepare_message = img_spfi_unprepare;
        master->handle_err = img_spfi_handle_err;

        spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
        spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
        if (!spfi->tx_ch || !spfi->rx_ch) {
                if (spfi->tx_ch)
                        dma_release_channel(spfi->tx_ch);
                if (spfi->rx_ch)
                        dma_release_channel(spfi->rx_ch);
                dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
        } else {
                master->dma_tx = spfi->tx_ch;
                master->dma_rx = spfi->rx_ch;
                master->can_dma = img_spfi_can_dma;
        }

        pm_runtime_set_active(spfi->dev);
        pm_runtime_enable(spfi->dev);

        ret = devm_spi_register_master(spfi->dev, master);
        if (ret)
                goto disable_pm;

        return 0;

disable_pm:
        pm_runtime_disable(spfi->dev);
        if (spfi->rx_ch)
                dma_release_channel(spfi->rx_ch);
        if (spfi->tx_ch)
                dma_release_channel(spfi->tx_ch);
        clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
        clk_disable_unprepare(spfi->sys_clk);
put_spi:
        spi_master_put(master);

        return ret;
}

static int img_spfi_remove(struct platform_device *pdev)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct img_spfi *spfi = spi_master_get_devdata(master);

        if (spfi->tx_ch)
                dma_release_channel(spfi->tx_ch);
        if (spfi->rx_ch)
                dma_release_channel(spfi->rx_ch);

        pm_runtime_disable(spfi->dev);
        if (!pm_runtime_status_suspended(spfi->dev)) {
                clk_disable_unprepare(spfi->spfi_clk);
                clk_disable_unprepare(spfi->sys_clk);
        }

        return 0;
}

#ifdef CONFIG_PM
static int img_spfi_runtime_suspend(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct img_spfi *spfi = spi_master_get_devdata(master);

        clk_disable_unprepare(spfi->spfi_clk);
        clk_disable_unprepare(spfi->sys_clk);

        return 0;
}

static int img_spfi_runtime_resume(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct img_spfi *spfi = spi_master_get_devdata(master);
        int ret;

        ret = clk_prepare_enable(spfi->sys_clk);
        if (ret)
                return ret;
        ret = clk_prepare_enable(spfi->spfi_clk);
        if (ret) {
                clk_disable_unprepare(spfi->sys_clk);
                return ret;
        }

        return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int img_spfi_suspend(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);

        return spi_master_suspend(master);
}

static int img_spfi_resume(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct img_spfi *spfi = spi_master_get_devdata(master);
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret)
                return ret;
        spfi_reset(spfi);
        pm_runtime_put(dev);

        return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_spfi_pm_ops = {
        SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
                           NULL)
        SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
};

static const struct of_device_id img_spfi_of_match[] = {
        { .compatible = "img,spfi", },
        { },
};
MODULE_DEVICE_TABLE(of, img_spfi_of_match);

static struct platform_driver img_spfi_driver = {
        .driver = {
                .name = "img-spfi",
                .pm = &img_spfi_pm_ops,
                .of_match_table = of_match_ptr(img_spfi_of_match),
        },
        .probe = img_spfi_probe,
        .remove = img_spfi_remove,
};
module_platform_driver(img_spfi_driver);

MODULE_DESCRIPTION("IMG SPFI controller driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");