linux/drivers/spi/spi-s3c64xx.c
   1// SPDX-License-Identifier: GPL-2.0+
   2//
   3// Copyright (c) 2009 Samsung Electronics Co., Ltd.
   4//      Jaswinder Singh <jassi.brar@samsung.com>
   5
   6#include <linux/init.h>
   7#include <linux/module.h>
   8#include <linux/interrupt.h>
   9#include <linux/delay.h>
  10#include <linux/clk.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/dmaengine.h>
  13#include <linux/platform_device.h>
  14#include <linux/pm_runtime.h>
  15#include <linux/spi/spi.h>
  16#include <linux/gpio.h>
  17#include <linux/of.h>
  18#include <linux/of_gpio.h>
  19
  20#include <linux/platform_data/spi-s3c64xx.h>
  21
  22#define MAX_SPI_PORTS           6
  23#define S3C64XX_SPI_QUIRK_POLL          (1 << 0)
  24#define S3C64XX_SPI_QUIRK_CS_AUTO       (1 << 1)
  25#define AUTOSUSPEND_TIMEOUT     2000
  26
  27/* Registers and bit-fields */
  28
  29#define S3C64XX_SPI_CH_CFG              0x00
  30#define S3C64XX_SPI_CLK_CFG             0x04
  31#define S3C64XX_SPI_MODE_CFG            0x08
  32#define S3C64XX_SPI_CS_REG              0x0C
  33#define S3C64XX_SPI_INT_EN              0x10
  34#define S3C64XX_SPI_STATUS              0x14
  35#define S3C64XX_SPI_TX_DATA             0x18
  36#define S3C64XX_SPI_RX_DATA             0x1C
  37#define S3C64XX_SPI_PACKET_CNT          0x20
  38#define S3C64XX_SPI_PENDING_CLR         0x24
  39#define S3C64XX_SPI_SWAP_CFG            0x28
  40#define S3C64XX_SPI_FB_CLK              0x2C
  41
  42#define S3C64XX_SPI_CH_HS_EN            (1<<6)  /* High Speed Enable */
  43#define S3C64XX_SPI_CH_SW_RST           (1<<5)
  44#define S3C64XX_SPI_CH_SLAVE            (1<<4)
  45#define S3C64XX_SPI_CPOL_L              (1<<3)
  46#define S3C64XX_SPI_CPHA_B              (1<<2)
  47#define S3C64XX_SPI_CH_RXCH_ON          (1<<1)
  48#define S3C64XX_SPI_CH_TXCH_ON          (1<<0)
  49
  50#define S3C64XX_SPI_CLKSEL_SRCMSK       (3<<9)
  51#define S3C64XX_SPI_CLKSEL_SRCSHFT      9
  52#define S3C64XX_SPI_ENCLK_ENABLE        (1<<8)
  53#define S3C64XX_SPI_PSR_MASK            0xff
  54
  55#define S3C64XX_SPI_MODE_CH_TSZ_BYTE            (0<<29)
  56#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD        (1<<29)
  57#define S3C64XX_SPI_MODE_CH_TSZ_WORD            (2<<29)
  58#define S3C64XX_SPI_MODE_CH_TSZ_MASK            (3<<29)
  59#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE           (0<<17)
  60#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD       (1<<17)
  61#define S3C64XX_SPI_MODE_BUS_TSZ_WORD           (2<<17)
  62#define S3C64XX_SPI_MODE_BUS_TSZ_MASK           (3<<17)
  63#define S3C64XX_SPI_MODE_RXDMA_ON               (1<<2)
  64#define S3C64XX_SPI_MODE_TXDMA_ON               (1<<1)
  65#define S3C64XX_SPI_MODE_4BURST                 (1<<0)
  66
  67#define S3C64XX_SPI_CS_NSC_CNT_2                (2<<4)
  68#define S3C64XX_SPI_CS_AUTO                     (1<<1)
  69#define S3C64XX_SPI_CS_SIG_INACT                (1<<0)
  70
  71#define S3C64XX_SPI_INT_TRAILING_EN             (1<<6)
  72#define S3C64XX_SPI_INT_RX_OVERRUN_EN           (1<<5)
  73#define S3C64XX_SPI_INT_RX_UNDERRUN_EN          (1<<4)
  74#define S3C64XX_SPI_INT_TX_OVERRUN_EN           (1<<3)
  75#define S3C64XX_SPI_INT_TX_UNDERRUN_EN          (1<<2)
  76#define S3C64XX_SPI_INT_RX_FIFORDY_EN           (1<<1)
  77#define S3C64XX_SPI_INT_TX_FIFORDY_EN           (1<<0)
  78
  79#define S3C64XX_SPI_ST_RX_OVERRUN_ERR           (1<<5)
  80#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR          (1<<4)
  81#define S3C64XX_SPI_ST_TX_OVERRUN_ERR           (1<<3)
  82#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR          (1<<2)
  83#define S3C64XX_SPI_ST_RX_FIFORDY               (1<<1)
  84#define S3C64XX_SPI_ST_TX_FIFORDY               (1<<0)
  85
  86#define S3C64XX_SPI_PACKET_CNT_EN               (1<<16)
  87
  88#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR         (1<<4)
  89#define S3C64XX_SPI_PND_TX_OVERRUN_CLR          (1<<3)
  90#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR         (1<<2)
  91#define S3C64XX_SPI_PND_RX_OVERRUN_CLR          (1<<1)
  92#define S3C64XX_SPI_PND_TRAILING_CLR            (1<<0)
  93
  94#define S3C64XX_SPI_SWAP_RX_HALF_WORD           (1<<7)
  95#define S3C64XX_SPI_SWAP_RX_BYTE                (1<<6)
  96#define S3C64XX_SPI_SWAP_RX_BIT                 (1<<5)
  97#define S3C64XX_SPI_SWAP_RX_EN                  (1<<4)
  98#define S3C64XX_SPI_SWAP_TX_HALF_WORD           (1<<3)
  99#define S3C64XX_SPI_SWAP_TX_BYTE                (1<<2)
 100#define S3C64XX_SPI_SWAP_TX_BIT                 (1<<1)
 101#define S3C64XX_SPI_SWAP_TX_EN                  (1<<0)
 102
 103#define S3C64XX_SPI_FBCLK_MSK                   (3<<0)
 104
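/* Helpers to read the per-port TX/RX FIFO fill level out of a SPI_STATUS value */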
 105#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
 106#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
 107                                (1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
 108#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
 109#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
 110                                        FIFO_LVL_MASK(i))
 111
 112#define S3C64XX_SPI_MAX_TRAILCNT        0x3ff
 113#define S3C64XX_SPI_TRAILCNT_OFF        19
 114
 115#define S3C64XX_SPI_TRAILCNT            S3C64XX_SPI_MAX_TRAILCNT
 116
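/* Approximate busy-wait loop count for t milliseconds, derived from loops_per_jiffy */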
 117#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
 118#define is_polling(x)   (x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
 119
 120#define RXBUSY    (1<<2)
 121#define TXBUSY    (1<<3)
 122
 123struct s3c64xx_spi_dma_data {
 124        struct dma_chan *ch;
 125        dma_cookie_t cookie;
 126        enum dma_transfer_direction direction;
 127};
 128
 129/**
 130 * struct s3c64xx_spi_info - SPI Controller hardware info
 131 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
  132 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
  133 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 134 * @quirks: Bitmask of known quirks
 135 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 136 * @clk_from_cmu: True, if the controller does not include a clock mux and
 137 *      prescaler unit.
  138 * @clk_ioclk: True if the controller uses a dedicated i/o clock (ioclk).
 139 *
  140 * The Samsung s3c64xx SPI controllers are used on various Samsung SoCs but
  141 * differ in some aspects such as the size of the FIFO and SPI bus clock
  142 * setup. Such differences are described by this structure, which is
  143 * provided to the driver as driver data.
 144 */
 145struct s3c64xx_spi_port_config {
 146        int     fifo_lvl_mask[MAX_SPI_PORTS];
 147        int     rx_lvl_offset;
 148        int     tx_st_done;
 149        int     quirks;
 150        bool    high_speed;
 151        bool    clk_from_cmu;
 152        bool    clk_ioclk;
 153};
 154
 155/**
 156 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 157 * @clk: Pointer to the spi clock.
 158 * @src_clk: Pointer to the clock used to generate SPI signals.
 159 * @ioclk: Pointer to the i/o clock between master and slave
 160 * @pdev: Pointer to device's platform device data
 161 * @master: Pointer to the SPI Protocol master.
 162 * @cntrlr_info: Platform specific data for the controller this driver manages.
 163 * @lock: Controller specific lock.
 164 * @state: Set of FLAGS to indicate status.
 165 * @sfr_start: BUS address of SPI controller regs.
 166 * @regs: Pointer to ioremap'ed controller registers.
 167 * @xfer_completion: To indicate completion of xfer task.
 168 * @cur_mode: Stores the active configuration of the controller.
 169 * @cur_bpw: Stores the active bits per word settings.
 170 * @cur_speed: Current clock speed
 171 * @rx_dma: Local receive DMA data (e.g. chan and direction)
 172 * @tx_dma: Local transmit DMA data (e.g. chan and direction)
  173 * @port_conf: Local SPI port configuration data
 174 * @port_id: Port identification number
 175 */
 176struct s3c64xx_spi_driver_data {
 177        void __iomem                    *regs;
 178        struct clk                      *clk;
 179        struct clk                      *src_clk;
 180        struct clk                      *ioclk;
 181        struct platform_device          *pdev;
 182        struct spi_master               *master;
 183        struct s3c64xx_spi_info         *cntrlr_info;
 184        spinlock_t                      lock;
 185        unsigned long                   sfr_start;
 186        struct completion               xfer_completion;
 187        unsigned                        state;
 188        unsigned                        cur_mode, cur_bpw;
 189        unsigned                        cur_speed;
 190        struct s3c64xx_spi_dma_data     rx_dma;
 191        struct s3c64xx_spi_dma_data     tx_dma;
 192        struct s3c64xx_spi_port_config  *port_conf;
 193        unsigned int                    port_id;
 194};
 195
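/* Soft-reset the channel and drain whatever is left in the TX and RX FIFOs */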
 196static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
 197{
 198        void __iomem *regs = sdd->regs;
 199        unsigned long loops;
 200        u32 val;
 201
 202        writel(0, regs + S3C64XX_SPI_PACKET_CNT);
 203
 204        val = readl(regs + S3C64XX_SPI_CH_CFG);
 205        val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
 206        writel(val, regs + S3C64XX_SPI_CH_CFG);
 207
 208        val = readl(regs + S3C64XX_SPI_CH_CFG);
 209        val |= S3C64XX_SPI_CH_SW_RST;
 210        val &= ~S3C64XX_SPI_CH_HS_EN;
 211        writel(val, regs + S3C64XX_SPI_CH_CFG);
 212
 213        /* Flush TxFIFO*/
 214        loops = msecs_to_loops(1);
 215        do {
 216                val = readl(regs + S3C64XX_SPI_STATUS);
 217        } while (TX_FIFO_LVL(val, sdd) && loops--);
 218
 219        if (loops == 0)
 220                dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
 221
 222        /* Flush RxFIFO*/
 223        loops = msecs_to_loops(1);
 224        do {
 225                val = readl(regs + S3C64XX_SPI_STATUS);
 226                if (RX_FIFO_LVL(val, sdd))
 227                        readl(regs + S3C64XX_SPI_RX_DATA);
 228                else
 229                        break;
 230        } while (loops--);
 231
 232        if (loops == 0)
 233                dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
 234
 235        val = readl(regs + S3C64XX_SPI_CH_CFG);
 236        val &= ~S3C64XX_SPI_CH_SW_RST;
 237        writel(val, regs + S3C64XX_SPI_CH_CFG);
 238
 239        val = readl(regs + S3C64XX_SPI_MODE_CFG);
 240        val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
 241        writel(val, regs + S3C64XX_SPI_MODE_CFG);
 242}
 243
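/*
 * DMA completion callback: clear the busy flag for the finished direction and
 * complete the xfer once both RX and TX are idle.
 */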
 244static void s3c64xx_spi_dmacb(void *data)
 245{
 246        struct s3c64xx_spi_driver_data *sdd;
 247        struct s3c64xx_spi_dma_data *dma = data;
 248        unsigned long flags;
 249
 250        if (dma->direction == DMA_DEV_TO_MEM)
 251                sdd = container_of(data,
 252                        struct s3c64xx_spi_driver_data, rx_dma);
 253        else
 254                sdd = container_of(data,
 255                        struct s3c64xx_spi_driver_data, tx_dma);
 256
 257        spin_lock_irqsave(&sdd->lock, flags);
 258
 259        if (dma->direction == DMA_DEV_TO_MEM) {
 260                sdd->state &= ~RXBUSY;
 261                if (!(sdd->state & TXBUSY))
 262                        complete(&sdd->xfer_completion);
 263        } else {
 264                sdd->state &= ~TXBUSY;
 265                if (!(sdd->state & RXBUSY))
 266                        complete(&sdd->xfer_completion);
 267        }
 268
 269        spin_unlock_irqrestore(&sdd->lock, flags);
 270}
 271
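/*
 * Configure the DMA channel for this direction and submit the scatter-gather
 * transfer on it.
 */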
 272static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
 273                        struct sg_table *sgt)
 274{
 275        struct s3c64xx_spi_driver_data *sdd;
 276        struct dma_slave_config config;
 277        struct dma_async_tx_descriptor *desc;
 278        int ret;
 279
 280        memset(&config, 0, sizeof(config));
 281
 282        if (dma->direction == DMA_DEV_TO_MEM) {
 283                sdd = container_of((void *)dma,
 284                        struct s3c64xx_spi_driver_data, rx_dma);
 285                config.direction = dma->direction;
 286                config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
 287                config.src_addr_width = sdd->cur_bpw / 8;
 288                config.src_maxburst = 1;
 289                dmaengine_slave_config(dma->ch, &config);
 290        } else {
 291                sdd = container_of((void *)dma,
 292                        struct s3c64xx_spi_driver_data, tx_dma);
 293                config.direction = dma->direction;
 294                config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
 295                config.dst_addr_width = sdd->cur_bpw / 8;
 296                config.dst_maxburst = 1;
 297                dmaengine_slave_config(dma->ch, &config);
 298        }
 299
 300        desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
 301                                       dma->direction, DMA_PREP_INTERRUPT);
 302        if (!desc) {
 303                dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
 304                        dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
 305                return -ENOMEM;
 306        }
 307
 308        desc->callback = s3c64xx_spi_dmacb;
 309        desc->callback_param = dma;
 310
 311        dma->cookie = dmaengine_submit(desc);
 312        ret = dma_submit_error(dma->cookie);
 313        if (ret) {
 314                dev_err(&sdd->pdev->dev, "DMA submission failed");
 315                return -EIO;
 316        }
 317
 318        dma_async_issue_pending(dma->ch);
 319        return 0;
 320}
 321
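/*
 * Drive the controller-managed chip select: assert it (or enable automatic
 * /CS handling on controllers with the CS_AUTO quirk) when enable is true,
 * deassert it otherwise.
 */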
 322static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
 323{
 324        struct s3c64xx_spi_driver_data *sdd =
 325                                        spi_master_get_devdata(spi->master);
 326
 327        if (sdd->cntrlr_info->no_cs)
 328                return;
 329
 330        if (enable) {
 331                if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
 332                        writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
 333                } else {
 334                        u32 ssel = readl(sdd->regs + S3C64XX_SPI_CS_REG);
 335
 336                        ssel |= (S3C64XX_SPI_CS_AUTO |
 337                                                S3C64XX_SPI_CS_NSC_CNT_2);
 338                        writel(ssel, sdd->regs + S3C64XX_SPI_CS_REG);
 339                }
 340        } else {
 341                if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
 342                        writel(S3C64XX_SPI_CS_SIG_INACT,
 343                               sdd->regs + S3C64XX_SPI_CS_REG);
 344        }
 345}
 346
 347static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 348{
 349        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
 350
 351        if (is_polling(sdd))
 352                return 0;
 353
 354        spi->dma_rx = sdd->rx_dma.ch;
 355        spi->dma_tx = sdd->tx_dma.ch;
 356
 357        return 0;
 358}
 359
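/* Only transfers larger than the FIFO depth are worth handing to DMA */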
 360static bool s3c64xx_spi_can_dma(struct spi_master *master,
 361                                struct spi_device *spi,
 362                                struct spi_transfer *xfer)
 363{
 364        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
 365
 366        return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
 367}
 368
 369static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 370                                    struct spi_transfer *xfer, int dma_mode)
 371{
 372        void __iomem *regs = sdd->regs;
 373        u32 modecfg, chcfg;
 374        int ret = 0;
 375
 376        modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
 377        modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
 378
 379        chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
 380        chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
 381
 382        if (dma_mode) {
 383                chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
 384        } else {
  385                /* Always shift data in through the FIFO, even if the xfer
  386                 * is Tx only; this lets the PCKT_CNT value generate exactly
  387                 * the number of clocks needed.
  388                 */
 389                chcfg |= S3C64XX_SPI_CH_RXCH_ON;
 390                writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 391                                        | S3C64XX_SPI_PACKET_CNT_EN,
 392                                        regs + S3C64XX_SPI_PACKET_CNT);
 393        }
 394
 395        if (xfer->tx_buf != NULL) {
 396                sdd->state |= TXBUSY;
 397                chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 398                if (dma_mode) {
 399                        modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
 400                        ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
 401                } else {
 402                        switch (sdd->cur_bpw) {
 403                        case 32:
 404                                iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
 405                                        xfer->tx_buf, xfer->len / 4);
 406                                break;
 407                        case 16:
 408                                iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
 409                                        xfer->tx_buf, xfer->len / 2);
 410                                break;
 411                        default:
 412                                iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
 413                                        xfer->tx_buf, xfer->len);
 414                                break;
 415                        }
 416                }
 417        }
 418
 419        if (xfer->rx_buf != NULL) {
 420                sdd->state |= RXBUSY;
 421
 422                if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
 423                                        && !(sdd->cur_mode & SPI_CPHA))
 424                        chcfg |= S3C64XX_SPI_CH_HS_EN;
 425
 426                if (dma_mode) {
 427                        modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
 428                        chcfg |= S3C64XX_SPI_CH_RXCH_ON;
 429                        writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 430                                        | S3C64XX_SPI_PACKET_CNT_EN,
 431                                        regs + S3C64XX_SPI_PACKET_CNT);
 432                        ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
 433                }
 434        }
 435
 436        if (ret)
 437                return ret;
 438
 439        writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
 440        writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
 441
 442        return 0;
 443}
 444
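/*
 * Poll until the RX FIFO fills to its maximum depth or the timeout expires;
 * returns the FIFO fill level actually reached.
 */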
 445static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 446                                        int timeout_ms)
 447{
 448        void __iomem *regs = sdd->regs;
 449        unsigned long val = 1;
 450        u32 status;
 451
 452        /* max fifo depth available */
 453        u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
 454
 455        if (timeout_ms)
 456                val = msecs_to_loops(timeout_ms);
 457
 458        do {
 459                status = readl(regs + S3C64XX_SPI_STATUS);
 460        } while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
 461
 462        /* return the actual received data length */
 463        return RX_FIFO_LVL(status, sdd);
 464}
 465
 466static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
 467                                struct spi_transfer *xfer)
 468{
 469        void __iomem *regs = sdd->regs;
 470        unsigned long val;
 471        u32 status;
 472        int ms;
 473
 474        /* millisecs to xfer 'len' bytes @ 'cur_speed' */
 475        ms = xfer->len * 8 * 1000 / sdd->cur_speed;
 476        ms += 30;               /* some tolerance */
 477        ms = max(ms, 100);      /* minimum timeout */
 478
 479        val = msecs_to_jiffies(ms) + 10;
 480        val = wait_for_completion_timeout(&sdd->xfer_completion, val);
 481
  482        /*
  483         * If the previous xfer completed within the timeout, proceed;
  484         * otherwise return -EIO.
  485         * DMA Tx completes as soon as the data has been written to the
  486         * FIFO, without waiting for the actual transmission on the bus.
  487         * DMA Rx completes only after the DMA has drained the FIFO, which
  488         * requires the bus transmission to finish, so no extra wait is
  489         * needed when the xfer involves Rx (with or without Tx).
  490         */
 491        if (val && !xfer->rx_buf) {
 492                val = msecs_to_loops(10);
 493                status = readl(regs + S3C64XX_SPI_STATUS);
 494                while ((TX_FIFO_LVL(status, sdd)
 495                        || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
 496                       && --val) {
 497                        cpu_relax();
 498                        status = readl(regs + S3C64XX_SPI_STATUS);
 499                }
 500
 501        }
 502
 503        /* If timed out while checking rx/tx status return error */
 504        if (!val)
 505                return -EIO;
 506
 507        return 0;
 508}
 509
 510static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
 511                                struct spi_transfer *xfer)
 512{
 513        void __iomem *regs = sdd->regs;
 514        unsigned long val;
 515        u32 status;
 516        int loops;
 517        u32 cpy_len;
 518        u8 *buf;
 519        int ms;
 520
 521        /* millisecs to xfer 'len' bytes @ 'cur_speed' */
 522        ms = xfer->len * 8 * 1000 / sdd->cur_speed;
 523        ms += 10; /* some tolerance */
 524
 525        val = msecs_to_loops(ms);
 526        do {
 527                status = readl(regs + S3C64XX_SPI_STATUS);
 528        } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
 529
 530        if (!val)
 531                return -EIO;
 532
 533        /* If it was only Tx */
 534        if (!xfer->rx_buf) {
 535                sdd->state &= ~TXBUSY;
 536                return 0;
 537        }
 538
  539        /*
  540         * If the receive length is bigger than the controller FIFO size,
  541         * read the FIFO in multiple passes:
  542         * loops = length / max FIFO size (derived from the FIFO level
  543         * mask).
  544         * For any length below the FIFO size the loop below still runs
  545         * at least once.
  546         */
 547        loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
 548        buf = xfer->rx_buf;
 549        do {
 550                /* wait for data to be received in the fifo */
 551                cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
 552                                                       (loops ? ms : 0));
 553
 554                switch (sdd->cur_bpw) {
 555                case 32:
 556                        ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
 557                                     buf, cpy_len / 4);
 558                        break;
 559                case 16:
 560                        ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
 561                                     buf, cpy_len / 2);
 562                        break;
 563                default:
 564                        ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
 565                                    buf, cpy_len);
 566                        break;
 567                }
 568
 569                buf = buf + cpy_len;
 570        } while (loops--);
 571        sdd->state &= ~RXBUSY;
 572
 573        return 0;
 574}
 575
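/* Program the hardware with the cached mode, bits-per-word and clock settings */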
 576static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 577{
 578        void __iomem *regs = sdd->regs;
 579        int ret;
 580        u32 val;
 581
 582        /* Disable Clock */
 583        if (!sdd->port_conf->clk_from_cmu) {
 584                val = readl(regs + S3C64XX_SPI_CLK_CFG);
 585                val &= ~S3C64XX_SPI_ENCLK_ENABLE;
 586                writel(val, regs + S3C64XX_SPI_CLK_CFG);
 587        }
 588
 589        /* Set Polarity and Phase */
 590        val = readl(regs + S3C64XX_SPI_CH_CFG);
 591        val &= ~(S3C64XX_SPI_CH_SLAVE |
 592                        S3C64XX_SPI_CPOL_L |
 593                        S3C64XX_SPI_CPHA_B);
 594
 595        if (sdd->cur_mode & SPI_CPOL)
 596                val |= S3C64XX_SPI_CPOL_L;
 597
 598        if (sdd->cur_mode & SPI_CPHA)
 599                val |= S3C64XX_SPI_CPHA_B;
 600
 601        writel(val, regs + S3C64XX_SPI_CH_CFG);
 602
 603        /* Set Channel & DMA Mode */
 604        val = readl(regs + S3C64XX_SPI_MODE_CFG);
 605        val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
 606                        | S3C64XX_SPI_MODE_CH_TSZ_MASK);
 607
 608        switch (sdd->cur_bpw) {
 609        case 32:
 610                val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
 611                val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
 612                break;
 613        case 16:
 614                val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
 615                val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
 616                break;
 617        default:
 618                val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
 619                val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
 620                break;
 621        }
 622
 623        writel(val, regs + S3C64XX_SPI_MODE_CFG);
 624
 625        if (sdd->port_conf->clk_from_cmu) {
 626                /* The src_clk clock is divided internally by 2 */
 627                ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
 628                if (ret)
 629                        return ret;
 630                sdd->cur_speed = clk_get_rate(sdd->src_clk) / 2;
 631        } else {
 632                /* Configure Clock */
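                /* Resulting SPI clock = src_clk / 2 / (PSR + 1) */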
 633                val = readl(regs + S3C64XX_SPI_CLK_CFG);
 634                val &= ~S3C64XX_SPI_PSR_MASK;
 635                val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
 636                                & S3C64XX_SPI_PSR_MASK);
 637                writel(val, regs + S3C64XX_SPI_CLK_CFG);
 638
 639                /* Enable Clock */
 640                val = readl(regs + S3C64XX_SPI_CLK_CFG);
 641                val |= S3C64XX_SPI_ENCLK_ENABLE;
 642                writel(val, regs + S3C64XX_SPI_CLK_CFG);
 643        }
 644
 645        return 0;
 646}
 647
 648#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
 649
 650static int s3c64xx_spi_prepare_message(struct spi_master *master,
 651                                       struct spi_message *msg)
 652{
 653        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
 654        struct spi_device *spi = msg->spi;
 655        struct s3c64xx_spi_csinfo *cs = spi->controller_data;
 656
 657        /* Configure feedback delay */
 658        writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
 659
 660        return 0;
 661}
 662
 663static int s3c64xx_spi_transfer_one(struct spi_master *master,
 664                                    struct spi_device *spi,
 665                                    struct spi_transfer *xfer)
 666{
 667        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
 668        const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
 669        const void *tx_buf = NULL;
 670        void *rx_buf = NULL;
 671        int target_len = 0, origin_len = 0;
 672        int use_dma = 0;
 673        int status;
 674        u32 speed;
 675        u8 bpw;
 676        unsigned long flags;
 677
 678        reinit_completion(&sdd->xfer_completion);
 679
 680        /* Only BPW and Speed may change across transfers */
 681        bpw = xfer->bits_per_word;
 682        speed = xfer->speed_hz;
 683
 684        if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
 685                sdd->cur_bpw = bpw;
 686                sdd->cur_speed = speed;
 687                sdd->cur_mode = spi->mode;
 688                status = s3c64xx_spi_config(sdd);
 689                if (status)
 690                        return status;
 691        }
 692
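        /*
         * Prefer DMA for transfers larger than the FIFO when channels are
         * available; in polling mode, oversized transfers are instead split
         * into FIFO-sized chunks below.
         */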
 693        if (!is_polling(sdd) && (xfer->len > fifo_len) &&
 694            sdd->rx_dma.ch && sdd->tx_dma.ch) {
 695                use_dma = 1;
 696
 697        } else if (is_polling(sdd) && xfer->len > fifo_len) {
 698                tx_buf = xfer->tx_buf;
 699                rx_buf = xfer->rx_buf;
 700                origin_len = xfer->len;
 701
 702                target_len = xfer->len;
 703                if (xfer->len > fifo_len)
 704                        xfer->len = fifo_len;
 705        }
 706
 707        do {
 708                spin_lock_irqsave(&sdd->lock, flags);
 709
  710                /* Clear the busy flags; the datapath setup below marks only what is pending */
 711                sdd->state &= ~RXBUSY;
 712                sdd->state &= ~TXBUSY;
 713
 714                /* Start the signals */
 715                s3c64xx_spi_set_cs(spi, true);
 716
 717                status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
 718
 719                spin_unlock_irqrestore(&sdd->lock, flags);
 720
 721                if (status) {
 722                        dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
 723                        break;
 724                }
 725
 726                if (use_dma)
 727                        status = s3c64xx_wait_for_dma(sdd, xfer);
 728                else
 729                        status = s3c64xx_wait_for_pio(sdd, xfer);
 730
 731                if (status) {
 732                        dev_err(&spi->dev,
 733                                "I/O Error: rx-%d tx-%d rx-%c tx-%c len-%d dma-%d res-(%d)\n",
 734                                xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
 735                                (sdd->state & RXBUSY) ? 'f' : 'p',
 736                                (sdd->state & TXBUSY) ? 'f' : 'p',
 737                                xfer->len, use_dma ? 1 : 0, status);
 738
 739                        if (use_dma) {
 740                                struct dma_tx_state s;
 741
 742                                if (xfer->tx_buf && (sdd->state & TXBUSY)) {
 743                                        dmaengine_pause(sdd->tx_dma.ch);
 744                                        dmaengine_tx_status(sdd->tx_dma.ch, sdd->tx_dma.cookie, &s);
 745                                        dmaengine_terminate_all(sdd->tx_dma.ch);
 746                                        dev_err(&spi->dev, "TX residue: %d\n", s.residue);
 747
 748                                }
 749                                if (xfer->rx_buf && (sdd->state & RXBUSY)) {
 750                                        dmaengine_pause(sdd->rx_dma.ch);
 751                                        dmaengine_tx_status(sdd->rx_dma.ch, sdd->rx_dma.cookie, &s);
 752                                        dmaengine_terminate_all(sdd->rx_dma.ch);
 753                                        dev_err(&spi->dev, "RX residue: %d\n", s.residue);
 754                                }
 755                        }
 756                } else {
 757                        s3c64xx_flush_fifo(sdd);
 758                }
 759                if (target_len > 0) {
 760                        target_len -= xfer->len;
 761
 762                        if (xfer->tx_buf)
 763                                xfer->tx_buf += xfer->len;
 764
 765                        if (xfer->rx_buf)
 766                                xfer->rx_buf += xfer->len;
 767
 768                        if (target_len > fifo_len)
 769                                xfer->len = fifo_len;
 770                        else
 771                                xfer->len = target_len;
 772                }
 773        } while (target_len > 0);
 774
 775        if (origin_len) {
 776                /* Restore original xfer buffers and length */
 777                xfer->tx_buf = tx_buf;
 778                xfer->rx_buf = rx_buf;
 779                xfer->len = origin_len;
 780        }
 781
 782        return status;
 783}
 784
 785static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
 786                                struct spi_device *spi)
 787{
 788        struct s3c64xx_spi_csinfo *cs;
 789        struct device_node *slave_np, *data_np = NULL;
 790        u32 fb_delay = 0;
 791
 792        slave_np = spi->dev.of_node;
 793        if (!slave_np) {
 794                dev_err(&spi->dev, "device node not found\n");
 795                return ERR_PTR(-EINVAL);
 796        }
 797
 798        data_np = of_get_child_by_name(slave_np, "controller-data");
 799        if (!data_np) {
 800                dev_err(&spi->dev, "child node 'controller-data' not found\n");
 801                return ERR_PTR(-EINVAL);
 802        }
 803
 804        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
 805        if (!cs) {
 806                of_node_put(data_np);
 807                return ERR_PTR(-ENOMEM);
 808        }
 809
 810        of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
 811        cs->fb_delay = fb_delay;
 812        of_node_put(data_np);
 813        return cs;
 814}
 815
 816/*
  817 * Here we only check the validity of the requested configuration and
  818 * save it in a local data structure.
  819 * The controller itself is configured only just before a message is
  820 * transferred.
 821 */
 822static int s3c64xx_spi_setup(struct spi_device *spi)
 823{
 824        struct s3c64xx_spi_csinfo *cs = spi->controller_data;
 825        struct s3c64xx_spi_driver_data *sdd;
 826        int err;
 827
 828        sdd = spi_master_get_devdata(spi->master);
 829        if (spi->dev.of_node) {
 830                cs = s3c64xx_get_slave_ctrldata(spi);
 831                spi->controller_data = cs;
 832        } else if (cs) {
 833                /* On non-DT platforms the SPI core will set spi->cs_gpio
 834                 * to -ENOENT. The GPIO pin used to drive the chip select
  835                 * is defined by platform data, so the spi->cs_gpio value
  836                 * has to be overridden with the proper GPIO pin number.
 837                 */
 838                spi->cs_gpio = cs->line;
 839        }
 840
 841        if (IS_ERR_OR_NULL(cs)) {
 842                dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
 843                return -ENODEV;
 844        }
 845
 846        if (!spi_get_ctldata(spi)) {
 847                if (gpio_is_valid(spi->cs_gpio)) {
 848                        err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH,
 849                                               dev_name(&spi->dev));
 850                        if (err) {
 851                                dev_err(&spi->dev,
 852                                        "Failed to get /CS gpio [%d]: %d\n",
 853                                        spi->cs_gpio, err);
 854                                goto err_gpio_req;
 855                        }
 856                }
 857
 858                spi_set_ctldata(spi, cs);
 859        }
 860
 861        pm_runtime_get_sync(&sdd->pdev->dev);
 862
 863        /* Check if we can provide the requested rate */
 864        if (!sdd->port_conf->clk_from_cmu) {
 865                u32 psr, speed;
 866
  867                /* Maximum speed possible, with PSR = 0 */
 868                speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
 869
 870                if (spi->max_speed_hz > speed)
 871                        spi->max_speed_hz = speed;
 872
 873                psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
 874                psr &= S3C64XX_SPI_PSR_MASK;
 875                if (psr == S3C64XX_SPI_PSR_MASK)
 876                        psr--;
 877
 878                speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
 879                if (spi->max_speed_hz < speed) {
 880                        if (psr+1 < S3C64XX_SPI_PSR_MASK) {
 881                                psr++;
 882                        } else {
 883                                err = -EINVAL;
 884                                goto setup_exit;
 885                        }
 886                }
 887
 888                speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
 889                if (spi->max_speed_hz >= speed) {
 890                        spi->max_speed_hz = speed;
 891                } else {
 892                        dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
 893                                spi->max_speed_hz);
 894                        err = -EINVAL;
 895                        goto setup_exit;
 896                }
 897        }
 898
 899        pm_runtime_mark_last_busy(&sdd->pdev->dev);
 900        pm_runtime_put_autosuspend(&sdd->pdev->dev);
 901        s3c64xx_spi_set_cs(spi, false);
 902
 903        return 0;
 904
 905setup_exit:
 906        pm_runtime_mark_last_busy(&sdd->pdev->dev);
 907        pm_runtime_put_autosuspend(&sdd->pdev->dev);
 908        /* setup() returns with device de-selected */
 909        s3c64xx_spi_set_cs(spi, false);
 910
 911        if (gpio_is_valid(spi->cs_gpio))
 912                gpio_free(spi->cs_gpio);
 913        spi_set_ctldata(spi, NULL);
 914
 915err_gpio_req:
 916        if (spi->dev.of_node)
 917                kfree(cs);
 918
 919        return err;
 920}
 921
 922static void s3c64xx_spi_cleanup(struct spi_device *spi)
 923{
 924        struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
 925
 926        if (gpio_is_valid(spi->cs_gpio)) {
 927                gpio_free(spi->cs_gpio);
 928                if (spi->dev.of_node)
 929                        kfree(cs);
 930                else {
 931                        /* On non-DT platforms, the SPI core sets
 932                         * spi->cs_gpio to -ENOENT and .setup()
 933                         * overrides it with the GPIO pin value
 934                         * passed using platform data.
 935                         */
 936                        spi->cs_gpio = -ENOENT;
 937                }
 938        }
 939
 940        spi_set_ctldata(spi, NULL);
 941}
 942
 943static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
 944{
 945        struct s3c64xx_spi_driver_data *sdd = data;
 946        struct spi_master *spi = sdd->master;
 947        unsigned int val, clr = 0;
 948
 949        val = readl(sdd->regs + S3C64XX_SPI_STATUS);
 950
 951        if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
 952                clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
 953                dev_err(&spi->dev, "RX overrun\n");
 954        }
 955        if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
 956                clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
 957                dev_err(&spi->dev, "RX underrun\n");
 958        }
 959        if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
 960                clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
 961                dev_err(&spi->dev, "TX overrun\n");
 962        }
 963        if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
 964                clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
 965                dev_err(&spi->dev, "TX underrun\n");
 966        }
 967
 968        /* Clear the pending irq by setting and then clearing it */
 969        writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
 970        writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
 971
 972        return IRQ_HANDLED;
 973}
 974
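/*
 * Put the controller into a known default state: interrupts masked, clock
 * source selected, pending bits cleared, trailing count programmed and the
 * FIFOs flushed.
 */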
 975static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
 976{
 977        struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
 978        void __iomem *regs = sdd->regs;
 979        unsigned int val;
 980
 981        sdd->cur_speed = 0;
 982
 983        if (sci->no_cs)
 984                writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
 985        else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
 986                writel(S3C64XX_SPI_CS_SIG_INACT, sdd->regs + S3C64XX_SPI_CS_REG);
 987
 988        /* Disable Interrupts - we use Polling if not DMA mode */
 989        writel(0, regs + S3C64XX_SPI_INT_EN);
 990
 991        if (!sdd->port_conf->clk_from_cmu)
 992                writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
 993                                regs + S3C64XX_SPI_CLK_CFG);
 994        writel(0, regs + S3C64XX_SPI_MODE_CFG);
 995        writel(0, regs + S3C64XX_SPI_PACKET_CNT);
 996
  997        /* Clear any pending irq bits by setting and then clearing them */
 998        val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
 999                S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
1000                S3C64XX_SPI_PND_TX_OVERRUN_CLR |
1001                S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
1002        writel(val, regs + S3C64XX_SPI_PENDING_CLR);
1003        writel(0, regs + S3C64XX_SPI_PENDING_CLR);
1004
1005        writel(0, regs + S3C64XX_SPI_SWAP_CFG);
1006
1007        val = readl(regs + S3C64XX_SPI_MODE_CFG);
1008        val &= ~S3C64XX_SPI_MODE_4BURST;
1009        val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
1010        val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
1011        writel(val, regs + S3C64XX_SPI_MODE_CFG);
1012
1013        s3c64xx_flush_fifo(sdd);
1014}
1015
1016#ifdef CONFIG_OF
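/*
 * Build the controller info from the device tree: clock parent index, number
 * of chip selects and the no-cs-readback flag.
 */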
1017static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
1018{
1019        struct s3c64xx_spi_info *sci;
1020        u32 temp;
1021
1022        sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
1023        if (!sci)
1024                return ERR_PTR(-ENOMEM);
1025
1026        if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
1027                dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
1028                sci->src_clk_nr = 0;
1029        } else {
1030                sci->src_clk_nr = temp;
1031        }
1032
1033        if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
1034                dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
1035                sci->num_cs = 1;
1036        } else {
1037                sci->num_cs = temp;
1038        }
1039
1040        sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
1041
1042        return sci;
1043}
1044#else
1045static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
1046{
1047        return dev_get_platdata(dev);
1048}
1049#endif
1050
1051static const struct of_device_id s3c64xx_spi_dt_match[];
1052
1053static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
1054                                                struct platform_device *pdev)
1055{
1056#ifdef CONFIG_OF
1057        if (pdev->dev.of_node) {
1058                const struct of_device_id *match;
1059                match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
1060                return (struct s3c64xx_spi_port_config *)match->data;
1061        }
1062#endif
1063        return (struct s3c64xx_spi_port_config *)
1064                         platform_get_device_id(pdev)->driver_data;
1065}
1066
1067static int s3c64xx_spi_probe(struct platform_device *pdev)
1068{
1069        struct resource *mem_res;
1070        struct s3c64xx_spi_driver_data *sdd;
1071        struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
1072        struct spi_master *master;
1073        int ret, irq;
1074        char clk_name[16];
1075
1076        if (!sci && pdev->dev.of_node) {
1077                sci = s3c64xx_spi_parse_dt(&pdev->dev);
1078                if (IS_ERR(sci))
1079                        return PTR_ERR(sci);
1080        }
1081
1082        if (!sci) {
1083                dev_err(&pdev->dev, "platform_data missing!\n");
1084                return -ENODEV;
1085        }
1086
1087        mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1088        if (mem_res == NULL) {
1089                dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
1090                return -ENXIO;
1091        }
1092
1093        irq = platform_get_irq(pdev, 0);
1094        if (irq < 0) {
1095                dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
1096                return irq;
1097        }
1098
1099        master = spi_alloc_master(&pdev->dev,
1100                                sizeof(struct s3c64xx_spi_driver_data));
1101        if (master == NULL) {
1102                dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
1103                return -ENOMEM;
1104        }
1105
1106        platform_set_drvdata(pdev, master);
1107
1108        sdd = spi_master_get_devdata(master);
1109        sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
1110        sdd->master = master;
1111        sdd->cntrlr_info = sci;
1112        sdd->pdev = pdev;
1113        sdd->sfr_start = mem_res->start;
1114        if (pdev->dev.of_node) {
1115                ret = of_alias_get_id(pdev->dev.of_node, "spi");
1116                if (ret < 0) {
1117                        dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
1118                                ret);
1119                        goto err_deref_master;
1120                }
1121                sdd->port_id = ret;
1122        } else {
1123                sdd->port_id = pdev->id;
1124        }
1125
1126        sdd->cur_bpw = 8;
1127
1128        sdd->tx_dma.direction = DMA_MEM_TO_DEV;
1129        sdd->rx_dma.direction = DMA_DEV_TO_MEM;
1130
1131        master->dev.of_node = pdev->dev.of_node;
1132        master->bus_num = sdd->port_id;
1133        master->setup = s3c64xx_spi_setup;
1134        master->cleanup = s3c64xx_spi_cleanup;
1135        master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
1136        master->prepare_message = s3c64xx_spi_prepare_message;
1137        master->transfer_one = s3c64xx_spi_transfer_one;
1138        master->num_chipselect = sci->num_cs;
1139        master->dma_alignment = 8;
1140        master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
1141                                        SPI_BPW_MASK(8);
1142        /* the spi->mode bits understood by this driver: */
1143        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1144        master->auto_runtime_pm = true;
1145        if (!is_polling(sdd))
1146                master->can_dma = s3c64xx_spi_can_dma;
1147
1148        sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
1149        if (IS_ERR(sdd->regs)) {
1150                ret = PTR_ERR(sdd->regs);
1151                goto err_deref_master;
1152        }
1153
1154        if (sci->cfg_gpio && sci->cfg_gpio()) {
1155                dev_err(&pdev->dev, "Unable to config gpio\n");
1156                ret = -EBUSY;
1157                goto err_deref_master;
1158        }
1159
1160        /* Setup clocks */
1161        sdd->clk = devm_clk_get(&pdev->dev, "spi");
1162        if (IS_ERR(sdd->clk)) {
1163                dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
1164                ret = PTR_ERR(sdd->clk);
1165                goto err_deref_master;
1166        }
1167
1168        ret = clk_prepare_enable(sdd->clk);
1169        if (ret) {
1170                dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1171                goto err_deref_master;
1172        }
1173
1174        sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
1175        sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
1176        if (IS_ERR(sdd->src_clk)) {
1177                dev_err(&pdev->dev,
1178                        "Unable to acquire clock '%s'\n", clk_name);
1179                ret = PTR_ERR(sdd->src_clk);
1180                goto err_disable_clk;
1181        }
1182
1183        ret = clk_prepare_enable(sdd->src_clk);
1184        if (ret) {
1185                dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
1186                goto err_disable_clk;
1187        }
1188
1189        if (sdd->port_conf->clk_ioclk) {
1190                sdd->ioclk = devm_clk_get(&pdev->dev, "spi_ioclk");
1191                if (IS_ERR(sdd->ioclk)) {
1192                        dev_err(&pdev->dev, "Unable to acquire 'ioclk'\n");
1193                        ret = PTR_ERR(sdd->ioclk);
1194                        goto err_disable_src_clk;
1195                }
1196
1197                ret = clk_prepare_enable(sdd->ioclk);
1198                if (ret) {
1199                        dev_err(&pdev->dev, "Couldn't enable clock 'ioclk'\n");
1200                        goto err_disable_src_clk;
1201                }
1202        }
1203
1204        if (!is_polling(sdd)) {
1205                /* Acquire DMA channels */
1206                sdd->rx_dma.ch = dma_request_chan(&pdev->dev, "rx");
1207                if (IS_ERR(sdd->rx_dma.ch)) {
1208                        dev_err(&pdev->dev, "Failed to get RX DMA channel\n");
1209                        ret = PTR_ERR(sdd->rx_dma.ch);
1210                        goto err_disable_io_clk;
1211                }
1212                sdd->tx_dma.ch = dma_request_chan(&pdev->dev, "tx");
1213                if (IS_ERR(sdd->tx_dma.ch)) {
1214                        dev_err(&pdev->dev, "Failed to get TX DMA channel\n");
1215                        ret = PTR_ERR(sdd->tx_dma.ch);
1216                        goto err_release_rx_dma;
1217                }
1218        }
1219
1220        pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
1221        pm_runtime_use_autosuspend(&pdev->dev);
1222        pm_runtime_set_active(&pdev->dev);
1223        pm_runtime_enable(&pdev->dev);
1224        pm_runtime_get_sync(&pdev->dev);
1225
 1226        /* Set up default mode */
1227        s3c64xx_spi_hwinit(sdd);
1228
1229        spin_lock_init(&sdd->lock);
1230        init_completion(&sdd->xfer_completion);
1231
1232        ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
1233                                "spi-s3c64xx", sdd);
1234        if (ret != 0) {
1235                dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
1236                        irq, ret);
1237                goto err_pm_put;
1238        }
1239
1240        writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1241               S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1242               sdd->regs + S3C64XX_SPI_INT_EN);
1243
1244        ret = devm_spi_register_master(&pdev->dev, master);
1245        if (ret != 0) {
1246                dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
1247                goto err_pm_put;
1248        }
1249
1250        dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
1251                                        sdd->port_id, master->num_chipselect);
1252        dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
1253                                        mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
1254
1255        pm_runtime_mark_last_busy(&pdev->dev);
1256        pm_runtime_put_autosuspend(&pdev->dev);
1257
1258        return 0;
1259
1260err_pm_put:
1261        pm_runtime_put_noidle(&pdev->dev);
1262        pm_runtime_disable(&pdev->dev);
1263        pm_runtime_set_suspended(&pdev->dev);
1264
1265        if (!is_polling(sdd))
1266                dma_release_channel(sdd->tx_dma.ch);
1267err_release_rx_dma:
1268        if (!is_polling(sdd))
1269                dma_release_channel(sdd->rx_dma.ch);
1270err_disable_io_clk:
1271        clk_disable_unprepare(sdd->ioclk);
1272err_disable_src_clk:
1273        clk_disable_unprepare(sdd->src_clk);
1274err_disable_clk:
1275        clk_disable_unprepare(sdd->clk);
1276err_deref_master:
1277        spi_master_put(master);
1278
1279        return ret;
1280}
1281
1282static int s3c64xx_spi_remove(struct platform_device *pdev)
1283{
1284        struct spi_master *master = platform_get_drvdata(pdev);
1285        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1286
1287        pm_runtime_get_sync(&pdev->dev);
1288
1289        writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
1290
1291        if (!is_polling(sdd)) {
1292                dma_release_channel(sdd->rx_dma.ch);
1293                dma_release_channel(sdd->tx_dma.ch);
1294        }
1295
1296        clk_disable_unprepare(sdd->ioclk);
1297
1298        clk_disable_unprepare(sdd->src_clk);
1299
1300        clk_disable_unprepare(sdd->clk);
1301
1302        pm_runtime_put_noidle(&pdev->dev);
1303        pm_runtime_disable(&pdev->dev);
1304        pm_runtime_set_suspended(&pdev->dev);
1305
1306        return 0;
1307}
1308
1309#ifdef CONFIG_PM_SLEEP
1310static int s3c64xx_spi_suspend(struct device *dev)
1311{
1312        struct spi_master *master = dev_get_drvdata(dev);
1313        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1314
1315        int ret = spi_master_suspend(master);
1316        if (ret)
1317                return ret;
1318
1319        ret = pm_runtime_force_suspend(dev);
1320        if (ret < 0)
1321                return ret;
1322
1323        sdd->cur_speed = 0; /* Output Clock is stopped */
1324
1325        return 0;
1326}
1327
1328static int s3c64xx_spi_resume(struct device *dev)
1329{
1330        struct spi_master *master = dev_get_drvdata(dev);
1331        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1332        struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1333        int ret;
1334
1335        if (sci->cfg_gpio)
1336                sci->cfg_gpio();
1337
1338        ret = pm_runtime_force_resume(dev);
1339        if (ret < 0)
1340                return ret;
1341
1342        return spi_master_resume(master);
1343}
1344#endif /* CONFIG_PM_SLEEP */
1345
1346#ifdef CONFIG_PM
1347static int s3c64xx_spi_runtime_suspend(struct device *dev)
1348{
1349        struct spi_master *master = dev_get_drvdata(dev);
1350        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1351
1352        clk_disable_unprepare(sdd->clk);
1353        clk_disable_unprepare(sdd->src_clk);
1354        clk_disable_unprepare(sdd->ioclk);
1355
1356        return 0;
1357}
1358
1359static int s3c64xx_spi_runtime_resume(struct device *dev)
1360{
1361        struct spi_master *master = dev_get_drvdata(dev);
1362        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1363        int ret;
1364
1365        if (sdd->port_conf->clk_ioclk) {
1366                ret = clk_prepare_enable(sdd->ioclk);
1367                if (ret != 0)
1368                        return ret;
1369        }
1370
1371        ret = clk_prepare_enable(sdd->src_clk);
1372        if (ret != 0)
1373                goto err_disable_ioclk;
1374
1375        ret = clk_prepare_enable(sdd->clk);
1376        if (ret != 0)
1377                goto err_disable_src_clk;
1378
1379        s3c64xx_spi_hwinit(sdd);
1380
1381        writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1382               S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1383               sdd->regs + S3C64XX_SPI_INT_EN);
1384
1385        return 0;
1386
1387err_disable_src_clk:
1388        clk_disable_unprepare(sdd->src_clk);
1389err_disable_ioclk:
1390        clk_disable_unprepare(sdd->ioclk);
1391
1392        return ret;
1393}
1394#endif /* CONFIG_PM */
1395
1396static const struct dev_pm_ops s3c64xx_spi_pm = {
1397        SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
1398        SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
1399                           s3c64xx_spi_runtime_resume, NULL)
1400};
1401
1402static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
1403        .fifo_lvl_mask  = { 0x7f },
1404        .rx_lvl_offset  = 13,
1405        .tx_st_done     = 21,
1406        .high_speed     = true,
1407};
1408
1409static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
1410        .fifo_lvl_mask  = { 0x7f, 0x7F },
1411        .rx_lvl_offset  = 13,
1412        .tx_st_done     = 21,
1413};
1414
1415static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
1416        .fifo_lvl_mask  = { 0x1ff, 0x7F },
1417        .rx_lvl_offset  = 15,
1418        .tx_st_done     = 25,
1419        .high_speed     = true,
1420};
1421
1422static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
1423        .fifo_lvl_mask  = { 0x1ff, 0x7F, 0x7F },
1424        .rx_lvl_offset  = 15,
1425        .tx_st_done     = 25,
1426        .high_speed     = true,
1427        .clk_from_cmu   = true,
1428        .quirks         = S3C64XX_SPI_QUIRK_CS_AUTO,
1429};
1430
1431static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
1432        .fifo_lvl_mask  = { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
1433        .rx_lvl_offset  = 15,
1434        .tx_st_done     = 25,
1435        .high_speed     = true,
1436        .clk_from_cmu   = true,
1437        .quirks         = S3C64XX_SPI_QUIRK_CS_AUTO,
1438};
1439
1440static struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
1441        .fifo_lvl_mask  = { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
1442        .rx_lvl_offset  = 15,
1443        .tx_st_done     = 25,
1444        .high_speed     = true,
1445        .clk_from_cmu   = true,
1446        .clk_ioclk      = true,
1447        .quirks         = S3C64XX_SPI_QUIRK_CS_AUTO,
1448};
1449
1450static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
1451        {
1452                .name           = "s3c2443-spi",
1453                .driver_data    = (kernel_ulong_t)&s3c2443_spi_port_config,
1454        }, {
1455                .name           = "s3c6410-spi",
1456                .driver_data    = (kernel_ulong_t)&s3c6410_spi_port_config,
1457        },
1458        { },
1459};
1460
1461static const struct of_device_id s3c64xx_spi_dt_match[] = {
1462        { .compatible = "samsung,s3c2443-spi",
1463                        .data = (void *)&s3c2443_spi_port_config,
1464        },
1465        { .compatible = "samsung,s3c6410-spi",
1466                        .data = (void *)&s3c6410_spi_port_config,
1467        },
1468        { .compatible = "samsung,s5pv210-spi",
1469                        .data = (void *)&s5pv210_spi_port_config,
1470        },
1471        { .compatible = "samsung,exynos4210-spi",
1472                        .data = (void *)&exynos4_spi_port_config,
1473        },
1474        { .compatible = "samsung,exynos7-spi",
1475                        .data = (void *)&exynos7_spi_port_config,
1476        },
1477        { .compatible = "samsung,exynos5433-spi",
1478                        .data = (void *)&exynos5433_spi_port_config,
1479        },
1480        { },
1481};
1482MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
1483
1484static struct platform_driver s3c64xx_spi_driver = {
1485        .driver = {
1486                .name   = "s3c64xx-spi",
1487                .pm = &s3c64xx_spi_pm,
1488                .of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
1489        },
1490        .probe = s3c64xx_spi_probe,
1491        .remove = s3c64xx_spi_remove,
1492        .id_table = s3c64xx_spi_driver_ids,
1493};
1494MODULE_ALIAS("platform:s3c64xx-spi");
1495
1496module_platform_driver(s3c64xx_spi_driver);
1497
1498MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1499MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
1500MODULE_LICENSE("GPL");
1501