linux/drivers/spi/spi-s3c64xx.c
   1/*
   2 * Copyright (C) 2009 Samsung Electronics Ltd.
   3 *      Jaswinder Singh <jassi.brar@samsung.com>
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation; either version 2 of the License, or
   8 * (at your option) any later version.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  18 */
  19
  20#include <linux/init.h>
  21#include <linux/module.h>
  22#include <linux/workqueue.h>
  23#include <linux/interrupt.h>
  24#include <linux/delay.h>
  25#include <linux/clk.h>
  26#include <linux/dma-mapping.h>
  27#include <linux/dmaengine.h>
  28#include <linux/platform_device.h>
  29#include <linux/pm_runtime.h>
  30#include <linux/spi/spi.h>
  31#include <linux/gpio.h>
  32#include <linux/of.h>
  33#include <linux/of_gpio.h>
  34
  35#include <linux/platform_data/spi-s3c64xx.h>
  36
  37#ifdef CONFIG_S3C_DMA
  38#include <mach/dma.h>
  39#endif
  40
  41#define MAX_SPI_PORTS           3
  42
  43/* Registers and bit-fields */
  44
  45#define S3C64XX_SPI_CH_CFG              0x00
  46#define S3C64XX_SPI_CLK_CFG             0x04
  47#define S3C64XX_SPI_MODE_CFG    0x08
  48#define S3C64XX_SPI_SLAVE_SEL   0x0C
  49#define S3C64XX_SPI_INT_EN              0x10
  50#define S3C64XX_SPI_STATUS              0x14
  51#define S3C64XX_SPI_TX_DATA             0x18
  52#define S3C64XX_SPI_RX_DATA             0x1C
  53#define S3C64XX_SPI_PACKET_CNT  0x20
  54#define S3C64XX_SPI_PENDING_CLR 0x24
  55#define S3C64XX_SPI_SWAP_CFG    0x28
  56#define S3C64XX_SPI_FB_CLK              0x2C
  57
  58#define S3C64XX_SPI_CH_HS_EN            (1<<6)  /* High Speed Enable */
  59#define S3C64XX_SPI_CH_SW_RST           (1<<5)
  60#define S3C64XX_SPI_CH_SLAVE            (1<<4)
  61#define S3C64XX_SPI_CPOL_L              (1<<3)
  62#define S3C64XX_SPI_CPHA_B              (1<<2)
  63#define S3C64XX_SPI_CH_RXCH_ON          (1<<1)
  64#define S3C64XX_SPI_CH_TXCH_ON          (1<<0)
  65
  66#define S3C64XX_SPI_CLKSEL_SRCMSK       (3<<9)
  67#define S3C64XX_SPI_CLKSEL_SRCSHFT      9
  68#define S3C64XX_SPI_ENCLK_ENABLE        (1<<8)
  69#define S3C64XX_SPI_PSR_MASK            0xff
  70
  71#define S3C64XX_SPI_MODE_CH_TSZ_BYTE            (0<<29)
  72#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD        (1<<29)
  73#define S3C64XX_SPI_MODE_CH_TSZ_WORD            (2<<29)
  74#define S3C64XX_SPI_MODE_CH_TSZ_MASK            (3<<29)
  75#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE           (0<<17)
  76#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD       (1<<17)
  77#define S3C64XX_SPI_MODE_BUS_TSZ_WORD           (2<<17)
  78#define S3C64XX_SPI_MODE_BUS_TSZ_MASK           (3<<17)
  79#define S3C64XX_SPI_MODE_RXDMA_ON               (1<<2)
  80#define S3C64XX_SPI_MODE_TXDMA_ON               (1<<1)
  81#define S3C64XX_SPI_MODE_4BURST                 (1<<0)
  82
  83#define S3C64XX_SPI_SLAVE_AUTO                  (1<<1)
  84#define S3C64XX_SPI_SLAVE_SIG_INACT             (1<<0)
  85
  86#define S3C64XX_SPI_INT_TRAILING_EN             (1<<6)
  87#define S3C64XX_SPI_INT_RX_OVERRUN_EN           (1<<5)
  88#define S3C64XX_SPI_INT_RX_UNDERRUN_EN          (1<<4)
  89#define S3C64XX_SPI_INT_TX_OVERRUN_EN           (1<<3)
  90#define S3C64XX_SPI_INT_TX_UNDERRUN_EN          (1<<2)
  91#define S3C64XX_SPI_INT_RX_FIFORDY_EN           (1<<1)
  92#define S3C64XX_SPI_INT_TX_FIFORDY_EN           (1<<0)
  93
  94#define S3C64XX_SPI_ST_RX_OVERRUN_ERR           (1<<5)
  95#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR  (1<<4)
  96#define S3C64XX_SPI_ST_TX_OVERRUN_ERR           (1<<3)
  97#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR  (1<<2)
  98#define S3C64XX_SPI_ST_RX_FIFORDY               (1<<1)
  99#define S3C64XX_SPI_ST_TX_FIFORDY               (1<<0)
 100
 101#define S3C64XX_SPI_PACKET_CNT_EN               (1<<16)
 102
 103#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR         (1<<4)
 104#define S3C64XX_SPI_PND_TX_OVERRUN_CLR          (1<<3)
 105#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR         (1<<2)
 106#define S3C64XX_SPI_PND_RX_OVERRUN_CLR          (1<<1)
 107#define S3C64XX_SPI_PND_TRAILING_CLR            (1<<0)
 108
 109#define S3C64XX_SPI_SWAP_RX_HALF_WORD           (1<<7)
 110#define S3C64XX_SPI_SWAP_RX_BYTE                (1<<6)
 111#define S3C64XX_SPI_SWAP_RX_BIT                 (1<<5)
 112#define S3C64XX_SPI_SWAP_RX_EN                  (1<<4)
 113#define S3C64XX_SPI_SWAP_TX_HALF_WORD           (1<<3)
 114#define S3C64XX_SPI_SWAP_TX_BYTE                (1<<2)
 115#define S3C64XX_SPI_SWAP_TX_BIT                 (1<<1)
 116#define S3C64XX_SPI_SWAP_TX_EN                  (1<<0)
 117
 118#define S3C64XX_SPI_FBCLK_MSK           (3<<0)
 119
  120#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[(i)->port_id])
 121#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
 122                                (1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
 123#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
 124#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
 125                                        FIFO_LVL_MASK(i))
 126
 127#define S3C64XX_SPI_MAX_TRAILCNT        0x3ff
 128#define S3C64XX_SPI_TRAILCNT_OFF        19
 129
 130#define S3C64XX_SPI_TRAILCNT            S3C64XX_SPI_MAX_TRAILCNT
 131
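/*
 * Rough conversion of a millisecond timeout into a busy-wait loop count:
 * loops_per_jiffy / 1000 * HZ is approximately the number of loop
 * iterations per millisecond, scaled by 't'. Used by the polled FIFO
 * flush and transfer-wait paths below.
 */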
  132#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * (t))
 133
 134#define RXBUSY    (1<<2)
 135#define TXBUSY    (1<<3)
 136
 137struct s3c64xx_spi_dma_data {
 138        struct dma_chan *ch;
 139        enum dma_transfer_direction direction;
 140        unsigned int dmach;
 141};
 142
 143/**
 144 * struct s3c64xx_spi_info - SPI Controller hardware info
 145 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
  146 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
  147 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 148 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 149 * @clk_from_cmu: True, if the controller does not include a clock mux and
 150 *      prescaler unit.
 151 *
  152 * The Samsung s3c64xx SPI controllers used on various Samsung SoCs differ
  153 * in some aspects, such as the size of the FIFO and the SPI bus clock
  154 * setup. Such differences are specified to the driver using this structure,
  155 * which is provided as driver data to the driver.
 156 */
 157struct s3c64xx_spi_port_config {
 158        int     fifo_lvl_mask[MAX_SPI_PORTS];
 159        int     rx_lvl_offset;
 160        int     tx_st_done;
 161        bool    high_speed;
 162        bool    clk_from_cmu;
 163};
 164
 165/**
 166 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 167 * @clk: Pointer to the spi clock.
 168 * @src_clk: Pointer to the clock used to generate SPI signals.
 169 * @master: Pointer to the SPI Protocol master.
 170 * @cntrlr_info: Platform specific data for the controller this driver manages.
 171 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 172 * @queue: To log SPI xfer requests.
 173 * @lock: Controller specific lock.
 174 * @state: Set of FLAGS to indicate status.
  175 * @rx_dma: DMA channel info for Rx.
  176 * @tx_dma: DMA channel info for Tx.
 177 * @sfr_start: BUS address of SPI controller regs.
 178 * @regs: Pointer to ioremap'ed controller registers.
 179 * @irq: interrupt
 180 * @xfer_completion: To indicate completion of xfer task.
 181 * @cur_mode: Stores the active configuration of the controller.
 182 * @cur_bpw: Stores the active bits per word settings.
 183 * @cur_speed: Stores the active xfer clock speed.
 184 */
 185struct s3c64xx_spi_driver_data {
 186        void __iomem                    *regs;
 187        struct clk                      *clk;
 188        struct clk                      *src_clk;
 189        struct platform_device          *pdev;
 190        struct spi_master               *master;
 191        struct s3c64xx_spi_info  *cntrlr_info;
 192        struct spi_device               *tgl_spi;
 193        struct list_head                queue;
 194        spinlock_t                      lock;
 195        unsigned long                   sfr_start;
 196        struct completion               xfer_completion;
 197        unsigned                        state;
 198        unsigned                        cur_mode, cur_bpw;
 199        unsigned                        cur_speed;
 200        struct s3c64xx_spi_dma_data     rx_dma;
 201        struct s3c64xx_spi_dma_data     tx_dma;
 202#ifdef CONFIG_S3C_DMA
 203        struct samsung_dma_ops          *ops;
 204#endif
 205        struct s3c64xx_spi_port_config  *port_conf;
 206        unsigned int                    port_id;
 207        unsigned long                   gpios[4];
 208};
 209
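/*
 * flush_fifo - drain both FIFOs and soft-reset the SPI channel.
 *
 * Disables the Rx/Tx channels, asserts the software reset bit, busy-waits
 * (about 1ms each) for the Tx FIFO to empty and for any stale Rx data to
 * be read out, then de-asserts the reset and clears the DMA mode bits.
 */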
 210static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
 211{
 212        void __iomem *regs = sdd->regs;
 213        unsigned long loops;
 214        u32 val;
 215
 216        writel(0, regs + S3C64XX_SPI_PACKET_CNT);
 217
 218        val = readl(regs + S3C64XX_SPI_CH_CFG);
 219        val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
 220        writel(val, regs + S3C64XX_SPI_CH_CFG);
 221
 222        val = readl(regs + S3C64XX_SPI_CH_CFG);
 223        val |= S3C64XX_SPI_CH_SW_RST;
 224        val &= ~S3C64XX_SPI_CH_HS_EN;
 225        writel(val, regs + S3C64XX_SPI_CH_CFG);
 226
  227        /* Flush TxFIFO */
 228        loops = msecs_to_loops(1);
 229        do {
 230                val = readl(regs + S3C64XX_SPI_STATUS);
 231        } while (TX_FIFO_LVL(val, sdd) && loops--);
 232
 233        if (loops == 0)
 234                dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
 235
  236        /* Flush RxFIFO */
 237        loops = msecs_to_loops(1);
 238        do {
 239                val = readl(regs + S3C64XX_SPI_STATUS);
 240                if (RX_FIFO_LVL(val, sdd))
 241                        readl(regs + S3C64XX_SPI_RX_DATA);
 242                else
 243                        break;
 244        } while (loops--);
 245
 246        if (loops == 0)
 247                dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
 248
 249        val = readl(regs + S3C64XX_SPI_CH_CFG);
 250        val &= ~S3C64XX_SPI_CH_SW_RST;
 251        writel(val, regs + S3C64XX_SPI_CH_CFG);
 252
 253        val = readl(regs + S3C64XX_SPI_MODE_CFG);
 254        val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
 255        writel(val, regs + S3C64XX_SPI_MODE_CFG);
 256}
 257
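/*
 * DMA completion callback, shared by the Rx and Tx channels. Clears the
 * RXBUSY/TXBUSY flag for the channel that finished and completes
 * xfer_completion once both directions are done.
 */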
 258static void s3c64xx_spi_dmacb(void *data)
 259{
 260        struct s3c64xx_spi_driver_data *sdd;
 261        struct s3c64xx_spi_dma_data *dma = data;
 262        unsigned long flags;
 263
 264        if (dma->direction == DMA_DEV_TO_MEM)
 265                sdd = container_of(data,
 266                        struct s3c64xx_spi_driver_data, rx_dma);
 267        else
 268                sdd = container_of(data,
 269                        struct s3c64xx_spi_driver_data, tx_dma);
 270
 271        spin_lock_irqsave(&sdd->lock, flags);
 272
 273        if (dma->direction == DMA_DEV_TO_MEM) {
 274                sdd->state &= ~RXBUSY;
 275                if (!(sdd->state & TXBUSY))
 276                        complete(&sdd->xfer_completion);
 277        } else {
 278                sdd->state &= ~TXBUSY;
 279                if (!(sdd->state & RXBUSY))
 280                        complete(&sdd->xfer_completion);
 281        }
 282
 283        spin_unlock_irqrestore(&sdd->lock, flags);
 284}
 285
 286#ifdef CONFIG_S3C_DMA
 287/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */
 288
 289static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
 290        .name = "samsung-spi-dma",
 291};
 292
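/*
 * Legacy path using the samsung_dma_ops wrappers: configure the channel
 * for the SPI Tx/Rx data register, then prepare and trigger a single
 * slave transfer of 'len' bytes at 'buf'.
 */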
 293static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 294                                        unsigned len, dma_addr_t buf)
 295{
 296        struct s3c64xx_spi_driver_data *sdd;
 297        struct samsung_dma_prep info;
 298        struct samsung_dma_config config;
 299
 300        if (dma->direction == DMA_DEV_TO_MEM) {
 301                sdd = container_of((void *)dma,
 302                        struct s3c64xx_spi_driver_data, rx_dma);
 303                config.direction = sdd->rx_dma.direction;
 304                config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
 305                config.width = sdd->cur_bpw / 8;
 306                sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
 307        } else {
 308                sdd = container_of((void *)dma,
 309                        struct s3c64xx_spi_driver_data, tx_dma);
 310                config.direction =  sdd->tx_dma.direction;
 311                config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
 312                config.width = sdd->cur_bpw / 8;
 313                sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
 314        }
 315
 316        info.cap = DMA_SLAVE;
 317        info.len = len;
 318        info.fp = s3c64xx_spi_dmacb;
 319        info.fp_param = dma;
 320        info.direction = dma->direction;
 321        info.buf = buf;
 322
 323        sdd->ops->prepare((enum dma_ch)dma->ch, &info);
 324        sdd->ops->trigger((enum dma_ch)dma->ch);
 325}
 326
 327static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
 328{
 329        struct samsung_dma_req req;
 330        struct device *dev = &sdd->pdev->dev;
 331
 332        sdd->ops = samsung_dma_get_ops();
 333
 334        req.cap = DMA_SLAVE;
 335        req.client = &s3c64xx_spi_dma_client;
 336
 337        sdd->rx_dma.ch = (void *)sdd->ops->request(sdd->rx_dma.dmach, &req, dev, "rx");
 338        sdd->tx_dma.ch = (void *)sdd->ops->request(sdd->tx_dma.dmach, &req, dev, "tx");
 339
 340        return 1;
 341}
 342
 343static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 344{
 345        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
 346
 347        /* Acquire DMA channels */
 348        while (!acquire_dma(sdd))
 349                usleep_range(10000, 11000);
 350
 351        pm_runtime_get_sync(&sdd->pdev->dev);
 352
 353        return 0;
 354}
 355
 356static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
 357{
 358        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
 359
 360        /* Free DMA channels */
 361        sdd->ops->release((enum dma_ch)sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
 362        sdd->ops->release((enum dma_ch)sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
 363
 364        pm_runtime_put(&sdd->pdev->dev);
 365
 366        return 0;
 367}
 368
 369static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
 370                                 struct s3c64xx_spi_dma_data *dma)
 371{
 372        sdd->ops->stop((enum dma_ch)dma->ch);
 373}
 374#else
 375
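/*
 * dmaengine path: point the slave channel at the SPI Tx/Rx data register,
 * wrap the (already DMA-mapped) buffer in a one-entry scatterlist, and
 * submit a slave_sg descriptor with s3c64xx_spi_dmacb() as the completion
 * callback.
 */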
 376static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 377                                        unsigned len, dma_addr_t buf)
 378{
 379        struct s3c64xx_spi_driver_data *sdd;
 380        struct dma_slave_config config;
 381        struct scatterlist sg;
 382        struct dma_async_tx_descriptor *desc;
  383
                /* zero-init so dma_slave_config fields we don't set are not garbage */
                memset(&config, 0, sizeof(config));

 384        if (dma->direction == DMA_DEV_TO_MEM) {
 385                sdd = container_of((void *)dma,
 386                        struct s3c64xx_spi_driver_data, rx_dma);
 387                config.direction = dma->direction;
 388                config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
 389                config.src_addr_width = sdd->cur_bpw / 8;
 390                config.src_maxburst = 1;
 391                dmaengine_slave_config(dma->ch, &config);
 392        } else {
 393                sdd = container_of((void *)dma,
 394                        struct s3c64xx_spi_driver_data, tx_dma);
 395                config.direction = dma->direction;
 396                config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
 397                config.dst_addr_width = sdd->cur_bpw / 8;
 398                config.dst_maxburst = 1;
 399                dmaengine_slave_config(dma->ch, &config);
 400        }
 401
 402        sg_init_table(&sg, 1);
 403        sg_dma_len(&sg) = len;
 404        sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)),
 405                    len, offset_in_page(buf));
 406        sg_dma_address(&sg) = buf;
 407
 408        desc = dmaengine_prep_slave_sg(dma->ch,
 409                &sg, 1, dma->direction, DMA_PREP_INTERRUPT);
 410
 411        desc->callback = s3c64xx_spi_dmacb;
 412        desc->callback_param = dma;
 413
 414        dmaengine_submit(desc);
 415        dma_async_issue_pending(dma->ch);
 416}
 417
 418static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 419{
 420        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
 421        dma_filter_fn filter = sdd->cntrlr_info->filter;
 422        struct device *dev = &sdd->pdev->dev;
 423        dma_cap_mask_t mask;
 424        int ret;
 425
 426        dma_cap_zero(mask);
 427        dma_cap_set(DMA_SLAVE, mask);
 428
 429        /* Acquire DMA channels */
 430        sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
 431                                (void*)sdd->rx_dma.dmach, dev, "rx");
 432        if (!sdd->rx_dma.ch) {
 433                dev_err(dev, "Failed to get RX DMA channel\n");
 434                ret = -EBUSY;
 435                goto out;
 436        }
 437
 438        sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
 439                                (void*)sdd->tx_dma.dmach, dev, "tx");
 440        if (!sdd->tx_dma.ch) {
 441                dev_err(dev, "Failed to get TX DMA channel\n");
 442                ret = -EBUSY;
 443                goto out_rx;
 444        }
 445
 446        ret = pm_runtime_get_sync(&sdd->pdev->dev);
 447        if (ret < 0) {
 448                dev_err(dev, "Failed to enable device: %d\n", ret);
 449                goto out_tx;
 450        }
 451
 452        return 0;
 453
 454out_tx:
 455        dma_release_channel(sdd->tx_dma.ch);
 456out_rx:
 457        dma_release_channel(sdd->rx_dma.ch);
 458out:
 459        return ret;
 460}
 461
 462static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
 463{
 464        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
 465
 466        /* Free DMA channels */
 467        dma_release_channel(sdd->rx_dma.ch);
 468        dma_release_channel(sdd->tx_dma.ch);
 469
 470        pm_runtime_put(&sdd->pdev->dev);
 471        return 0;
 472}
 473
 474static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
 475                                 struct s3c64xx_spi_dma_data *dma)
 476{
 477        dmaengine_terminate_all(dma->ch);
 478}
 479#endif
 480
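/*
 * Program the controller for one transfer: select DMA or PIO mode, set the
 * packet count so that exactly the required number of clocks is generated,
 * start the Tx/Rx DMA (or fill the Tx FIFO for PIO) and enable the channels
 * in CH_CFG.
 */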
 481static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 482                                struct spi_device *spi,
 483                                struct spi_transfer *xfer, int dma_mode)
 484{
 485        void __iomem *regs = sdd->regs;
 486        u32 modecfg, chcfg;
 487
 488        modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
 489        modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
 490
 491        chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
 492        chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
 493
 494        if (dma_mode) {
 495                chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
 496        } else {
  497                /* Always shift data into the Rx FIFO, even if the xfer
  498                 * is Tx only; this lets PCKT_CNT be used to generate
  499                 * exactly the number of clocks needed.
  500                 */
 501                chcfg |= S3C64XX_SPI_CH_RXCH_ON;
 502                writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 503                                        | S3C64XX_SPI_PACKET_CNT_EN,
 504                                        regs + S3C64XX_SPI_PACKET_CNT);
 505        }
 506
 507        if (xfer->tx_buf != NULL) {
 508                sdd->state |= TXBUSY;
 509                chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 510                if (dma_mode) {
 511                        modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
 512                        prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
 513                } else {
 514                        switch (sdd->cur_bpw) {
 515                        case 32:
 516                                iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
 517                                        xfer->tx_buf, xfer->len / 4);
 518                                break;
 519                        case 16:
 520                                iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
 521                                        xfer->tx_buf, xfer->len / 2);
 522                                break;
 523                        default:
 524                                iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
 525                                        xfer->tx_buf, xfer->len);
 526                                break;
 527                        }
 528                }
 529        }
 530
 531        if (xfer->rx_buf != NULL) {
 532                sdd->state |= RXBUSY;
 533
 534                if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
 535                                        && !(sdd->cur_mode & SPI_CPHA))
 536                        chcfg |= S3C64XX_SPI_CH_HS_EN;
 537
 538                if (dma_mode) {
 539                        modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
 540                        chcfg |= S3C64XX_SPI_CH_RXCH_ON;
 541                        writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 542                                        | S3C64XX_SPI_PACKET_CNT_EN,
 543                                        regs + S3C64XX_SPI_PACKET_CNT);
 544                        prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
 545                }
 546        }
 547
 548        writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
 549        writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
 550}
 551
 552static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
 553                                                struct spi_device *spi)
 554{
 555        struct s3c64xx_spi_csinfo *cs;
 556
 557        if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
 558                if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
 559                        /* Deselect the last toggled device */
 560                        cs = sdd->tgl_spi->controller_data;
 561                        gpio_set_value(cs->line,
 562                                spi->mode & SPI_CS_HIGH ? 0 : 1);
 563                }
 564                sdd->tgl_spi = NULL;
 565        }
 566
 567        cs = spi->controller_data;
 568        gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
 569}
 570
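/*
 * Wait for the current transfer to finish: in DMA mode block on
 * xfer_completion with a timeout scaled to the transfer length; in polled
 * mode busy-wait on the Rx FIFO level and then drain the received data
 * into xfer->rx_buf.
 */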
 571static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
 572                                struct spi_transfer *xfer, int dma_mode)
 573{
 574        void __iomem *regs = sdd->regs;
 575        unsigned long val;
 576        int ms;
 577
 578        /* millisecs to xfer 'len' bytes @ 'cur_speed' */
 579        ms = xfer->len * 8 * 1000 / sdd->cur_speed;
 580        ms += 10; /* some tolerance */
 581
 582        if (dma_mode) {
 583                val = msecs_to_jiffies(ms) + 10;
 584                val = wait_for_completion_timeout(&sdd->xfer_completion, val);
 585        } else {
 586                u32 status;
 587                val = msecs_to_loops(ms);
 588                do {
 589                        status = readl(regs + S3C64XX_SPI_STATUS);
 590                } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
 591        }
 592
 593        if (!val)
 594                return -EIO;
 595
 596        if (dma_mode) {
 597                u32 status;
 598
  599                /*
  600                 * DMA Tx completes as soon as the data is written into the
  601                 * FIFO, without waiting for the bus transmission to finish.
  602                 * DMA Rx completes only after the data is read back out of
  603                 * the FIFO, which requires the bus transmission to finish,
  604                 * so no extra wait is needed if the xfer involved Rx
  605                 * (with or without Tx).
  606                 */
 606                if (xfer->rx_buf == NULL) {
 607                        val = msecs_to_loops(10);
 608                        status = readl(regs + S3C64XX_SPI_STATUS);
 609                        while ((TX_FIFO_LVL(status, sdd)
 610                                || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
 611                                        && --val) {
 612                                cpu_relax();
 613                                status = readl(regs + S3C64XX_SPI_STATUS);
 614                        }
 615
 616                        if (!val)
 617                                return -EIO;
 618                }
 619        } else {
 620                /* If it was only Tx */
 621                if (xfer->rx_buf == NULL) {
 622                        sdd->state &= ~TXBUSY;
 623                        return 0;
 624                }
 625
 626                switch (sdd->cur_bpw) {
 627                case 32:
 628                        ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
 629                                xfer->rx_buf, xfer->len / 4);
 630                        break;
 631                case 16:
 632                        ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
 633                                xfer->rx_buf, xfer->len / 2);
 634                        break;
 635                default:
 636                        ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
 637                                xfer->rx_buf, xfer->len);
 638                        break;
 639                }
 640                sdd->state &= ~RXBUSY;
 641        }
 642
 643        return 0;
 644}
 645
 646static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
 647                                                struct spi_device *spi)
 648{
 649        struct s3c64xx_spi_csinfo *cs = spi->controller_data;
 650
 651        if (sdd->tgl_spi == spi)
 652                sdd->tgl_spi = NULL;
 653
 654        gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
 655}
 656
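/*
 * Apply the cached cur_mode/cur_bpw/cur_speed settings to the hardware:
 * clock polarity/phase, channel and bus transfer sizes, and either the
 * CMU-provided source clock rate or the internal prescaler.
 */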
 657static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 658{
 659        void __iomem *regs = sdd->regs;
 660        u32 val;
 661
 662        /* Disable Clock */
 663        if (sdd->port_conf->clk_from_cmu) {
 664                clk_disable_unprepare(sdd->src_clk);
 665        } else {
 666                val = readl(regs + S3C64XX_SPI_CLK_CFG);
 667                val &= ~S3C64XX_SPI_ENCLK_ENABLE;
 668                writel(val, regs + S3C64XX_SPI_CLK_CFG);
 669        }
 670
 671        /* Set Polarity and Phase */
 672        val = readl(regs + S3C64XX_SPI_CH_CFG);
 673        val &= ~(S3C64XX_SPI_CH_SLAVE |
 674                        S3C64XX_SPI_CPOL_L |
 675                        S3C64XX_SPI_CPHA_B);
 676
 677        if (sdd->cur_mode & SPI_CPOL)
 678                val |= S3C64XX_SPI_CPOL_L;
 679
 680        if (sdd->cur_mode & SPI_CPHA)
 681                val |= S3C64XX_SPI_CPHA_B;
 682
 683        writel(val, regs + S3C64XX_SPI_CH_CFG);
 684
 685        /* Set Channel & DMA Mode */
 686        val = readl(regs + S3C64XX_SPI_MODE_CFG);
 687        val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
 688                        | S3C64XX_SPI_MODE_CH_TSZ_MASK);
 689
 690        switch (sdd->cur_bpw) {
 691        case 32:
 692                val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
 693                val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
 694                break;
 695        case 16:
 696                val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
 697                val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
 698                break;
 699        default:
 700                val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
 701                val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
 702                break;
 703        }
 704
 705        writel(val, regs + S3C64XX_SPI_MODE_CFG);
 706
 707        if (sdd->port_conf->clk_from_cmu) {
 708                /* Configure Clock */
 709                /* There is half-multiplier before the SPI */
 710                clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
 711                /* Enable Clock */
 712                clk_prepare_enable(sdd->src_clk);
 713        } else {
 714                /* Configure Clock */
 715                val = readl(regs + S3C64XX_SPI_CLK_CFG);
 716                val &= ~S3C64XX_SPI_PSR_MASK;
 717                val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
 718                                & S3C64XX_SPI_PSR_MASK);
 719                writel(val, regs + S3C64XX_SPI_CLK_CFG);
 720
 721                /* Enable Clock */
 722                val = readl(regs + S3C64XX_SPI_CLK_CFG);
 723                val |= S3C64XX_SPI_ENCLK_ENABLE;
 724                writel(val, regs + S3C64XX_SPI_CLK_CFG);
 725        }
 726}
 727
 728#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
 729
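/*
 * DMA-map the buffers of every transfer that does not fit in the FIFO;
 * transfers that fit entirely in the FIFO are done by polled PIO and are
 * left unmapped.
 */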
 730static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
 731                                                struct spi_message *msg)
 732{
 733        struct device *dev = &sdd->pdev->dev;
 734        struct spi_transfer *xfer;
 735
 736        if (msg->is_dma_mapped)
 737                return 0;
 738
 739        /* First mark all xfer unmapped */
 740        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 741                xfer->rx_dma = XFER_DMAADDR_INVALID;
 742                xfer->tx_dma = XFER_DMAADDR_INVALID;
 743        }
 744
 745        /* Map until end or first fail */
 746        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 747
 748                if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
 749                        continue;
 750
 751                if (xfer->tx_buf != NULL) {
 752                        xfer->tx_dma = dma_map_single(dev,
 753                                        (void *)xfer->tx_buf, xfer->len,
 754                                        DMA_TO_DEVICE);
 755                        if (dma_mapping_error(dev, xfer->tx_dma)) {
 756                                dev_err(dev, "dma_map_single Tx failed\n");
 757                                xfer->tx_dma = XFER_DMAADDR_INVALID;
 758                                return -ENOMEM;
 759                        }
 760                }
 761
 762                if (xfer->rx_buf != NULL) {
 763                        xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
 764                                                xfer->len, DMA_FROM_DEVICE);
 765                        if (dma_mapping_error(dev, xfer->rx_dma)) {
 766                                dev_err(dev, "dma_map_single Rx failed\n");
 767                                dma_unmap_single(dev, xfer->tx_dma,
 768                                                xfer->len, DMA_TO_DEVICE);
 769                                xfer->tx_dma = XFER_DMAADDR_INVALID;
 770                                xfer->rx_dma = XFER_DMAADDR_INVALID;
 771                                return -ENOMEM;
 772                        }
 773                }
 774        }
 775
 776        return 0;
 777}
 778
 779static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
 780                                                struct spi_message *msg)
 781{
 782        struct device *dev = &sdd->pdev->dev;
 783        struct spi_transfer *xfer;
 784
 785        if (msg->is_dma_mapped)
 786                return;
 787
 788        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 789
 790                if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
 791                        continue;
 792
 793                if (xfer->rx_buf != NULL
 794                                && xfer->rx_dma != XFER_DMAADDR_INVALID)
 795                        dma_unmap_single(dev, xfer->rx_dma,
 796                                                xfer->len, DMA_FROM_DEVICE);
 797
 798                if (xfer->tx_buf != NULL
 799                                && xfer->tx_dma != XFER_DMAADDR_INVALID)
 800                        dma_unmap_single(dev, xfer->tx_dma,
 801                                                xfer->len, DMA_TO_DEVICE);
 802        }
 803}
 804
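/*
 * spi_master transfer_one_message() hook: reconfigures the controller when
 * speed/bpw change, maps buffers for DMA, then for each transfer sets up
 * the datapath, asserts chip select, waits for completion and handles
 * cs_change/delay_usecs before finalizing the message.
 */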
 805static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
 806                                            struct spi_message *msg)
 807{
 808        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
 809        struct spi_device *spi = msg->spi;
 810        struct s3c64xx_spi_csinfo *cs = spi->controller_data;
 811        struct spi_transfer *xfer;
 812        int status = 0, cs_toggle = 0;
 813        u32 speed;
 814        u8 bpw;
 815
  816        /* If the master (controller) state differs from what the slave needs */
 817        if (sdd->cur_speed != spi->max_speed_hz
 818                        || sdd->cur_mode != spi->mode
 819                        || sdd->cur_bpw != spi->bits_per_word) {
 820                sdd->cur_bpw = spi->bits_per_word;
 821                sdd->cur_speed = spi->max_speed_hz;
 822                sdd->cur_mode = spi->mode;
 823                s3c64xx_spi_config(sdd);
 824        }
 825
 826        /* Map all the transfers if needed */
 827        if (s3c64xx_spi_map_mssg(sdd, msg)) {
 828                dev_err(&spi->dev,
 829                        "Xfer: Unable to map message buffers!\n");
 830                status = -ENOMEM;
 831                goto out;
 832        }
 833
 834        /* Configure feedback delay */
 835        writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
 836
 837        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 838
 839                unsigned long flags;
 840                int use_dma;
 841
 842                INIT_COMPLETION(sdd->xfer_completion);
 843
 844                /* Only BPW and Speed may change across transfers */
 845                bpw = xfer->bits_per_word;
 846                speed = xfer->speed_hz ? : spi->max_speed_hz;
 847
 848                if (xfer->len % (bpw / 8)) {
 849                        dev_err(&spi->dev,
 850                                "Xfer length(%u) not a multiple of word size(%u)\n",
 851                                xfer->len, bpw / 8);
 852                        status = -EIO;
 853                        goto out;
 854                }
 855
 856                if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
 857                        sdd->cur_bpw = bpw;
 858                        sdd->cur_speed = speed;
 859                        s3c64xx_spi_config(sdd);
 860                }
 861
 862                /* Polling method for xfers not bigger than FIFO capacity */
 863                use_dma = 0;
 864                if (sdd->rx_dma.ch && sdd->tx_dma.ch &&
 865                    (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1)))
 866                        use_dma = 1;
 867
 868                spin_lock_irqsave(&sdd->lock, flags);
 869
  870                /* Mark pending only what actually needs to be done */
 871                sdd->state &= ~RXBUSY;
 872                sdd->state &= ~TXBUSY;
 873
 874                enable_datapath(sdd, spi, xfer, use_dma);
 875
 876                /* Slave Select */
 877                enable_cs(sdd, spi);
 878
 879                /* Start the signals */
 880                writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
 881
 882                spin_unlock_irqrestore(&sdd->lock, flags);
 883
 884                status = wait_for_xfer(sdd, xfer, use_dma);
 885
  886                /* Quiesce the signals */
 887                writel(S3C64XX_SPI_SLAVE_SIG_INACT,
 888                       sdd->regs + S3C64XX_SPI_SLAVE_SEL);
 889
 890                if (status) {
 891                        dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
 892                                xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
 893                                (sdd->state & RXBUSY) ? 'f' : 'p',
 894                                (sdd->state & TXBUSY) ? 'f' : 'p',
 895                                xfer->len);
 896
 897                        if (use_dma) {
 898                                if (xfer->tx_buf != NULL
 899                                                && (sdd->state & TXBUSY))
 900                                        s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
 901                                if (xfer->rx_buf != NULL
 902                                                && (sdd->state & RXBUSY))
 903                                        s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
 904                        }
 905
 906                        goto out;
 907                }
 908
 909                if (xfer->delay_usecs)
 910                        udelay(xfer->delay_usecs);
 911
 912                if (xfer->cs_change) {
  913                        /* Hint that the next message is likely to be
  914                           for the same device */
 915                        if (list_is_last(&xfer->transfer_list,
 916                                                &msg->transfers))
 917                                cs_toggle = 1;
 918                }
 919
 920                msg->actual_length += xfer->len;
 921
 922                flush_fifo(sdd);
 923        }
 924
 925out:
 926        if (!cs_toggle || status)
 927                disable_cs(sdd, spi);
 928        else
 929                sdd->tgl_spi = spi;
 930
 931        s3c64xx_spi_unmap_mssg(sdd, msg);
 932
 933        msg->status = status;
 934
 935        spi_finalize_current_message(master);
 936
 937        return 0;
 938}
 939
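/*
 * Build chip-select info from the slave's "controller-data" child node.
 * Illustrative (not authoritative) binding fragment; node name and GPIO
 * specifier below are examples only:
 *
 *	spidev@0 {
 *		...
 *		controller-data {
 *			cs-gpio = <&gpa2 5 0>;
 *			samsung,spi-feedback-delay = <2>;
 *		};
 *	};
 */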
 940static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
 941                                struct spi_device *spi)
 942{
 943        struct s3c64xx_spi_csinfo *cs;
 944        struct device_node *slave_np, *data_np = NULL;
 945        u32 fb_delay = 0;
 946
 947        slave_np = spi->dev.of_node;
 948        if (!slave_np) {
 949                dev_err(&spi->dev, "device node not found\n");
 950                return ERR_PTR(-EINVAL);
 951        }
 952
 953        data_np = of_get_child_by_name(slave_np, "controller-data");
 954        if (!data_np) {
 955                dev_err(&spi->dev, "child node 'controller-data' not found\n");
 956                return ERR_PTR(-EINVAL);
 957        }
 958
 959        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
 960        if (!cs) {
 961                dev_err(&spi->dev, "could not allocate memory for controller data\n");
 962                of_node_put(data_np);
 963                return ERR_PTR(-ENOMEM);
 964        }
 965
 966        cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
 967        if (!gpio_is_valid(cs->line)) {
 968                dev_err(&spi->dev, "chip select gpio is not specified or invalid\n");
 969                kfree(cs);
 970                of_node_put(data_np);
 971                return ERR_PTR(-EINVAL);
 972        }
 973
 974        of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
 975        cs->fb_delay = fb_delay;
 976        of_node_put(data_np);
 977        return cs;
 978}
 979
 980/*
 981 * Here we only check the validity of requested configuration
 982 * and save the configuration in a local data-structure.
 983 * The controller is actually configured only just before we
 984 * get a message to transfer.
 985 */
 986static int s3c64xx_spi_setup(struct spi_device *spi)
 987{
 988        struct s3c64xx_spi_csinfo *cs = spi->controller_data;
 989        struct s3c64xx_spi_driver_data *sdd;
 990        struct s3c64xx_spi_info *sci;
 991        struct spi_message *msg;
 992        unsigned long flags;
 993        int err;
 994
 995        sdd = spi_master_get_devdata(spi->master);
 996        if (!cs && spi->dev.of_node) {
 997                cs = s3c64xx_get_slave_ctrldata(spi);
 998                spi->controller_data = cs;
 999        }
1000
1001        if (IS_ERR_OR_NULL(cs)) {
1002                dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
1003                return -ENODEV;
1004        }
1005
1006        if (!spi_get_ctldata(spi)) {
1007                err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
1008                                       dev_name(&spi->dev));
1009                if (err) {
1010                        dev_err(&spi->dev,
1011                                "Failed to get /CS gpio [%d]: %d\n",
1012                                cs->line, err);
1013                        goto err_gpio_req;
1014                }
1015                spi_set_ctldata(spi, cs);
1016        }
1017
1018        sci = sdd->cntrlr_info;
1019
1020        spin_lock_irqsave(&sdd->lock, flags);
1021
1022        list_for_each_entry(msg, &sdd->queue, queue) {
 1023                /* Is some message already queued for this device? */
1024                if (msg->spi == spi) {
1025                        dev_err(&spi->dev,
1026                                "setup: attempt while mssg in queue!\n");
1027                        spin_unlock_irqrestore(&sdd->lock, flags);
1028                        err = -EBUSY;
1029                        goto err_msgq;
1030                }
1031        }
1032
1033        spin_unlock_irqrestore(&sdd->lock, flags);
1034
1035        pm_runtime_get_sync(&sdd->pdev->dev);
1036
1037        /* Check if we can provide the requested rate */
1038        if (!sdd->port_conf->clk_from_cmu) {
1039                u32 psr, speed;
1040
1041                /* Max possible */
1042                speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
1043
1044                if (spi->max_speed_hz > speed)
1045                        spi->max_speed_hz = speed;
1046
1047                psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
1048                psr &= S3C64XX_SPI_PSR_MASK;
1049                if (psr == S3C64XX_SPI_PSR_MASK)
1050                        psr--;
1051
1052                speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
1053                if (spi->max_speed_hz < speed) {
1054                        if (psr+1 < S3C64XX_SPI_PSR_MASK) {
1055                                psr++;
1056                        } else {
1057                                err = -EINVAL;
1058                                goto setup_exit;
1059                        }
1060                }
1061
1062                speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
1063                if (spi->max_speed_hz >= speed) {
1064                        spi->max_speed_hz = speed;
1065                } else {
1066                        dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
1067                                spi->max_speed_hz);
1068                        err = -EINVAL;
1069                        goto setup_exit;
1070                }
1071        }
1072
1073        pm_runtime_put(&sdd->pdev->dev);
1074        disable_cs(sdd, spi);
1075        return 0;
1076
1077setup_exit:
1078        /* setup() returns with device de-selected */
1079        disable_cs(sdd, spi);
1080
1081err_msgq:
1082        gpio_free(cs->line);
1083        spi_set_ctldata(spi, NULL);
1084
1085err_gpio_req:
1086        if (spi->dev.of_node)
1087                kfree(cs);
1088
1089        return err;
1090}
1091
1092static void s3c64xx_spi_cleanup(struct spi_device *spi)
1093{
1094        struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
1095
1096        if (cs) {
1097                gpio_free(cs->line);
1098                if (spi->dev.of_node)
1099                        kfree(cs);
1100        }
1101        spi_set_ctldata(spi, NULL);
1102}
1103
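/*
 * Error interrupt handler: report any Rx/Tx overrun or underrun condition
 * and clear the corresponding pending bits.
 */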
1104static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
1105{
1106        struct s3c64xx_spi_driver_data *sdd = data;
1107        struct spi_master *spi = sdd->master;
1108        unsigned int val, clr = 0;
1109
1110        val = readl(sdd->regs + S3C64XX_SPI_STATUS);
1111
1112        if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
1113                clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
1114                dev_err(&spi->dev, "RX overrun\n");
1115        }
1116        if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
1117                clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
1118                dev_err(&spi->dev, "RX underrun\n");
1119        }
1120        if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
1121                clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
1122                dev_err(&spi->dev, "TX overrun\n");
1123        }
1124        if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
1125                clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
1126                dev_err(&spi->dev, "TX underrun\n");
1127        }
1128
1129        /* Clear the pending irq by setting and then clearing it */
1130        writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
1131        writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
1132
1133        return IRQ_HANDLED;
1134}
1135
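/*
 * Reset the controller to a known state: chip select inactive, all
 * interrupts disabled (PIO transfers are polled), clock source selected,
 * pending bits cleared, swap disabled, trailing-byte count programmed and
 * the FIFOs flushed.
 */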
1136static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
1137{
1138        struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1139        void __iomem *regs = sdd->regs;
1140        unsigned int val;
1141
1142        sdd->cur_speed = 0;
1143
1144        writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
1145
1146        /* Disable Interrupts - we use Polling if not DMA mode */
1147        writel(0, regs + S3C64XX_SPI_INT_EN);
1148
1149        if (!sdd->port_conf->clk_from_cmu)
1150                writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
1151                                regs + S3C64XX_SPI_CLK_CFG);
1152        writel(0, regs + S3C64XX_SPI_MODE_CFG);
1153        writel(0, regs + S3C64XX_SPI_PACKET_CNT);
1154
 1155        /* Clear any pending irq bits; the pending-clear bits must be set and then cleared */
1156        val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
1157                S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
1158                S3C64XX_SPI_PND_TX_OVERRUN_CLR |
1159                S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
1160        writel(val, regs + S3C64XX_SPI_PENDING_CLR);
1161        writel(0, regs + S3C64XX_SPI_PENDING_CLR);
1162
1163        writel(0, regs + S3C64XX_SPI_SWAP_CFG);
1164
1165        val = readl(regs + S3C64XX_SPI_MODE_CFG);
1166        val &= ~S3C64XX_SPI_MODE_4BURST;
1167        val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
1168        val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
1169        writel(val, regs + S3C64XX_SPI_MODE_CFG);
1170
1171        flush_fifo(sdd);
1172}
1173
1174#ifdef CONFIG_OF
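/*
 * Properties read from the controller node; a minimal illustrative
 * fragment (node name and values are examples only):
 *
 *	spi_0: spi@13920000 {
 *		...
 *		samsung,spi-src-clk = <0>;
 *		num-cs = <1>;
 *	};
 */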
1175static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
1176{
1177        struct s3c64xx_spi_info *sci;
1178        u32 temp;
1179
1180        sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
1181        if (!sci) {
1182                dev_err(dev, "memory allocation for spi_info failed\n");
1183                return ERR_PTR(-ENOMEM);
1184        }
1185
1186        if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
1187                dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
1188                sci->src_clk_nr = 0;
1189        } else {
1190                sci->src_clk_nr = temp;
1191        }
1192
1193        if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
1194                dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
1195                sci->num_cs = 1;
1196        } else {
1197                sci->num_cs = temp;
1198        }
1199
1200        return sci;
1201}
1202#else
1203static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
1204{
1205        return dev->platform_data;
1206}
1207#endif
1208
1209static const struct of_device_id s3c64xx_spi_dt_match[];
1210
1211static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
1212                                                struct platform_device *pdev)
1213{
1214#ifdef CONFIG_OF
1215        if (pdev->dev.of_node) {
1216                const struct of_device_id *match;
1217                match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
1218                return (struct s3c64xx_spi_port_config *)match->data;
1219        }
1220#endif
1221        return (struct s3c64xx_spi_port_config *)
1222                         platform_get_device_id(pdev)->driver_data;
1223}
1224
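/*
 * Probe: gather platform data or DT properties, allocate and fill the
 * spi_master, map the registers, set up the "spi" gate clock and the
 * selected "spi_busclk<N>" source clock, initialise the hardware and
 * register the master with error interrupts enabled.
 */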
1225static int s3c64xx_spi_probe(struct platform_device *pdev)
1226{
1227        struct resource *mem_res;
1228        struct resource *res;
1229        struct s3c64xx_spi_driver_data *sdd;
1230        struct s3c64xx_spi_info *sci = pdev->dev.platform_data;
1231        struct spi_master *master;
1232        int ret, irq;
1233        char clk_name[16];
1234
1235        if (!sci && pdev->dev.of_node) {
1236                sci = s3c64xx_spi_parse_dt(&pdev->dev);
1237                if (IS_ERR(sci))
1238                        return PTR_ERR(sci);
1239        }
1240
1241        if (!sci) {
1242                dev_err(&pdev->dev, "platform_data missing!\n");
1243                return -ENODEV;
1244        }
1245
1246        mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1247        if (mem_res == NULL) {
1248                dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
1249                return -ENXIO;
1250        }
1251
1252        irq = platform_get_irq(pdev, 0);
1253        if (irq < 0) {
1254                dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
1255                return irq;
1256        }
1257
1258        master = spi_alloc_master(&pdev->dev,
1259                                sizeof(struct s3c64xx_spi_driver_data));
1260        if (master == NULL) {
1261                dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
1262                return -ENOMEM;
1263        }
1264
1265        platform_set_drvdata(pdev, master);
1266
1267        sdd = spi_master_get_devdata(master);
1268        sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
1269        sdd->master = master;
1270        sdd->cntrlr_info = sci;
1271        sdd->pdev = pdev;
1272        sdd->sfr_start = mem_res->start;
1273        if (pdev->dev.of_node) {
1274                ret = of_alias_get_id(pdev->dev.of_node, "spi");
1275                if (ret < 0) {
1276                        dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
1277                                ret);
1278                        goto err0;
1279                }
1280                sdd->port_id = ret;
1281        } else {
1282                sdd->port_id = pdev->id;
1283        }
1284
1285        sdd->cur_bpw = 8;
1286
1287        if (!sdd->pdev->dev.of_node) {
1288                res = platform_get_resource(pdev, IORESOURCE_DMA,  0);
1289                if (!res) {
1290                        dev_err(&pdev->dev, "Unable to get SPI tx dma "
1291                                        "resource\n");
1292                        return -ENXIO;
1293                }
1294                sdd->tx_dma.dmach = res->start;
1295
1296                res = platform_get_resource(pdev, IORESOURCE_DMA,  1);
1297                if (!res) {
1298                        dev_err(&pdev->dev, "Unable to get SPI rx dma "
1299                                        "resource\n");
1300                        return -ENXIO;
1301                }
1302                sdd->rx_dma.dmach = res->start;
1303        }
1304
1305        sdd->tx_dma.direction = DMA_MEM_TO_DEV;
1306        sdd->rx_dma.direction = DMA_DEV_TO_MEM;
1307
1308        master->dev.of_node = pdev->dev.of_node;
1309        master->bus_num = sdd->port_id;
1310        master->setup = s3c64xx_spi_setup;
1311        master->cleanup = s3c64xx_spi_cleanup;
1312        master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
1313        master->transfer_one_message = s3c64xx_spi_transfer_one_message;
1314        master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
1315        master->num_chipselect = sci->num_cs;
1316        master->dma_alignment = 8;
1317        master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1);
1318        /* the spi->mode bits understood by this driver: */
1319        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1320
1321        sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
1322        if (IS_ERR(sdd->regs)) {
1323                ret = PTR_ERR(sdd->regs);
1324                goto err0;
1325        }
1326
1327        if (sci->cfg_gpio && sci->cfg_gpio()) {
1328                dev_err(&pdev->dev, "Unable to config gpio\n");
1329                ret = -EBUSY;
1330                goto err0;
1331        }
1332
1333        /* Setup clocks */
1334        sdd->clk = devm_clk_get(&pdev->dev, "spi");
1335        if (IS_ERR(sdd->clk)) {
1336                dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
1337                ret = PTR_ERR(sdd->clk);
1338                goto err0;
1339        }
1340
1341        if (clk_prepare_enable(sdd->clk)) {
1342                dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1343                ret = -EBUSY;
1344                goto err0;
1345        }
1346
1347        sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
1348        sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
1349        if (IS_ERR(sdd->src_clk)) {
1350                dev_err(&pdev->dev,
1351                        "Unable to acquire clock '%s'\n", clk_name);
1352                ret = PTR_ERR(sdd->src_clk);
1353                goto err2;
1354        }
1355
1356        if (clk_prepare_enable(sdd->src_clk)) {
1357                dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
1358                ret = -EBUSY;
1359                goto err2;
1360        }
1361
 1362        /* Set up default mode */
1363        s3c64xx_spi_hwinit(sdd, sdd->port_id);
1364
1365        spin_lock_init(&sdd->lock);
1366        init_completion(&sdd->xfer_completion);
1367        INIT_LIST_HEAD(&sdd->queue);
1368
1369        ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
1370                                "spi-s3c64xx", sdd);
1371        if (ret != 0) {
1372                dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
1373                        irq, ret);
1374                goto err3;
1375        }
1376
1377        writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1378               S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1379               sdd->regs + S3C64XX_SPI_INT_EN);
1380
1381        if (spi_register_master(master)) {
1382                dev_err(&pdev->dev, "cannot register SPI master\n");
1383                ret = -EBUSY;
1384                goto err3;
1385        }
1386
1387        dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
1388                                        sdd->port_id, master->num_chipselect);
1389        dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
 1390                                        mem_res->start, mem_res->end,
1391                                        sdd->rx_dma.dmach, sdd->tx_dma.dmach);
1392
1393        pm_runtime_enable(&pdev->dev);
1394
1395        return 0;
1396
1397err3:
1398        clk_disable_unprepare(sdd->src_clk);
1399err2:
1400        clk_disable_unprepare(sdd->clk);
1401err0:
1402        platform_set_drvdata(pdev, NULL);
1403        spi_master_put(master);
1404
1405        return ret;
1406}
1407
1408static int s3c64xx_spi_remove(struct platform_device *pdev)
1409{
1410        struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1411        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1412
1413        pm_runtime_disable(&pdev->dev);
1414
1415        spi_unregister_master(master);
1416
1417        writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
1418
1419        clk_disable_unprepare(sdd->src_clk);
1420
1421        clk_disable_unprepare(sdd->clk);
1422
1423        platform_set_drvdata(pdev, NULL);
1424        spi_master_put(master);
1425
1426        return 0;
1427}
1428
1429#ifdef CONFIG_PM_SLEEP
1430static int s3c64xx_spi_suspend(struct device *dev)
1431{
1432        struct spi_master *master = dev_get_drvdata(dev);
1433        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1434
1435        spi_master_suspend(master);
1436
1437        /* Disable the clock */
1438        clk_disable_unprepare(sdd->src_clk);
1439        clk_disable_unprepare(sdd->clk);
1440
1441        sdd->cur_speed = 0; /* Output Clock is stopped */
1442
1443        return 0;
1444}
1445
1446static int s3c64xx_spi_resume(struct device *dev)
1447{
1448        struct spi_master *master = dev_get_drvdata(dev);
1449        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1450        struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1451
1452        if (sci->cfg_gpio)
1453                sci->cfg_gpio();
1454
1455        /* Enable the clock */
1456        clk_prepare_enable(sdd->src_clk);
1457        clk_prepare_enable(sdd->clk);
1458
1459        s3c64xx_spi_hwinit(sdd, sdd->port_id);
1460
1461        spi_master_resume(master);
1462
1463        return 0;
1464}
1465#endif /* CONFIG_PM_SLEEP */
1466
1467#ifdef CONFIG_PM_RUNTIME
1468static int s3c64xx_spi_runtime_suspend(struct device *dev)
1469{
1470        struct spi_master *master = dev_get_drvdata(dev);
1471        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1472
1473        clk_disable_unprepare(sdd->clk);
1474        clk_disable_unprepare(sdd->src_clk);
1475
1476        return 0;
1477}
1478
1479static int s3c64xx_spi_runtime_resume(struct device *dev)
1480{
1481        struct spi_master *master = dev_get_drvdata(dev);
1482        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1483
1484        clk_prepare_enable(sdd->src_clk);
1485        clk_prepare_enable(sdd->clk);
1486
1487        return 0;
1488}
1489#endif /* CONFIG_PM_RUNTIME */
1490
1491static const struct dev_pm_ops s3c64xx_spi_pm = {
1492        SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
1493        SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
1494                           s3c64xx_spi_runtime_resume, NULL)
1495};
1496
1497static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
1498        .fifo_lvl_mask  = { 0x7f },
1499        .rx_lvl_offset  = 13,
1500        .tx_st_done     = 21,
1501        .high_speed     = true,
1502};
1503
1504static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
1505        .fifo_lvl_mask  = { 0x7f, 0x7F },
1506        .rx_lvl_offset  = 13,
1507        .tx_st_done     = 21,
1508};
1509
1510static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
1511        .fifo_lvl_mask  = { 0x1ff, 0x7F },
1512        .rx_lvl_offset  = 15,
1513        .tx_st_done     = 25,
1514};
1515
1516static struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
1517        .fifo_lvl_mask  = { 0x7f, 0x7F },
1518        .rx_lvl_offset  = 13,
1519        .tx_st_done     = 21,
1520        .high_speed     = true,
1521};
1522
1523static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
1524        .fifo_lvl_mask  = { 0x1ff, 0x7F },
1525        .rx_lvl_offset  = 15,
1526        .tx_st_done     = 25,
1527        .high_speed     = true,
1528};
1529
1530static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
1531        .fifo_lvl_mask  = { 0x1ff, 0x7F, 0x7F },
1532        .rx_lvl_offset  = 15,
1533        .tx_st_done     = 25,
1534        .high_speed     = true,
1535        .clk_from_cmu   = true,
1536};
1537
1538static struct platform_device_id s3c64xx_spi_driver_ids[] = {
1539        {
1540                .name           = "s3c2443-spi",
1541                .driver_data    = (kernel_ulong_t)&s3c2443_spi_port_config,
1542        }, {
1543                .name           = "s3c6410-spi",
1544                .driver_data    = (kernel_ulong_t)&s3c6410_spi_port_config,
1545        }, {
1546                .name           = "s5p64x0-spi",
1547                .driver_data    = (kernel_ulong_t)&s5p64x0_spi_port_config,
1548        }, {
1549                .name           = "s5pc100-spi",
1550                .driver_data    = (kernel_ulong_t)&s5pc100_spi_port_config,
1551        }, {
1552                .name           = "s5pv210-spi",
1553                .driver_data    = (kernel_ulong_t)&s5pv210_spi_port_config,
1554        }, {
1555                .name           = "exynos4210-spi",
1556                .driver_data    = (kernel_ulong_t)&exynos4_spi_port_config,
1557        },
1558        { },
1559};
1560
1561#ifdef CONFIG_OF
1562static const struct of_device_id s3c64xx_spi_dt_match[] = {
1563        { .compatible = "samsung,exynos4210-spi",
1564                        .data = (void *)&exynos4_spi_port_config,
1565        },
1566        { },
1567};
1568MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
1569#endif /* CONFIG_OF */
1570
1571static struct platform_driver s3c64xx_spi_driver = {
1572        .driver = {
1573                .name   = "s3c64xx-spi",
1574                .owner = THIS_MODULE,
1575                .pm = &s3c64xx_spi_pm,
1576                .of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
1577        },
1578        .remove = s3c64xx_spi_remove,
1579        .id_table = s3c64xx_spi_driver_ids,
1580};
1581MODULE_ALIAS("platform:s3c64xx-spi");
1582
1583static int __init s3c64xx_spi_init(void)
1584{
1585        return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
1586}
1587subsys_initcall(s3c64xx_spi_init);
1588
1589static void __exit s3c64xx_spi_exit(void)
1590{
1591        platform_driver_unregister(&s3c64xx_spi_driver);
1592}
1593module_exit(s3c64xx_spi_exit);
1594
1595MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1596MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
1597MODULE_LICENSE("GPL");
1598