linux/drivers/spi/spi-rockchip.c
/*
 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
 * Author: Addy Ke <addy.ke@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

#define DRIVER_NAME "rockchip-spi"

/* SPI register offsets */
#define ROCKCHIP_SPI_CTRLR0			0x0000
#define ROCKCHIP_SPI_CTRLR1			0x0004
#define ROCKCHIP_SPI_SSIENR			0x0008
#define ROCKCHIP_SPI_SER			0x000c
#define ROCKCHIP_SPI_BAUDR			0x0010
#define ROCKCHIP_SPI_TXFTLR			0x0014
#define ROCKCHIP_SPI_RXFTLR			0x0018
#define ROCKCHIP_SPI_TXFLR			0x001c
#define ROCKCHIP_SPI_RXFLR			0x0020
#define ROCKCHIP_SPI_SR				0x0024
#define ROCKCHIP_SPI_IPR			0x0028
#define ROCKCHIP_SPI_IMR			0x002c
#define ROCKCHIP_SPI_ISR			0x0030
#define ROCKCHIP_SPI_RISR			0x0034
#define ROCKCHIP_SPI_ICR			0x0038
#define ROCKCHIP_SPI_DMACR			0x003c
#define ROCKCHIP_SPI_DMATDLR			0x0040
#define ROCKCHIP_SPI_DMARDLR			0x0044
#define ROCKCHIP_SPI_TXDR			0x0400
#define ROCKCHIP_SPI_RXDR			0x0800

/* Bit fields in CTRLR0 */
#define CR0_DFS_OFFSET				0

#define CR0_CFS_OFFSET				2

#define CR0_SCPH_OFFSET				6

#define CR0_SCPOL_OFFSET			7

#define CR0_CSM_OFFSET				8
#define CR0_CSM_KEEP				0x0
/* ss_n is held high for half a sclk_out cycle */
#define CR0_CSM_HALF				0x1
/* ss_n is held high for one sclk_out cycle */
#define CR0_CSM_ONE				0x2

/* ss_n to sclk_out delay */
#define CR0_SSD_OFFSET				10
/*
 * The period between ss_n active and
 * sclk_out active is half a sclk_out cycle
 */
#define CR0_SSD_HALF				0x0
/*
 * The period between ss_n active and
 * sclk_out active is one sclk_out cycle
 */
#define CR0_SSD_ONE				0x1

#define CR0_EM_OFFSET				11
#define CR0_EM_LITTLE				0x0
#define CR0_EM_BIG				0x1

#define CR0_FBM_OFFSET				12
#define CR0_FBM_MSB				0x0
#define CR0_FBM_LSB				0x1

#define CR0_BHT_OFFSET				13
#define CR0_BHT_16BIT				0x0
#define CR0_BHT_8BIT				0x1

#define CR0_RSD_OFFSET				14

#define CR0_FRF_OFFSET				16
#define CR0_FRF_SPI				0x0
#define CR0_FRF_SSP				0x1
#define CR0_FRF_MICROWIRE			0x2

#define CR0_XFM_OFFSET				18
#define CR0_XFM_MASK				(0x03 << CR0_XFM_OFFSET)
#define CR0_XFM_TR				0x0
#define CR0_XFM_TO				0x1
#define CR0_XFM_RO				0x2

#define CR0_OPM_OFFSET				20
#define CR0_OPM_MASTER				0x0
#define CR0_OPM_SLAVE				0x1

#define CR0_MTM_OFFSET				0x21

/* Bit fields in SER, 2bit */
#define SER_MASK				0x3

/* Bit fields in SR, 5bit */
#define SR_MASK					0x1f
#define SR_BUSY					(1 << 0)
#define SR_TF_FULL				(1 << 1)
#define SR_TF_EMPTY				(1 << 2)
#define SR_RF_EMPTY				(1 << 3)
#define SR_RF_FULL				(1 << 4)

/* Bit fields in IMR, ISR, RISR, 5bit */
#define INT_MASK				0x1f
#define INT_TF_EMPTY				(1 << 0)
#define INT_TF_OVERFLOW				(1 << 1)
#define INT_RF_UNDERFLOW			(1 << 2)
#define INT_RF_OVERFLOW				(1 << 3)
#define INT_RF_FULL				(1 << 4)

/* Bit fields in ICR, 4bit */
#define ICR_MASK				0x0f
#define ICR_ALL					(1 << 0)
#define ICR_RF_UNDERFLOW			(1 << 1)
#define ICR_RF_OVERFLOW				(1 << 2)
#define ICR_TF_OVERFLOW				(1 << 3)

/* Bit fields in DMACR */
#define RF_DMA_EN				(1 << 0)
#define TF_DMA_EN				(1 << 1)

#define RXBUSY					(1 << 0)
#define TXBUSY					(1 << 1)

/* sclk_out: the SPI master's internal logic in rk3x supports up to 50 MHz */
#define MAX_SCLK_OUT				50000000

/*
 * SPI_CTRLR1 is 16 bits wide, so we should support lengths of 0xffff + 1.
 * However, the controller seems to hang when given 0x10000, so stick with
 * this for now.
 */
#define ROCKCHIP_SPI_MAX_TRANLEN		0xffff

enum rockchip_ssi_type {
	SSI_MOTO_SPI = 0,
	SSI_TI_SSP,
	SSI_NS_MICROWIRE,
};

struct rockchip_spi_dma_data {
	struct dma_chan *ch;
	enum dma_transfer_direction direction;
	dma_addr_t addr;
};

struct rockchip_spi {
	struct device *dev;
	struct spi_master *master;

	struct clk *spiclk;
	struct clk *apb_pclk;

	void __iomem *regs;
	/* depth of the FIFO buffer */
	u32 fifo_len;
	/* max bus freq supported */
	u32 max_freq;
	/* frame format: Motorola SPI, TI SSP or NS Microwire */
	enum rockchip_ssi_type type;

	u16 mode;
	u8 tmode;
	u8 bpw;
	u8 n_bytes;
	u32 rsd_nsecs;
	unsigned len;
	u32 speed;

	const void *tx;
	const void *tx_end;
	void *rx;
	void *rx_end;

	u32 state;
	/* protect state */
	spinlock_t lock;

	u32 use_dma;
	struct sg_table tx_sg;
	struct sg_table rx_sg;
	struct rockchip_spi_dma_data dma_rx;
	struct rockchip_spi_dma_data dma_tx;
	struct dma_slave_caps dma_caps;
};

static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
{
	writel_relaxed((enable ? 1 : 0), rs->regs + ROCKCHIP_SPI_SSIENR);
}

static inline void spi_set_clk(struct rockchip_spi *rs, u16 div)
{
	writel_relaxed(div, rs->regs + ROCKCHIP_SPI_BAUDR);
}

static inline void flush_fifo(struct rockchip_spi *rs)
{
	while (readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR))
		readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
}

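/* Poll the BUSY flag for at most 5 ms while the controller drains its FIFO. */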
static inline void wait_for_idle(struct rockchip_spi *rs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5);

	do {
		if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
			return;
	} while (!time_after(jiffies, timeout));

	dev_warn(rs->dev, "spi controller is in busy state!\n");
}

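/*
 * Probe the TX FIFO depth: keep writing larger thresholds to TXFTLR until a
 * value no longer reads back, which marks the end of the FIFO.  Returns 0 if
 * no sane depth could be detected.
 */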
static u32 get_fifo_len(struct rockchip_spi *rs)
{
	u32 fifo;

	for (fifo = 2; fifo < 32; fifo++) {
		writel_relaxed(fifo, rs->regs + ROCKCHIP_SPI_TXFTLR);
		if (fifo != readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFTLR))
			break;
	}

	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_TXFTLR);

	return (fifo == 31) ? 0 : fifo;
}

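/*
 * tx_max()/rx_max(): how many words can be written to or read from the FIFOs
 * right now, bounded by what is left of the current transfer.
 */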
static inline u32 tx_max(struct rockchip_spi *rs)
{
	u32 tx_left, tx_room;

	tx_left = (rs->tx_end - rs->tx) / rs->n_bytes;
	tx_room = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);

	return min(tx_left, tx_room);
}

static inline u32 rx_max(struct rockchip_spi *rs)
{
	u32 rx_left = (rs->rx_end - rs->rx) / rs->n_bytes;
	u32 rx_room = (u32)readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);

	return min(rx_left, rx_room);
}

static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 ser;
	struct spi_master *master = spi->master;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	pm_runtime_get_sync(rs->dev);

	ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;

	/*
	 * drivers/spi/spi.c:
	 * static void spi_set_cs(struct spi_device *spi, bool enable)
	 * {
	 *	if (spi->mode & SPI_CS_HIGH)
	 *		enable = !enable;
	 *
	 *	if (spi->cs_gpio >= 0)
	 *		gpio_set_value(spi->cs_gpio, !enable);
	 *	else if (spi->master->set_cs)
	 *		spi->master->set_cs(spi, !enable);
	 * }
	 *
	 * Note: enable(rockchip_spi_set_cs) = !enable(spi_set_cs)
	 */
	if (!enable)
		ser |= 1 << spi->chip_select;
	else
		ser &= ~(1 << spi->chip_select);

	writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);

	pm_runtime_put_sync(rs->dev);
}

static int rockchip_spi_prepare_message(struct spi_master *master,
					struct spi_message *msg)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;

	rs->mode = spi->mode;

	return 0;
}

static void rockchip_spi_handle_err(struct spi_master *master,
				    struct spi_message *msg)
{
	unsigned long flags;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	spin_lock_irqsave(&rs->lock, flags);

	/*
	 * For DMA mode, we need to terminate the DMA channels and flush the
	 * FIFO for the next transfer if the DMA transfer timed out.
	 * handle_err() is called by the core when a transfer fails, so this
	 * is a reasonable place for that cleanup.
	 */
	if (rs->use_dma) {
		if (rs->state & RXBUSY) {
			dmaengine_terminate_async(rs->dma_rx.ch);
			flush_fifo(rs);
		}

		if (rs->state & TXBUSY)
			dmaengine_terminate_async(rs->dma_tx.ch);
	}

	spin_unlock_irqrestore(&rs->lock, flags);
}

static int rockchip_spi_unprepare_message(struct spi_master *master,
					  struct spi_message *msg)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	spi_enable_chip(rs, 0);

	return 0;
}

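/* Fill the TX FIFO with as many 8- or 16-bit words as it will currently take. */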
static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
{
	u32 max = tx_max(rs);
	u32 txw = 0;

	while (max--) {
		if (rs->n_bytes == 1)
			txw = *(u8 *)(rs->tx);
		else
			txw = *(u16 *)(rs->tx);

		writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
		rs->tx += rs->n_bytes;
	}
}

static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
{
	u32 max = rx_max(rs);
	u32 rxw;

	while (max--) {
		rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
		if (rs->n_bytes == 1)
			*(u8 *)(rs->rx) = (u8)rxw;
		else
			*(u16 *)(rs->rx) = (u16)rxw;
		rs->rx += rs->n_bytes;
	}
}

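/*
 * Polled transfer: keep topping up the TX FIFO and draining the RX FIFO
 * until nothing is left, then wait for the controller to go idle.
 */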
static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
{
	int remain = 0;

	do {
		if (rs->tx) {
			remain = rs->tx_end - rs->tx;
			rockchip_spi_pio_writer(rs);
		}

		if (rs->rx) {
			remain = rs->rx_end - rs->rx;
			rockchip_spi_pio_reader(rs);
		}

		cpu_relax();
	} while (remain);

	/* If tx, wait until the TX FIFO has drained completely. */
	if (rs->tx)
		wait_for_idle(rs);

	spi_enable_chip(rs, 0);

	return 0;
}

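/*
 * DMA completion callbacks.  The transfer is finalized only once both the
 * RX and TX sides (when used) have signalled completion.
 */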
static void rockchip_spi_dma_rxcb(void *data)
{
	unsigned long flags;
	struct rockchip_spi *rs = data;

	spin_lock_irqsave(&rs->lock, flags);

	rs->state &= ~RXBUSY;
	if (!(rs->state & TXBUSY)) {
		spi_enable_chip(rs, 0);
		spi_finalize_current_transfer(rs->master);
	}

	spin_unlock_irqrestore(&rs->lock, flags);
}

static void rockchip_spi_dma_txcb(void *data)
{
	unsigned long flags;
	struct rockchip_spi *rs = data;

	/* Wait until the TX FIFO has drained completely. */
	wait_for_idle(rs);

	spin_lock_irqsave(&rs->lock, flags);

	rs->state &= ~TXBUSY;
	if (!(rs->state & RXBUSY)) {
		spi_enable_chip(rs, 0);
		spi_finalize_current_transfer(rs->master);
	}

	spin_unlock_irqrestore(&rs->lock, flags);
}

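/*
 * Build and submit the DMA descriptors for the current transfer.  n_bytes
 * (1 or 2) doubles as the dma_slave_buswidth value, and bursts are capped at
 * four words when the DMA controller supports that.
 */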
static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
{
	unsigned long flags;
	struct dma_slave_config rxconf, txconf;
	struct dma_async_tx_descriptor *rxdesc, *txdesc;

	spin_lock_irqsave(&rs->lock, flags);
	rs->state &= ~RXBUSY;
	rs->state &= ~TXBUSY;
	spin_unlock_irqrestore(&rs->lock, flags);

	rxdesc = NULL;
	if (rs->rx) {
		rxconf.direction = rs->dma_rx.direction;
		rxconf.src_addr = rs->dma_rx.addr;
		rxconf.src_addr_width = rs->n_bytes;
		if (rs->dma_caps.max_burst > 4)
			rxconf.src_maxburst = 4;
		else
			rxconf.src_maxburst = 1;
		dmaengine_slave_config(rs->dma_rx.ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
				rs->dma_rx.ch,
				rs->rx_sg.sgl, rs->rx_sg.nents,
				rs->dma_rx.direction, DMA_PREP_INTERRUPT);
		if (!rxdesc)
			return -EINVAL;

		rxdesc->callback = rockchip_spi_dma_rxcb;
		rxdesc->callback_param = rs;
	}

	txdesc = NULL;
	if (rs->tx) {
		txconf.direction = rs->dma_tx.direction;
		txconf.dst_addr = rs->dma_tx.addr;
		txconf.dst_addr_width = rs->n_bytes;
		if (rs->dma_caps.max_burst > 4)
			txconf.dst_maxburst = 4;
		else
			txconf.dst_maxburst = 1;
		dmaengine_slave_config(rs->dma_tx.ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(
				rs->dma_tx.ch,
				rs->tx_sg.sgl, rs->tx_sg.nents,
				rs->dma_tx.direction, DMA_PREP_INTERRUPT);
		if (!txdesc) {
			if (rxdesc)
				dmaengine_terminate_sync(rs->dma_rx.ch);
			return -EINVAL;
		}

		txdesc->callback = rockchip_spi_dma_txcb;
		txdesc->callback_param = rs;
	}

	/*
	 * RX must be issued before TX: once TX starts clocking data out,
	 * receive data arrives immediately, so the RX channel has to be
	 * running already.
	 */
	if (rxdesc) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= RXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(rs->dma_rx.ch);
	}

	if (txdesc) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= TXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(rs->dma_tx.ch);
	}

	return 0;
}

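/*
 * Program CTRLR0/CTRLR1, the FIFO and DMA thresholds, and the baud-rate
 * divider according to the parameters of the current transfer.
 */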
static void rockchip_spi_config(struct rockchip_spi *rs)
{
	u32 div = 0;
	u32 dmacr = 0;
	int rsd = 0;

	u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
		| (CR0_SSD_ONE << CR0_SSD_OFFSET)
		| (CR0_EM_BIG << CR0_EM_OFFSET);

	cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
	cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
	cr0 |= (rs->tmode << CR0_XFM_OFFSET);
	cr0 |= (rs->type << CR0_FRF_OFFSET);

	if (rs->use_dma) {
		if (rs->tx)
			dmacr |= TF_DMA_EN;
		if (rs->rx)
			dmacr |= RF_DMA_EN;
	}

	if (WARN_ON(rs->speed > MAX_SCLK_OUT))
		rs->speed = MAX_SCLK_OUT;

	/* the minimum divisor is 2 */
	if (rs->max_freq < 2 * rs->speed) {
		clk_set_rate(rs->spiclk, 2 * rs->speed);
		rs->max_freq = clk_get_rate(rs->spiclk);
	}

	/* the divider must be even, so round up to the next even value */
	div = DIV_ROUND_UP(rs->max_freq, rs->speed);
	div = (div + 1) & 0xfffe;

	/*
	 * Rx sample delay is expressed in parent clock cycles (max 3).  Both
	 * terms are scaled down by 256 to keep the multiplication within
	 * 32 bits.
	 */
	rsd = DIV_ROUND_CLOSEST(rs->rsd_nsecs * (rs->max_freq >> 8),
				1000000000 >> 8);
	if (!rsd && rs->rsd_nsecs) {
		pr_warn_once("rockchip-spi: %u Hz are too slow to express %u ns delay\n",
			     rs->max_freq, rs->rsd_nsecs);
	} else if (rsd > 3) {
		rsd = 3;
		pr_warn_once("rockchip-spi: %u Hz are too fast to express %u ns delay, clamping at %u ns\n",
			     rs->max_freq, rs->rsd_nsecs,
			     rsd * 1000000000U / rs->max_freq);
	}
	cr0 |= rsd << CR0_RSD_OFFSET;

	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);

	writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);

	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);

	spi_set_clk(rs, div);

	dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
}

static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
{
	return ROCKCHIP_SPI_MAX_TRANLEN;
}

static int rockchip_spi_transfer_one(
		struct spi_master *master,
		struct spi_device *spi,
		struct spi_transfer *xfer)
{
	int ret = 0;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
		(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));

	if (!xfer->tx_buf && !xfer->rx_buf) {
		dev_err(rs->dev, "No buffer for transfer\n");
		return -EINVAL;
	}

	if (xfer->len > ROCKCHIP_SPI_MAX_TRANLEN) {
		dev_err(rs->dev, "Transfer is too long (%d)\n", xfer->len);
		return -EINVAL;
	}

	rs->speed = xfer->speed_hz;
	rs->bpw = xfer->bits_per_word;
	rs->n_bytes = rs->bpw >> 3;

	rs->tx = xfer->tx_buf;
	rs->tx_end = rs->tx + xfer->len;
	rs->rx = xfer->rx_buf;
	rs->rx_end = rs->rx + xfer->len;
	rs->len = xfer->len;

	rs->tx_sg = xfer->tx_sg;
	rs->rx_sg = xfer->rx_sg;

	if (rs->tx && rs->rx)
		rs->tmode = CR0_XFM_TR;
	else if (rs->tx)
		rs->tmode = CR0_XFM_TO;
	else if (rs->rx)
		rs->tmode = CR0_XFM_RO;

	/* we need to decide on DMA before the controller is enabled */
	if (master->can_dma && master->can_dma(master, spi, xfer))
		rs->use_dma = 1;
	else
		rs->use_dma = 0;

	rockchip_spi_config(rs);

	if (rs->use_dma) {
		if (rs->tmode == CR0_XFM_RO) {
			/* rx: dma must be prepared first */
			ret = rockchip_spi_prepare_dma(rs);
			spi_enable_chip(rs, 1);
		} else {
			/* tx or tr: spi must be enabled first */
			spi_enable_chip(rs, 1);
			ret = rockchip_spi_prepare_dma(rs);
		}
		/* successful DMA prepare means the transfer is in progress */
		ret = ret ? ret : 1;
	} else {
		spi_enable_chip(rs, 1);
		ret = rockchip_spi_pio_transfer(rs);
	}

	return ret;
}

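/*
 * Use DMA only when the transfer does not fit in the FIFO; anything shorter
 * is cheaper to handle with PIO.
 */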
static bool rockchip_spi_can_dma(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	return (xfer->len > rs->fifo_len);
}

static int rockchip_spi_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct rockchip_spi *rs;
	struct spi_master *master;
	struct resource *mem;
	u32 rsd_nsecs;

	master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	rs = spi_master_get_devdata(master);

	/* Get basic io resource and map it */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rs->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(rs->regs)) {
		ret = PTR_ERR(rs->regs);
		goto err_ioremap_resource;
	}

	rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(rs->apb_pclk)) {
		dev_err(&pdev->dev, "Failed to get apb_pclk\n");
		ret = PTR_ERR(rs->apb_pclk);
		goto err_ioremap_resource;
	}

	rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
	if (IS_ERR(rs->spiclk)) {
		dev_err(&pdev->dev, "Failed to get spiclk\n");
		ret = PTR_ERR(rs->spiclk);
		goto err_ioremap_resource;
	}

	ret = clk_prepare_enable(rs->apb_pclk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
		goto err_ioremap_resource;
	}

	ret = clk_prepare_enable(rs->spiclk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable spiclk\n");
		goto err_spiclk_enable;
	}

	spi_enable_chip(rs, 0);

	rs->type = SSI_MOTO_SPI;
	rs->master = master;
	rs->dev = &pdev->dev;
	rs->max_freq = clk_get_rate(rs->spiclk);

	if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
				  &rsd_nsecs))
		rs->rsd_nsecs = rsd_nsecs;

	rs->fifo_len = get_fifo_len(rs);
	if (!rs->fifo_len) {
		dev_err(&pdev->dev, "Failed to get fifo length\n");
		ret = -EINVAL;
		goto err_get_fifo_len;
	}

	spin_lock_init(&rs->lock);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	master->num_chipselect = 2;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);

	master->set_cs = rockchip_spi_set_cs;
	master->prepare_message = rockchip_spi_prepare_message;
	master->unprepare_message = rockchip_spi_unprepare_message;
	master->transfer_one = rockchip_spi_transfer_one;
	master->max_transfer_size = rockchip_spi_max_transfer_size;
	master->handle_err = rockchip_spi_handle_err;

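	/*
	 * DMA is optional: fall back to PIO if the channels are unavailable,
	 * but still honour probe deferral from the DMA provider.
	 */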
	rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
	if (IS_ERR(rs->dma_tx.ch)) {
		/* Check tx to see if we need to defer probing the driver */
		if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_get_fifo_len;
		}
		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
		rs->dma_tx.ch = NULL;
	}

	rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
	if (IS_ERR(rs->dma_rx.ch)) {
		if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_free_dma_tx;
		}
		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
		rs->dma_rx.ch = NULL;
	}

	if (rs->dma_tx.ch && rs->dma_rx.ch) {
		dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps));
		rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
		rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
		rs->dma_tx.direction = DMA_MEM_TO_DEV;
		rs->dma_rx.direction = DMA_DEV_TO_MEM;

		master->can_dma = rockchip_spi_can_dma;
		master->dma_tx = rs->dma_tx.ch;
		master->dma_rx = rs->dma_rx.ch;
	}

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register master\n");
		goto err_register_master;
	}

	return 0;

err_register_master:
	pm_runtime_disable(&pdev->dev);
	if (rs->dma_rx.ch)
		dma_release_channel(rs->dma_rx.ch);
err_free_dma_tx:
	if (rs->dma_tx.ch)
		dma_release_channel(rs->dma_tx.ch);
err_get_fifo_len:
	clk_disable_unprepare(rs->spiclk);
err_spiclk_enable:
	clk_disable_unprepare(rs->apb_pclk);
err_ioremap_resource:
	spi_master_put(master);

	return ret;
}

static int rockchip_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	clk_disable_unprepare(rs->spiclk);
	clk_disable_unprepare(rs->apb_pclk);

	if (rs->dma_tx.ch)
		dma_release_channel(rs->dma_tx.ch);
	if (rs->dma_rx.ch)
		dma_release_channel(rs->dma_rx.ch);

	spi_master_put(master);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int rockchip_spi_suspend(struct device *dev)
{
	int ret = 0;
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	ret = spi_master_suspend(rs->master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(rs->spiclk);
		clk_disable_unprepare(rs->apb_pclk);
	}

	return ret;
}

static int rockchip_spi_resume(struct device *dev)
{
	int ret = 0;
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(rs->apb_pclk);
		if (ret < 0)
			return ret;

		ret = clk_prepare_enable(rs->spiclk);
		if (ret < 0) {
			clk_disable_unprepare(rs->apb_pclk);
			return ret;
		}
	}

	ret = spi_master_resume(rs->master);
	if (ret < 0) {
		clk_disable_unprepare(rs->spiclk);
		clk_disable_unprepare(rs->apb_pclk);
	}

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int rockchip_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	clk_disable_unprepare(rs->spiclk);
	clk_disable_unprepare(rs->apb_pclk);

	return 0;
}

static int rockchip_spi_runtime_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	ret = clk_prepare_enable(rs->apb_pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(rs->spiclk);
	if (ret)
		clk_disable_unprepare(rs->apb_pclk);

	return ret;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops rockchip_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
	SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
			   rockchip_spi_runtime_resume, NULL)
};

static const struct of_device_id rockchip_spi_dt_match[] = {
	{ .compatible = "rockchip,rk3036-spi", },
	{ .compatible = "rockchip,rk3066-spi", },
	{ .compatible = "rockchip,rk3188-spi", },
	{ .compatible = "rockchip,rk3228-spi", },
	{ .compatible = "rockchip,rk3288-spi", },
	{ .compatible = "rockchip,rk3368-spi", },
	{ .compatible = "rockchip,rk3399-spi", },
	{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);

static struct platform_driver rockchip_spi_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.pm = &rockchip_spi_pm,
		.of_match_table = of_match_ptr(rockchip_spi_dt_match),
	},
	.probe = rockchip_spi_probe,
	.remove = rockchip_spi_remove,
};

module_platform_driver(rockchip_spi_driver);

MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
MODULE_LICENSE("GPL v2");