linux/drivers/spi/spi-tegra114.c
   1/*
   2 * SPI driver for NVIDIA's Tegra114 SPI Controller.
   3 *
   4 * Copyright (c) 2013, NVIDIA CORPORATION.  All rights reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms and conditions of the GNU General Public License,
   8 * version 2, as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope it will be useful, but WITHOUT
  11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13 * more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  17 */
  18
  19#include <linux/clk.h>
  20#include <linux/clk/tegra.h>
  21#include <linux/completion.h>
  22#include <linux/delay.h>
  23#include <linux/dmaengine.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/dmapool.h>
  26#include <linux/err.h>
  27#include <linux/init.h>
  28#include <linux/interrupt.h>
  29#include <linux/io.h>
  30#include <linux/kernel.h>
  31#include <linux/kthread.h>
  32#include <linux/module.h>
  33#include <linux/platform_device.h>
  34#include <linux/pm_runtime.h>
  35#include <linux/of.h>
  36#include <linux/of_device.h>
  37#include <linux/spi/spi.h>
  38
  39#define SPI_COMMAND1                            0x000
  40#define SPI_BIT_LENGTH(x)                       (((x) & 0x1f) << 0)
  41#define SPI_PACKED                              (1 << 5)
  42#define SPI_TX_EN                               (1 << 11)
  43#define SPI_RX_EN                               (1 << 12)
  44#define SPI_BOTH_EN_BYTE                        (1 << 13)
  45#define SPI_BOTH_EN_BIT                         (1 << 14)
  46#define SPI_LSBYTE_FE                           (1 << 15)
  47#define SPI_LSBIT_FE                            (1 << 16)
  48#define SPI_BIDIROE                             (1 << 17)
  49#define SPI_IDLE_SDA_DRIVE_LOW                  (0 << 18)
  50#define SPI_IDLE_SDA_DRIVE_HIGH                 (1 << 18)
  51#define SPI_IDLE_SDA_PULL_LOW                   (2 << 18)
  52#define SPI_IDLE_SDA_PULL_HIGH                  (3 << 18)
  53#define SPI_IDLE_SDA_MASK                       (3 << 18)
  54#define SPI_CS_SS_VAL                           (1 << 20)
  55#define SPI_CS_SW_HW                            (1 << 21)
  56/* SPI_CS_POL_INACTIVE bits are default high */
  57#define SPI_CS_POL_INACTIVE                     22
  58#define SPI_CS_POL_INACTIVE_0                   (1 << 22)
  59#define SPI_CS_POL_INACTIVE_1                   (1 << 23)
  60#define SPI_CS_POL_INACTIVE_2                   (1 << 24)
  61#define SPI_CS_POL_INACTIVE_3                   (1 << 25)
  62#define SPI_CS_POL_INACTIVE_MASK                (0xF << 22)
  63
  64#define SPI_CS_SEL_0                            (0 << 26)
  65#define SPI_CS_SEL_1                            (1 << 26)
  66#define SPI_CS_SEL_2                            (2 << 26)
  67#define SPI_CS_SEL_3                            (3 << 26)
  68#define SPI_CS_SEL_MASK                         (3 << 26)
  69#define SPI_CS_SEL(x)                           (((x) & 0x3) << 26)
  70#define SPI_CONTROL_MODE_0                      (0 << 28)
  71#define SPI_CONTROL_MODE_1                      (1 << 28)
  72#define SPI_CONTROL_MODE_2                      (2 << 28)
  73#define SPI_CONTROL_MODE_3                      (3 << 28)
  74#define SPI_CONTROL_MODE_MASK                   (3 << 28)
  75#define SPI_MODE_SEL(x)                         (((x) & 0x3) << 28)
  76#define SPI_M_S                                 (1 << 30)
  77#define SPI_PIO                                 (1 << 31)
  78
  79#define SPI_COMMAND2                            0x004
  80#define SPI_TX_TAP_DELAY(x)                     (((x) & 0x3F) << 6)
  81#define SPI_RX_TAP_DELAY(x)                     (((x) & 0x3F) << 0)
  82
  83#define SPI_CS_TIMING1                          0x008
  84#define SPI_SETUP_HOLD(setup, hold)             (((setup) << 4) | (hold))
  85#define SPI_CS_SETUP_HOLD(reg, cs, val)                 \
  86                ((((val) & 0xFFu) << ((cs) * 8)) |      \
  87                ((reg) & ~(0xFFu << ((cs) * 8))))
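/*
 * Each chip select owns one byte lane of SPI_CS_TIMING1.  As an
 * illustrative example (arbitrary values, not recommended settings):
 * SPI_CS_SETUP_HOLD(reg, 2, SPI_SETUP_HOLD(0x3, 0x7)) replaces bits
 * 23:16 of reg with 0x37, i.e. 3 setup and 7 hold cycles for CS2.
 */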
  88
  89#define SPI_CS_TIMING2                          0x00C
  90#define CYCLES_BETWEEN_PACKETS_0(x)             (((x) & 0x1F) << 0)
  91#define CS_ACTIVE_BETWEEN_PACKETS_0             (1 << 5)
  92#define CYCLES_BETWEEN_PACKETS_1(x)             (((x) & 0x1F) << 8)
  93#define CS_ACTIVE_BETWEEN_PACKETS_1             (1 << 13)
  94#define CYCLES_BETWEEN_PACKETS_2(x)             (((x) & 0x1F) << 16)
  95#define CS_ACTIVE_BETWEEN_PACKETS_2             (1 << 21)
  96#define CYCLES_BETWEEN_PACKETS_3(x)             (((x) & 0x1F) << 24)
  97#define CS_ACTIVE_BETWEEN_PACKETS_3             (1 << 29)
  98#define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val)         \
  99                (reg = (((val) & 0x1) << ((cs) * 8 + 5)) |      \
 100                        ((reg) & ~(1 << ((cs) * 8 + 5))))
 101#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val)            \
 102                (reg = (((val) & 0xF) << ((cs) * 8)) |          \
 103                        ((reg) & ~(0xF << ((cs) * 8))))
 104
 105#define SPI_TRANS_STATUS                        0x010
 106#define SPI_BLK_CNT(val)                        (((val) >> 0) & 0xFFFF)
 107#define SPI_SLV_IDLE_COUNT(val)                 (((val) >> 16) & 0xFF)
 108#define SPI_RDY                                 (1 << 30)
 109
 110#define SPI_FIFO_STATUS                         0x014
 111#define SPI_RX_FIFO_EMPTY                       (1 << 0)
 112#define SPI_RX_FIFO_FULL                        (1 << 1)
 113#define SPI_TX_FIFO_EMPTY                       (1 << 2)
 114#define SPI_TX_FIFO_FULL                        (1 << 3)
 115#define SPI_RX_FIFO_UNF                         (1 << 4)
 116#define SPI_RX_FIFO_OVF                         (1 << 5)
 117#define SPI_TX_FIFO_UNF                         (1 << 6)
 118#define SPI_TX_FIFO_OVF                         (1 << 7)
 119#define SPI_ERR                                 (1 << 8)
 120#define SPI_TX_FIFO_FLUSH                       (1 << 14)
 121#define SPI_RX_FIFO_FLUSH                       (1 << 15)
 122#define SPI_TX_FIFO_EMPTY_COUNT(val)            (((val) >> 16) & 0x7F)
 123#define SPI_RX_FIFO_FULL_COUNT(val)             (((val) >> 23) & 0x7F)
 124#define SPI_FRAME_END                           (1 << 30)
 125#define SPI_CS_INACTIVE                         (1 << 31)
 126
 127#define SPI_FIFO_ERROR                          (SPI_RX_FIFO_UNF | \
 128                        SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF)
 129#define SPI_FIFO_EMPTY                  (SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY)
 130
 131#define SPI_TX_DATA                             0x018
 132#define SPI_RX_DATA                             0x01C
 133
 134#define SPI_DMA_CTL                             0x020
 135#define SPI_TX_TRIG_1                           (0 << 15)
 136#define SPI_TX_TRIG_4                           (1 << 15)
 137#define SPI_TX_TRIG_8                           (2 << 15)
 138#define SPI_TX_TRIG_16                          (3 << 15)
 139#define SPI_TX_TRIG_MASK                        (3 << 15)
 140#define SPI_RX_TRIG_1                           (0 << 19)
 141#define SPI_RX_TRIG_4                           (1 << 19)
 142#define SPI_RX_TRIG_8                           (2 << 19)
 143#define SPI_RX_TRIG_16                          (3 << 19)
 144#define SPI_RX_TRIG_MASK                        (3 << 19)
 145#define SPI_IE_TX                               (1 << 28)
 146#define SPI_IE_RX                               (1 << 29)
 147#define SPI_CONT                                (1 << 30)
 148#define SPI_DMA                                 (1 << 31)
 149#define SPI_DMA_EN                              SPI_DMA
 150
 151#define SPI_DMA_BLK                             0x024
 152#define SPI_DMA_BLK_SET(x)                      (((x) & 0xFFFF) << 0)
 153
 154#define SPI_TX_FIFO                             0x108
 155#define SPI_RX_FIFO                             0x188
 156#define MAX_CHIP_SELECT                         4
 157#define SPI_FIFO_DEPTH                          64
 158#define DATA_DIR_TX                             (1 << 0)
 159#define DATA_DIR_RX                             (1 << 1)
 160
 161#define SPI_DMA_TIMEOUT                         (msecs_to_jiffies(1000))
 162#define DEFAULT_SPI_DMA_BUF_LEN                 (16*1024)
 163#define TX_FIFO_EMPTY_COUNT_MAX                 SPI_TX_FIFO_EMPTY_COUNT(0x40)
 164#define RX_FIFO_FULL_COUNT_ZERO                 SPI_RX_FIFO_FULL_COUNT(0)
 165#define MAX_HOLD_CYCLES                         16
 166#define SPI_DEFAULT_SPEED                       25000000
 167
 171struct tegra_spi_data {
 172        struct device                           *dev;
 173        struct spi_master                       *master;
 174        spinlock_t                              lock;
 175
 176        struct clk                              *clk;
 177        void __iomem                            *base;
 178        phys_addr_t                             phys;
 179        unsigned                                irq;
 180        int                                     dma_req_sel;
 181        u32                                     spi_max_frequency;
 182        u32                                     cur_speed;
 183
 184        struct spi_device                       *cur_spi;
 185        unsigned                                cur_pos;
 186        unsigned                                cur_len;
 187        unsigned                                words_per_32bit;
 188        unsigned                                bytes_per_word;
 189        unsigned                                curr_dma_words;
 190        unsigned                                cur_direction;
 191
 192        unsigned                                cur_rx_pos;
 193        unsigned                                cur_tx_pos;
 194
 195        unsigned                                dma_buf_size;
 196        unsigned                                max_buf_size;
 197        bool                                    is_curr_dma_xfer;
 198
 199        struct completion                       rx_dma_complete;
 200        struct completion                       tx_dma_complete;
 201
 202        u32                                     tx_status;
 203        u32                                     rx_status;
 204        u32                                     status_reg;
 205        bool                                    is_packed;
 206        unsigned long                           packed_size;
 207
 208        u32                                     command1_reg;
 209        u32                                     dma_control_reg;
 210        u32                                     def_command1_reg;
 211        u32                                     spi_cs_timing;
 212
 213        struct completion                       xfer_completion;
 214        struct spi_transfer                     *curr_xfer;
 215        struct dma_chan                         *rx_dma_chan;
 216        u32                                     *rx_dma_buf;
 217        dma_addr_t                              rx_dma_phys;
 218        struct dma_async_tx_descriptor          *rx_dma_desc;
 219
 220        struct dma_chan                         *tx_dma_chan;
 221        u32                                     *tx_dma_buf;
 222        dma_addr_t                              tx_dma_phys;
 223        struct dma_async_tx_descriptor          *tx_dma_desc;
 224};
 225
 226static int tegra_spi_runtime_suspend(struct device *dev);
 227static int tegra_spi_runtime_resume(struct device *dev);
 228
 229static inline unsigned long tegra_spi_readl(struct tegra_spi_data *tspi,
 230                unsigned long reg)
 231{
 232        return readl(tspi->base + reg);
 233}
 234
 235static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
 236                unsigned long val, unsigned long reg)
 237{
 238        writel(val, tspi->base + reg);
 239
 240        /* Read back register to make sure that register writes completed */
 241        if (reg != SPI_TX_FIFO)
 242                readl(tspi->base + SPI_COMMAND1);
 243}
 244
 245static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
 246{
 247        unsigned long val;
 248
 249        /* Write 1 to clear status register */
 250        val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
 251        tegra_spi_writel(tspi, val, SPI_TRANS_STATUS);
 252
 253        /* Clear fifo status error if any */
 254        val = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
 255        if (val & SPI_ERR)
 256                tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR,
 257                                SPI_FIFO_STATUS);
 258}
 259
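/*
 * Work out how much of the remaining transfer fits in one burst.  In
 * packed mode (8/16 bit words) several words share one 32-bit FIFO
 * entry; otherwise each word occupies a full entry.  As a worked
 * example (illustrative only): a 100-byte transfer at 8 bits per word
 * gives bytes_per_word = 1, curr_dma_words = 100 and total_fifo_words
 * = 25, which fits in the 64-entry FIFO and is therefore done by PIO
 * rather than DMA.
 */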
 260static unsigned tegra_spi_calculate_curr_xfer_param(
 261        struct spi_device *spi, struct tegra_spi_data *tspi,
 262        struct spi_transfer *t)
 263{
 264        unsigned remain_len = t->len - tspi->cur_pos;
 265        unsigned max_word;
 266        unsigned bits_per_word = t->bits_per_word;
 267        unsigned max_len;
 268        unsigned total_fifo_words;
 269
 270        tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
 271
 272        if (bits_per_word == 8 || bits_per_word == 16) {
 273                tspi->is_packed = 1;
 274                tspi->words_per_32bit = 32/bits_per_word;
 275        } else {
 276                tspi->is_packed = 0;
 277                tspi->words_per_32bit = 1;
 278        }
 279
 280        if (tspi->is_packed) {
 281                max_len = min(remain_len, tspi->max_buf_size);
 282                tspi->curr_dma_words = max_len/tspi->bytes_per_word;
 283                total_fifo_words = (max_len + 3) / 4;
 284        } else {
 285                max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
 286                max_word = min(max_word, tspi->max_buf_size/4);
 287                tspi->curr_dma_words = max_word;
 288                total_fifo_words = max_word;
 289        }
 290        return total_fifo_words;
 291}
 292
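/*
 * Refill the TX FIFO from the client buffer.  In packed mode bytes are
 * packed little-endian into each 32-bit FIFO word, so (illustrative
 * example) the byte sequence 0x11 0x22 0x33 0x44 is written as
 * 0x44332211.  In unpacked mode each FIFO entry carries a single word
 * of up to bytes_per_word bytes.
 */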
 293static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
 294        struct tegra_spi_data *tspi, struct spi_transfer *t)
 295{
 296        unsigned nbytes;
 297        unsigned tx_empty_count;
 298        unsigned long fifo_status;
 299        unsigned max_n_32bit;
 300        unsigned i, count;
 301        unsigned long x;
 302        unsigned int written_words;
 303        unsigned fifo_words_left;
 304        u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
 305
 306        fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
 307        tx_empty_count = SPI_TX_FIFO_EMPTY_COUNT(fifo_status);
 308
 309        if (tspi->is_packed) {
 310                fifo_words_left = tx_empty_count * tspi->words_per_32bit;
 311                written_words = min(fifo_words_left, tspi->curr_dma_words);
 312                nbytes = written_words * tspi->bytes_per_word;
 313                max_n_32bit = DIV_ROUND_UP(nbytes, 4);
 314                for (count = 0; count < max_n_32bit; count++) {
 315                        x = 0;
 316                        for (i = 0; (i < 4) && nbytes; i++, nbytes--)
 317                                x |= (*tx_buf++) << (i*8);
 318                        tegra_spi_writel(tspi, x, SPI_TX_FIFO);
 319                }
 320        } else {
 321                max_n_32bit = min(tspi->curr_dma_words,  tx_empty_count);
 322                written_words = max_n_32bit;
 323                nbytes = written_words * tspi->bytes_per_word;
 324                for (count = 0; count < max_n_32bit; count++) {
 325                        x = 0;
 326                        for (i = 0; nbytes && (i < tspi->bytes_per_word);
 327                                                        i++, nbytes--)
 328                                x |= ((*tx_buf++) << i*8);
 329                        tegra_spi_writel(tspi, x, SPI_TX_FIFO);
 330                }
 331        }
 332        tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
 333        return written_words;
 334}
 335
 336static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
 337                struct tegra_spi_data *tspi, struct spi_transfer *t)
 338{
 339        unsigned rx_full_count;
 340        unsigned long fifo_status;
 341        unsigned i, count;
 342        unsigned long x;
 343        unsigned int read_words = 0;
 344        unsigned len;
 345        u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
 346
 347        fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
 348        rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status);
 349        if (tspi->is_packed) {
 350                len = tspi->curr_dma_words * tspi->bytes_per_word;
 351                for (count = 0; count < rx_full_count; count++) {
 352                        x = tegra_spi_readl(tspi, SPI_RX_FIFO);
 353                        for (i = 0; len && (i < 4); i++, len--)
 354                                *rx_buf++ = (x >> i*8) & 0xFF;
 355                }
 356                tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
 357                read_words += tspi->curr_dma_words;
 358        } else {
 359                unsigned int rx_mask;
 360                unsigned int bits_per_word = t->bits_per_word;
 361
  362                rx_mask = (u32)~0U >> (32 - bits_per_word);
 363                for (count = 0; count < rx_full_count; count++) {
 364                        x = tegra_spi_readl(tspi, SPI_RX_FIFO);
 365                        x &= rx_mask;
 366                        for (i = 0; (i < tspi->bytes_per_word); i++)
 367                                *rx_buf++ = (x >> (i*8)) & 0xFF;
 368                }
 369                tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
 370                read_words += rx_full_count;
 371        }
 372        return read_words;
 373}
 374
 375static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
 376                struct tegra_spi_data *tspi, struct spi_transfer *t)
 377{
 378        unsigned len;
 379
  380        /* Make the DMA buffer readable by the CPU */
 381        dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
 382                                tspi->dma_buf_size, DMA_TO_DEVICE);
 383
 384        if (tspi->is_packed) {
 385                len = tspi->curr_dma_words * tspi->bytes_per_word;
 386                memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
 387        } else {
 388                unsigned int i;
 389                unsigned int count;
 390                u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
 391                unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
 392                unsigned int x;
 393
 394                for (count = 0; count < tspi->curr_dma_words; count++) {
 395                        x = 0;
 396                        for (i = 0; consume && (i < tspi->bytes_per_word);
 397                                                        i++, consume--)
 398                                x |= ((*tx_buf++) << i * 8);
 399                        tspi->tx_dma_buf[count] = x;
 400                }
 401        }
 402        tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
 403
  404        /* Hand the DMA buffer back to the device */
 405        dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
 406                                tspi->dma_buf_size, DMA_TO_DEVICE);
 407}
 408
 409static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
 410                struct tegra_spi_data *tspi, struct spi_transfer *t)
 411{
 412        unsigned len;
 413
  414        /* Make the DMA buffer readable by the CPU */
 415        dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
 416                tspi->dma_buf_size, DMA_FROM_DEVICE);
 417
 418        if (tspi->is_packed) {
 419                len = tspi->curr_dma_words * tspi->bytes_per_word;
 420                memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
 421        } else {
 422                unsigned int i;
 423                unsigned int count;
 424                unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
 425                unsigned int x;
 426                unsigned int rx_mask;
 427                unsigned int bits_per_word = t->bits_per_word;
 428
  429                rx_mask = (u32)~0U >> (32 - bits_per_word);
 430                for (count = 0; count < tspi->curr_dma_words; count++) {
 431                        x = tspi->rx_dma_buf[count];
 432                        x &= rx_mask;
 433                        for (i = 0; (i < tspi->bytes_per_word); i++)
 434                                *rx_buf++ = (x >> (i*8)) & 0xFF;
 435                }
 436        }
 437        tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
 438
  439        /* Hand the DMA buffer back to the device */
 440        dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
 441                tspi->dma_buf_size, DMA_FROM_DEVICE);
 442}
 443
 444static void tegra_spi_dma_complete(void *args)
 445{
 446        struct completion *dma_complete = args;
 447
 448        complete(dma_complete);
 449}
 450
 451static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
 452{
 453        INIT_COMPLETION(tspi->tx_dma_complete);
 454        tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
 455                                tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
 456                                DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
 457        if (!tspi->tx_dma_desc) {
 458                dev_err(tspi->dev, "Not able to get desc for Tx\n");
 459                return -EIO;
 460        }
 461
 462        tspi->tx_dma_desc->callback = tegra_spi_dma_complete;
 463        tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
 464
 465        dmaengine_submit(tspi->tx_dma_desc);
 466        dma_async_issue_pending(tspi->tx_dma_chan);
 467        return 0;
 468}
 469
 470static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
 471{
 472        INIT_COMPLETION(tspi->rx_dma_complete);
 473        tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
 474                                tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
 475                                DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
 476        if (!tspi->rx_dma_desc) {
 477                dev_err(tspi->dev, "Not able to get desc for Rx\n");
 478                return -EIO;
 479        }
 480
 481        tspi->rx_dma_desc->callback = tegra_spi_dma_complete;
 482        tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
 483
 484        dmaengine_submit(tspi->rx_dma_desc);
 485        dma_async_issue_pending(tspi->rx_dma_chan);
 486        return 0;
 487}
 488
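/*
 * Set up and start a DMA based transfer.  The FIFO trigger level is
 * derived from the transfer length; as worked examples (illustrative
 * only): a 20-byte transfer (len & 0xF != 0) uses the 1-word trigger,
 * 48 bytes the 4-word trigger and 64 bytes the 8-word trigger.
 */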
 489static int tegra_spi_start_dma_based_transfer(
 490                struct tegra_spi_data *tspi, struct spi_transfer *t)
 491{
 492        unsigned long val;
 493        unsigned int len;
 494        int ret = 0;
 495        unsigned long status;
 496
 497        /* Make sure that Rx and Tx fifo are empty */
 498        status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
 499        if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
 500                dev_err(tspi->dev,
  501                        "Rx/Tx fifo are not empty, status 0x%08lx\n", status);
 502                return -EIO;
 503        }
 504
 505        val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
 506        tegra_spi_writel(tspi, val, SPI_DMA_BLK);
 507
 508        if (tspi->is_packed)
 509                len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
 510                                        4) * 4;
 511        else
 512                len = tspi->curr_dma_words * 4;
 513
 514        /* Set attention level based on length of transfer */
 515        if (len & 0xF)
 516                val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
 517        else if (((len) >> 4) & 0x1)
 518                val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
 519        else
 520                val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
 521
 522        if (tspi->cur_direction & DATA_DIR_TX)
 523                val |= SPI_IE_TX;
 524
 525        if (tspi->cur_direction & DATA_DIR_RX)
 526                val |= SPI_IE_RX;
 527
 528        tegra_spi_writel(tspi, val, SPI_DMA_CTL);
 529        tspi->dma_control_reg = val;
 530
 531        if (tspi->cur_direction & DATA_DIR_TX) {
 532                tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
 533                ret = tegra_spi_start_tx_dma(tspi, len);
 534                if (ret < 0) {
 535                        dev_err(tspi->dev,
 536                                "Starting tx dma failed, err %d\n", ret);
 537                        return ret;
 538                }
 539        }
 540
 541        if (tspi->cur_direction & DATA_DIR_RX) {
  542                /* Make the DMA buffer available to the device */
 543                dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
 544                                tspi->dma_buf_size, DMA_FROM_DEVICE);
 545
 546                ret = tegra_spi_start_rx_dma(tspi, len);
 547                if (ret < 0) {
 548                        dev_err(tspi->dev,
 549                                "Starting rx dma failed, err %d\n", ret);
 550                        if (tspi->cur_direction & DATA_DIR_TX)
 551                                dmaengine_terminate_all(tspi->tx_dma_chan);
 552                        return ret;
 553                }
 554        }
 555        tspi->is_curr_dma_xfer = true;
 556        tspi->dma_control_reg = val;
 557
 558        val |= SPI_DMA_EN;
 559        tegra_spi_writel(tspi, val, SPI_DMA_CTL);
 560        return ret;
 561}
 562
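/*
 * Start an interrupt driven PIO transfer.  Note that, as written, the
 * PIO path also programs SPI_DMA_BLK and kicks the engine with
 * SPI_DMA_EN; the TX/RX interrupts then let handle_cpu_based_xfer()
 * drain and refill the FIFOs for the remainder of the transfer.
 */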
 563static int tegra_spi_start_cpu_based_transfer(
 564                struct tegra_spi_data *tspi, struct spi_transfer *t)
 565{
 566        unsigned long val;
 567        unsigned cur_words;
 568
 569        if (tspi->cur_direction & DATA_DIR_TX)
 570                cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
 571        else
 572                cur_words = tspi->curr_dma_words;
 573
 574        val = SPI_DMA_BLK_SET(cur_words - 1);
 575        tegra_spi_writel(tspi, val, SPI_DMA_BLK);
 576
 577        val = 0;
 578        if (tspi->cur_direction & DATA_DIR_TX)
 579                val |= SPI_IE_TX;
 580
 581        if (tspi->cur_direction & DATA_DIR_RX)
 582                val |= SPI_IE_RX;
 583
 584        tegra_spi_writel(tspi, val, SPI_DMA_CTL);
 585        tspi->dma_control_reg = val;
 586
 587        tspi->is_curr_dma_xfer = false;
 588
 589        val |= SPI_DMA_EN;
 590        tegra_spi_writel(tspi, val, SPI_DMA_CTL);
 591        return 0;
 592}
 593
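/*
 * Request a DMA channel and a coherent bounce buffer for one direction
 * and point the slave config at the controller FIFO registers
 * (tspi->phys + SPI_RX_FIFO or SPI_TX_FIFO).  The channel is requested
 * anonymously and matched to this controller through the slave_id taken
 * from the "nvidia,dma-request-selector" property.
 */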
 594static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
 595                        bool dma_to_memory)
 596{
 597        struct dma_chan *dma_chan;
 598        u32 *dma_buf;
 599        dma_addr_t dma_phys;
 600        int ret;
 601        struct dma_slave_config dma_sconfig;
 602        dma_cap_mask_t mask;
 603
 604        dma_cap_zero(mask);
 605        dma_cap_set(DMA_SLAVE, mask);
 606        dma_chan = dma_request_channel(mask, NULL, NULL);
 607        if (!dma_chan) {
 608                dev_err(tspi->dev,
 609                        "Dma channel is not available, will try later\n");
 610                return -EPROBE_DEFER;
 611        }
 612
 613        dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
 614                                &dma_phys, GFP_KERNEL);
 615        if (!dma_buf) {
  616                dev_err(tspi->dev, "Not able to allocate the dma buffer\n");
 617                dma_release_channel(dma_chan);
 618                return -ENOMEM;
 619        }
 620
 621        dma_sconfig.slave_id = tspi->dma_req_sel;
 622        if (dma_to_memory) {
 623                dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
 624                dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 625                dma_sconfig.src_maxburst = 0;
 626        } else {
 627                dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
 628                dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 629                dma_sconfig.dst_maxburst = 0;
 630        }
 631
 632        ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
 633        if (ret)
 634                goto scrub;
 635        if (dma_to_memory) {
 636                tspi->rx_dma_chan = dma_chan;
 637                tspi->rx_dma_buf = dma_buf;
 638                tspi->rx_dma_phys = dma_phys;
 639        } else {
 640                tspi->tx_dma_chan = dma_chan;
 641                tspi->tx_dma_buf = dma_buf;
 642                tspi->tx_dma_phys = dma_phys;
 643        }
 644        return 0;
 645
 646scrub:
 647        dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
 648        dma_release_channel(dma_chan);
 649        return ret;
 650}
 651
 652static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
 653        bool dma_to_memory)
 654{
 655        u32 *dma_buf;
 656        dma_addr_t dma_phys;
 657        struct dma_chan *dma_chan;
 658
 659        if (dma_to_memory) {
 660                dma_buf = tspi->rx_dma_buf;
 661                dma_chan = tspi->rx_dma_chan;
 662                dma_phys = tspi->rx_dma_phys;
 663                tspi->rx_dma_chan = NULL;
 664                tspi->rx_dma_buf = NULL;
 665        } else {
 666                dma_buf = tspi->tx_dma_buf;
 667                dma_chan = tspi->tx_dma_chan;
 668                dma_phys = tspi->tx_dma_phys;
 669                tspi->tx_dma_buf = NULL;
 670                tspi->tx_dma_chan = NULL;
 671        }
 672        if (!dma_chan)
 673                return;
 674
 675        dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
 676        dma_release_channel(dma_chan);
 677}
 678
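/*
 * Program COMMAND1 for one spi_transfer: clock rate, SPI mode, software
 * controlled chip select (SPI_CS_SW_HW plus SPI_CS_SS_VAL for the
 * active level), word length and TX/RX enables.  Transfers that do not
 * fit in the 64-word FIFO go down the DMA path, everything else is PIO.
 */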
 679static int tegra_spi_start_transfer_one(struct spi_device *spi,
 680                struct spi_transfer *t, bool is_first_of_msg,
 681                bool is_single_xfer)
 682{
 683        struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
 684        u32 speed = t->speed_hz;
 685        u8 bits_per_word = t->bits_per_word;
 686        unsigned total_fifo_words;
 687        int ret;
 688        unsigned long command1;
 689        int req_mode;
 690
 691        if (speed != tspi->cur_speed) {
 692                clk_set_rate(tspi->clk, speed);
 693                tspi->cur_speed = speed;
 694        }
 695
 696        tspi->cur_spi = spi;
 697        tspi->cur_pos = 0;
 698        tspi->cur_rx_pos = 0;
 699        tspi->cur_tx_pos = 0;
 700        tspi->curr_xfer = t;
 701        total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
 702
 703        if (is_first_of_msg) {
 704                tegra_spi_clear_status(tspi);
 705
 706                command1 = tspi->def_command1_reg;
 707                command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
 708
 709                command1 &= ~SPI_CONTROL_MODE_MASK;
 710                req_mode = spi->mode & 0x3;
 711                if (req_mode == SPI_MODE_0)
 712                        command1 |= SPI_CONTROL_MODE_0;
 713                else if (req_mode == SPI_MODE_1)
 714                        command1 |= SPI_CONTROL_MODE_1;
 715                else if (req_mode == SPI_MODE_2)
 716                        command1 |= SPI_CONTROL_MODE_2;
 717                else if (req_mode == SPI_MODE_3)
 718                        command1 |= SPI_CONTROL_MODE_3;
 719
 720                tegra_spi_writel(tspi, command1, SPI_COMMAND1);
 721
 722                command1 |= SPI_CS_SW_HW;
 723                if (spi->mode & SPI_CS_HIGH)
 724                        command1 |= SPI_CS_SS_VAL;
 725                else
 726                        command1 &= ~SPI_CS_SS_VAL;
 727
 728                tegra_spi_writel(tspi, 0, SPI_COMMAND2);
 729        } else {
 730                command1 = tspi->command1_reg;
 731                command1 &= ~SPI_BIT_LENGTH(~0);
 732                command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
 733        }
 734
 735        if (tspi->is_packed)
 736                command1 |= SPI_PACKED;
 737
 738        command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
 739        tspi->cur_direction = 0;
 740        if (t->rx_buf) {
 741                command1 |= SPI_RX_EN;
 742                tspi->cur_direction |= DATA_DIR_RX;
 743        }
 744        if (t->tx_buf) {
 745                command1 |= SPI_TX_EN;
 746                tspi->cur_direction |= DATA_DIR_TX;
 747        }
 748        command1 |= SPI_CS_SEL(spi->chip_select);
 749        tegra_spi_writel(tspi, command1, SPI_COMMAND1);
 750        tspi->command1_reg = command1;
 751
  752        dev_dbg(tspi->dev, "default command1 0x%x, written 0x%lx\n",
 753                                tspi->def_command1_reg, command1);
 754
 755        if (total_fifo_words > SPI_FIFO_DEPTH)
 756                ret = tegra_spi_start_dma_based_transfer(tspi, t);
 757        else
 758                ret = tegra_spi_start_cpu_based_transfer(tspi, t);
 759        return ret;
 760}
 761
 762static int tegra_spi_setup(struct spi_device *spi)
 763{
 764        struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
 765        unsigned long val;
 766        unsigned long flags;
 767        int ret;
 768        unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
 769                        SPI_CS_POL_INACTIVE_0,
 770                        SPI_CS_POL_INACTIVE_1,
 771                        SPI_CS_POL_INACTIVE_2,
 772                        SPI_CS_POL_INACTIVE_3,
 773        };
 774
 775        dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
 776                spi->bits_per_word,
 777                spi->mode & SPI_CPOL ? "" : "~",
 778                spi->mode & SPI_CPHA ? "" : "~",
 779                spi->max_speed_hz);
 780
 781        BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
 782
  783        /* Set speed to the SPI max frequency if the SPI device has not set it */
 784        spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
 785
 786        ret = pm_runtime_get_sync(tspi->dev);
 787        if (ret < 0) {
 788                dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
 789                return ret;
 790        }
 791
 792        spin_lock_irqsave(&tspi->lock, flags);
 793        val = tspi->def_command1_reg;
 794        if (spi->mode & SPI_CS_HIGH)
 795                val &= ~cs_pol_bit[spi->chip_select];
 796        else
 797                val |= cs_pol_bit[spi->chip_select];
 798        tspi->def_command1_reg = val;
 799        tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
 800        spin_unlock_irqrestore(&tspi->lock, flags);
 801
 802        pm_runtime_put(tspi->dev);
 803        return 0;
 804}
 805
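/*
 * spi_master transfer_one_message() hook: take a runtime PM reference,
 * start each transfer of the message and wait for xfer_completion with
 * a one second timeout (SPI_DMA_TIMEOUT).  On exit the default COMMAND1
 * value is restored, releasing the software driven chip select.
 */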
 806static int tegra_spi_transfer_one_message(struct spi_master *master,
 807                        struct spi_message *msg)
 808{
 809        bool is_first_msg = true;
 810        int single_xfer;
 811        struct tegra_spi_data *tspi = spi_master_get_devdata(master);
 812        struct spi_transfer *xfer;
 813        struct spi_device *spi = msg->spi;
 814        int ret;
 815
 816        msg->status = 0;
 817        msg->actual_length = 0;
 818
 819        ret = pm_runtime_get_sync(tspi->dev);
 820        if (ret < 0) {
 821                dev_err(tspi->dev, "runtime PM get failed: %d\n", ret);
 822                msg->status = ret;
 823                spi_finalize_current_message(master);
 824                return ret;
 825        }
 826
 827        single_xfer = list_is_singular(&msg->transfers);
 828        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 829                INIT_COMPLETION(tspi->xfer_completion);
 830                ret = tegra_spi_start_transfer_one(spi, xfer,
 831                                        is_first_msg, single_xfer);
 832                if (ret < 0) {
 833                        dev_err(tspi->dev,
 834                                "spi can not start transfer, err %d\n", ret);
 835                        goto exit;
 836                }
 837                is_first_msg = false;
 838                ret = wait_for_completion_timeout(&tspi->xfer_completion,
 839                                                SPI_DMA_TIMEOUT);
 840                if (WARN_ON(ret == 0)) {
 841                        dev_err(tspi->dev,
  842                                "spi transfer timeout, err %d\n", ret);
 843                        ret = -EIO;
 844                        goto exit;
 845                }
 846
 847                if (tspi->tx_status ||  tspi->rx_status) {
 848                        dev_err(tspi->dev, "Error in Transfer\n");
 849                        ret = -EIO;
 850                        goto exit;
 851                }
 852                msg->actual_length += xfer->len;
 853                if (xfer->cs_change && xfer->delay_usecs) {
 854                        tegra_spi_writel(tspi, tspi->def_command1_reg,
 855                                        SPI_COMMAND1);
 856                        udelay(xfer->delay_usecs);
 857                }
 858        }
 859        ret = 0;
 860exit:
 861        tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
 862        pm_runtime_put(tspi->dev);
 863        msg->status = ret;
 864        spi_finalize_current_message(master);
 865        return ret;
 866}
 867
 868static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
 869{
 870        struct spi_transfer *t = tspi->curr_xfer;
 871        unsigned long flags;
 872
 873        spin_lock_irqsave(&tspi->lock, flags);
 874        if (tspi->tx_status ||  tspi->rx_status) {
 875                dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n",
 876                        tspi->status_reg);
 877                dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
 878                        tspi->command1_reg, tspi->dma_control_reg);
 879                tegra_periph_reset_assert(tspi->clk);
 880                udelay(2);
 881                tegra_periph_reset_deassert(tspi->clk);
 882                complete(&tspi->xfer_completion);
 883                goto exit;
 884        }
 885
 886        if (tspi->cur_direction & DATA_DIR_RX)
 887                tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t);
 888
 889        if (tspi->cur_direction & DATA_DIR_TX)
 890                tspi->cur_pos = tspi->cur_tx_pos;
 891        else
 892                tspi->cur_pos = tspi->cur_rx_pos;
 893
 894        if (tspi->cur_pos == t->len) {
 895                complete(&tspi->xfer_completion);
 896                goto exit;
 897        }
 898
 899        tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
 900        tegra_spi_start_cpu_based_transfer(tspi, t);
 901exit:
 902        spin_unlock_irqrestore(&tspi->lock, flags);
 903        return IRQ_HANDLED;
 904}
 905
 906static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
 907{
 908        struct spi_transfer *t = tspi->curr_xfer;
 909        long wait_status;
 910        int err = 0;
 911        unsigned total_fifo_words;
 912        unsigned long flags;
 913
 914        /* Abort dmas if any error */
 915        if (tspi->cur_direction & DATA_DIR_TX) {
 916                if (tspi->tx_status) {
 917                        dmaengine_terminate_all(tspi->tx_dma_chan);
 918                        err += 1;
 919                } else {
 920                        wait_status = wait_for_completion_interruptible_timeout(
 921                                &tspi->tx_dma_complete, SPI_DMA_TIMEOUT);
 922                        if (wait_status <= 0) {
 923                                dmaengine_terminate_all(tspi->tx_dma_chan);
 924                                dev_err(tspi->dev, "TxDma Xfer failed\n");
 925                                err += 1;
 926                        }
 927                }
 928        }
 929
 930        if (tspi->cur_direction & DATA_DIR_RX) {
 931                if (tspi->rx_status) {
 932                        dmaengine_terminate_all(tspi->rx_dma_chan);
 933                        err += 2;
 934                } else {
 935                        wait_status = wait_for_completion_interruptible_timeout(
 936                                &tspi->rx_dma_complete, SPI_DMA_TIMEOUT);
 937                        if (wait_status <= 0) {
 938                                dmaengine_terminate_all(tspi->rx_dma_chan);
 939                                dev_err(tspi->dev, "RxDma Xfer failed\n");
 940                                err += 2;
 941                        }
 942                }
 943        }
 944
 945        spin_lock_irqsave(&tspi->lock, flags);
 946        if (err) {
 947                dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n",
 948                        tspi->status_reg);
 949                dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
 950                        tspi->command1_reg, tspi->dma_control_reg);
 951                tegra_periph_reset_assert(tspi->clk);
 952                udelay(2);
 953                tegra_periph_reset_deassert(tspi->clk);
 954                complete(&tspi->xfer_completion);
 955                spin_unlock_irqrestore(&tspi->lock, flags);
 956                return IRQ_HANDLED;
 957        }
 958
 959        if (tspi->cur_direction & DATA_DIR_RX)
 960                tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
 961
 962        if (tspi->cur_direction & DATA_DIR_TX)
 963                tspi->cur_pos = tspi->cur_tx_pos;
 964        else
 965                tspi->cur_pos = tspi->cur_rx_pos;
 966
 967        if (tspi->cur_pos == t->len) {
 968                complete(&tspi->xfer_completion);
 969                goto exit;
 970        }
 971
 972        /* Continue transfer in current message */
 973        total_fifo_words = tegra_spi_calculate_curr_xfer_param(tspi->cur_spi,
 974                                                        tspi, t);
 975        if (total_fifo_words > SPI_FIFO_DEPTH)
 976                err = tegra_spi_start_dma_based_transfer(tspi, t);
 977        else
 978                err = tegra_spi_start_cpu_based_transfer(tspi, t);
 979
 980exit:
 981        spin_unlock_irqrestore(&tspi->lock, flags);
 982        return IRQ_HANDLED;
 983}
 984
 985static irqreturn_t tegra_spi_isr_thread(int irq, void *context_data)
 986{
 987        struct tegra_spi_data *tspi = context_data;
 988
 989        if (!tspi->is_curr_dma_xfer)
 990                return handle_cpu_based_xfer(tspi);
 991        return handle_dma_based_xfer(tspi);
 992}
 993
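/*
 * Hard IRQ handler: snapshot SPI_FIFO_STATUS, latch any TX/RX underrun
 * or overflow errors for the current direction, clear the status bits
 * and defer the actual FIFO/DMA handling to the threaded handler above.
 */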
 994static irqreturn_t tegra_spi_isr(int irq, void *context_data)
 995{
 996        struct tegra_spi_data *tspi = context_data;
 997
 998        tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
 999        if (tspi->cur_direction & DATA_DIR_TX)
1000                tspi->tx_status = tspi->status_reg &
1001                                        (SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);
1002
1003        if (tspi->cur_direction & DATA_DIR_RX)
1004                tspi->rx_status = tspi->status_reg &
1005                                        (SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
1006        tegra_spi_clear_status(tspi);
1007
1008        return IRQ_WAKE_THREAD;
1009}
1010
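/*
 * Optional properties read from the device tree.  A minimal node might
 * look like the sketch below; the unit address and the APB DMA request
 * number are board/SoC specific assumptions, only the property names
 * and the compatible string come from this driver:
 *
 *      spi@7000d400 {
 *              compatible = "nvidia,tegra114-spi";
 *              nvidia,dma-request-selector = <&apbdma 15>;
 *              spi-max-frequency = <25000000>;
 *      };
 */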
1011static void tegra_spi_parse_dt(struct platform_device *pdev,
1012        struct tegra_spi_data *tspi)
1013{
1014        struct device_node *np = pdev->dev.of_node;
1015        u32 of_dma[2];
1016
1017        if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
1018                                of_dma, 2) >= 0)
1019                tspi->dma_req_sel = of_dma[1];
1020
1021        if (of_property_read_u32(np, "spi-max-frequency",
1022                                &tspi->spi_max_frequency))
 1023                tspi->spi_max_frequency = SPI_DEFAULT_SPEED; /* 25 MHz */
1024}
1025
1026static struct of_device_id tegra_spi_of_match[] = {
1027        { .compatible = "nvidia,tegra114-spi", },
1028        {}
1029};
1030MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
1031
1032static int tegra_spi_probe(struct platform_device *pdev)
1033{
1034        struct spi_master       *master;
1035        struct tegra_spi_data   *tspi;
1036        struct resource         *r;
1037        int ret, spi_irq;
1038
1039        master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
1040        if (!master) {
1041                dev_err(&pdev->dev, "master allocation failed\n");
1042                return -ENOMEM;
1043        }
1044        dev_set_drvdata(&pdev->dev, master);
1045        tspi = spi_master_get_devdata(master);
1046
1047        /* Parse DT */
1048        tegra_spi_parse_dt(pdev, tspi);
1049
1050        /* the spi->mode bits understood by this driver: */
1051        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1052        master->setup = tegra_spi_setup;
1053        master->transfer_one_message = tegra_spi_transfer_one_message;
1054        master->num_chipselect = MAX_CHIP_SELECT;
1055        master->bus_num = -1;
1056
1057        tspi->master = master;
1058        tspi->dev = &pdev->dev;
1059        spin_lock_init(&tspi->lock);
1060
1061        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1062        if (!r) {
1063                dev_err(&pdev->dev, "No IO memory resource\n");
1064                ret = -ENODEV;
1065                goto exit_free_master;
1066        }
1067        tspi->phys = r->start;
1068        tspi->base = devm_ioremap_resource(&pdev->dev, r);
1069        if (IS_ERR(tspi->base)) {
1070                ret = PTR_ERR(tspi->base);
1071                dev_err(&pdev->dev, "ioremap failed: err = %d\n", ret);
1072                goto exit_free_master;
1073        }
1074
1075        spi_irq = platform_get_irq(pdev, 0);
1076        tspi->irq = spi_irq;
1077        ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
1078                        tegra_spi_isr_thread, IRQF_ONESHOT,
1079                        dev_name(&pdev->dev), tspi);
1080        if (ret < 0) {
1081                dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
1082                                        tspi->irq);
1083                goto exit_free_master;
1084        }
1085
1086        tspi->clk = devm_clk_get(&pdev->dev, "spi");
1087        if (IS_ERR(tspi->clk)) {
1088                dev_err(&pdev->dev, "can not get clock\n");
1089                ret = PTR_ERR(tspi->clk);
1090                goto exit_free_irq;
1091        }
1092
1093        tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
1094        tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
1095
1096        if (tspi->dma_req_sel) {
1097                ret = tegra_spi_init_dma_param(tspi, true);
1098                if (ret < 0) {
1099                        dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret);
1100                        goto exit_free_irq;
1101                }
1102
1103                ret = tegra_spi_init_dma_param(tspi, false);
1104                if (ret < 0) {
1105                        dev_err(&pdev->dev, "TxDma Init failed, err %d\n", ret);
1106                        goto exit_rx_dma_free;
1107                }
1108                tspi->max_buf_size = tspi->dma_buf_size;
1109                init_completion(&tspi->tx_dma_complete);
1110                init_completion(&tspi->rx_dma_complete);
1111        }
1112
1113        init_completion(&tspi->xfer_completion);
1114
1115        pm_runtime_enable(&pdev->dev);
1116        if (!pm_runtime_enabled(&pdev->dev)) {
1117                ret = tegra_spi_runtime_resume(&pdev->dev);
1118                if (ret)
1119                        goto exit_pm_disable;
1120        }
1121
1122        ret = pm_runtime_get_sync(&pdev->dev);
1123        if (ret < 0) {
1124                dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
1125                goto exit_pm_disable;
1126        }
1127        tspi->def_command1_reg  = SPI_M_S;
1128        tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
1129        pm_runtime_put(&pdev->dev);
1130
1131        master->dev.of_node = pdev->dev.of_node;
1132        ret = spi_register_master(master);
1133        if (ret < 0) {
 1134                dev_err(&pdev->dev, "cannot register master, err %d\n", ret);
1135                goto exit_pm_disable;
1136        }
1137        return ret;
1138
1139exit_pm_disable:
1140        pm_runtime_disable(&pdev->dev);
1141        if (!pm_runtime_status_suspended(&pdev->dev))
1142                tegra_spi_runtime_suspend(&pdev->dev);
1143        tegra_spi_deinit_dma_param(tspi, false);
1144exit_rx_dma_free:
1145        tegra_spi_deinit_dma_param(tspi, true);
1146exit_free_irq:
1147        free_irq(spi_irq, tspi);
1148exit_free_master:
1149        spi_master_put(master);
1150        return ret;
1151}
1152
1153static int tegra_spi_remove(struct platform_device *pdev)
1154{
1155        struct spi_master *master = dev_get_drvdata(&pdev->dev);
1156        struct tegra_spi_data   *tspi = spi_master_get_devdata(master);
1157
1158        free_irq(tspi->irq, tspi);
1159        spi_unregister_master(master);
1160
1161        if (tspi->tx_dma_chan)
1162                tegra_spi_deinit_dma_param(tspi, false);
1163
1164        if (tspi->rx_dma_chan)
1165                tegra_spi_deinit_dma_param(tspi, true);
1166
1167        pm_runtime_disable(&pdev->dev);
1168        if (!pm_runtime_status_suspended(&pdev->dev))
1169                tegra_spi_runtime_suspend(&pdev->dev);
1170
1171        return 0;
1172}
1173
1174#ifdef CONFIG_PM_SLEEP
1175static int tegra_spi_suspend(struct device *dev)
1176{
1177        struct spi_master *master = dev_get_drvdata(dev);
1178
1179        return spi_master_suspend(master);
1180}
1181
1182static int tegra_spi_resume(struct device *dev)
1183{
1184        struct spi_master *master = dev_get_drvdata(dev);
1185        struct tegra_spi_data *tspi = spi_master_get_devdata(master);
1186        int ret;
1187
1188        ret = pm_runtime_get_sync(dev);
1189        if (ret < 0) {
1190                dev_err(dev, "pm runtime failed, e = %d\n", ret);
1191                return ret;
1192        }
1193        tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
1194        pm_runtime_put(dev);
1195
1196        return spi_master_resume(master);
1197}
1198#endif
1199
1200static int tegra_spi_runtime_suspend(struct device *dev)
1201{
1202        struct spi_master *master = dev_get_drvdata(dev);
1203        struct tegra_spi_data *tspi = spi_master_get_devdata(master);
1204
 1205        /* Flush all writes which are in the PPSB queue by reading back */
1206        tegra_spi_readl(tspi, SPI_COMMAND1);
1207
1208        clk_disable_unprepare(tspi->clk);
1209        return 0;
1210}
1211
1212static int tegra_spi_runtime_resume(struct device *dev)
1213{
1214        struct spi_master *master = dev_get_drvdata(dev);
1215        struct tegra_spi_data *tspi = spi_master_get_devdata(master);
1216        int ret;
1217
1218        ret = clk_prepare_enable(tspi->clk);
1219        if (ret < 0) {
1220                dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
1221                return ret;
1222        }
1223        return 0;
1224}
1225
1226static const struct dev_pm_ops tegra_spi_pm_ops = {
1227        SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend,
1228                tegra_spi_runtime_resume, NULL)
1229        SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume)
1230};
1231static struct platform_driver tegra_spi_driver = {
1232        .driver = {
1233                .name           = "spi-tegra114",
1234                .owner          = THIS_MODULE,
1235                .pm             = &tegra_spi_pm_ops,
1236                .of_match_table = tegra_spi_of_match,
1237        },
1238        .probe =        tegra_spi_probe,
1239        .remove =       tegra_spi_remove,
1240};
1241module_platform_driver(tegra_spi_driver);
1242
1243MODULE_ALIAS("platform:spi-tegra114");
1244MODULE_DESCRIPTION("NVIDIA Tegra114 SPI Controller Driver");
1245MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1246MODULE_LICENSE("GPL v2");
1247