linux/drivers/spi/spi-rspi.c
   1/*
   2 * SH RSPI driver
   3 *
   4 * Copyright (C) 2012, 2013  Renesas Solutions Corp.
   5 * Copyright (C) 2014 Glider bvba
   6 *
   7 * Based on spi-sh.c:
   8 * Copyright (C) 2011 Renesas Solutions Corp.
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License as published by
  12 * the Free Software Foundation; version 2 of the License.
  13 *
  14 * This program is distributed in the hope that it will be useful,
  15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17 * GNU General Public License for more details.
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/kernel.h>
  22#include <linux/sched.h>
  23#include <linux/errno.h>
  24#include <linux/interrupt.h>
  25#include <linux/platform_device.h>
  26#include <linux/io.h>
  27#include <linux/clk.h>
  28#include <linux/dmaengine.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/of_device.h>
  31#include <linux/pm_runtime.h>
  32#include <linux/sh_dma.h>
  33#include <linux/spi/spi.h>
  34#include <linux/spi/rspi.h>
  35
  36#define RSPI_SPCR               0x00    /* Control Register */
  37#define RSPI_SSLP               0x01    /* Slave Select Polarity Register */
  38#define RSPI_SPPCR              0x02    /* Pin Control Register */
  39#define RSPI_SPSR               0x03    /* Status Register */
  40#define RSPI_SPDR               0x04    /* Data Register */
  41#define RSPI_SPSCR              0x08    /* Sequence Control Register */
  42#define RSPI_SPSSR              0x09    /* Sequence Status Register */
  43#define RSPI_SPBR               0x0a    /* Bit Rate Register */
  44#define RSPI_SPDCR              0x0b    /* Data Control Register */
  45#define RSPI_SPCKD              0x0c    /* Clock Delay Register */
  46#define RSPI_SSLND              0x0d    /* Slave Select Negation Delay Register */
  47#define RSPI_SPND               0x0e    /* Next-Access Delay Register */
  48#define RSPI_SPCR2              0x0f    /* Control Register 2 (SH only) */
  49#define RSPI_SPCMD0             0x10    /* Command Register 0 */
  50#define RSPI_SPCMD1             0x12    /* Command Register 1 */
  51#define RSPI_SPCMD2             0x14    /* Command Register 2 */
  52#define RSPI_SPCMD3             0x16    /* Command Register 3 */
  53#define RSPI_SPCMD4             0x18    /* Command Register 4 */
  54#define RSPI_SPCMD5             0x1a    /* Command Register 5 */
  55#define RSPI_SPCMD6             0x1c    /* Command Register 6 */
  56#define RSPI_SPCMD7             0x1e    /* Command Register 7 */
  57#define RSPI_SPCMD(i)           (RSPI_SPCMD0 + (i) * 2)
  58#define RSPI_NUM_SPCMD          8
  59#define RSPI_RZ_NUM_SPCMD       4
  60#define QSPI_NUM_SPCMD          4
  61
  62/* RSPI on RZ only */
  63#define RSPI_SPBFCR             0x20    /* Buffer Control Register */
  64#define RSPI_SPBFDR             0x22    /* Buffer Data Count Setting Register */
  65
  66/* QSPI only */
  67#define QSPI_SPBFCR             0x18    /* Buffer Control Register */
  68#define QSPI_SPBDCR             0x1a    /* Buffer Data Count Register */
  69#define QSPI_SPBMUL0            0x1c    /* Transfer Data Length Multiplier Setting Register 0 */
  70#define QSPI_SPBMUL1            0x20    /* Transfer Data Length Multiplier Setting Register 1 */
  71#define QSPI_SPBMUL2            0x24    /* Transfer Data Length Multiplier Setting Register 2 */
  72#define QSPI_SPBMUL3            0x28    /* Transfer Data Length Multiplier Setting Register 3 */
  73#define QSPI_SPBMUL(i)          (QSPI_SPBMUL0 + (i) * 4)
  74
  75/* SPCR - Control Register */
  76#define SPCR_SPRIE              0x80    /* Receive Interrupt Enable */
  77#define SPCR_SPE                0x40    /* Function Enable */
  78#define SPCR_SPTIE              0x20    /* Transmit Interrupt Enable */
  79#define SPCR_SPEIE              0x10    /* Error Interrupt Enable */
  80#define SPCR_MSTR               0x08    /* Master/Slave Mode Select */
  81#define SPCR_MODFEN             0x04    /* Mode Fault Error Detection Enable */
  82/* RSPI on SH only */
  83#define SPCR_TXMD               0x02    /* TX Only Mode (vs. Full Duplex) */
  84#define SPCR_SPMS               0x01    /* 3-wire Mode (vs. 4-wire) */
  85/* QSPI on R-Car Gen2 only */
  86#define SPCR_WSWAP              0x02    /* Word Swap of read-data for DMAC */
  87#define SPCR_BSWAP              0x01    /* Byte Swap of read-data for DMAC */
  88
  89/* SSLP - Slave Select Polarity Register */
  90#define SSLP_SSL1P              0x02    /* SSL1 Signal Polarity Setting */
  91#define SSLP_SSL0P              0x01    /* SSL0 Signal Polarity Setting */
  92
  93/* SPPCR - Pin Control Register */
  94#define SPPCR_MOIFE             0x20    /* MOSI Idle Value Fixing Enable */
  95#define SPPCR_MOIFV             0x10    /* MOSI Idle Fixed Value */
  96#define SPPCR_SPOM              0x04
  97#define SPPCR_SPLP2             0x02    /* Loopback Mode 2 (non-inverting) */
  98#define SPPCR_SPLP              0x01    /* Loopback Mode (inverting) */
  99
 100#define SPPCR_IO3FV             0x04    /* Single-/Dual-SPI Mode IO3 Output Fixed Value */
  101#define SPPCR_IO2FV             0x02    /* Single-/Dual-SPI Mode IO2 Output Fixed Value */
 102
 103/* SPSR - Status Register */
 104#define SPSR_SPRF               0x80    /* Receive Buffer Full Flag */
 105#define SPSR_TEND               0x40    /* Transmit End */
 106#define SPSR_SPTEF              0x20    /* Transmit Buffer Empty Flag */
 107#define SPSR_PERF               0x08    /* Parity Error Flag */
 108#define SPSR_MODF               0x04    /* Mode Fault Error Flag */
 109#define SPSR_IDLNF              0x02    /* RSPI Idle Flag */
 110#define SPSR_OVRF               0x01    /* Overrun Error Flag (RSPI only) */
 111
 112/* SPSCR - Sequence Control Register */
 113#define SPSCR_SPSLN_MASK        0x07    /* Sequence Length Specification */
 114
 115/* SPSSR - Sequence Status Register */
 116#define SPSSR_SPECM_MASK        0x70    /* Command Error Mask */
 117#define SPSSR_SPCP_MASK         0x07    /* Command Pointer Mask */
 118
 119/* SPDCR - Data Control Register */
 120#define SPDCR_TXDMY             0x80    /* Dummy Data Transmission Enable */
 121#define SPDCR_SPLW1             0x40    /* Access Width Specification (RZ) */
 122#define SPDCR_SPLW0             0x20    /* Access Width Specification (RZ) */
 123#define SPDCR_SPLLWORD          (SPDCR_SPLW1 | SPDCR_SPLW0)
 124#define SPDCR_SPLWORD           SPDCR_SPLW1
 125#define SPDCR_SPLBYTE           SPDCR_SPLW0
 126#define SPDCR_SPLW              0x20    /* Access Width Specification (SH) */
 127#define SPDCR_SPRDTD            0x10    /* Receive Transmit Data Select (SH) */
 128#define SPDCR_SLSEL1            0x08
 129#define SPDCR_SLSEL0            0x04
 130#define SPDCR_SLSEL_MASK        0x0c    /* SSL1 Output Select (SH) */
 131#define SPDCR_SPFC1             0x02
 132#define SPDCR_SPFC0             0x01
 133#define SPDCR_SPFC_MASK         0x03    /* Frame Count Setting (1-4) (SH) */
 134
 135/* SPCKD - Clock Delay Register */
 136#define SPCKD_SCKDL_MASK        0x07    /* Clock Delay Setting (1-8) */
 137
 138/* SSLND - Slave Select Negation Delay Register */
 139#define SSLND_SLNDL_MASK        0x07    /* SSL Negation Delay Setting (1-8) */
 140
 141/* SPND - Next-Access Delay Register */
 142#define SPND_SPNDL_MASK         0x07    /* Next-Access Delay Setting (1-8) */
 143
 144/* SPCR2 - Control Register 2 */
 145#define SPCR2_PTE               0x08    /* Parity Self-Test Enable */
 146#define SPCR2_SPIE              0x04    /* Idle Interrupt Enable */
 147#define SPCR2_SPOE              0x02    /* Odd Parity Enable (vs. Even) */
 148#define SPCR2_SPPE              0x01    /* Parity Enable */
 149
 150/* SPCMDn - Command Registers */
 151#define SPCMD_SCKDEN            0x8000  /* Clock Delay Setting Enable */
 152#define SPCMD_SLNDEN            0x4000  /* SSL Negation Delay Setting Enable */
 153#define SPCMD_SPNDEN            0x2000  /* Next-Access Delay Enable */
 154#define SPCMD_LSBF              0x1000  /* LSB First */
 155#define SPCMD_SPB_MASK          0x0f00  /* Data Length Setting */
 156#define SPCMD_SPB_8_TO_16(bit)  (((bit - 1) << 8) & SPCMD_SPB_MASK)
 157#define SPCMD_SPB_8BIT          0x0000  /* QSPI only */
 158#define SPCMD_SPB_16BIT         0x0100
 159#define SPCMD_SPB_20BIT         0x0000
 160#define SPCMD_SPB_24BIT         0x0100
 161#define SPCMD_SPB_32BIT         0x0200
 162#define SPCMD_SSLKP             0x0080  /* SSL Signal Level Keeping */
 163#define SPCMD_SPIMOD_MASK       0x0060  /* SPI Operating Mode (QSPI only) */
 164#define SPCMD_SPIMOD1           0x0040
 165#define SPCMD_SPIMOD0           0x0020
 166#define SPCMD_SPIMOD_SINGLE     0
 167#define SPCMD_SPIMOD_DUAL       SPCMD_SPIMOD0
 168#define SPCMD_SPIMOD_QUAD       SPCMD_SPIMOD1
 169#define SPCMD_SPRW              0x0010  /* SPI Read/Write Access (Dual/Quad) */
 170#define SPCMD_SSLA_MASK         0x0030  /* SSL Assert Signal Setting (RSPI) */
 171#define SPCMD_BRDV_MASK         0x000c  /* Bit Rate Division Setting */
 172#define SPCMD_CPOL              0x0002  /* Clock Polarity Setting */
 173#define SPCMD_CPHA              0x0001  /* Clock Phase Setting */
 174
 175/* SPBFCR - Buffer Control Register */
 176#define SPBFCR_TXRST            0x80    /* Transmit Buffer Data Reset */
 177#define SPBFCR_RXRST            0x40    /* Receive Buffer Data Reset */
 178#define SPBFCR_TXTRG_MASK       0x30    /* Transmit Buffer Data Triggering Number */
 179#define SPBFCR_RXTRG_MASK       0x07    /* Receive Buffer Data Triggering Number */
 180/* QSPI on R-Car Gen2 */
 181#define SPBFCR_TXTRG_1B         0x00    /* 31 bytes (1 byte available) */
 182#define SPBFCR_TXTRG_32B        0x30    /* 0 byte (32 bytes available) */
 183#define SPBFCR_RXTRG_1B         0x00    /* 1 byte (31 bytes available) */
 184#define SPBFCR_RXTRG_32B        0x07    /* 32 bytes (0 byte available) */
 185
 186#define QSPI_BUFFER_SIZE        32u
 187
 188struct rspi_data {
 189        void __iomem *addr;
 190        u32 max_speed_hz;
 191        struct spi_master *master;
 192        wait_queue_head_t wait;
 193        struct clk *clk;
 194        u16 spcmd;
 195        u8 spsr;
 196        u8 sppcr;
 197        int rx_irq, tx_irq;
 198        const struct spi_ops *ops;
 199
 200        unsigned dma_callbacked:1;
 201        unsigned byte_access:1;
 202};
 203
 204static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
 205{
 206        iowrite8(data, rspi->addr + offset);
 207}
 208
 209static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
 210{
 211        iowrite16(data, rspi->addr + offset);
 212}
 213
 214static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
 215{
 216        iowrite32(data, rspi->addr + offset);
 217}
 218
 219static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
 220{
 221        return ioread8(rspi->addr + offset);
 222}
 223
 224static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
 225{
 226        return ioread16(rspi->addr + offset);
 227}
 228
 229static void rspi_write_data(const struct rspi_data *rspi, u16 data)
 230{
 231        if (rspi->byte_access)
 232                rspi_write8(rspi, data, RSPI_SPDR);
 233        else /* 16 bit */
 234                rspi_write16(rspi, data, RSPI_SPDR);
 235}
 236
 237static u16 rspi_read_data(const struct rspi_data *rspi)
 238{
 239        if (rspi->byte_access)
 240                return rspi_read8(rspi, RSPI_SPDR);
 241        else /* 16 bit */
 242                return rspi_read16(rspi, RSPI_SPDR);
 243}
 244
 245/* optional functions */
 246struct spi_ops {
 247        int (*set_config_register)(struct rspi_data *rspi, int access_size);
 248        int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
 249                            struct spi_transfer *xfer);
 250        u16 mode_bits;
 251        u16 flags;
 252        u16 fifo_size;
 253};
 254
 255/*
 256 * functions for RSPI on legacy SH
 257 */
 258static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
 259{
 260        int spbr;
 261
 262        /* Sets output mode, MOSI signal, and (optionally) loopback */
 263        rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
 264
 265        /* Sets transfer bit rate */
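        /*
         * With BRDV left at 0 in SPCMD, the RSPI bit rate is
         * PCLK / (2 * (SPBR + 1)), so the rounded-up division minus one
         * selects the fastest rate that does not exceed max_speed_hz.
         */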
 266        spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
 267                            2 * rspi->max_speed_hz) - 1;
 268        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
 269
 270        /* Disable dummy transmission, set 16-bit word access, 1 frame */
 271        rspi_write8(rspi, 0, RSPI_SPDCR);
 272        rspi->byte_access = 0;
 273
 274        /* Sets RSPCK, SSL, next-access delay value */
 275        rspi_write8(rspi, 0x00, RSPI_SPCKD);
 276        rspi_write8(rspi, 0x00, RSPI_SSLND);
 277        rspi_write8(rspi, 0x00, RSPI_SPND);
 278
 279        /* Sets parity, interrupt mask */
 280        rspi_write8(rspi, 0x00, RSPI_SPCR2);
 281
 282        /* Sets SPCMD */
 283        rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
 284        rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 285
 286        /* Sets RSPI mode */
 287        rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
 288
 289        return 0;
 290}
 291
 292/*
 293 * functions for RSPI on RZ
 294 */
 295static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
 296{
 297        int spbr;
 298
 299        /* Sets output mode, MOSI signal, and (optionally) loopback */
 300        rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
 301
 302        /* Sets transfer bit rate */
 303        spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
 304                            2 * rspi->max_speed_hz) - 1;
 305        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
 306
 307        /* Disable dummy transmission, set byte access */
 308        rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
 309        rspi->byte_access = 1;
 310
 311        /* Sets RSPCK, SSL, next-access delay value */
 312        rspi_write8(rspi, 0x00, RSPI_SPCKD);
 313        rspi_write8(rspi, 0x00, RSPI_SSLND);
 314        rspi_write8(rspi, 0x00, RSPI_SPND);
 315
 316        /* Sets SPCMD */
 317        rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
 318        rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 319
 320        /* Sets RSPI mode */
 321        rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
 322
 323        return 0;
 324}
 325
 326/*
 327 * functions for QSPI
 328 */
 329static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
 330{
 331        int spbr;
 332
 333        /* Sets output mode, MOSI signal, and (optionally) loopback */
 334        rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
 335
 336        /* Sets transfer bit rate */
 337        spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
 338        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
 339
 340        /* Disable dummy transmission, set byte access */
 341        rspi_write8(rspi, 0, RSPI_SPDCR);
 342        rspi->byte_access = 1;
 343
 344        /* Sets RSPCK, SSL, next-access delay value */
 345        rspi_write8(rspi, 0x00, RSPI_SPCKD);
 346        rspi_write8(rspi, 0x00, RSPI_SSLND);
 347        rspi_write8(rspi, 0x00, RSPI_SPND);
 348
 349        /* Data Length Setting */
 350        if (access_size == 8)
 351                rspi->spcmd |= SPCMD_SPB_8BIT;
 352        else if (access_size == 16)
 353                rspi->spcmd |= SPCMD_SPB_16BIT;
 354        else
 355                rspi->spcmd |= SPCMD_SPB_32BIT;
 356
 357        rspi->spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SPNDEN;
 358
 359        /* Resets transfer data length */
 360        rspi_write32(rspi, 0, QSPI_SPBMUL0);
 361
 362        /* Resets transmit and receive buffer */
 363        rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
 364        /* Sets buffer to allow normal operation */
 365        rspi_write8(rspi, 0x00, QSPI_SPBFCR);
 366
 367        /* Sets SPCMD */
 368        rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 369
 370        /* Enables SPI function in master mode */
 371        rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
 372
 373        return 0;
 374}
 375
 376static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg)
 377{
 378        u8 data;
 379
 380        data = rspi_read8(rspi, reg);
 381        data &= ~mask;
 382        data |= (val & mask);
 383        rspi_write8(rspi, data, reg);
 384}
 385
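/*
 * Set the transmit FIFO trigger level for the next chunk: use the 32-byte
 * trigger only while a full buffer's worth of data is still pending,
 * otherwise fall back to single-byte triggering, and return the number of
 * bytes to load into the FIFO this round.
 */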
 386static unsigned int qspi_set_send_trigger(struct rspi_data *rspi,
 387                                          unsigned int len)
 388{
 389        unsigned int n;
 390
 391        n = min(len, QSPI_BUFFER_SIZE);
 392
 393        if (len >= QSPI_BUFFER_SIZE) {
 394                /* sets triggering number to 32 bytes */
 395                qspi_update(rspi, SPBFCR_TXTRG_MASK,
 396                             SPBFCR_TXTRG_32B, QSPI_SPBFCR);
 397        } else {
 398                /* sets triggering number to 1 byte */
 399                qspi_update(rspi, SPBFCR_TXTRG_MASK,
 400                             SPBFCR_TXTRG_1B, QSPI_SPBFCR);
 401        }
 402
 403        return n;
 404}
 405
 406static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
 407{
 408        unsigned int n;
 409
 410        n = min(len, QSPI_BUFFER_SIZE);
 411
 412        if (len >= QSPI_BUFFER_SIZE) {
 413                /* sets triggering number to 32 bytes */
 414                qspi_update(rspi, SPBFCR_RXTRG_MASK,
 415                             SPBFCR_RXTRG_32B, QSPI_SPBFCR);
 416        } else {
 417                /* sets triggering number to 1 byte */
 418                qspi_update(rspi, SPBFCR_RXTRG_MASK,
 419                             SPBFCR_RXTRG_1B, QSPI_SPBFCR);
 420        }
 421}
 422
 423#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
 424
 425static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
 426{
 427        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
 428}
 429
 430static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
 431{
 432        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
 433}
 434
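/*
 * Wait for the requested SPSR status flag: check it once, and only if it is
 * not yet set enable the matching interrupt source and sleep until the IRQ
 * handler latches the flag or the one-second timeout expires.
 */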
 435static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
 436                                   u8 enable_bit)
 437{
 438        int ret;
 439
 440        rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
 441        if (rspi->spsr & wait_mask)
 442                return 0;
 443
 444        rspi_enable_irq(rspi, enable_bit);
 445        ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
 446        if (ret == 0 && !(rspi->spsr & wait_mask))
 447                return -ETIMEDOUT;
 448
 449        return 0;
 450}
 451
 452static inline int rspi_wait_for_tx_empty(struct rspi_data *rspi)
 453{
 454        return rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
 455}
 456
 457static inline int rspi_wait_for_rx_full(struct rspi_data *rspi)
 458{
 459        return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE);
 460}
 461
 462static int rspi_data_out(struct rspi_data *rspi, u8 data)
 463{
 464        int error = rspi_wait_for_tx_empty(rspi);
 465        if (error < 0) {
 466                dev_err(&rspi->master->dev, "transmit timeout\n");
 467                return error;
 468        }
 469        rspi_write_data(rspi, data);
 470        return 0;
 471}
 472
 473static int rspi_data_in(struct rspi_data *rspi)
 474{
 475        int error;
 476        u8 data;
 477
 478        error = rspi_wait_for_rx_full(rspi);
 479        if (error < 0) {
 480                dev_err(&rspi->master->dev, "receive timeout\n");
 481                return error;
 482        }
 483        data = rspi_read_data(rspi);
 484        return data;
 485}
 486
 487static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx,
 488                             unsigned int n)
 489{
 490        while (n-- > 0) {
 491                if (tx) {
 492                        int ret = rspi_data_out(rspi, *tx++);
 493                        if (ret < 0)
 494                                return ret;
 495                }
 496                if (rx) {
 497                        int ret = rspi_data_in(rspi);
 498                        if (ret < 0)
 499                                return ret;
 500                        *rx++ = ret;
 501                }
 502        }
 503
 504        return 0;
 505}
 506
 507static void rspi_dma_complete(void *arg)
 508{
 509        struct rspi_data *rspi = arg;
 510
 511        rspi->dma_callbacked = 1;
 512        wake_up_interruptible(&rspi->wait);
 513}
 514
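/*
 * Run one transfer through the DMA engine: prepare and submit the RX and/or
 * TX descriptors first (so a failure can still fall back to PIO), mask the
 * CPU-side IRQ lines, enable the SPI interrupt sources that pace the DMAC,
 * start the channels, and wait for the completion callback or a timeout.
 */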
 515static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 516                             struct sg_table *rx)
 517{
 518        struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
 519        u8 irq_mask = 0;
 520        unsigned int other_irq = 0;
 521        dma_cookie_t cookie;
 522        int ret;
 523
 524        /* First prepare and submit the DMA request(s), as this may fail */
 525        if (rx) {
 526                desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
 527                                        rx->sgl, rx->nents, DMA_FROM_DEVICE,
 528                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 529                if (!desc_rx) {
 530                        ret = -EAGAIN;
 531                        goto no_dma_rx;
 532                }
 533
 534                desc_rx->callback = rspi_dma_complete;
 535                desc_rx->callback_param = rspi;
 536                cookie = dmaengine_submit(desc_rx);
 537                if (dma_submit_error(cookie)) {
 538                        ret = cookie;
 539                        goto no_dma_rx;
 540                }
 541
 542                irq_mask |= SPCR_SPRIE;
 543        }
 544
 545        if (tx) {
 546                desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
 547                                        tx->sgl, tx->nents, DMA_TO_DEVICE,
 548                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 549                if (!desc_tx) {
 550                        ret = -EAGAIN;
 551                        goto no_dma_tx;
 552                }
 553
 554                if (rx) {
 555                        /* No callback */
 556                        desc_tx->callback = NULL;
 557                } else {
 558                        desc_tx->callback = rspi_dma_complete;
 559                        desc_tx->callback_param = rspi;
 560                }
 561                cookie = dmaengine_submit(desc_tx);
 562                if (dma_submit_error(cookie)) {
 563                        ret = cookie;
 564                        goto no_dma_tx;
 565                }
 566
 567                irq_mask |= SPCR_SPTIE;
 568        }
 569
 570        /*
 571         * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
  572         * called. So, this driver disables the IRQs during the DMA transfer.
 573         */
 574        if (tx)
 575                disable_irq(other_irq = rspi->tx_irq);
 576        if (rx && rspi->rx_irq != other_irq)
 577                disable_irq(rspi->rx_irq);
 578
 579        rspi_enable_irq(rspi, irq_mask);
 580        rspi->dma_callbacked = 0;
 581
 582        /* Now start DMA */
 583        if (rx)
 584                dma_async_issue_pending(rspi->master->dma_rx);
 585        if (tx)
 586                dma_async_issue_pending(rspi->master->dma_tx);
 587
 588        ret = wait_event_interruptible_timeout(rspi->wait,
 589                                               rspi->dma_callbacked, HZ);
 590        if (ret > 0 && rspi->dma_callbacked)
 591                ret = 0;
 592        else if (!ret) {
 593                dev_err(&rspi->master->dev, "DMA timeout\n");
 594                ret = -ETIMEDOUT;
 595                if (tx)
 596                        dmaengine_terminate_all(rspi->master->dma_tx);
 597                if (rx)
 598                        dmaengine_terminate_all(rspi->master->dma_rx);
 599        }
 600
 601        rspi_disable_irq(rspi, irq_mask);
 602
 603        if (tx)
 604                enable_irq(rspi->tx_irq);
 605        if (rx && rspi->rx_irq != other_irq)
 606                enable_irq(rspi->rx_irq);
 607
 608        return ret;
 609
 610no_dma_tx:
 611        if (rx)
 612                dmaengine_terminate_all(rspi->master->dma_rx);
 613no_dma_rx:
 614        if (ret == -EAGAIN) {
 615                pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
 616                             dev_driver_string(&rspi->master->dev),
 617                             dev_name(&rspi->master->dev));
 618        }
 619        return ret;
 620}
 621
 622static void rspi_receive_init(const struct rspi_data *rspi)
 623{
 624        u8 spsr;
 625
 626        spsr = rspi_read8(rspi, RSPI_SPSR);
 627        if (spsr & SPSR_SPRF)
 628                rspi_read_data(rspi);   /* dummy read */
 629        if (spsr & SPSR_OVRF)
 630                rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
 631                            RSPI_SPSR);
 632}
 633
 634static void rspi_rz_receive_init(const struct rspi_data *rspi)
 635{
 636        rspi_receive_init(rspi);
 637        rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
 638        rspi_write8(rspi, 0, RSPI_SPBFCR);
 639}
 640
 641static void qspi_receive_init(const struct rspi_data *rspi)
 642{
 643        u8 spsr;
 644
 645        spsr = rspi_read8(rspi, RSPI_SPSR);
 646        if (spsr & SPSR_SPRF)
 647                rspi_read_data(rspi);   /* dummy read */
 648        rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
 649        rspi_write8(rspi, 0, QSPI_SPBFCR);
 650}
 651
 652static bool __rspi_can_dma(const struct rspi_data *rspi,
 653                           const struct spi_transfer *xfer)
 654{
 655        return xfer->len > rspi->ops->fifo_size;
 656}
 657
 658static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
 659                         struct spi_transfer *xfer)
 660{
 661        struct rspi_data *rspi = spi_master_get_devdata(master);
 662
 663        return __rspi_can_dma(rspi, xfer);
 664}
 665
 666static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
 667                                         struct spi_transfer *xfer)
 668{
 669        if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer))
 670                return -EAGAIN;
 671
 672        /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
 673        return rspi_dma_transfer(rspi, &xfer->tx_sg,
 674                                xfer->rx_buf ? &xfer->rx_sg : NULL);
 675}
 676
 677static int rspi_common_transfer(struct rspi_data *rspi,
 678                                struct spi_transfer *xfer)
 679{
 680        int ret;
 681
 682        ret = rspi_dma_check_then_transfer(rspi, xfer);
 683        if (ret != -EAGAIN)
 684                return ret;
 685
 686        ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
 687        if (ret < 0)
 688                return ret;
 689
 690        /* Wait for the last transmission */
 691        rspi_wait_for_tx_empty(rspi);
 692
 693        return 0;
 694}
 695
 696static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
 697                             struct spi_transfer *xfer)
 698{
 699        struct rspi_data *rspi = spi_master_get_devdata(master);
 700        u8 spcr;
 701
 702        spcr = rspi_read8(rspi, RSPI_SPCR);
 703        if (xfer->rx_buf) {
 704                rspi_receive_init(rspi);
 705                spcr &= ~SPCR_TXMD;
 706        } else {
 707                spcr |= SPCR_TXMD;
 708        }
 709        rspi_write8(rspi, spcr, RSPI_SPCR);
 710
 711        return rspi_common_transfer(rspi, xfer);
 712}
 713
 714static int rspi_rz_transfer_one(struct spi_master *master,
 715                                struct spi_device *spi,
 716                                struct spi_transfer *xfer)
 717{
 718        struct rspi_data *rspi = spi_master_get_devdata(master);
 719
 720        rspi_rz_receive_init(rspi);
 721
 722        return rspi_common_transfer(rspi, xfer);
 723}
 724
 725static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
 726                                        u8 *rx, unsigned int len)
 727{
 728        unsigned int i, n;
 729        int ret;
 730
 731        while (len > 0) {
 732                n = qspi_set_send_trigger(rspi, len);
 733                qspi_set_receive_trigger(rspi, len);
 734                if (n == QSPI_BUFFER_SIZE) {
 735                        ret = rspi_wait_for_tx_empty(rspi);
 736                        if (ret < 0) {
 737                                dev_err(&rspi->master->dev, "transmit timeout\n");
 738                                return ret;
 739                        }
 740                        for (i = 0; i < n; i++)
 741                                rspi_write_data(rspi, *tx++);
 742
 743                        ret = rspi_wait_for_rx_full(rspi);
 744                        if (ret < 0) {
 745                                dev_err(&rspi->master->dev, "receive timeout\n");
 746                                return ret;
 747                        }
 748                        for (i = 0; i < n; i++)
 749                                *rx++ = rspi_read_data(rspi);
 750                } else {
 751                        ret = rspi_pio_transfer(rspi, tx, rx, n);
 752                        if (ret < 0)
 753                                return ret;
 754                }
 755                len -= n;
 756        }
 757
 758        return 0;
 759}
 760
 761static int qspi_transfer_out_in(struct rspi_data *rspi,
 762                                struct spi_transfer *xfer)
 763{
 764        int ret;
 765
 766        qspi_receive_init(rspi);
 767
 768        ret = rspi_dma_check_then_transfer(rspi, xfer);
 769        if (ret != -EAGAIN)
 770                return ret;
 771
 772        return qspi_trigger_transfer_out_in(rspi, xfer->tx_buf,
 773                                            xfer->rx_buf, xfer->len);
 774}
 775
 776static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
 777{
 778        int ret;
 779
 780        if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
 781                ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
 782                if (ret != -EAGAIN)
 783                        return ret;
 784        }
 785
 786        ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len);
 787        if (ret < 0)
 788                return ret;
 789
 790        /* Wait for the last transmission */
 791        rspi_wait_for_tx_empty(rspi);
 792
 793        return 0;
 794}
 795
 796static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
 797{
 798        if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
 799                int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
 800                if (ret != -EAGAIN)
 801                        return ret;
 802        }
 803
 804        return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len);
 805}
 806
 807static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
 808                             struct spi_transfer *xfer)
 809{
 810        struct rspi_data *rspi = spi_master_get_devdata(master);
 811
 812        if (spi->mode & SPI_LOOP) {
 813                return qspi_transfer_out_in(rspi, xfer);
 814        } else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
 815                /* Quad or Dual SPI Write */
 816                return qspi_transfer_out(rspi, xfer);
 817        } else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
 818                /* Quad or Dual SPI Read */
 819                return qspi_transfer_in(rspi, xfer);
 820        } else {
 821                /* Single SPI Transfer */
 822                return qspi_transfer_out_in(rspi, xfer);
 823        }
 824}
 825
 826static int rspi_setup(struct spi_device *spi)
 827{
 828        struct rspi_data *rspi = spi_master_get_devdata(spi->master);
 829
 830        rspi->max_speed_hz = spi->max_speed_hz;
 831
 832        rspi->spcmd = SPCMD_SSLKP;
 833        if (spi->mode & SPI_CPOL)
 834                rspi->spcmd |= SPCMD_CPOL;
 835        if (spi->mode & SPI_CPHA)
 836                rspi->spcmd |= SPCMD_CPHA;
 837
 838        /* CMOS output mode and MOSI signal from previous transfer */
 839        rspi->sppcr = 0;
 840        if (spi->mode & SPI_LOOP)
 841                rspi->sppcr |= SPPCR_SPLP;
 842
 843        set_config_register(rspi, 8);
 844
 845        return 0;
 846}
 847
 848static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
 849{
 850        if (xfer->tx_buf)
 851                switch (xfer->tx_nbits) {
 852                case SPI_NBITS_QUAD:
 853                        return SPCMD_SPIMOD_QUAD;
 854                case SPI_NBITS_DUAL:
 855                        return SPCMD_SPIMOD_DUAL;
 856                default:
 857                        return 0;
 858                }
 859        if (xfer->rx_buf)
 860                switch (xfer->rx_nbits) {
 861                case SPI_NBITS_QUAD:
 862                        return SPCMD_SPIMOD_QUAD | SPCMD_SPRW;
 863                case SPI_NBITS_DUAL:
 864                        return SPCMD_SPIMOD_DUAL | SPCMD_SPRW;
 865                default:
 866                        return 0;
 867                }
 868
 869        return 0;
 870}
 871
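/*
 * Group consecutive transfers that use the same SPI mode (single/dual/quad,
 * read or write) into one sequencer slot each: program SPCMDn with the mode
 * and SPBMULn with the accumulated byte count, then set the sequence length.
 */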
 872static int qspi_setup_sequencer(struct rspi_data *rspi,
 873                                const struct spi_message *msg)
 874{
 875        const struct spi_transfer *xfer;
 876        unsigned int i = 0, len = 0;
 877        u16 current_mode = 0xffff, mode;
 878
 879        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 880                mode = qspi_transfer_mode(xfer);
 881                if (mode == current_mode) {
 882                        len += xfer->len;
 883                        continue;
 884                }
 885
 886                /* Transfer mode change */
 887                if (i) {
 888                        /* Set transfer data length of previous transfer */
 889                        rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
 890                }
 891
 892                if (i >= QSPI_NUM_SPCMD) {
 893                        dev_err(&msg->spi->dev,
  894                                "Too many different transfer modes\n");
 895                        return -EINVAL;
 896                }
 897
 898                /* Program transfer mode for this transfer */
 899                rspi_write16(rspi, rspi->spcmd | mode, RSPI_SPCMD(i));
 900                current_mode = mode;
 901                len = xfer->len;
 902                i++;
 903        }
 904        if (i) {
 905                /* Set final transfer data length and sequence length */
 906                rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
 907                rspi_write8(rspi, i - 1, RSPI_SPSCR);
 908        }
 909
 910        return 0;
 911}
 912
 913static int rspi_prepare_message(struct spi_master *master,
 914                                struct spi_message *msg)
 915{
 916        struct rspi_data *rspi = spi_master_get_devdata(master);
 917        int ret;
 918
 919        if (msg->spi->mode &
 920            (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
 921                /* Setup sequencer for messages with multiple transfer modes */
 922                ret = qspi_setup_sequencer(rspi, msg);
 923                if (ret < 0)
 924                        return ret;
 925        }
 926
 927        /* Enable SPI function in master mode */
 928        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
 929        return 0;
 930}
 931
 932static int rspi_unprepare_message(struct spi_master *master,
 933                                  struct spi_message *msg)
 934{
 935        struct rspi_data *rspi = spi_master_get_devdata(master);
 936
 937        /* Disable SPI function */
 938        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
 939
 940        /* Reset sequencer for Single SPI Transfers */
 941        rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 942        rspi_write8(rspi, 0, RSPI_SPSCR);
 943        return 0;
 944}
 945
 946static irqreturn_t rspi_irq_mux(int irq, void *_sr)
 947{
 948        struct rspi_data *rspi = _sr;
 949        u8 spsr;
 950        irqreturn_t ret = IRQ_NONE;
 951        u8 disable_irq = 0;
 952
 953        rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
 954        if (spsr & SPSR_SPRF)
 955                disable_irq |= SPCR_SPRIE;
 956        if (spsr & SPSR_SPTEF)
 957                disable_irq |= SPCR_SPTIE;
 958
 959        if (disable_irq) {
 960                ret = IRQ_HANDLED;
 961                rspi_disable_irq(rspi, disable_irq);
 962                wake_up(&rspi->wait);
 963        }
 964
 965        return ret;
 966}
 967
 968static irqreturn_t rspi_irq_rx(int irq, void *_sr)
 969{
 970        struct rspi_data *rspi = _sr;
 971        u8 spsr;
 972
 973        rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
 974        if (spsr & SPSR_SPRF) {
 975                rspi_disable_irq(rspi, SPCR_SPRIE);
 976                wake_up(&rspi->wait);
 977                return IRQ_HANDLED;
 978        }
 979
  980        return IRQ_NONE;
 981}
 982
 983static irqreturn_t rspi_irq_tx(int irq, void *_sr)
 984{
 985        struct rspi_data *rspi = _sr;
 986        u8 spsr;
 987
 988        rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
 989        if (spsr & SPSR_SPTEF) {
 990                rspi_disable_irq(rspi, SPCR_SPTIE);
 991                wake_up(&rspi->wait);
 992                return IRQ_HANDLED;
 993        }
 994
  995        return IRQ_NONE;
 996}
 997
 998static struct dma_chan *rspi_request_dma_chan(struct device *dev,
 999                                              enum dma_transfer_direction dir,
1000                                              unsigned int id,
1001                                              dma_addr_t port_addr)
1002{
1003        dma_cap_mask_t mask;
1004        struct dma_chan *chan;
1005        struct dma_slave_config cfg;
1006        int ret;
1007
1008        dma_cap_zero(mask);
1009        dma_cap_set(DMA_SLAVE, mask);
1010
1011        chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
1012                                (void *)(unsigned long)id, dev,
1013                                dir == DMA_MEM_TO_DEV ? "tx" : "rx");
1014        if (!chan) {
1015                dev_warn(dev, "dma_request_slave_channel_compat failed\n");
1016                return NULL;
1017        }
1018
1019        memset(&cfg, 0, sizeof(cfg));
1020        cfg.direction = dir;
1021        if (dir == DMA_MEM_TO_DEV) {
1022                cfg.dst_addr = port_addr;
1023                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1024        } else {
1025                cfg.src_addr = port_addr;
1026                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1027        }
1028
1029        ret = dmaengine_slave_config(chan, &cfg);
1030        if (ret) {
1031                dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
1032                dma_release_channel(chan);
1033                return NULL;
1034        }
1035
1036        return chan;
1037}
1038
1039static int rspi_request_dma(struct device *dev, struct spi_master *master,
1040                            const struct resource *res)
1041{
1042        const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
1043        unsigned int dma_tx_id, dma_rx_id;
1044
1045        if (dev->of_node) {
1046                /* In the OF case we will get the slave IDs from the DT */
1047                dma_tx_id = 0;
1048                dma_rx_id = 0;
1049        } else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) {
1050                dma_tx_id = rspi_pd->dma_tx_id;
1051                dma_rx_id = rspi_pd->dma_rx_id;
1052        } else {
 1053                /* No DMA resources specified; not an error, PIO will be used */
1054                return 0;
1055        }
1056
1057        master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
1058                                               res->start + RSPI_SPDR);
1059        if (!master->dma_tx)
1060                return -ENODEV;
1061
1062        master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
1063                                               res->start + RSPI_SPDR);
1064        if (!master->dma_rx) {
1065                dma_release_channel(master->dma_tx);
1066                master->dma_tx = NULL;
1067                return -ENODEV;
1068        }
1069
1070        master->can_dma = rspi_can_dma;
 1071        dev_info(dev, "DMA available\n");
1072        return 0;
1073}
1074
1075static void rspi_release_dma(struct spi_master *master)
1076{
1077        if (master->dma_tx)
1078                dma_release_channel(master->dma_tx);
1079        if (master->dma_rx)
1080                dma_release_channel(master->dma_rx);
1081}
1082
1083static int rspi_remove(struct platform_device *pdev)
1084{
1085        struct rspi_data *rspi = platform_get_drvdata(pdev);
1086
1087        rspi_release_dma(rspi->master);
1088        pm_runtime_disable(&pdev->dev);
1089
1090        return 0;
1091}
1092
1093static const struct spi_ops rspi_ops = {
1094        .set_config_register =  rspi_set_config_register,
1095        .transfer_one =         rspi_transfer_one,
1096        .mode_bits =            SPI_CPHA | SPI_CPOL | SPI_LOOP,
1097        .flags =                SPI_MASTER_MUST_TX,
1098        .fifo_size =            8,
1099};
1100
1101static const struct spi_ops rspi_rz_ops = {
1102        .set_config_register =  rspi_rz_set_config_register,
1103        .transfer_one =         rspi_rz_transfer_one,
1104        .mode_bits =            SPI_CPHA | SPI_CPOL | SPI_LOOP,
1105        .flags =                SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
1106        .fifo_size =            8,      /* 8 for TX, 32 for RX */
1107};
1108
1109static const struct spi_ops qspi_ops = {
1110        .set_config_register =  qspi_set_config_register,
1111        .transfer_one =         qspi_transfer_one,
1112        .mode_bits =            SPI_CPHA | SPI_CPOL | SPI_LOOP |
1113                                SPI_TX_DUAL | SPI_TX_QUAD |
1114                                SPI_RX_DUAL | SPI_RX_QUAD,
1115        .flags =                SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
1116        .fifo_size =            32,
1117};
1118
1119#ifdef CONFIG_OF
1120static const struct of_device_id rspi_of_match[] = {
1121        /* RSPI on legacy SH */
1122        { .compatible = "renesas,rspi", .data = &rspi_ops },
1123        /* RSPI on RZ/A1H */
1124        { .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
1125        /* QSPI on R-Car Gen2 */
1126        { .compatible = "renesas,qspi", .data = &qspi_ops },
1127        { /* sentinel */ }
1128};
1129
1130MODULE_DEVICE_TABLE(of, rspi_of_match);
1131
1132static int rspi_parse_dt(struct device *dev, struct spi_master *master)
1133{
1134        u32 num_cs;
1135        int error;
1136
1137        /* Parse DT properties */
1138        error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
1139        if (error) {
1140                dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
1141                return error;
1142        }
1143
1144        master->num_chipselect = num_cs;
1145        return 0;
1146}
1147#else
1148#define rspi_of_match   NULL
1149static inline int rspi_parse_dt(struct device *dev, struct spi_master *master)
1150{
1151        return -EINVAL;
1152}
1153#endif /* CONFIG_OF */
1154
1155static int rspi_request_irq(struct device *dev, unsigned int irq,
1156                            irq_handler_t handler, const char *suffix,
1157                            void *dev_id)
1158{
1159        const char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
1160                                          dev_name(dev), suffix);
1161        if (!name)
1162                return -ENOMEM;
1163
1164        return devm_request_irq(dev, irq, handler, 0, name, dev_id);
1165}
1166
1167static int rspi_probe(struct platform_device *pdev)
1168{
1169        struct resource *res;
1170        struct spi_master *master;
1171        struct rspi_data *rspi;
1172        int ret;
1173        const struct of_device_id *of_id;
1174        const struct rspi_plat_data *rspi_pd;
1175        const struct spi_ops *ops;
1176
1177        master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
1178        if (master == NULL) {
1179                dev_err(&pdev->dev, "spi_alloc_master error.\n");
1180                return -ENOMEM;
1181        }
1182
1183        of_id = of_match_device(rspi_of_match, &pdev->dev);
1184        if (of_id) {
1185                ops = of_id->data;
1186                ret = rspi_parse_dt(&pdev->dev, master);
1187                if (ret)
1188                        goto error1;
1189        } else {
1190                ops = (struct spi_ops *)pdev->id_entry->driver_data;
1191                rspi_pd = dev_get_platdata(&pdev->dev);
1192                if (rspi_pd && rspi_pd->num_chipselect)
1193                        master->num_chipselect = rspi_pd->num_chipselect;
1194                else
1195                        master->num_chipselect = 2; /* default */
1196        }
1197
1198        /* ops parameter check */
1199        if (!ops->set_config_register) {
1200                dev_err(&pdev->dev, "there is no set_config_register\n");
1201                ret = -ENODEV;
1202                goto error1;
1203        }
1204
1205        rspi = spi_master_get_devdata(master);
1206        platform_set_drvdata(pdev, rspi);
1207        rspi->ops = ops;
1208        rspi->master = master;
1209
1210        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1211        rspi->addr = devm_ioremap_resource(&pdev->dev, res);
1212        if (IS_ERR(rspi->addr)) {
1213                ret = PTR_ERR(rspi->addr);
1214                goto error1;
1215        }
1216
1217        rspi->clk = devm_clk_get(&pdev->dev, NULL);
1218        if (IS_ERR(rspi->clk)) {
1219                dev_err(&pdev->dev, "cannot get clock\n");
1220                ret = PTR_ERR(rspi->clk);
1221                goto error1;
1222        }
1223
1224        pm_runtime_enable(&pdev->dev);
1225
1226        init_waitqueue_head(&rspi->wait);
1227
1228        master->bus_num = pdev->id;
1229        master->setup = rspi_setup;
1230        master->auto_runtime_pm = true;
1231        master->transfer_one = ops->transfer_one;
1232        master->prepare_message = rspi_prepare_message;
1233        master->unprepare_message = rspi_unprepare_message;
1234        master->mode_bits = ops->mode_bits;
1235        master->flags = ops->flags;
1236        master->dev.of_node = pdev->dev.of_node;
1237
1238        ret = platform_get_irq_byname(pdev, "rx");
1239        if (ret < 0) {
1240                ret = platform_get_irq_byname(pdev, "mux");
1241                if (ret < 0)
1242                        ret = platform_get_irq(pdev, 0);
1243                if (ret >= 0)
1244                        rspi->rx_irq = rspi->tx_irq = ret;
1245        } else {
1246                rspi->rx_irq = ret;
1247                ret = platform_get_irq_byname(pdev, "tx");
1248                if (ret >= 0)
1249                        rspi->tx_irq = ret;
1250        }
1251        if (ret < 0) {
1252                dev_err(&pdev->dev, "platform_get_irq error\n");
1253                goto error2;
1254        }
1255
1256        if (rspi->rx_irq == rspi->tx_irq) {
1257                /* Single multiplexed interrupt */
1258                ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
1259                                       "mux", rspi);
1260        } else {
1261                /* Multi-interrupt mode, only SPRI and SPTI are used */
1262                ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
1263                                       "rx", rspi);
1264                if (!ret)
1265                        ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
1266                                               rspi_irq_tx, "tx", rspi);
1267        }
1268        if (ret < 0) {
1269                dev_err(&pdev->dev, "request_irq error\n");
1270                goto error2;
1271        }
1272
1273        ret = rspi_request_dma(&pdev->dev, master, res);
1274        if (ret < 0)
1275                dev_warn(&pdev->dev, "DMA not available, using PIO\n");
1276
1277        ret = devm_spi_register_master(&pdev->dev, master);
1278        if (ret < 0) {
1279                dev_err(&pdev->dev, "spi_register_master error.\n");
1280                goto error3;
1281        }
1282
1283        dev_info(&pdev->dev, "probed\n");
1284
1285        return 0;
1286
1287error3:
1288        rspi_release_dma(master);
1289error2:
1290        pm_runtime_disable(&pdev->dev);
1291error1:
1292        spi_master_put(master);
1293
1294        return ret;
1295}
1296
1297static const struct platform_device_id spi_driver_ids[] = {
1298        { "rspi",       (kernel_ulong_t)&rspi_ops },
1299        { "rspi-rz",    (kernel_ulong_t)&rspi_rz_ops },
1300        { "qspi",       (kernel_ulong_t)&qspi_ops },
1301        {},
1302};
1303
1304MODULE_DEVICE_TABLE(platform, spi_driver_ids);
1305
1306static struct platform_driver rspi_driver = {
1307        .probe =        rspi_probe,
1308        .remove =       rspi_remove,
1309        .id_table =     spi_driver_ids,
1310        .driver         = {
1311                .name = "renesas_spi",
1312                .of_match_table = of_match_ptr(rspi_of_match),
1313        },
1314};
1315module_platform_driver(rspi_driver);
1316
1317MODULE_DESCRIPTION("Renesas RSPI bus driver");
1318MODULE_LICENSE("GPL v2");
1319MODULE_AUTHOR("Yoshihiro Shimoda");
1320MODULE_ALIAS("platform:rspi");
1321