linux/drivers/spi/spi-rspi.c
   1/*
   2 * SH RSPI driver
   3 *
   4 * Copyright (C) 2012, 2013  Renesas Solutions Corp.
   5 * Copyright (C) 2014 Glider bvba
   6 *
   7 * Based on spi-sh.c:
   8 * Copyright (C) 2011 Renesas Solutions Corp.
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License as published by
  12 * the Free Software Foundation; version 2 of the License.
  13 *
  14 * This program is distributed in the hope that it will be useful,
  15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17 * GNU General Public License for more details.
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/kernel.h>
  22#include <linux/sched.h>
  23#include <linux/errno.h>
  24#include <linux/interrupt.h>
  25#include <linux/platform_device.h>
  26#include <linux/io.h>
  27#include <linux/clk.h>
  28#include <linux/dmaengine.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/of_device.h>
  31#include <linux/pm_runtime.h>
  32#include <linux/sh_dma.h>
  33#include <linux/spi/spi.h>
  34#include <linux/spi/rspi.h>
  35
  36#define RSPI_SPCR               0x00    /* Control Register */
  37#define RSPI_SSLP               0x01    /* Slave Select Polarity Register */
  38#define RSPI_SPPCR              0x02    /* Pin Control Register */
  39#define RSPI_SPSR               0x03    /* Status Register */
  40#define RSPI_SPDR               0x04    /* Data Register */
  41#define RSPI_SPSCR              0x08    /* Sequence Control Register */
  42#define RSPI_SPSSR              0x09    /* Sequence Status Register */
  43#define RSPI_SPBR               0x0a    /* Bit Rate Register */
  44#define RSPI_SPDCR              0x0b    /* Data Control Register */
  45#define RSPI_SPCKD              0x0c    /* Clock Delay Register */
  46#define RSPI_SSLND              0x0d    /* Slave Select Negation Delay Register */
  47#define RSPI_SPND               0x0e    /* Next-Access Delay Register */
  48#define RSPI_SPCR2              0x0f    /* Control Register 2 (SH only) */
  49#define RSPI_SPCMD0             0x10    /* Command Register 0 */
  50#define RSPI_SPCMD1             0x12    /* Command Register 1 */
  51#define RSPI_SPCMD2             0x14    /* Command Register 2 */
  52#define RSPI_SPCMD3             0x16    /* Command Register 3 */
  53#define RSPI_SPCMD4             0x18    /* Command Register 4 */
  54#define RSPI_SPCMD5             0x1a    /* Command Register 5 */
  55#define RSPI_SPCMD6             0x1c    /* Command Register 6 */
  56#define RSPI_SPCMD7             0x1e    /* Command Register 7 */
  57#define RSPI_SPCMD(i)           (RSPI_SPCMD0 + (i) * 2)
  58#define RSPI_NUM_SPCMD          8
  59#define RSPI_RZ_NUM_SPCMD       4
  60#define QSPI_NUM_SPCMD          4
  61
  62/* RSPI on RZ only */
  63#define RSPI_SPBFCR             0x20    /* Buffer Control Register */
  64#define RSPI_SPBFDR             0x22    /* Buffer Data Count Setting Register */
  65
  66/* QSPI only */
  67#define QSPI_SPBFCR             0x18    /* Buffer Control Register */
  68#define QSPI_SPBDCR             0x1a    /* Buffer Data Count Register */
  69#define QSPI_SPBMUL0            0x1c    /* Transfer Data Length Multiplier Setting Register 0 */
  70#define QSPI_SPBMUL1            0x20    /* Transfer Data Length Multiplier Setting Register 1 */
  71#define QSPI_SPBMUL2            0x24    /* Transfer Data Length Multiplier Setting Register 2 */
  72#define QSPI_SPBMUL3            0x28    /* Transfer Data Length Multiplier Setting Register 3 */
  73#define QSPI_SPBMUL(i)          (QSPI_SPBMUL0 + (i) * 4)
  74
  75/* SPCR - Control Register */
  76#define SPCR_SPRIE              0x80    /* Receive Interrupt Enable */
  77#define SPCR_SPE                0x40    /* Function Enable */
  78#define SPCR_SPTIE              0x20    /* Transmit Interrupt Enable */
  79#define SPCR_SPEIE              0x10    /* Error Interrupt Enable */
  80#define SPCR_MSTR               0x08    /* Master/Slave Mode Select */
  81#define SPCR_MODFEN             0x04    /* Mode Fault Error Detection Enable */
  82/* RSPI on SH only */
  83#define SPCR_TXMD               0x02    /* TX Only Mode (vs. Full Duplex) */
  84#define SPCR_SPMS               0x01    /* 3-wire Mode (vs. 4-wire) */
  85/* QSPI on R-Car Gen2 only */
  86#define SPCR_WSWAP              0x02    /* Word Swap of read-data for DMAC */
  87#define SPCR_BSWAP              0x01    /* Byte Swap of read-data for DMAC */
  88
  89/* SSLP - Slave Select Polarity Register */
  90#define SSLP_SSL1P              0x02    /* SSL1 Signal Polarity Setting */
  91#define SSLP_SSL0P              0x01    /* SSL0 Signal Polarity Setting */
  92
  93/* SPPCR - Pin Control Register */
  94#define SPPCR_MOIFE             0x20    /* MOSI Idle Value Fixing Enable */
  95#define SPPCR_MOIFV             0x10    /* MOSI Idle Fixed Value */
   96#define SPPCR_SPOM              0x04    /* SPI Output Mode */
  97#define SPPCR_SPLP2             0x02    /* Loopback Mode 2 (non-inverting) */
  98#define SPPCR_SPLP              0x01    /* Loopback Mode (inverting) */
  99
 100#define SPPCR_IO3FV             0x04    /* Single-/Dual-SPI Mode IO3 Output Fixed Value */
 101#define SPPCR_IO2FV             0x04    /* Single-/Dual-SPI Mode IO2 Output Fixed Value */
 102
 103/* SPSR - Status Register */
 104#define SPSR_SPRF               0x80    /* Receive Buffer Full Flag */
 105#define SPSR_TEND               0x40    /* Transmit End */
 106#define SPSR_SPTEF              0x20    /* Transmit Buffer Empty Flag */
 107#define SPSR_PERF               0x08    /* Parity Error Flag */
 108#define SPSR_MODF               0x04    /* Mode Fault Error Flag */
 109#define SPSR_IDLNF              0x02    /* RSPI Idle Flag */
 110#define SPSR_OVRF               0x01    /* Overrun Error Flag (RSPI only) */
 111
 112/* SPSCR - Sequence Control Register */
 113#define SPSCR_SPSLN_MASK        0x07    /* Sequence Length Specification */
 114
 115/* SPSSR - Sequence Status Register */
 116#define SPSSR_SPECM_MASK        0x70    /* Command Error Mask */
 117#define SPSSR_SPCP_MASK         0x07    /* Command Pointer Mask */
 118
 119/* SPDCR - Data Control Register */
 120#define SPDCR_TXDMY             0x80    /* Dummy Data Transmission Enable */
 121#define SPDCR_SPLW1             0x40    /* Access Width Specification (RZ) */
 122#define SPDCR_SPLW0             0x20    /* Access Width Specification (RZ) */
 123#define SPDCR_SPLLWORD          (SPDCR_SPLW1 | SPDCR_SPLW0)
 124#define SPDCR_SPLWORD           SPDCR_SPLW1
 125#define SPDCR_SPLBYTE           SPDCR_SPLW0
 126#define SPDCR_SPLW              0x20    /* Access Width Specification (SH) */
 127#define SPDCR_SPRDTD            0x10    /* Receive Transmit Data Select (SH) */
 128#define SPDCR_SLSEL1            0x08
 129#define SPDCR_SLSEL0            0x04
 130#define SPDCR_SLSEL_MASK        0x0c    /* SSL1 Output Select (SH) */
 131#define SPDCR_SPFC1             0x02
 132#define SPDCR_SPFC0             0x01
 133#define SPDCR_SPFC_MASK         0x03    /* Frame Count Setting (1-4) (SH) */
 134
 135/* SPCKD - Clock Delay Register */
 136#define SPCKD_SCKDL_MASK        0x07    /* Clock Delay Setting (1-8) */
 137
 138/* SSLND - Slave Select Negation Delay Register */
 139#define SSLND_SLNDL_MASK        0x07    /* SSL Negation Delay Setting (1-8) */
 140
 141/* SPND - Next-Access Delay Register */
 142#define SPND_SPNDL_MASK         0x07    /* Next-Access Delay Setting (1-8) */
 143
 144/* SPCR2 - Control Register 2 */
 145#define SPCR2_PTE               0x08    /* Parity Self-Test Enable */
 146#define SPCR2_SPIE              0x04    /* Idle Interrupt Enable */
 147#define SPCR2_SPOE              0x02    /* Odd Parity Enable (vs. Even) */
 148#define SPCR2_SPPE              0x01    /* Parity Enable */
 149
 150/* SPCMDn - Command Registers */
 151#define SPCMD_SCKDEN            0x8000  /* Clock Delay Setting Enable */
 152#define SPCMD_SLNDEN            0x4000  /* SSL Negation Delay Setting Enable */
 153#define SPCMD_SPNDEN            0x2000  /* Next-Access Delay Enable */
 154#define SPCMD_LSBF              0x1000  /* LSB First */
 155#define SPCMD_SPB_MASK          0x0f00  /* Data Length Setting */
  156#define SPCMD_SPB_8_TO_16(bit)  ((((bit) - 1) << 8) & SPCMD_SPB_MASK)
 157#define SPCMD_SPB_8BIT          0x0000  /* QSPI only */
 158#define SPCMD_SPB_16BIT         0x0100
 159#define SPCMD_SPB_20BIT         0x0000
 160#define SPCMD_SPB_24BIT         0x0100
 161#define SPCMD_SPB_32BIT         0x0200
 162#define SPCMD_SSLKP             0x0080  /* SSL Signal Level Keeping */
 163#define SPCMD_SPIMOD_MASK       0x0060  /* SPI Operating Mode (QSPI only) */
 164#define SPCMD_SPIMOD1           0x0040
 165#define SPCMD_SPIMOD0           0x0020
 166#define SPCMD_SPIMOD_SINGLE     0
 167#define SPCMD_SPIMOD_DUAL       SPCMD_SPIMOD0
 168#define SPCMD_SPIMOD_QUAD       SPCMD_SPIMOD1
 169#define SPCMD_SPRW              0x0010  /* SPI Read/Write Access (Dual/Quad) */
 170#define SPCMD_SSLA_MASK         0x0030  /* SSL Assert Signal Setting (RSPI) */
 171#define SPCMD_BRDV_MASK         0x000c  /* Bit Rate Division Setting */
 172#define SPCMD_CPOL              0x0002  /* Clock Polarity Setting */
 173#define SPCMD_CPHA              0x0001  /* Clock Phase Setting */
 174
 175/* SPBFCR - Buffer Control Register */
 176#define SPBFCR_TXRST            0x80    /* Transmit Buffer Data Reset */
 177#define SPBFCR_RXRST            0x40    /* Receive Buffer Data Reset */
 178#define SPBFCR_TXTRG_MASK       0x30    /* Transmit Buffer Data Triggering Number */
 179#define SPBFCR_RXTRG_MASK       0x07    /* Receive Buffer Data Triggering Number */
 180/* QSPI on R-Car Gen2 */
 181#define SPBFCR_TXTRG_1B         0x00    /* 31 bytes (1 byte available) */
 182#define SPBFCR_TXTRG_32B        0x30    /* 0 byte (32 bytes available) */
 183#define SPBFCR_RXTRG_1B         0x00    /* 1 byte (31 bytes available) */
 184#define SPBFCR_RXTRG_32B        0x07    /* 32 bytes (0 byte available) */
 185
 186#define QSPI_BUFFER_SIZE        32u
 187
 188struct rspi_data {
 189        void __iomem *addr;
 190        u32 max_speed_hz;
 191        struct spi_master *master;
 192        wait_queue_head_t wait;
 193        struct clk *clk;
 194        u16 spcmd;
 195        u8 spsr;
 196        u8 sppcr;
 197        int rx_irq, tx_irq;
 198        const struct spi_ops *ops;
 199
 200        unsigned dma_callbacked:1;
 201        unsigned byte_access:1;
 202};
 203
 204static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
 205{
 206        iowrite8(data, rspi->addr + offset);
 207}
 208
 209static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
 210{
 211        iowrite16(data, rspi->addr + offset);
 212}
 213
 214static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
 215{
 216        iowrite32(data, rspi->addr + offset);
 217}
 218
 219static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
 220{
 221        return ioread8(rspi->addr + offset);
 222}
 223
 224static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
 225{
 226        return ioread16(rspi->addr + offset);
 227}
 228
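/* Access SPDR with the data width that set_config_register() selected in SPDCR */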
 229static void rspi_write_data(const struct rspi_data *rspi, u16 data)
 230{
 231        if (rspi->byte_access)
 232                rspi_write8(rspi, data, RSPI_SPDR);
 233        else /* 16 bit */
 234                rspi_write16(rspi, data, RSPI_SPDR);
 235}
 236
 237static u16 rspi_read_data(const struct rspi_data *rspi)
 238{
 239        if (rspi->byte_access)
 240                return rspi_read8(rspi, RSPI_SPDR);
 241        else /* 16 bit */
 242                return rspi_read16(rspi, RSPI_SPDR);
 243}
 244
  245/* Per-variant operations and parameters */
 246struct spi_ops {
 247        int (*set_config_register)(struct rspi_data *rspi, int access_size);
 248        int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
 249                            struct spi_transfer *xfer);
 250        u16 mode_bits;
 251        u16 flags;
 252        u16 fifo_size;
 253};
 254
 255/*
 256 * functions for RSPI on legacy SH
 257 */
 258static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
 259{
 260        int spbr;
 261
 262        /* Sets output mode, MOSI signal, and (optionally) loopback */
 263        rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
 264
 265        /* Sets transfer bit rate */
 266        spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
 267                            2 * rspi->max_speed_hz) - 1;
 268        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
 269
 270        /* Disable dummy transmission, set 16-bit word access, 1 frame */
 271        rspi_write8(rspi, 0, RSPI_SPDCR);
 272        rspi->byte_access = 0;
 273
 274        /* Sets RSPCK, SSL, next-access delay value */
 275        rspi_write8(rspi, 0x00, RSPI_SPCKD);
 276        rspi_write8(rspi, 0x00, RSPI_SSLND);
 277        rspi_write8(rspi, 0x00, RSPI_SPND);
 278
 279        /* Sets parity, interrupt mask */
 280        rspi_write8(rspi, 0x00, RSPI_SPCR2);
 281
 282        /* Sets SPCMD */
 283        rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
 284        rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 285
 286        /* Sets RSPI mode */
 287        rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
 288
 289        return 0;
 290}
 291
 292/*
 293 * functions for RSPI on RZ
 294 */
 295static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
 296{
 297        int spbr;
 298        int div = 0;
 299        unsigned long clksrc;
 300
 301        /* Sets output mode, MOSI signal, and (optionally) loopback */
 302        rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
 303
 304        clksrc = clk_get_rate(rspi->clk);
 305        while (div < 3) {
 306                if (rspi->max_speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
 307                        break;
 308                div++;
 309                clksrc /= 2;
 310        }
 311
 312        /* Sets transfer bit rate */
 313        spbr = DIV_ROUND_UP(clksrc, 2 * rspi->max_speed_hz) - 1;
 314        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
 315        rspi->spcmd |= div << 2;
 316
 317        /* Disable dummy transmission, set byte access */
 318        rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
 319        rspi->byte_access = 1;
 320
 321        /* Sets RSPCK, SSL, next-access delay value */
 322        rspi_write8(rspi, 0x00, RSPI_SPCKD);
 323        rspi_write8(rspi, 0x00, RSPI_SSLND);
 324        rspi_write8(rspi, 0x00, RSPI_SPND);
 325
 326        /* Sets SPCMD */
 327        rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
 328        rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 329
 330        /* Sets RSPI mode */
 331        rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
 332
 333        return 0;
 334}
 335
 336/*
 337 * functions for QSPI
 338 */
 339static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
 340{
 341        int spbr;
 342
 343        /* Sets output mode, MOSI signal, and (optionally) loopback */
 344        rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
 345
 346        /* Sets transfer bit rate */
 347        spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
 348        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
 349
 350        /* Disable dummy transmission, set byte access */
 351        rspi_write8(rspi, 0, RSPI_SPDCR);
 352        rspi->byte_access = 1;
 353
 354        /* Sets RSPCK, SSL, next-access delay value */
 355        rspi_write8(rspi, 0x00, RSPI_SPCKD);
 356        rspi_write8(rspi, 0x00, RSPI_SSLND);
 357        rspi_write8(rspi, 0x00, RSPI_SPND);
 358
 359        /* Data Length Setting */
 360        if (access_size == 8)
 361                rspi->spcmd |= SPCMD_SPB_8BIT;
 362        else if (access_size == 16)
 363                rspi->spcmd |= SPCMD_SPB_16BIT;
 364        else
 365                rspi->spcmd |= SPCMD_SPB_32BIT;
 366
 367        rspi->spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SPNDEN;
 368
 369        /* Resets transfer data length */
 370        rspi_write32(rspi, 0, QSPI_SPBMUL0);
 371
 372        /* Resets transmit and receive buffer */
 373        rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
 374        /* Sets buffer to allow normal operation */
 375        rspi_write8(rspi, 0x00, QSPI_SPBFCR);
 376
 377        /* Sets SPCMD */
 378        rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 379
 380        /* Sets RSPI mode */
 381        rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
 382
 383        return 0;
 384}
 385
 386static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg)
 387{
 388        u8 data;
 389
 390        data = rspi_read8(rspi, reg);
 391        data &= ~mask;
 392        data |= (val & mask);
 393        rspi_write8(rspi, data, reg);
 394}
 395
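/*
 * Set the transmit buffer triggering number: a full 32-byte burst when at
 * least QSPI_BUFFER_SIZE bytes remain, otherwise one byte.  Returns the
 * number of bytes to send in the next burst.
 */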
 396static unsigned int qspi_set_send_trigger(struct rspi_data *rspi,
 397                                          unsigned int len)
 398{
 399        unsigned int n;
 400
 401        n = min(len, QSPI_BUFFER_SIZE);
 402
 403        if (len >= QSPI_BUFFER_SIZE) {
 404                /* sets triggering number to 32 bytes */
 405                qspi_update(rspi, SPBFCR_TXTRG_MASK,
 406                             SPBFCR_TXTRG_32B, QSPI_SPBFCR);
 407        } else {
 408                /* sets triggering number to 1 byte */
 409                qspi_update(rspi, SPBFCR_TXTRG_MASK,
 410                             SPBFCR_TXTRG_1B, QSPI_SPBFCR);
 411        }
 412
 413        return n;
 414}
 415
 416static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
 417{
 418        unsigned int n;
 419
 420        n = min(len, QSPI_BUFFER_SIZE);
 421
 422        if (len >= QSPI_BUFFER_SIZE) {
 423                /* sets triggering number to 32 bytes */
 424                qspi_update(rspi, SPBFCR_RXTRG_MASK,
 425                             SPBFCR_RXTRG_32B, QSPI_SPBFCR);
 426        } else {
 427                /* sets triggering number to 1 byte */
 428                qspi_update(rspi, SPBFCR_RXTRG_MASK,
 429                             SPBFCR_RXTRG_1B, QSPI_SPBFCR);
 430        }
 431        return n;
 432}
 433
 434#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
 435
 436static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
 437{
 438        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
 439}
 440
 441static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
 442{
 443        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
 444}
 445
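/*
 * Wait for one of the SPSR flags in wait_mask to be set, enabling the
 * corresponding interrupt and sleeping on rspi->wait if it is not already
 * pending.
 */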
 446static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
 447                                   u8 enable_bit)
 448{
 449        int ret;
 450
 451        rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
 452        if (rspi->spsr & wait_mask)
 453                return 0;
 454
 455        rspi_enable_irq(rspi, enable_bit);
 456        ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
 457        if (ret == 0 && !(rspi->spsr & wait_mask))
 458                return -ETIMEDOUT;
 459
 460        return 0;
 461}
 462
 463static inline int rspi_wait_for_tx_empty(struct rspi_data *rspi)
 464{
 465        return rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
 466}
 467
 468static inline int rspi_wait_for_rx_full(struct rspi_data *rspi)
 469{
 470        return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE);
 471}
 472
 473static int rspi_data_out(struct rspi_data *rspi, u8 data)
 474{
 475        int error = rspi_wait_for_tx_empty(rspi);
 476        if (error < 0) {
 477                dev_err(&rspi->master->dev, "transmit timeout\n");
 478                return error;
 479        }
 480        rspi_write_data(rspi, data);
 481        return 0;
 482}
 483
 484static int rspi_data_in(struct rspi_data *rspi)
 485{
 486        int error;
 487        u8 data;
 488
 489        error = rspi_wait_for_rx_full(rspi);
 490        if (error < 0) {
 491                dev_err(&rspi->master->dev, "receive timeout\n");
 492                return error;
 493        }
 494        data = rspi_read_data(rspi);
 495        return data;
 496}
 497
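/* Interrupt-driven, byte-at-a-time transfer; the fallback when DMA is not used */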
 498static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx,
 499                             unsigned int n)
 500{
 501        while (n-- > 0) {
 502                if (tx) {
 503                        int ret = rspi_data_out(rspi, *tx++);
 504                        if (ret < 0)
 505                                return ret;
 506                }
 507                if (rx) {
 508                        int ret = rspi_data_in(rspi);
 509                        if (ret < 0)
 510                                return ret;
 511                        *rx++ = ret;
 512                }
 513        }
 514
 515        return 0;
 516}
 517
 518static void rspi_dma_complete(void *arg)
 519{
 520        struct rspi_data *rspi = arg;
 521
 522        rspi->dma_callbacked = 1;
 523        wake_up_interruptible(&rspi->wait);
 524}
 525
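/*
 * Transfer using the DMA engine.  Returns 0 on success, -EAGAIN when a DMA
 * descriptor could not be prepared (the caller then falls back to PIO), or
 * another negative error code.
 */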
 526static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 527                             struct sg_table *rx)
 528{
 529        struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
 530        u8 irq_mask = 0;
 531        unsigned int other_irq = 0;
 532        dma_cookie_t cookie;
 533        int ret;
 534
 535        /* First prepare and submit the DMA request(s), as this may fail */
 536        if (rx) {
 537                desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
 538                                        rx->sgl, rx->nents, DMA_DEV_TO_MEM,
 539                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 540                if (!desc_rx) {
 541                        ret = -EAGAIN;
 542                        goto no_dma_rx;
 543                }
 544
 545                desc_rx->callback = rspi_dma_complete;
 546                desc_rx->callback_param = rspi;
 547                cookie = dmaengine_submit(desc_rx);
 548                if (dma_submit_error(cookie)) {
 549                        ret = cookie;
 550                        goto no_dma_rx;
 551                }
 552
 553                irq_mask |= SPCR_SPRIE;
 554        }
 555
 556        if (tx) {
 557                desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
 558                                        tx->sgl, tx->nents, DMA_MEM_TO_DEV,
 559                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 560                if (!desc_tx) {
 561                        ret = -EAGAIN;
 562                        goto no_dma_tx;
 563                }
 564
 565                if (rx) {
 566                        /* No callback */
 567                        desc_tx->callback = NULL;
 568                } else {
 569                        desc_tx->callback = rspi_dma_complete;
 570                        desc_tx->callback_param = rspi;
 571                }
 572                cookie = dmaengine_submit(desc_tx);
 573                if (dma_submit_error(cookie)) {
 574                        ret = cookie;
 575                        goto no_dma_tx;
 576                }
 577
 578                irq_mask |= SPCR_SPTIE;
 579        }
 580
 581        /*
 582         * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
 583         * called. So, this driver disables the IRQ while DMA transfer.
 584         */
 585        if (tx)
 586                disable_irq(other_irq = rspi->tx_irq);
 587        if (rx && rspi->rx_irq != other_irq)
 588                disable_irq(rspi->rx_irq);
 589
 590        rspi_enable_irq(rspi, irq_mask);
 591        rspi->dma_callbacked = 0;
 592
 593        /* Now start DMA */
 594        if (rx)
 595                dma_async_issue_pending(rspi->master->dma_rx);
 596        if (tx)
 597                dma_async_issue_pending(rspi->master->dma_tx);
 598
 599        ret = wait_event_interruptible_timeout(rspi->wait,
 600                                               rspi->dma_callbacked, HZ);
 601        if (ret > 0 && rspi->dma_callbacked)
 602                ret = 0;
 603        else if (!ret) {
 604                dev_err(&rspi->master->dev, "DMA timeout\n");
 605                ret = -ETIMEDOUT;
 606                if (tx)
 607                        dmaengine_terminate_all(rspi->master->dma_tx);
 608                if (rx)
 609                        dmaengine_terminate_all(rspi->master->dma_rx);
 610        }
 611
 612        rspi_disable_irq(rspi, irq_mask);
 613
 614        if (tx)
 615                enable_irq(rspi->tx_irq);
 616        if (rx && rspi->rx_irq != other_irq)
 617                enable_irq(rspi->rx_irq);
 618
 619        return ret;
 620
 621no_dma_tx:
 622        if (rx)
 623                dmaengine_terminate_all(rspi->master->dma_rx);
 624no_dma_rx:
 625        if (ret == -EAGAIN) {
 626                pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
 627                             dev_driver_string(&rspi->master->dev),
 628                             dev_name(&rspi->master->dev));
 629        }
 630        return ret;
 631}
 632
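/* Discard stale receive data and clear a pending overrun before a new transfer */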
 633static void rspi_receive_init(const struct rspi_data *rspi)
 634{
 635        u8 spsr;
 636
 637        spsr = rspi_read8(rspi, RSPI_SPSR);
 638        if (spsr & SPSR_SPRF)
 639                rspi_read_data(rspi);   /* dummy read */
 640        if (spsr & SPSR_OVRF)
 641                rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
 642                            RSPI_SPSR);
 643}
 644
 645static void rspi_rz_receive_init(const struct rspi_data *rspi)
 646{
 647        rspi_receive_init(rspi);
 648        rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
 649        rspi_write8(rspi, 0, RSPI_SPBFCR);
 650}
 651
 652static void qspi_receive_init(const struct rspi_data *rspi)
 653{
 654        u8 spsr;
 655
 656        spsr = rspi_read8(rspi, RSPI_SPSR);
 657        if (spsr & SPSR_SPRF)
 658                rspi_read_data(rspi);   /* dummy read */
 659        rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
 660        rspi_write8(rspi, 0, QSPI_SPBFCR);
 661}
 662
 663static bool __rspi_can_dma(const struct rspi_data *rspi,
 664                           const struct spi_transfer *xfer)
 665{
 666        return xfer->len > rspi->ops->fifo_size;
 667}
 668
 669static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
 670                         struct spi_transfer *xfer)
 671{
 672        struct rspi_data *rspi = spi_master_get_devdata(master);
 673
 674        return __rspi_can_dma(rspi, xfer);
 675}
 676
 677static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
 678                                         struct spi_transfer *xfer)
 679{
 680        if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer))
 681                return -EAGAIN;
 682
 683        /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
 684        return rspi_dma_transfer(rspi, &xfer->tx_sg,
 685                                xfer->rx_buf ? &xfer->rx_sg : NULL);
 686}
 687
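/* Try DMA first; fall back to PIO when DMA is unavailable or not worthwhile */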
 688static int rspi_common_transfer(struct rspi_data *rspi,
 689                                struct spi_transfer *xfer)
 690{
 691        int ret;
 692
 693        ret = rspi_dma_check_then_transfer(rspi, xfer);
 694        if (ret != -EAGAIN)
 695                return ret;
 696
 697        ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
 698        if (ret < 0)
 699                return ret;
 700
 701        /* Wait for the last transmission */
 702        rspi_wait_for_tx_empty(rspi);
 703
 704        return 0;
 705}
 706
 707static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
 708                             struct spi_transfer *xfer)
 709{
 710        struct rspi_data *rspi = spi_master_get_devdata(master);
 711        u8 spcr;
 712
 713        spcr = rspi_read8(rspi, RSPI_SPCR);
 714        if (xfer->rx_buf) {
 715                rspi_receive_init(rspi);
 716                spcr &= ~SPCR_TXMD;
 717        } else {
 718                spcr |= SPCR_TXMD;
 719        }
 720        rspi_write8(rspi, spcr, RSPI_SPCR);
 721
 722        return rspi_common_transfer(rspi, xfer);
 723}
 724
 725static int rspi_rz_transfer_one(struct spi_master *master,
 726                                struct spi_device *spi,
 727                                struct spi_transfer *xfer)
 728{
 729        struct rspi_data *rspi = spi_master_get_devdata(master);
 730
 731        rspi_rz_receive_init(rspi);
 732
 733        return rspi_common_transfer(rspi, xfer);
 734}
 735
 736static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
 737                                        u8 *rx, unsigned int len)
 738{
 739        unsigned int i, n;
 740        int ret;
 741
 742        while (len > 0) {
 743                n = qspi_set_send_trigger(rspi, len);
 744                qspi_set_receive_trigger(rspi, len);
 745                if (n == QSPI_BUFFER_SIZE) {
 746                        ret = rspi_wait_for_tx_empty(rspi);
 747                        if (ret < 0) {
 748                                dev_err(&rspi->master->dev, "transmit timeout\n");
 749                                return ret;
 750                        }
 751                        for (i = 0; i < n; i++)
 752                                rspi_write_data(rspi, *tx++);
 753
 754                        ret = rspi_wait_for_rx_full(rspi);
 755                        if (ret < 0) {
 756                                dev_err(&rspi->master->dev, "receive timeout\n");
 757                                return ret;
 758                        }
 759                        for (i = 0; i < n; i++)
 760                                *rx++ = rspi_read_data(rspi);
 761                } else {
 762                        ret = rspi_pio_transfer(rspi, tx, rx, n);
 763                        if (ret < 0)
 764                                return ret;
 765                }
 766                len -= n;
 767        }
 768
 769        return 0;
 770}
 771
 772static int qspi_transfer_out_in(struct rspi_data *rspi,
 773                                struct spi_transfer *xfer)
 774{
 775        int ret;
 776
 777        qspi_receive_init(rspi);
 778
 779        ret = rspi_dma_check_then_transfer(rspi, xfer);
 780        if (ret != -EAGAIN)
 781                return ret;
 782
 783        return qspi_trigger_transfer_out_in(rspi, xfer->tx_buf,
 784                                            xfer->rx_buf, xfer->len);
 785}
 786
 787static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
 788{
 789        const u8 *tx = xfer->tx_buf;
 790        unsigned int n = xfer->len;
 791        unsigned int i, len;
 792        int ret;
 793
 794        if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
 795                ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
 796                if (ret != -EAGAIN)
 797                        return ret;
 798        }
 799
 800        while (n > 0) {
 801                len = qspi_set_send_trigger(rspi, n);
 802                if (len == QSPI_BUFFER_SIZE) {
 803                        ret = rspi_wait_for_tx_empty(rspi);
 804                        if (ret < 0) {
 805                                dev_err(&rspi->master->dev, "transmit timeout\n");
 806                                return ret;
 807                        }
 808                        for (i = 0; i < len; i++)
 809                                rspi_write_data(rspi, *tx++);
 810                } else {
 811                        ret = rspi_pio_transfer(rspi, tx, NULL, len);
 812                        if (ret < 0)
 813                                return ret;
 814                }
 815                n -= len;
 816        }
 817
 818        /* Wait for the last transmission */
 819        rspi_wait_for_tx_empty(rspi);
 820
 821        return 0;
 822}
 823
 824static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
 825{
 826        u8 *rx = xfer->rx_buf;
 827        unsigned int n = xfer->len;
 828        unsigned int i, len;
 829        int ret;
 830
 831        if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
  832                ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
 833                if (ret != -EAGAIN)
 834                        return ret;
 835        }
 836
 837        while (n > 0) {
 838                len = qspi_set_receive_trigger(rspi, n);
 839                if (len == QSPI_BUFFER_SIZE) {
 840                        ret = rspi_wait_for_rx_full(rspi);
 841                        if (ret < 0) {
 842                                dev_err(&rspi->master->dev, "receive timeout\n");
 843                                return ret;
 844                        }
 845                        for (i = 0; i < len; i++)
 846                                *rx++ = rspi_read_data(rspi);
 847                } else {
 848                        ret = rspi_pio_transfer(rspi, NULL, rx, len);
 849                        if (ret < 0)
 850                                return ret;
 851                }
 852                n -= len;
 853        }
 854
 855        return 0;
 856}
 857
 858static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
 859                             struct spi_transfer *xfer)
 860{
 861        struct rspi_data *rspi = spi_master_get_devdata(master);
 862
 863        if (spi->mode & SPI_LOOP) {
 864                return qspi_transfer_out_in(rspi, xfer);
 865        } else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
 866                /* Quad or Dual SPI Write */
 867                return qspi_transfer_out(rspi, xfer);
 868        } else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
 869                /* Quad or Dual SPI Read */
 870                return qspi_transfer_in(rspi, xfer);
 871        } else {
 872                /* Single SPI Transfer */
 873                return qspi_transfer_out_in(rspi, xfer);
 874        }
 875}
 876
 877static int rspi_setup(struct spi_device *spi)
 878{
 879        struct rspi_data *rspi = spi_master_get_devdata(spi->master);
 880
 881        rspi->max_speed_hz = spi->max_speed_hz;
 882
 883        rspi->spcmd = SPCMD_SSLKP;
 884        if (spi->mode & SPI_CPOL)
 885                rspi->spcmd |= SPCMD_CPOL;
 886        if (spi->mode & SPI_CPHA)
 887                rspi->spcmd |= SPCMD_CPHA;
 888
 889        /* CMOS output mode and MOSI signal from previous transfer */
 890        rspi->sppcr = 0;
 891        if (spi->mode & SPI_LOOP)
 892                rspi->sppcr |= SPPCR_SPLP;
 893
 894        set_config_register(rspi, 8);
 895
 896        return 0;
 897}
 898
 899static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
 900{
 901        if (xfer->tx_buf)
 902                switch (xfer->tx_nbits) {
 903                case SPI_NBITS_QUAD:
 904                        return SPCMD_SPIMOD_QUAD;
 905                case SPI_NBITS_DUAL:
 906                        return SPCMD_SPIMOD_DUAL;
 907                default:
 908                        return 0;
 909                }
 910        if (xfer->rx_buf)
 911                switch (xfer->rx_nbits) {
 912                case SPI_NBITS_QUAD:
 913                        return SPCMD_SPIMOD_QUAD | SPCMD_SPRW;
 914                case SPI_NBITS_DUAL:
 915                        return SPCMD_SPIMOD_DUAL | SPCMD_SPRW;
 916                default:
 917                        return 0;
 918                }
 919
 920        return 0;
 921}
 922
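/*
 * Program the QSPI sequencer: consecutive transfers that share a transfer
 * mode (single/dual/quad, read/write) are grouped into one SPCMD/SPBMUL
 * slot; at most QSPI_NUM_SPCMD mode changes per message are supported.
 */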
 923static int qspi_setup_sequencer(struct rspi_data *rspi,
 924                                const struct spi_message *msg)
 925{
 926        const struct spi_transfer *xfer;
 927        unsigned int i = 0, len = 0;
 928        u16 current_mode = 0xffff, mode;
 929
 930        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 931                mode = qspi_transfer_mode(xfer);
 932                if (mode == current_mode) {
 933                        len += xfer->len;
 934                        continue;
 935                }
 936
 937                /* Transfer mode change */
 938                if (i) {
 939                        /* Set transfer data length of previous transfer */
 940                        rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
 941                }
 942
 943                if (i >= QSPI_NUM_SPCMD) {
 944                        dev_err(&msg->spi->dev,
  945                                "Too many different transfer modes\n");
 946                        return -EINVAL;
 947                }
 948
 949                /* Program transfer mode for this transfer */
 950                rspi_write16(rspi, rspi->spcmd | mode, RSPI_SPCMD(i));
 951                current_mode = mode;
 952                len = xfer->len;
 953                i++;
 954        }
 955        if (i) {
 956                /* Set final transfer data length and sequence length */
 957                rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
 958                rspi_write8(rspi, i - 1, RSPI_SPSCR);
 959        }
 960
 961        return 0;
 962}
 963
 964static int rspi_prepare_message(struct spi_master *master,
 965                                struct spi_message *msg)
 966{
 967        struct rspi_data *rspi = spi_master_get_devdata(master);
 968        int ret;
 969
 970        if (msg->spi->mode &
 971            (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
 972                /* Setup sequencer for messages with multiple transfer modes */
 973                ret = qspi_setup_sequencer(rspi, msg);
 974                if (ret < 0)
 975                        return ret;
 976        }
 977
 978        /* Enable SPI function in master mode */
 979        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
 980        return 0;
 981}
 982
 983static int rspi_unprepare_message(struct spi_master *master,
 984                                  struct spi_message *msg)
 985{
 986        struct rspi_data *rspi = spi_master_get_devdata(master);
 987
 988        /* Disable SPI function */
 989        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
 990
 991        /* Reset sequencer for Single SPI Transfers */
 992        rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 993        rspi_write8(rspi, 0, RSPI_SPSCR);
 994        return 0;
 995}
 996
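/*
 * Interrupt handlers: latch SPSR, mask the interrupt source, and wake up
 * the waiter in rspi_wait_for_interrupt().
 */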
 997static irqreturn_t rspi_irq_mux(int irq, void *_sr)
 998{
 999        struct rspi_data *rspi = _sr;
1000        u8 spsr;
1001        irqreturn_t ret = IRQ_NONE;
1002        u8 disable_irq = 0;
1003
1004        rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
1005        if (spsr & SPSR_SPRF)
1006                disable_irq |= SPCR_SPRIE;
1007        if (spsr & SPSR_SPTEF)
1008                disable_irq |= SPCR_SPTIE;
1009
1010        if (disable_irq) {
1011                ret = IRQ_HANDLED;
1012                rspi_disable_irq(rspi, disable_irq);
1013                wake_up(&rspi->wait);
1014        }
1015
1016        return ret;
1017}
1018
1019static irqreturn_t rspi_irq_rx(int irq, void *_sr)
1020{
1021        struct rspi_data *rspi = _sr;
1022        u8 spsr;
1023
1024        rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
1025        if (spsr & SPSR_SPRF) {
1026                rspi_disable_irq(rspi, SPCR_SPRIE);
1027                wake_up(&rspi->wait);
1028                return IRQ_HANDLED;
1029        }
1030
 1031        return IRQ_NONE;
1032}
1033
1034static irqreturn_t rspi_irq_tx(int irq, void *_sr)
1035{
1036        struct rspi_data *rspi = _sr;
1037        u8 spsr;
1038
1039        rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
1040        if (spsr & SPSR_SPTEF) {
1041                rspi_disable_irq(rspi, SPCR_SPTIE);
1042                wake_up(&rspi->wait);
1043                return IRQ_HANDLED;
1044        }
1045
 1046        return IRQ_NONE;
1047}
1048
1049static struct dma_chan *rspi_request_dma_chan(struct device *dev,
1050                                              enum dma_transfer_direction dir,
1051                                              unsigned int id,
1052                                              dma_addr_t port_addr)
1053{
1054        dma_cap_mask_t mask;
1055        struct dma_chan *chan;
1056        struct dma_slave_config cfg;
1057        int ret;
1058
1059        dma_cap_zero(mask);
1060        dma_cap_set(DMA_SLAVE, mask);
1061
1062        chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
1063                                (void *)(unsigned long)id, dev,
1064                                dir == DMA_MEM_TO_DEV ? "tx" : "rx");
1065        if (!chan) {
1066                dev_warn(dev, "dma_request_slave_channel_compat failed\n");
1067                return NULL;
1068        }
1069
1070        memset(&cfg, 0, sizeof(cfg));
1071        cfg.direction = dir;
1072        if (dir == DMA_MEM_TO_DEV) {
1073                cfg.dst_addr = port_addr;
1074                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1075        } else {
1076                cfg.src_addr = port_addr;
1077                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1078        }
1079
1080        ret = dmaengine_slave_config(chan, &cfg);
1081        if (ret) {
1082                dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
1083                dma_release_channel(chan);
1084                return NULL;
1085        }
1086
1087        return chan;
1088}
1089
1090static int rspi_request_dma(struct device *dev, struct spi_master *master,
1091                            const struct resource *res)
1092{
1093        const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
1094        unsigned int dma_tx_id, dma_rx_id;
1095
1096        if (dev->of_node) {
1097                /* In the OF case we will get the slave IDs from the DT */
1098                dma_tx_id = 0;
1099                dma_rx_id = 0;
1100        } else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) {
1101                dma_tx_id = rspi_pd->dma_tx_id;
1102                dma_rx_id = rspi_pd->dma_rx_id;
1103        } else {
 1104                /* No DMA resources specified; fall back to PIO without error */
1105                return 0;
1106        }
1107
1108        master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
1109                                               res->start + RSPI_SPDR);
1110        if (!master->dma_tx)
1111                return -ENODEV;
1112
1113        master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
1114                                               res->start + RSPI_SPDR);
1115        if (!master->dma_rx) {
1116                dma_release_channel(master->dma_tx);
1117                master->dma_tx = NULL;
1118                return -ENODEV;
1119        }
1120
1121        master->can_dma = rspi_can_dma;
 1122        dev_info(dev, "DMA available\n");
1123        return 0;
1124}
1125
1126static void rspi_release_dma(struct spi_master *master)
1127{
1128        if (master->dma_tx)
1129                dma_release_channel(master->dma_tx);
1130        if (master->dma_rx)
1131                dma_release_channel(master->dma_rx);
1132}
1133
1134static int rspi_remove(struct platform_device *pdev)
1135{
1136        struct rspi_data *rspi = platform_get_drvdata(pdev);
1137
1138        rspi_release_dma(rspi->master);
1139        pm_runtime_disable(&pdev->dev);
1140
1141        return 0;
1142}
1143
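/* RSPI on legacy SH */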
1144static const struct spi_ops rspi_ops = {
1145        .set_config_register =  rspi_set_config_register,
1146        .transfer_one =         rspi_transfer_one,
1147        .mode_bits =            SPI_CPHA | SPI_CPOL | SPI_LOOP,
1148        .flags =                SPI_MASTER_MUST_TX,
1149        .fifo_size =            8,
1150};
1151
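/* RSPI on RZ/A1H */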
1152static const struct spi_ops rspi_rz_ops = {
1153        .set_config_register =  rspi_rz_set_config_register,
1154        .transfer_one =         rspi_rz_transfer_one,
1155        .mode_bits =            SPI_CPHA | SPI_CPOL | SPI_LOOP,
1156        .flags =                SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
1157        .fifo_size =            8,      /* 8 for TX, 32 for RX */
1158};
1159
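/* QSPI on R-Car Gen2 */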
1160static const struct spi_ops qspi_ops = {
1161        .set_config_register =  qspi_set_config_register,
1162        .transfer_one =         qspi_transfer_one,
1163        .mode_bits =            SPI_CPHA | SPI_CPOL | SPI_LOOP |
1164                                SPI_TX_DUAL | SPI_TX_QUAD |
1165                                SPI_RX_DUAL | SPI_RX_QUAD,
1166        .flags =                SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
1167        .fifo_size =            32,
1168};
1169
1170#ifdef CONFIG_OF
1171static const struct of_device_id rspi_of_match[] = {
1172        /* RSPI on legacy SH */
1173        { .compatible = "renesas,rspi", .data = &rspi_ops },
1174        /* RSPI on RZ/A1H */
1175        { .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
1176        /* QSPI on R-Car Gen2 */
1177        { .compatible = "renesas,qspi", .data = &qspi_ops },
1178        { /* sentinel */ }
1179};
1180
1181MODULE_DEVICE_TABLE(of, rspi_of_match);
1182
1183static int rspi_parse_dt(struct device *dev, struct spi_master *master)
1184{
1185        u32 num_cs;
1186        int error;
1187
1188        /* Parse DT properties */
1189        error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
1190        if (error) {
1191                dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
1192                return error;
1193        }
1194
1195        master->num_chipselect = num_cs;
1196        return 0;
1197}
1198#else
1199#define rspi_of_match   NULL
1200static inline int rspi_parse_dt(struct device *dev, struct spi_master *master)
1201{
1202        return -EINVAL;
1203}
1204#endif /* CONFIG_OF */
1205
1206static int rspi_request_irq(struct device *dev, unsigned int irq,
1207                            irq_handler_t handler, const char *suffix,
1208                            void *dev_id)
1209{
1210        const char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
1211                                          dev_name(dev), suffix);
1212        if (!name)
1213                return -ENOMEM;
1214
1215        return devm_request_irq(dev, irq, handler, 0, name, dev_id);
1216}
1217
1218static int rspi_probe(struct platform_device *pdev)
1219{
1220        struct resource *res;
1221        struct spi_master *master;
1222        struct rspi_data *rspi;
1223        int ret;
1224        const struct rspi_plat_data *rspi_pd;
1225        const struct spi_ops *ops;
1226
1227        master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
1228        if (master == NULL)
1229                return -ENOMEM;
1230
1231        ops = of_device_get_match_data(&pdev->dev);
1232        if (ops) {
1233                ret = rspi_parse_dt(&pdev->dev, master);
1234                if (ret)
1235                        goto error1;
1236        } else {
1237                ops = (struct spi_ops *)pdev->id_entry->driver_data;
1238                rspi_pd = dev_get_platdata(&pdev->dev);
1239                if (rspi_pd && rspi_pd->num_chipselect)
1240                        master->num_chipselect = rspi_pd->num_chipselect;
1241                else
1242                        master->num_chipselect = 2; /* default */
1243        }
1244
1245        /* ops parameter check */
1246        if (!ops->set_config_register) {
1247                dev_err(&pdev->dev, "there is no set_config_register\n");
1248                ret = -ENODEV;
1249                goto error1;
1250        }
1251
1252        rspi = spi_master_get_devdata(master);
1253        platform_set_drvdata(pdev, rspi);
1254        rspi->ops = ops;
1255        rspi->master = master;
1256
1257        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1258        rspi->addr = devm_ioremap_resource(&pdev->dev, res);
1259        if (IS_ERR(rspi->addr)) {
1260                ret = PTR_ERR(rspi->addr);
1261                goto error1;
1262        }
1263
1264        rspi->clk = devm_clk_get(&pdev->dev, NULL);
1265        if (IS_ERR(rspi->clk)) {
1266                dev_err(&pdev->dev, "cannot get clock\n");
1267                ret = PTR_ERR(rspi->clk);
1268                goto error1;
1269        }
1270
1271        pm_runtime_enable(&pdev->dev);
1272
1273        init_waitqueue_head(&rspi->wait);
1274
1275        master->bus_num = pdev->id;
1276        master->setup = rspi_setup;
1277        master->auto_runtime_pm = true;
1278        master->transfer_one = ops->transfer_one;
1279        master->prepare_message = rspi_prepare_message;
1280        master->unprepare_message = rspi_unprepare_message;
1281        master->mode_bits = ops->mode_bits;
1282        master->flags = ops->flags;
1283        master->dev.of_node = pdev->dev.of_node;
1284
1285        ret = platform_get_irq_byname(pdev, "rx");
1286        if (ret < 0) {
1287                ret = platform_get_irq_byname(pdev, "mux");
1288                if (ret < 0)
1289                        ret = platform_get_irq(pdev, 0);
1290                if (ret >= 0)
1291                        rspi->rx_irq = rspi->tx_irq = ret;
1292        } else {
1293                rspi->rx_irq = ret;
1294                ret = platform_get_irq_byname(pdev, "tx");
1295                if (ret >= 0)
1296                        rspi->tx_irq = ret;
1297        }
1298        if (ret < 0) {
1299                dev_err(&pdev->dev, "platform_get_irq error\n");
1300                goto error2;
1301        }
1302
1303        if (rspi->rx_irq == rspi->tx_irq) {
1304                /* Single multiplexed interrupt */
1305                ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
1306                                       "mux", rspi);
1307        } else {
1308                /* Multi-interrupt mode, only SPRI and SPTI are used */
1309                ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
1310                                       "rx", rspi);
1311                if (!ret)
1312                        ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
1313                                               rspi_irq_tx, "tx", rspi);
1314        }
1315        if (ret < 0) {
1316                dev_err(&pdev->dev, "request_irq error\n");
1317                goto error2;
1318        }
1319
1320        ret = rspi_request_dma(&pdev->dev, master, res);
1321        if (ret < 0)
1322                dev_warn(&pdev->dev, "DMA not available, using PIO\n");
1323
1324        ret = devm_spi_register_master(&pdev->dev, master);
1325        if (ret < 0) {
1326                dev_err(&pdev->dev, "spi_register_master error.\n");
1327                goto error3;
1328        }
1329
1330        dev_info(&pdev->dev, "probed\n");
1331
1332        return 0;
1333
1334error3:
1335        rspi_release_dma(master);
1336error2:
1337        pm_runtime_disable(&pdev->dev);
1338error1:
1339        spi_master_put(master);
1340
1341        return ret;
1342}
1343
1344static const struct platform_device_id spi_driver_ids[] = {
1345        { "rspi",       (kernel_ulong_t)&rspi_ops },
1346        { "rspi-rz",    (kernel_ulong_t)&rspi_rz_ops },
1347        { "qspi",       (kernel_ulong_t)&qspi_ops },
1348        {},
1349};
1350
1351MODULE_DEVICE_TABLE(platform, spi_driver_ids);
1352
1353static struct platform_driver rspi_driver = {
1354        .probe =        rspi_probe,
1355        .remove =       rspi_remove,
1356        .id_table =     spi_driver_ids,
1357        .driver         = {
1358                .name = "renesas_spi",
1359                .of_match_table = of_match_ptr(rspi_of_match),
1360        },
1361};
1362module_platform_driver(rspi_driver);
1363
1364MODULE_DESCRIPTION("Renesas RSPI bus driver");
1365MODULE_LICENSE("GPL v2");
1366MODULE_AUTHOR("Yoshihiro Shimoda");
1367MODULE_ALIAS("platform:rspi");
1368