linux/drivers/spi/spi-sh-msiof.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * SuperH MSIOF SPI Controller Interface
   4 *
   5 * Copyright (c) 2009 Magnus Damm
   6 * Copyright (C) 2014 Renesas Electronics Corporation
   7 * Copyright (C) 2014-2017 Glider bvba
   8 */
   9
  10#include <linux/bitmap.h>
  11#include <linux/clk.h>
  12#include <linux/completion.h>
  13#include <linux/delay.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/dmaengine.h>
  16#include <linux/err.h>
  17#include <linux/interrupt.h>
  18#include <linux/io.h>
  19#include <linux/iopoll.h>
  20#include <linux/kernel.h>
  21#include <linux/module.h>
  22#include <linux/of.h>
  23#include <linux/of_device.h>
  24#include <linux/platform_device.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/sh_dma.h>
  27
  28#include <linux/spi/sh_msiof.h>
  29#include <linux/spi/spi.h>
  30
  31#include <asm/unaligned.h>
  32
  33struct sh_msiof_chipdata {
  34        u32 bits_per_word_mask;
  35        u16 tx_fifo_size;
  36        u16 rx_fifo_size;
  37        u16 ctlr_flags;
  38        u16 min_div_pow;
  39};
  40
  41struct sh_msiof_spi_priv {
  42        struct spi_controller *ctlr;
  43        void __iomem *mapbase;
  44        struct clk *clk;
  45        struct platform_device *pdev;
  46        struct sh_msiof_spi_info *info;
  47        struct completion done;
  48        struct completion done_txdma;
  49        unsigned int tx_fifo_size;
  50        unsigned int rx_fifo_size;
  51        unsigned int min_div_pow;
  52        void *tx_dma_page;
  53        void *rx_dma_page;
  54        dma_addr_t tx_dma_addr;
  55        dma_addr_t rx_dma_addr;
  56        bool native_cs_inited;
  57        bool native_cs_high;
  58        bool slave_aborted;
  59};
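/*
 * Note on the DMA members above: tx_dma_page/rx_dma_page are single-page
 * bounce buffers allocated in sh_msiof_request_dma() and mapped once at
 * probe time (tx_dma_addr/rx_dma_addr).  The transfer path packs data into
 * them with the copy_*32() helpers below, so no per-transfer mapping is
 * needed, only cache syncs.  "done" is completed by the end-of-frame
 * interrupt or the RX DMA callback, "done_txdma" by the TX DMA callback.
 */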
  60
  61#define MAX_SS  3       /* Maximum number of native chip selects */
  62
  63#define SITMDR1 0x00    /* Transmit Mode Register 1 */
  64#define SITMDR2 0x04    /* Transmit Mode Register 2 */
  65#define SITMDR3 0x08    /* Transmit Mode Register 3 */
  66#define SIRMDR1 0x10    /* Receive Mode Register 1 */
  67#define SIRMDR2 0x14    /* Receive Mode Register 2 */
  68#define SIRMDR3 0x18    /* Receive Mode Register 3 */
  69#define SITSCR  0x20    /* Transmit Clock Select Register */
  70#define SIRSCR  0x22    /* Receive Clock Select Register (SH, A1, APE6) */
  71#define SICTR   0x28    /* Control Register */
  72#define SIFCTR  0x30    /* FIFO Control Register */
  73#define SISTR   0x40    /* Status Register */
  74#define SIIER   0x44    /* Interrupt Enable Register */
  75#define SITDR1  0x48    /* Transmit Control Data Register 1 (SH, A1) */
  76#define SITDR2  0x4c    /* Transmit Control Data Register 2 (SH, A1) */
  77#define SITFDR  0x50    /* Transmit FIFO Data Register */
  78#define SIRDR1  0x58    /* Receive Control Data Register 1 (SH, A1) */
  79#define SIRDR2  0x5c    /* Receive Control Data Register 2 (SH, A1) */
  80#define SIRFDR  0x60    /* Receive FIFO Data Register */
  81
  82/* SITMDR1 and SIRMDR1 */
  83#define SIMDR1_TRMD             BIT(31)         /* Transfer Mode (1 = Master mode) */
  84#define SIMDR1_SYNCMD_MASK      GENMASK(29, 28) /* SYNC Mode */
  85#define SIMDR1_SYNCMD_SPI       (2 << 28)       /*   Level mode/SPI */
  86#define SIMDR1_SYNCMD_LR        (3 << 28)       /*   L/R mode */
  87#define SIMDR1_SYNCAC_SHIFT     25              /* Sync Polarity (1 = Active-low) */
  88#define SIMDR1_BITLSB_SHIFT     24              /* MSB/LSB First (1 = LSB first) */
  89#define SIMDR1_DTDL_SHIFT       20              /* Data Pin Bit Delay for MSIOF_SYNC */
  90#define SIMDR1_SYNCDL_SHIFT     16              /* Frame Sync Signal Timing Delay */
  91#define SIMDR1_FLD_MASK         GENMASK(3, 2)   /* Frame Sync Signal Interval (0-3) */
  92#define SIMDR1_FLD_SHIFT        2
  93#define SIMDR1_XXSTP            BIT(0)          /* Transmission/Reception Stop on FIFO */
  94/* SITMDR1 */
  95#define SITMDR1_PCON            BIT(30)         /* Transfer Signal Connection */
  96#define SITMDR1_SYNCCH_MASK     GENMASK(27, 26) /* Sync Signal Channel Select */
  97#define SITMDR1_SYNCCH_SHIFT    26              /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
  98
  99/* SITMDR2 and SIRMDR2 */
 100#define SIMDR2_BITLEN1(i)       (((i) - 1) << 24) /* Data Size (8-32 bits) */
 100#define SIMDR2_WDLEN1(i)        (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
 102#define SIMDR2_GRPMASK1         BIT(0)          /* Group Output Mask 1 (SH, A1) */
 103
 104/* SITSCR and SIRSCR */
 105#define SISCR_BRPS_MASK         GENMASK(12, 8)  /* Prescaler Setting (1-32) */
 106#define SISCR_BRPS(i)           (((i) - 1) << 8)
 107#define SISCR_BRDV_MASK         GENMASK(2, 0)   /* Baud Rate Generator's Division Ratio */
 108#define SISCR_BRDV_DIV_2        0
 109#define SISCR_BRDV_DIV_4        1
 110#define SISCR_BRDV_DIV_8        2
 111#define SISCR_BRDV_DIV_16       3
 112#define SISCR_BRDV_DIV_32       4
 113#define SISCR_BRDV_DIV_1        7
 114
 115/* SICTR */
 116#define SICTR_TSCKIZ_MASK       GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
 117#define SICTR_TSCKIZ_SCK        BIT(31)         /*   Disable SCK when TX disabled */
 118#define SICTR_TSCKIZ_POL_SHIFT  30              /*   Transmit Clock Polarity */
 119#define SICTR_RSCKIZ_MASK       GENMASK(29, 28) /* Receive Clock Polarity Select */
 120#define SICTR_RSCKIZ_SCK        BIT(29)         /*   Must match SICTR_TSCKIZ_SCK */
 121#define SICTR_RSCKIZ_POL_SHIFT  28              /*   Receive Clock Polarity */
 122#define SICTR_TEDG_SHIFT        27              /* Transmit Timing (1 = falling edge) */
 123#define SICTR_REDG_SHIFT        26              /* Receive Timing (1 = falling edge) */
 124#define SICTR_TXDIZ_MASK        GENMASK(23, 22) /* Pin Output When TX is Disabled */
 125#define SICTR_TXDIZ_LOW         (0 << 22)       /*   0 */
 126#define SICTR_TXDIZ_HIGH        (1 << 22)       /*   1 */
 127#define SICTR_TXDIZ_HIZ         (2 << 22)       /*   High-impedance */
 128#define SICTR_TSCKE             BIT(15)         /* Transmit Serial Clock Output Enable */
 129#define SICTR_TFSE              BIT(14)         /* Transmit Frame Sync Signal Output Enable */
 130#define SICTR_TXE               BIT(9)          /* Transmit Enable */
 131#define SICTR_RXE               BIT(8)          /* Receive Enable */
 132#define SICTR_TXRST             BIT(1)          /* Transmit Reset */
 133#define SICTR_RXRST             BIT(0)          /* Receive Reset */
 134
 135/* SIFCTR */
 136#define SIFCTR_TFWM_MASK        GENMASK(31, 29) /* Transmit FIFO Watermark */
 137#define SIFCTR_TFWM_64          (0 << 29)       /*  Transfer Request when 64 empty stages */
 138#define SIFCTR_TFWM_32          (1 << 29)       /*  Transfer Request when 32 empty stages */
 139#define SIFCTR_TFWM_24          (2 << 29)       /*  Transfer Request when 24 empty stages */
 140#define SIFCTR_TFWM_16          (3 << 29)       /*  Transfer Request when 16 empty stages */
 141#define SIFCTR_TFWM_12          (4 << 29)       /*  Transfer Request when 12 empty stages */
 142#define SIFCTR_TFWM_8           (5 << 29)       /*  Transfer Request when 8 empty stages */
 143#define SIFCTR_TFWM_4           (6 << 29)       /*  Transfer Request when 4 empty stages */
 144#define SIFCTR_TFWM_1           (7 << 29)       /*  Transfer Request when 1 empty stage */
 145#define SIFCTR_TFUA_MASK        GENMASK(26, 20) /* Transmit FIFO Usable Area */
 146#define SIFCTR_TFUA_SHIFT       20
 147#define SIFCTR_TFUA(i)          ((i) << SIFCTR_TFUA_SHIFT)
 148#define SIFCTR_RFWM_MASK        GENMASK(15, 13) /* Receive FIFO Watermark */
 150#define SIFCTR_RFWM_1           (0 << 13)       /*  Transfer Request when 1 valid stage */
 150#define SIFCTR_RFWM_4           (1 << 13)       /*  Transfer Request when 4 valid stages */
 151#define SIFCTR_RFWM_8           (2 << 13)       /*  Transfer Request when 8 valid stages */
 152#define SIFCTR_RFWM_16          (3 << 13)       /*  Transfer Request when 16 valid stages */
 153#define SIFCTR_RFWM_32          (4 << 13)       /*  Transfer Request when 32 valid stages */
 154#define SIFCTR_RFWM_64          (5 << 13)       /*  Transfer Request when 64 valid stages */
 155#define SIFCTR_RFWM_128         (6 << 13)       /*  Transfer Request when 128 valid stages */
 156#define SIFCTR_RFWM_256         (7 << 13)       /*  Transfer Request when 256 valid stages */
 157#define SIFCTR_RFUA_MASK        GENMASK(12, 4)  /* Receive FIFO Usable Area (0x40 = full) */
 158#define SIFCTR_RFUA_SHIFT       4
 159#define SIFCTR_RFUA(i)          ((i) << SIFCTR_RFUA_SHIFT)
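/*
 * Watermark usage in this driver: the PIO path writes 0 to SIFCTR, which
 * per the encodings above selects SIFCTR_TFWM_64 | SIFCTR_RFWM_1, while the
 * DMA path programs SIFCTR_TFWM_1 | SIFCTR_RFWM_1 so that a DMA request is
 * raised for every single FIFO stage.
 */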
 160
 161/* SISTR */
 162#define SISTR_TFEMP             BIT(29) /* Transmit FIFO Empty */
 163#define SISTR_TDREQ             BIT(28) /* Transmit Data Transfer Request */
 164#define SISTR_TEOF              BIT(23) /* Frame Transmission End */
 165#define SISTR_TFSERR            BIT(21) /* Transmit Frame Synchronization Error */
 166#define SISTR_TFOVF             BIT(20) /* Transmit FIFO Overflow */
 167#define SISTR_TFUDF             BIT(19) /* Transmit FIFO Underflow */
 168#define SISTR_RFFUL             BIT(13) /* Receive FIFO Full */
 169#define SISTR_RDREQ             BIT(12) /* Receive Data Transfer Request */
 170#define SISTR_REOF              BIT(7)  /* Frame Reception End */
 171#define SISTR_RFSERR            BIT(5)  /* Receive Frame Synchronization Error */
 172#define SISTR_RFUDF             BIT(4)  /* Receive FIFO Underflow */
 173#define SISTR_RFOVF             BIT(3)  /* Receive FIFO Overflow */
 174
 175/* SIIER */
 176#define SIIER_TDMAE             BIT(31) /* Transmit Data DMA Transfer Req. Enable */
 177#define SIIER_TFEMPE            BIT(29) /* Transmit FIFO Empty Enable */
 178#define SIIER_TDREQE            BIT(28) /* Transmit Data Transfer Request Enable */
 179#define SIIER_TEOFE             BIT(23) /* Frame Transmission End Enable */
 180#define SIIER_TFSERRE           BIT(21) /* Transmit Frame Sync Error Enable */
 181#define SIIER_TFOVFE            BIT(20) /* Transmit FIFO Overflow Enable */
 182#define SIIER_TFUDFE            BIT(19) /* Transmit FIFO Underflow Enable */
 183#define SIIER_RDMAE             BIT(15) /* Receive Data DMA Transfer Req. Enable */
 184#define SIIER_RFFULE            BIT(13) /* Receive FIFO Full Enable */
 185#define SIIER_RDREQE            BIT(12) /* Receive Data Transfer Request Enable */
 186#define SIIER_REOFE             BIT(7)  /* Frame Reception End Enable */
 187#define SIIER_RFSERRE           BIT(5)  /* Receive Frame Sync Error Enable */
 188#define SIIER_RFUDFE            BIT(4)  /* Receive FIFO Underflow Enable */
 189#define SIIER_RFOVFE            BIT(3)  /* Receive FIFO Overflow Enable */
 190
 191
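/*
 * Register access helpers.  Most MSIOF registers are 32 bits wide, but the
 * transmit/receive clock select registers (SITSCR/SIRSCR) are only 16 bits,
 * hence the special-casing in the accessors below.
 */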
 192static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
 193{
 194        switch (reg_offs) {
 195        case SITSCR:
 196        case SIRSCR:
 197                return ioread16(p->mapbase + reg_offs);
 198        default:
 199                return ioread32(p->mapbase + reg_offs);
 200        }
 201}
 202
 203static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
 204                           u32 value)
 205{
 206        switch (reg_offs) {
 207        case SITSCR:
 208        case SIRSCR:
 209                iowrite16(value, p->mapbase + reg_offs);
 210                break;
 211        default:
 212                iowrite32(value, p->mapbase + reg_offs);
 213                break;
 214        }
 215}
 216
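/*
 * Read-modify-write SICTR, then poll until the hardware actually reflects
 * the new bit values.  The enable/strobe bits in SICTR do not necessarily
 * take effect immediately (presumably they are synchronized to the serial
 * clock), so a busy-wait of up to 100 us is used; -ETIMEDOUT is returned if
 * the bits never settle.
 */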
 217static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
 218                                    u32 clr, u32 set)
 219{
 220        u32 mask = clr | set;
 221        u32 data;
 222
 223        data = sh_msiof_read(p, SICTR);
 224        data &= ~clr;
 225        data |= set;
 226        sh_msiof_write(p, SICTR, data);
 227
 228        return readl_poll_timeout_atomic(p->mapbase + SICTR, data,
 229                                         (data & mask) == set, 1, 100);
 230}
 231
 232static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
 233{
 234        struct sh_msiof_spi_priv *p = data;
 235
 236        /* just disable the interrupt and wake up */
 237        sh_msiof_write(p, SIIER, 0);
 238        complete(&p->done);
 239
 240        return IRQ_HANDLED;
 241}
 242
 243static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
 244{
 245        u32 mask = SICTR_TXRST | SICTR_RXRST;
 246        u32 data;
 247
 248        data = sh_msiof_read(p, SICTR);
 249        data |= mask;
 250        sh_msiof_write(p, SICTR, data);
 251
 252        readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1,
 253                                  100);
 254}
 255
 256static const u32 sh_msiof_spi_div_array[] = {
 257        SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
 258        SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
 259};
 260
 261static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
 262                                      struct spi_transfer *t)
 263{
 264        unsigned long parent_rate = clk_get_rate(p->clk);
 265        unsigned int div_pow = p->min_div_pow;
 266        u32 spi_hz = t->speed_hz;
 267        unsigned long div;
 268        u32 brps, scr;
 269
 270        if (!spi_hz || !parent_rate) {
 271                WARN(1, "Invalid clock rate parameters %lu and %u\n",
 272                     parent_rate, spi_hz);
 273                return;
 274        }
 275
 276        div = DIV_ROUND_UP(parent_rate, spi_hz);
 277        if (div <= 1024) {
 278                /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
 279                if (!div_pow && div <= 32 && div > 2)
 280                        div_pow = 1;
 281
 282                if (div_pow)
 283                        brps = (div + 1) >> div_pow;
 284                else
 285                        brps = div;
 286
 287                for (; brps > 32; div_pow++)
 288                        brps = (brps + 1) >> 1;
 289        } else {
 290                /* Set transfer rate composite divisor to 2^5 * 32 = 1024 */
 291                dev_err(&p->pdev->dev,
 292                        "Requested SPI transfer rate %d is too low\n", spi_hz);
 293                div_pow = 5;
 294                brps = 32;
 295        }
 296
 297        t->effective_speed_hz = parent_rate / (brps << div_pow);
 298
 299        scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
 300        sh_msiof_write(p, SITSCR, scr);
 301        if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
 302                sh_msiof_write(p, SIRSCR, scr);
 303}
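/*
 * Worked example for the divider selection above (a sketch, assuming a
 * 48 MHz module clock, a requested 1 MHz transfer, and min_div_pow = 0):
 *
 *   div  = DIV_ROUND_UP(48000000, 1000000) = 48
 *   brps starts at 48, is halved once to 24, div_pow becomes 1
 *   SITSCR = SISCR_BRDV_DIV_2 | SISCR_BRPS(24)
 *   effective_speed_hz = 48000000 / (24 << 1) = 1 MHz exactly
 *
 * The composite divisor is always BRPS * 2^div_pow, where div_pow indexes
 * sh_msiof_spi_div_array[] above.
 */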
 304
 305static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
 306{
 307        /*
 308         * DTDL/SYNCDL bit      : p->info->dtdl or p->info->syncdl
 309         * b'000                : 0
 310         * b'001                : 100
 311         * b'010                : 200
 312         * b'011 (SYNCDL only)  : 300
 313         * b'101                : 50
 314         * b'110                : 150
 315         */
 316        if (dtdl_or_syncdl % 100)
 317                return dtdl_or_syncdl / 100 + 5;
 318        else
 319                return dtdl_or_syncdl / 100;
 320}
 321
 322static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
 323{
 324        u32 val;
 325
 326        if (!p->info)
 327                return 0;
 328
 329        /* check if DTDL and SYNCDL are allowed values */
 330        if (p->info->dtdl > 200 || p->info->syncdl > 300) {
 331                dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
 332                return 0;
 333        }
 334
 335        /* the sum of DTDL and SYNCDL must be an integer number of clocks (multiple of 100) */
 336        if ((p->info->dtdl + p->info->syncdl) % 100) {
 337                dev_warn(&p->pdev->dev, "the sum of DTDL and SYNCDL must be a multiple of 100\n");
 338                return 0;
 339        }
 340
 341        val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
 342        val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;
 343
 344        return val;
 345}
 346
 347static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
 348                                      u32 cpol, u32 cpha,
 349                                      u32 tx_hi_z, u32 lsb_first, u32 cs_high)
 350{
 351        u32 tmp;
 352        int edge;
 353
 354        /*
 355         * CPOL CPHA     TSCKIZ RSCKIZ TEDG REDG
 356         *    0    0         10     10    1    1
 357         *    0    1         10     10    0    0
 358         *    1    0         11     11    0    0
 359         *    1    1         11     11    1    1
 360         */
 361        tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
 362        tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
 363        tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
 364        tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
 365        if (spi_controller_is_slave(p->ctlr)) {
 366                sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
 367        } else {
 368                sh_msiof_write(p, SITMDR1,
 369                               tmp | SIMDR1_TRMD | SITMDR1_PCON |
 370                               (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
 371        }
 372        if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
 373                /* These bits are reserved if RX needs TX */
 374                tmp &= ~0x0000ffff;
 375        }
 376        sh_msiof_write(p, SIRMDR1, tmp);
 377
 378        tmp = 0;
 379        tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
 380        tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;
 381
 382        edge = cpol ^ !cpha;
 383
 384        tmp |= edge << SICTR_TEDG_SHIFT;
 385        tmp |= edge << SICTR_REDG_SHIFT;
 386        tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
 387        sh_msiof_write(p, SICTR, tmp);
 388}
 389
 390static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
 391                                       const void *tx_buf, void *rx_buf,
 392                                       u32 bits, u32 words)
 393{
 394        u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);
 395
 396        if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
 397                sh_msiof_write(p, SITMDR2, dr2);
 398        else
 399                sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);
 400
 401        if (rx_buf)
 402                sh_msiof_write(p, SIRMDR2, dr2);
 403}
 404
 405static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 406{
 407        sh_msiof_write(p, SISTR,
 408                       sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ));
 409}
 410
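/*
 * FIFO access helpers.  The FIFO data registers are 32 bits wide with the
 * payload left-justified, so TX values are shifted up by (32 - bits_per_word)
 * before being written to SITFDR and RX values are shifted back down after
 * being read from SIRFDR (the "fs" argument).  The _16u/_32u variants use
 * get/put_unaligned() for buffers that are not naturally aligned, and the
 * _s32 variants additionally byte-swap, which is used when 8-bit data has
 * been repacked into 32-bit words (see sh_msiof_transfer_one()).
 */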
 411static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
 412                                      const void *tx_buf, int words, int fs)
 413{
 414        const u8 *buf_8 = tx_buf;
 415        int k;
 416
 417        for (k = 0; k < words; k++)
 418                sh_msiof_write(p, SITFDR, buf_8[k] << fs);
 419}
 420
 421static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
 422                                       const void *tx_buf, int words, int fs)
 423{
 424        const u16 *buf_16 = tx_buf;
 425        int k;
 426
 427        for (k = 0; k < words; k++)
 428                sh_msiof_write(p, SITFDR, buf_16[k] << fs);
 429}
 430
 431static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
 432                                        const void *tx_buf, int words, int fs)
 433{
 434        const u16 *buf_16 = tx_buf;
 435        int k;
 436
 437        for (k = 0; k < words; k++)
 438                sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
 439}
 440
 441static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
 442                                       const void *tx_buf, int words, int fs)
 443{
 444        const u32 *buf_32 = tx_buf;
 445        int k;
 446
 447        for (k = 0; k < words; k++)
 448                sh_msiof_write(p, SITFDR, buf_32[k] << fs);
 449}
 450
 451static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
 452                                        const void *tx_buf, int words, int fs)
 453{
 454        const u32 *buf_32 = tx_buf;
 455        int k;
 456
 457        for (k = 0; k < words; k++)
 458                sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
 459}
 460
 461static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
 462                                        const void *tx_buf, int words, int fs)
 463{
 464        const u32 *buf_32 = tx_buf;
 465        int k;
 466
 467        for (k = 0; k < words; k++)
 468                sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
 469}
 470
 471static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
 472                                         const void *tx_buf, int words, int fs)
 473{
 474        const u32 *buf_32 = tx_buf;
 475        int k;
 476
 477        for (k = 0; k < words; k++)
 478                sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
 479}
 480
 481static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
 482                                     void *rx_buf, int words, int fs)
 483{
 484        u8 *buf_8 = rx_buf;
 485        int k;
 486
 487        for (k = 0; k < words; k++)
 488                buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
 489}
 490
 491static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
 492                                      void *rx_buf, int words, int fs)
 493{
 494        u16 *buf_16 = rx_buf;
 495        int k;
 496
 497        for (k = 0; k < words; k++)
 498                buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
 499}
 500
 501static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
 502                                       void *rx_buf, int words, int fs)
 503{
 504        u16 *buf_16 = rx_buf;
 505        int k;
 506
 507        for (k = 0; k < words; k++)
 508                put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
 509}
 510
 511static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
 512                                      void *rx_buf, int words, int fs)
 513{
 514        u32 *buf_32 = rx_buf;
 515        int k;
 516
 517        for (k = 0; k < words; k++)
 518                buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
 519}
 520
 521static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
 522                                       void *rx_buf, int words, int fs)
 523{
 524        u32 *buf_32 = rx_buf;
 525        int k;
 526
 527        for (k = 0; k < words; k++)
 528                put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
 529}
 530
 531static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
 532                                       void *rx_buf, int words, int fs)
 533{
 534        u32 *buf_32 = rx_buf;
 535        int k;
 536
 537        for (k = 0; k < words; k++)
 538                buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
 539}
 540
 541static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
 542                                       void *rx_buf, int words, int fs)
 543{
 544        u32 *buf_32 = rx_buf;
 545        int k;
 546
 547        for (k = 0; k < words; k++)
 548                put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
 549}
 550
 551static int sh_msiof_spi_setup(struct spi_device *spi)
 552{
 553        struct sh_msiof_spi_priv *p =
 554                spi_controller_get_devdata(spi->controller);
 555        u32 clr, set, tmp;
 556
 557        if (spi->cs_gpiod || spi_controller_is_slave(p->ctlr))
 558                return 0;
 559
 560        if (p->native_cs_inited &&
 561            (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH)))
 562                return 0;
 563
 564        /* Configure native chip select mode/polarity early */
 565        clr = SIMDR1_SYNCMD_MASK;
 566        set = SIMDR1_SYNCMD_SPI;
 567        if (spi->mode & SPI_CS_HIGH)
 568                clr |= BIT(SIMDR1_SYNCAC_SHIFT);
 569        else
 570                set |= BIT(SIMDR1_SYNCAC_SHIFT);
 571        pm_runtime_get_sync(&p->pdev->dev);
 572        tmp = sh_msiof_read(p, SITMDR1) & ~clr;
 573        sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
 574        tmp = sh_msiof_read(p, SIRMDR1) & ~clr;
 575        sh_msiof_write(p, SIRMDR1, tmp | set);
 576        pm_runtime_put(&p->pdev->dev);
 577        p->native_cs_high = spi->mode & SPI_CS_HIGH;
 578        p->native_cs_inited = true;
 579        return 0;
 580}
 581
 582static int sh_msiof_prepare_message(struct spi_controller *ctlr,
 583                                    struct spi_message *msg)
 584{
 585        struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
 586        const struct spi_device *spi = msg->spi;
 587        u32 ss, cs_high;
 588
 589        /* Configure pins before asserting CS */
 590        if (spi->cs_gpiod) {
 591                ss = ctlr->unused_native_cs;
 592                cs_high = p->native_cs_high;
 593        } else {
 594                ss = spi->chip_select;
 595                cs_high = !!(spi->mode & SPI_CS_HIGH);
 596        }
 597        sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
 598                                  !!(spi->mode & SPI_CPHA),
 599                                  !!(spi->mode & SPI_3WIRE),
 600                                  !!(spi->mode & SPI_LSB_FIRST), cs_high);
 601        return 0;
 602}
 603
 604static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
 605{
 606        bool slave = spi_controller_is_slave(p->ctlr);
 607        int ret = 0;
 608
 609        /* setup clock and rx/tx signals */
 610        if (!slave)
 611                ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE);
 612        if (rx_buf && !ret)
 613                ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE);
 614        if (!ret)
 615                ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE);
 616
 617        /* start by setting frame bit */
 618        if (!ret && !slave)
 619                ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE);
 620
 621        return ret;
 622}
 623
 624static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
 625{
 626        bool slave = spi_controller_is_slave(p->ctlr);
 627        int ret = 0;
 628
 629        /* shut down frame, rx/tx and clock signals */
 630        if (!slave)
 631                ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0);
 632        if (!ret)
 633                ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0);
 634        if (rx_buf && !ret)
 635                ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0);
 636        if (!ret && !slave)
 637                ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0);
 638
 639        return ret;
 640}
 641
 642static int sh_msiof_slave_abort(struct spi_controller *ctlr)
 643{
 644        struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
 645
 646        p->slave_aborted = true;
 647        complete(&p->done);
 648        complete(&p->done_txdma);
 649        return 0;
 650}
 651
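/*
 * Wait for a transfer to finish.  In host (master) mode the wait is bounded
 * by a 1 s timeout; in device (slave) mode the external master decides when
 * the transfer is clocked, so the wait is interruptible and is ended only by
 * completion, by a signal, or by sh_msiof_slave_abort() above.
 */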
 652static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
 653                                        struct completion *x)
 654{
 655        if (spi_controller_is_slave(p->ctlr)) {
 656                if (wait_for_completion_interruptible(x) ||
 657                    p->slave_aborted) {
 658                        dev_dbg(&p->pdev->dev, "interrupted\n");
 659                        return -EINTR;
 660                }
 661        } else {
 662                if (!wait_for_completion_timeout(x, HZ)) {
 663                        dev_err(&p->pdev->dev, "timeout\n");
 664                        return -ETIMEDOUT;
 665                }
 666        }
 667
 668        return 0;
 669}
 670
 671static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
 672                                  void (*tx_fifo)(struct sh_msiof_spi_priv *,
 673                                                  const void *, int, int),
 674                                  void (*rx_fifo)(struct sh_msiof_spi_priv *,
 675                                                  void *, int, int),
 676                                  const void *tx_buf, void *rx_buf,
 677                                  int words, int bits)
 678{
 679        int fifo_shift;
 680        int ret;
 681
 682        /* limit maximum word transfer to rx/tx fifo size */
 683        if (tx_buf)
 684                words = min_t(int, words, p->tx_fifo_size);
 685        if (rx_buf)
 686                words = min_t(int, words, p->rx_fifo_size);
 687
 688        /* the fifo contents need shifting */
 689        fifo_shift = 32 - bits;
 690
 691        /* default FIFO watermarks for PIO */
 692        sh_msiof_write(p, SIFCTR, 0);
 693
 694        /* setup msiof transfer mode registers */
 695        sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
 696        sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);
 697
 698        /* write tx fifo */
 699        if (tx_buf)
 700                tx_fifo(p, tx_buf, words, fifo_shift);
 701
 702        reinit_completion(&p->done);
 703        p->slave_aborted = false;
 704
 705        ret = sh_msiof_spi_start(p, rx_buf);
 706        if (ret) {
 707                dev_err(&p->pdev->dev, "failed to start hardware\n");
 708                goto stop_ier;
 709        }
 710
 711        /* wait for tx fifo to be emptied / rx fifo to be filled */
 712        ret = sh_msiof_wait_for_completion(p, &p->done);
 713        if (ret)
 714                goto stop_reset;
 715
 716        /* read rx fifo */
 717        if (rx_buf)
 718                rx_fifo(p, rx_buf, words, fifo_shift);
 719
 720        /* clear status bits */
 721        sh_msiof_reset_str(p);
 722
 723        ret = sh_msiof_spi_stop(p, rx_buf);
 724        if (ret) {
 725                dev_err(&p->pdev->dev, "failed to shut down hardware\n");
 726                return ret;
 727        }
 728
 729        return words;
 730
 731stop_reset:
 732        sh_msiof_reset_str(p);
 733        sh_msiof_spi_stop(p, rx_buf);
 734stop_ier:
 735        sh_msiof_write(p, SIIER, 0);
 736        return ret;
 737}
 738
 739static void sh_msiof_dma_complete(void *arg)
 740{
 741        complete(arg);
 742}
 743
 744static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
 745                             void *rx, unsigned int len)
 746{
 747        u32 ier_bits = 0;
 748        struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
 749        dma_cookie_t cookie;
 750        int ret;
 751
 752        /* First prepare and submit the DMA request(s), as this may fail */
 753        if (rx) {
 754                ier_bits |= SIIER_RDREQE | SIIER_RDMAE;
 755                desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
 756                                        p->rx_dma_addr, len, DMA_DEV_TO_MEM,
 757                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 758                if (!desc_rx)
 759                        return -EAGAIN;
 760
 761                desc_rx->callback = sh_msiof_dma_complete;
 762                desc_rx->callback_param = &p->done;
 763                cookie = dmaengine_submit(desc_rx);
 764                if (dma_submit_error(cookie))
 765                        return cookie;
 766        }
 767
 768        if (tx) {
 769                ier_bits |= SIIER_TDREQE | SIIER_TDMAE;
 770                dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
 771                                           p->tx_dma_addr, len, DMA_TO_DEVICE);
 772                desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
 773                                        p->tx_dma_addr, len, DMA_MEM_TO_DEV,
 774                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 775                if (!desc_tx) {
 776                        ret = -EAGAIN;
 777                        goto no_dma_tx;
 778                }
 779
 780                desc_tx->callback = sh_msiof_dma_complete;
 781                desc_tx->callback_param = &p->done_txdma;
 782                cookie = dmaengine_submit(desc_tx);
 783                if (dma_submit_error(cookie)) {
 784                        ret = cookie;
 785                        goto no_dma_tx;
 786                }
 787        }
 788
 789        /* 1 stage FIFO watermarks for DMA */
 790        sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);
 791
 792        /* setup msiof transfer mode registers (32-bit words) */
 793        sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
 794
 795        sh_msiof_write(p, SIIER, ier_bits);
 796
 797        reinit_completion(&p->done);
 798        if (tx)
 799                reinit_completion(&p->done_txdma);
 800        p->slave_aborted = false;
 801
 802        /* Now start DMA */
 803        if (rx)
 804                dma_async_issue_pending(p->ctlr->dma_rx);
 805        if (tx)
 806                dma_async_issue_pending(p->ctlr->dma_tx);
 807
 808        ret = sh_msiof_spi_start(p, rx);
 809        if (ret) {
 810                dev_err(&p->pdev->dev, "failed to start hardware\n");
 811                goto stop_dma;
 812        }
 813
 814        if (tx) {
 815                /* wait for tx DMA completion */
 816                ret = sh_msiof_wait_for_completion(p, &p->done_txdma);
 817                if (ret)
 818                        goto stop_reset;
 819        }
 820
 821        if (rx) {
 822                /* wait for rx DMA completion */
 823                ret = sh_msiof_wait_for_completion(p, &p->done);
 824                if (ret)
 825                        goto stop_reset;
 826
 827                sh_msiof_write(p, SIIER, 0);
 828        } else {
 829                /* wait for tx fifo to be emptied */
 830                sh_msiof_write(p, SIIER, SIIER_TEOFE);
 831                ret = sh_msiof_wait_for_completion(p, &p->done);
 832                if (ret)
 833                        goto stop_reset;
 834        }
 835
 836        /* clear status bits */
 837        sh_msiof_reset_str(p);
 838
 839        ret = sh_msiof_spi_stop(p, rx);
 840        if (ret) {
 841                dev_err(&p->pdev->dev, "failed to shut down hardware\n");
 842                return ret;
 843        }
 844
 845        if (rx)
 846                dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev,
 847                                        p->rx_dma_addr, len, DMA_FROM_DEVICE);
 848
 849        return 0;
 850
 851stop_reset:
 852        sh_msiof_reset_str(p);
 853        sh_msiof_spi_stop(p, rx);
 854stop_dma:
 855        if (tx)
 856                dmaengine_terminate_sync(p->ctlr->dma_tx);
 857no_dma_tx:
 858        if (rx)
 859                dmaengine_terminate_sync(p->ctlr->dma_rx);
 860        sh_msiof_write(p, SIIER, 0);
 861        return ret;
 862}
 863
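/*
 * Helpers for (re)packing the DMA bounce buffers.  The DMA path always moves
 * 32-bit FIFO words, so 8-bit transfers are packed four to a word with a full
 * byte swap (swab32()) and 16-bit transfers two to a word with a halfword
 * swap (swahw32()), presumably so the on-wire byte order matches the PIO
 * path, which shifts data out of the top of the 32-bit FIFO register.
 * Source or destination may be unaligned, but not both.
 */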
 864static void copy_bswap32(u32 *dst, const u32 *src, unsigned int words)
 865{
 866        /* src or dst can be unaligned, but not both */
 867        if ((unsigned long)src & 3) {
 868                while (words--) {
 869                        *dst++ = swab32(get_unaligned(src));
 870                        src++;
 871                }
 872        } else if ((unsigned long)dst & 3) {
 873                while (words--) {
 874                        put_unaligned(swab32(*src++), dst);
 875                        dst++;
 876                }
 877        } else {
 878                while (words--)
 879                        *dst++ = swab32(*src++);
 880        }
 881}
 882
 883static void copy_wswap32(u32 *dst, const u32 *src, unsigned int words)
 884{
 885        /* src or dst can be unaligned, but not both */
 886        if ((unsigned long)src & 3) {
 887                while (words--) {
 888                        *dst++ = swahw32(get_unaligned(src));
 889                        src++;
 890                }
 891        } else if ((unsigned long)dst & 3) {
 892                while (words--) {
 893                        put_unaligned(swahw32(*src++), dst);
 894                        dst++;
 895                }
 896        } else {
 897                while (words--)
 898                        *dst++ = swahw32(*src++);
 899        }
 900}
 901
 902static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
 903{
 904        memcpy(dst, src, words * 4);
 905}
 906
 907static int sh_msiof_transfer_one(struct spi_controller *ctlr,
 908                                 struct spi_device *spi,
 909                                 struct spi_transfer *t)
 910{
 911        struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
 912        void (*copy32)(u32 *, const u32 *, unsigned int);
 913        void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
 914        void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
 915        const void *tx_buf = t->tx_buf;
 916        void *rx_buf = t->rx_buf;
 917        unsigned int len = t->len;
 918        unsigned int bits = t->bits_per_word;
 919        unsigned int bytes_per_word;
 920        unsigned int words;
 921        int n;
 922        bool swab;
 923        int ret;
 924
 925        /* reset registers */
 926        sh_msiof_spi_reset_regs(p);
 927
 928        /* setup clocks (the module clock is already enabled via runtime PM) */
 929        if (!spi_controller_is_slave(p->ctlr))
 930                sh_msiof_spi_set_clk_regs(p, t);
 931
 932        while (ctlr->dma_tx && len > 15) {
 933                /*
 934                 *  DMA supports 32-bit words only, hence pack 8-bit and 16-bit
 935                 *  words, with byte and 16-bit word swapping, respectively.
 936                 */
 937                unsigned int l = 0;
 938
 939                if (tx_buf)
 940                        l = min(round_down(len, 4), p->tx_fifo_size * 4);
 941                if (rx_buf)
 942                        l = min(round_down(len, 4), p->rx_fifo_size * 4);
 943
 944                if (bits <= 8) {
 945                        copy32 = copy_bswap32;
 946                } else if (bits <= 16) {
 947                        copy32 = copy_wswap32;
 948                } else {
 949                        copy32 = copy_plain32;
 950                }
 951
 952                if (tx_buf)
 953                        copy32(p->tx_dma_page, tx_buf, l / 4);
 954
 955                ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
 956                if (ret == -EAGAIN) {
 957                        dev_warn_once(&p->pdev->dev,
 958                                "DMA not available, falling back to PIO\n");
 959                        break;
 960                }
 961                if (ret)
 962                        return ret;
 963
 964                if (rx_buf) {
 965                        copy32(rx_buf, p->rx_dma_page, l / 4);
 966                        rx_buf += l;
 967                }
 968                if (tx_buf)
 969                        tx_buf += l;
 970
 971                len -= l;
 972                if (!len)
 973                        return 0;
 974        }
 975
 976        if (bits <= 8 && len > 15) {
 977                bits = 32;
 978                swab = true;
 979        } else {
 980                swab = false;
 981        }
 982
 983        /* setup bytes per word and fifo read/write functions */
 984        if (bits <= 8) {
 985                bytes_per_word = 1;
 986                tx_fifo = sh_msiof_spi_write_fifo_8;
 987                rx_fifo = sh_msiof_spi_read_fifo_8;
 988        } else if (bits <= 16) {
 989                bytes_per_word = 2;
 990                if ((unsigned long)tx_buf & 0x01)
 991                        tx_fifo = sh_msiof_spi_write_fifo_16u;
 992                else
 993                        tx_fifo = sh_msiof_spi_write_fifo_16;
 994
 995                if ((unsigned long)rx_buf & 0x01)
 996                        rx_fifo = sh_msiof_spi_read_fifo_16u;
 997                else
 998                        rx_fifo = sh_msiof_spi_read_fifo_16;
 999        } else if (swab) {
1000                bytes_per_word = 4;
1001                if ((unsigned long)tx_buf & 0x03)
1002                        tx_fifo = sh_msiof_spi_write_fifo_s32u;
1003                else
1004                        tx_fifo = sh_msiof_spi_write_fifo_s32;
1005
1006                if ((unsigned long)rx_buf & 0x03)
1007                        rx_fifo = sh_msiof_spi_read_fifo_s32u;
1008                else
1009                        rx_fifo = sh_msiof_spi_read_fifo_s32;
1010        } else {
1011                bytes_per_word = 4;
1012                if ((unsigned long)tx_buf & 0x03)
1013                        tx_fifo = sh_msiof_spi_write_fifo_32u;
1014                else
1015                        tx_fifo = sh_msiof_spi_write_fifo_32;
1016
1017                if ((unsigned long)rx_buf & 0x03)
1018                        rx_fifo = sh_msiof_spi_read_fifo_32u;
1019                else
1020                        rx_fifo = sh_msiof_spi_read_fifo_32;
1021        }
1022
1023        /* transfer in fifo sized chunks */
1024        words = len / bytes_per_word;
1025
1026        while (words > 0) {
1027                n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, tx_buf, rx_buf,
1028                                           words, bits);
1029                if (n < 0)
1030                        return n;
1031
1032                if (tx_buf)
1033                        tx_buf += n * bytes_per_word;
1034                if (rx_buf)
1035                        rx_buf += n * bytes_per_word;
1036                words -= n;
1037
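                /*
                 * If the length is not a multiple of bytes_per_word, finish
                 * the remaining tail bytes as individual words at the
                 * original bits_per_word, using the 8-bit FIFO accessors.
                 */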
1038                if (words == 0 && (len % bytes_per_word)) {
1039                        words = len % bytes_per_word;
1040                        bits = t->bits_per_word;
1041                        bytes_per_word = 1;
1042                        tx_fifo = sh_msiof_spi_write_fifo_8;
1043                        rx_fifo = sh_msiof_spi_read_fifo_8;
1044                }
1045        }
1046
1047        return 0;
1048}
1049
1050static const struct sh_msiof_chipdata sh_data = {
1051        .bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32),
1052        .tx_fifo_size = 64,
1053        .rx_fifo_size = 64,
1054        .ctlr_flags = 0,
1055        .min_div_pow = 0,
1056};
1057
1058static const struct sh_msiof_chipdata rcar_gen2_data = {
1059        .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
1060                              SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
1061        .tx_fifo_size = 64,
1062        .rx_fifo_size = 64,
1063        .ctlr_flags = SPI_CONTROLLER_MUST_TX,
1064        .min_div_pow = 0,
1065};
1066
1067static const struct sh_msiof_chipdata rcar_gen3_data = {
1068        .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
1069                              SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
1070        .tx_fifo_size = 64,
1071        .rx_fifo_size = 64,
1072        .ctlr_flags = SPI_CONTROLLER_MUST_TX,
1073        .min_div_pow = 1,
1074};
1075
1076static const struct of_device_id sh_msiof_match[] = {
1077        { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
1078        { .compatible = "renesas,msiof-r8a7743",   .data = &rcar_gen2_data },
1079        { .compatible = "renesas,msiof-r8a7745",   .data = &rcar_gen2_data },
1080        { .compatible = "renesas,msiof-r8a7790",   .data = &rcar_gen2_data },
1081        { .compatible = "renesas,msiof-r8a7791",   .data = &rcar_gen2_data },
1082        { .compatible = "renesas,msiof-r8a7792",   .data = &rcar_gen2_data },
1083        { .compatible = "renesas,msiof-r8a7793",   .data = &rcar_gen2_data },
1084        { .compatible = "renesas,msiof-r8a7794",   .data = &rcar_gen2_data },
1085        { .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
1086        { .compatible = "renesas,msiof-r8a7796",   .data = &rcar_gen3_data },
1087        { .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
1088        { .compatible = "renesas,sh-msiof",        .data = &sh_data }, /* Deprecated */
1089        {},
1090};
1091MODULE_DEVICE_TABLE(of, sh_msiof_match);
1092
1093#ifdef CONFIG_OF
1094static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
1095{
1096        struct sh_msiof_spi_info *info;
1097        struct device_node *np = dev->of_node;
1098        u32 num_cs = 1;
1099
1100        info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
1101        if (!info)
1102                return NULL;
1103
1104        info->mode = of_property_read_bool(np, "spi-slave") ? MSIOF_SPI_SLAVE
1105                                                            : MSIOF_SPI_MASTER;
1106
1107        /* Parse the MSIOF properties */
1108        if (info->mode == MSIOF_SPI_MASTER)
1109                of_property_read_u32(np, "num-cs", &num_cs);
1110        of_property_read_u32(np, "renesas,tx-fifo-size",
1111                                        &info->tx_fifo_override);
1112        of_property_read_u32(np, "renesas,rx-fifo-size",
1113                                        &info->rx_fifo_override);
1114        of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
1115        of_property_read_u32(np, "renesas,syncdl", &info->syncdl);
1116
1117        info->num_chipselect = num_cs;
1118
1119        return info;
1120}
1121#else
1122static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
1123{
1124        return NULL;
1125}
1126#endif
1127
1128static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
1129        enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
1130{
1131        dma_cap_mask_t mask;
1132        struct dma_chan *chan;
1133        struct dma_slave_config cfg;
1134        int ret;
1135
1136        dma_cap_zero(mask);
1137        dma_cap_set(DMA_SLAVE, mask);
1138
1139        chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
1140                                (void *)(unsigned long)id, dev,
1141                                dir == DMA_MEM_TO_DEV ? "tx" : "rx");
1142        if (!chan) {
1143                dev_warn(dev, "dma_request_slave_channel_compat failed\n");
1144                return NULL;
1145        }
1146
1147        memset(&cfg, 0, sizeof(cfg));
1148        cfg.direction = dir;
1149        if (dir == DMA_MEM_TO_DEV) {
1150                cfg.dst_addr = port_addr;
1151                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1152        } else {
1153                cfg.src_addr = port_addr;
1154                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1155        }
1156
1157        ret = dmaengine_slave_config(chan, &cfg);
1158        if (ret) {
1159                dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
1160                dma_release_channel(chan);
1161                return NULL;
1162        }
1163
1164        return chan;
1165}
1166
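/*
 * DMA setup: request one TX and one RX channel (via DT in the OF case, or
 * via the platform data slave IDs otherwise), allocate one GFP_DMA bounce
 * page per direction and map it once here.  Failure at any step is treated
 * as "no DMA"; the caller then falls back to PIO.
 */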
1167static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
1168{
1169        struct platform_device *pdev = p->pdev;
1170        struct device *dev = &pdev->dev;
1171        const struct sh_msiof_spi_info *info = p->info;
1172        unsigned int dma_tx_id, dma_rx_id;
1173        const struct resource *res;
1174        struct spi_controller *ctlr;
1175        struct device *tx_dev, *rx_dev;
1176
1177        if (dev->of_node) {
1178                /* In the OF case we will get the slave IDs from the DT */
1179                dma_tx_id = 0;
1180                dma_rx_id = 0;
1181        } else if (info && info->dma_tx_id && info->dma_rx_id) {
1182                dma_tx_id = info->dma_tx_id;
1183                dma_rx_id = info->dma_rx_id;
1184        } else {
1185                /* No DMA resources specified; this is not an error, fall back to PIO */
1186                return 0;
1187        }
1188
1189        /* The DMA engine uses the second register set, if present */
1190        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1191        if (!res)
1192                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1193
1194        ctlr = p->ctlr;
1195        ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
1196                                                 dma_tx_id, res->start + SITFDR);
1197        if (!ctlr->dma_tx)
1198                return -ENODEV;
1199
1200        ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
1201                                                 dma_rx_id, res->start + SIRFDR);
1202        if (!ctlr->dma_rx)
1203                goto free_tx_chan;
1204
1205        p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
1206        if (!p->tx_dma_page)
1207                goto free_rx_chan;
1208
1209        p->rx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
1210        if (!p->rx_dma_page)
1211                goto free_tx_page;
1212
1213        tx_dev = ctlr->dma_tx->device->dev;
1214        p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
1215                                        DMA_TO_DEVICE);
1216        if (dma_mapping_error(tx_dev, p->tx_dma_addr))
1217                goto free_rx_page;
1218
1219        rx_dev = ctlr->dma_rx->device->dev;
1220        p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
1221                                        DMA_FROM_DEVICE);
1222        if (dma_mapping_error(rx_dev, p->rx_dma_addr))
1223                goto unmap_tx_page;
1224
1225        dev_info(dev, "DMA available\n");
1226        return 0;
1227
1228unmap_tx_page:
1229        dma_unmap_single(tx_dev, p->tx_dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
1230free_rx_page:
1231        free_page((unsigned long)p->rx_dma_page);
1232free_tx_page:
1233        free_page((unsigned long)p->tx_dma_page);
1234free_rx_chan:
1235        dma_release_channel(ctlr->dma_rx);
1236free_tx_chan:
1237        dma_release_channel(ctlr->dma_tx);
1238        ctlr->dma_tx = NULL;
1239        return -ENODEV;
1240}
1241
1242static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
1243{
1244        struct spi_controller *ctlr = p->ctlr;
1245
1246        if (!ctlr->dma_tx)
1247                return;
1248
1249        dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE,
1250                         DMA_FROM_DEVICE);
1251        dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE,
1252                         DMA_TO_DEVICE);
1253        free_page((unsigned long)p->rx_dma_page);
1254        free_page((unsigned long)p->tx_dma_page);
1255        dma_release_channel(ctlr->dma_rx);
1256        dma_release_channel(ctlr->dma_tx);
1257}
1258
1259static int sh_msiof_spi_probe(struct platform_device *pdev)
1260{
1261        struct spi_controller *ctlr;
1262        const struct sh_msiof_chipdata *chipdata;
1263        struct sh_msiof_spi_info *info;
1264        struct sh_msiof_spi_priv *p;
1265        unsigned long clksrc;
1266        int i;
1267        int ret;
1268
1269        chipdata = of_device_get_match_data(&pdev->dev);
1270        if (chipdata) {
1271                info = sh_msiof_spi_parse_dt(&pdev->dev);
1272        } else {
1273                chipdata = (const void *)pdev->id_entry->driver_data;
1274                info = dev_get_platdata(&pdev->dev);
1275        }
1276
1277        if (!info) {
1278                dev_err(&pdev->dev, "failed to obtain device info\n");
1279                return -ENXIO;
1280        }
1281
1282        if (info->mode == MSIOF_SPI_SLAVE)
1283                ctlr = spi_alloc_slave(&pdev->dev,
1284                                       sizeof(struct sh_msiof_spi_priv));
1285        else
1286                ctlr = spi_alloc_master(&pdev->dev,
1287                                        sizeof(struct sh_msiof_spi_priv));
1288        if (ctlr == NULL)
1289                return -ENOMEM;
1290
1291        p = spi_controller_get_devdata(ctlr);
1292
1293        platform_set_drvdata(pdev, p);
1294        p->ctlr = ctlr;
1295        p->info = info;
1296        p->min_div_pow = chipdata->min_div_pow;
1297
1298        init_completion(&p->done);
1299        init_completion(&p->done_txdma);
1300
1301        p->clk = devm_clk_get(&pdev->dev, NULL);
1302        if (IS_ERR(p->clk)) {
1303                dev_err(&pdev->dev, "cannot get clock\n");
1304                ret = PTR_ERR(p->clk);
1305                goto err1;
1306        }
1307
1308        i = platform_get_irq(pdev, 0);
1309        if (i < 0) {
1310                ret = i;
1311                goto err1;
1312        }
1313
1314        p->mapbase = devm_platform_ioremap_resource(pdev, 0);
1315        if (IS_ERR(p->mapbase)) {
1316                ret = PTR_ERR(p->mapbase);
1317                goto err1;
1318        }
1319
1320        ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
1321                               dev_name(&pdev->dev), p);
1322        if (ret) {
1323                dev_err(&pdev->dev, "unable to request irq\n");
1324                goto err1;
1325        }
1326
1327        p->pdev = pdev;
1328        pm_runtime_enable(&pdev->dev);
1329
1330        /* Platform data may override FIFO sizes */
1331        p->tx_fifo_size = chipdata->tx_fifo_size;
1332        p->rx_fifo_size = chipdata->rx_fifo_size;
1333        if (p->info->tx_fifo_override)
1334                p->tx_fifo_size = p->info->tx_fifo_override;
1335        if (p->info->rx_fifo_override)
1336                p->rx_fifo_size = p->info->rx_fifo_override;
1337
1338        /* init controller code */
1339        ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1340        ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
1341        clksrc = clk_get_rate(p->clk);
1342        ctlr->min_speed_hz = DIV_ROUND_UP(clksrc, 1024);
1343        ctlr->max_speed_hz = DIV_ROUND_UP(clksrc, 1 << p->min_div_pow);
1344        ctlr->flags = chipdata->ctlr_flags;
1345        ctlr->bus_num = pdev->id;
1346        ctlr->num_chipselect = p->info->num_chipselect;
1347        ctlr->dev.of_node = pdev->dev.of_node;
1348        ctlr->setup = sh_msiof_spi_setup;
1349        ctlr->prepare_message = sh_msiof_prepare_message;
1350        ctlr->slave_abort = sh_msiof_slave_abort;
1351        ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
1352        ctlr->auto_runtime_pm = true;
1353        ctlr->transfer_one = sh_msiof_transfer_one;
1354        ctlr->use_gpio_descriptors = true;
1355        ctlr->max_native_cs = MAX_SS;
1356
1357        ret = sh_msiof_request_dma(p);
1358        if (ret < 0)
1359                dev_warn(&pdev->dev, "DMA not available, using PIO\n");
1360
1361        ret = devm_spi_register_controller(&pdev->dev, ctlr);
1362        if (ret < 0) {
1363                dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
1364                goto err2;
1365        }
1366
1367        return 0;
1368
1369 err2:
1370        sh_msiof_release_dma(p);
1371        pm_runtime_disable(&pdev->dev);
1372 err1:
1373        spi_controller_put(ctlr);
1374        return ret;
1375}
1376
1377static int sh_msiof_spi_remove(struct platform_device *pdev)
1378{
1379        struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
1380
1381        sh_msiof_release_dma(p);
1382        pm_runtime_disable(&pdev->dev);
1383        return 0;
1384}
1385
1386static const struct platform_device_id spi_driver_ids[] = {
1387        { "spi_sh_msiof",       (kernel_ulong_t)&sh_data },
1388        {},
1389};
1390MODULE_DEVICE_TABLE(platform, spi_driver_ids);
1391
1392#ifdef CONFIG_PM_SLEEP
1393static int sh_msiof_spi_suspend(struct device *dev)
1394{
1395        struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
1396
1397        return spi_controller_suspend(p->ctlr);
1398}
1399
1400static int sh_msiof_spi_resume(struct device *dev)
1401{
1402        struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
1403
1404        return spi_controller_resume(p->ctlr);
1405}
1406
1407static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
1408                         sh_msiof_spi_resume);
1409#define DEV_PM_OPS      (&sh_msiof_spi_pm_ops)
1410#else
1411#define DEV_PM_OPS      NULL
1412#endif /* CONFIG_PM_SLEEP */
1413
1414static struct platform_driver sh_msiof_spi_drv = {
1415        .probe          = sh_msiof_spi_probe,
1416        .remove         = sh_msiof_spi_remove,
1417        .id_table       = spi_driver_ids,
1418        .driver         = {
1419                .name           = "spi_sh_msiof",
1420                .pm             = DEV_PM_OPS,
1421                .of_match_table = of_match_ptr(sh_msiof_match),
1422        },
1423};
1424module_platform_driver(sh_msiof_spi_drv);
1425
1426MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver");
1427MODULE_AUTHOR("Magnus Damm");
1428MODULE_LICENSE("GPL v2");
1429MODULE_ALIAS("platform:spi_sh_msiof");
1430