linux/drivers/spi/spi-sh-msiof.c
   1/*
   2 * SuperH MSIOF SPI Master Interface
   3 *
   4 * Copyright (c) 2009 Magnus Damm
   5 * Copyright (C) 2014 Renesas Electronics Corporation
   6 * Copyright (C) 2014-2017 Glider bvba
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 *
  12 */
  13
  14#include <linux/bitmap.h>
  15#include <linux/clk.h>
  16#include <linux/completion.h>
  17#include <linux/delay.h>
  18#include <linux/dma-mapping.h>
  19#include <linux/dmaengine.h>
  20#include <linux/err.h>
  21#include <linux/gpio.h>
  22#include <linux/gpio/consumer.h>
  23#include <linux/interrupt.h>
  24#include <linux/io.h>
  25#include <linux/kernel.h>
  26#include <linux/module.h>
  27#include <linux/of.h>
  28#include <linux/of_device.h>
  29#include <linux/platform_device.h>
  30#include <linux/pm_runtime.h>
  31#include <linux/sh_dma.h>
  32
  33#include <linux/spi/sh_msiof.h>
  34#include <linux/spi/spi.h>
  35
  36#include <asm/unaligned.h>
  37
  38struct sh_msiof_chipdata {
  39        u16 tx_fifo_size;
  40        u16 rx_fifo_size;
  41        u16 master_flags;
  42        u16 min_div;
  43};
  44
  45struct sh_msiof_spi_priv {
  46        struct spi_master *master;
  47        void __iomem *mapbase;
  48        struct clk *clk;
  49        struct platform_device *pdev;
  50        struct sh_msiof_spi_info *info;
  51        struct completion done;
  52        unsigned int tx_fifo_size;
  53        unsigned int rx_fifo_size;
  54        unsigned int min_div;
  55        void *tx_dma_page;
  56        void *rx_dma_page;
  57        dma_addr_t tx_dma_addr;
  58        dma_addr_t rx_dma_addr;
  59        unsigned short unused_ss;
  60        bool native_cs_inited;
  61        bool native_cs_high;
  62        bool slave_aborted;
  63};
  64
  65#define MAX_SS  3       /* Maximum number of native chip selects */
  66
  67#define TMDR1   0x00    /* Transmit Mode Register 1 */
  68#define TMDR2   0x04    /* Transmit Mode Register 2 */
  69#define TMDR3   0x08    /* Transmit Mode Register 3 */
  70#define RMDR1   0x10    /* Receive Mode Register 1 */
  71#define RMDR2   0x14    /* Receive Mode Register 2 */
  72#define RMDR3   0x18    /* Receive Mode Register 3 */
  73#define TSCR    0x20    /* Transmit Clock Select Register */
  74#define RSCR    0x22    /* Receive Clock Select Register (SH, A1, APE6) */
  75#define CTR     0x28    /* Control Register */
  76#define FCTR    0x30    /* FIFO Control Register */
  77#define STR     0x40    /* Status Register */
  78#define IER     0x44    /* Interrupt Enable Register */
  79#define TDR1    0x48    /* Transmit Control Data Register 1 (SH, A1) */
  80#define TDR2    0x4c    /* Transmit Control Data Register 2 (SH, A1) */
  81#define TFDR    0x50    /* Transmit FIFO Data Register */
  82#define RDR1    0x58    /* Receive Control Data Register 1 (SH, A1) */
  83#define RDR2    0x5c    /* Receive Control Data Register 2 (SH, A1) */
  84#define RFDR    0x60    /* Receive FIFO Data Register */
  85
  86/* TMDR1 and RMDR1 */
  87#define MDR1_TRMD        0x80000000 /* Transfer Mode (1 = Master mode) */
  88#define MDR1_SYNCMD_MASK 0x30000000 /* SYNC Mode */
  89#define MDR1_SYNCMD_SPI  0x20000000 /*   Level mode/SPI */
  90#define MDR1_SYNCMD_LR   0x30000000 /*   L/R mode */
  91#define MDR1_SYNCAC_SHIFT        25 /* Sync Polarity (1 = Active-low) */
  92#define MDR1_BITLSB_SHIFT        24 /* MSB/LSB First (1 = LSB first) */
  93#define MDR1_DTDL_SHIFT          20 /* Data Pin Bit Delay for MSIOF_SYNC */
  94#define MDR1_SYNCDL_SHIFT        16 /* Frame Sync Signal Timing Delay */
  95#define MDR1_FLD_MASK    0x0000000c /* Frame Sync Signal Interval (0-3) */
  96#define MDR1_FLD_SHIFT            2
  97#define MDR1_XXSTP       0x00000001 /* Transmission/Reception Stop on FIFO */
  98/* TMDR1 */
  99#define TMDR1_PCON       0x40000000 /* Transfer Signal Connection */
 100#define TMDR1_SYNCCH_MASK 0xc000000 /* Synchronization Signal Channel Select */
 101#define TMDR1_SYNCCH_SHIFT       26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
 102
 103/* TMDR2 and RMDR2 */
 104#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
  105#define MDR2_WDLEN1(i)  (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
 106#define MDR2_GRPMASK1   0x00000001 /* Group Output Mask 1 (SH, A1) */
 107
 108/* TSCR and RSCR */
 109#define SCR_BRPS_MASK       0x1f00 /* Prescaler Setting (1-32) */
 110#define SCR_BRPS(i)     (((i) - 1) << 8)
 111#define SCR_BRDV_MASK       0x0007 /* Baud Rate Generator's Division Ratio */
 112#define SCR_BRDV_DIV_2      0x0000
 113#define SCR_BRDV_DIV_4      0x0001
 114#define SCR_BRDV_DIV_8      0x0002
 115#define SCR_BRDV_DIV_16     0x0003
 116#define SCR_BRDV_DIV_32     0x0004
 117#define SCR_BRDV_DIV_1      0x0007
 118
 119/* CTR */
 120#define CTR_TSCKIZ_MASK 0xc0000000 /* Transmit Clock I/O Polarity Select */
 121#define CTR_TSCKIZ_SCK  0x80000000 /*   Disable SCK when TX disabled */
 122#define CTR_TSCKIZ_POL_SHIFT    30 /*   Transmit Clock Polarity */
 123#define CTR_RSCKIZ_MASK 0x30000000 /* Receive Clock Polarity Select */
 124#define CTR_RSCKIZ_SCK  0x20000000 /*   Must match CTR_TSCKIZ_SCK */
 125#define CTR_RSCKIZ_POL_SHIFT    28 /*   Receive Clock Polarity */
 126#define CTR_TEDG_SHIFT          27 /* Transmit Timing (1 = falling edge) */
 127#define CTR_REDG_SHIFT          26 /* Receive Timing (1 = falling edge) */
 128#define CTR_TXDIZ_MASK  0x00c00000 /* Pin Output When TX is Disabled */
 129#define CTR_TXDIZ_LOW   0x00000000 /*   0 */
 130#define CTR_TXDIZ_HIGH  0x00400000 /*   1 */
 131#define CTR_TXDIZ_HIZ   0x00800000 /*   High-impedance */
 132#define CTR_TSCKE       0x00008000 /* Transmit Serial Clock Output Enable */
 133#define CTR_TFSE        0x00004000 /* Transmit Frame Sync Signal Output Enable */
 134#define CTR_TXE         0x00000200 /* Transmit Enable */
 135#define CTR_RXE         0x00000100 /* Receive Enable */
 136
 137/* FCTR */
 138#define FCTR_TFWM_MASK  0xe0000000 /* Transmit FIFO Watermark */
 139#define FCTR_TFWM_64    0x00000000 /*  Transfer Request when 64 empty stages */
 140#define FCTR_TFWM_32    0x20000000 /*  Transfer Request when 32 empty stages */
 141#define FCTR_TFWM_24    0x40000000 /*  Transfer Request when 24 empty stages */
 142#define FCTR_TFWM_16    0x60000000 /*  Transfer Request when 16 empty stages */
 143#define FCTR_TFWM_12    0x80000000 /*  Transfer Request when 12 empty stages */
 144#define FCTR_TFWM_8     0xa0000000 /*  Transfer Request when 8 empty stages */
 145#define FCTR_TFWM_4     0xc0000000 /*  Transfer Request when 4 empty stages */
 146#define FCTR_TFWM_1     0xe0000000 /*  Transfer Request when 1 empty stage */
 147#define FCTR_TFUA_MASK  0x07f00000 /* Transmit FIFO Usable Area */
 148#define FCTR_TFUA_SHIFT         20
 149#define FCTR_TFUA(i)    ((i) << FCTR_TFUA_SHIFT)
 150#define FCTR_RFWM_MASK  0x0000e000 /* Receive FIFO Watermark */
  151#define FCTR_RFWM_1     0x00000000 /*  Transfer Request when 1 valid stage */
 152#define FCTR_RFWM_4     0x00002000 /*  Transfer Request when 4 valid stages */
 153#define FCTR_RFWM_8     0x00004000 /*  Transfer Request when 8 valid stages */
 154#define FCTR_RFWM_16    0x00006000 /*  Transfer Request when 16 valid stages */
 155#define FCTR_RFWM_32    0x00008000 /*  Transfer Request when 32 valid stages */
 156#define FCTR_RFWM_64    0x0000a000 /*  Transfer Request when 64 valid stages */
 157#define FCTR_RFWM_128   0x0000c000 /*  Transfer Request when 128 valid stages */
 158#define FCTR_RFWM_256   0x0000e000 /*  Transfer Request when 256 valid stages */
 159#define FCTR_RFUA_MASK  0x00001ff0 /* Receive FIFO Usable Area (0x40 = full) */
 160#define FCTR_RFUA_SHIFT          4
 161#define FCTR_RFUA(i)    ((i) << FCTR_RFUA_SHIFT)
 162
 163/* STR */
 164#define STR_TFEMP       0x20000000 /* Transmit FIFO Empty */
 165#define STR_TDREQ       0x10000000 /* Transmit Data Transfer Request */
 166#define STR_TEOF        0x00800000 /* Frame Transmission End */
 167#define STR_TFSERR      0x00200000 /* Transmit Frame Synchronization Error */
 168#define STR_TFOVF       0x00100000 /* Transmit FIFO Overflow */
 169#define STR_TFUDF       0x00080000 /* Transmit FIFO Underflow */
 170#define STR_RFFUL       0x00002000 /* Receive FIFO Full */
 171#define STR_RDREQ       0x00001000 /* Receive Data Transfer Request */
 172#define STR_REOF        0x00000080 /* Frame Reception End */
 173#define STR_RFSERR      0x00000020 /* Receive Frame Synchronization Error */
 174#define STR_RFUDF       0x00000010 /* Receive FIFO Underflow */
 175#define STR_RFOVF       0x00000008 /* Receive FIFO Overflow */
 176
 177/* IER */
 178#define IER_TDMAE       0x80000000 /* Transmit Data DMA Transfer Req. Enable */
 179#define IER_TFEMPE      0x20000000 /* Transmit FIFO Empty Enable */
 180#define IER_TDREQE      0x10000000 /* Transmit Data Transfer Request Enable */
 181#define IER_TEOFE       0x00800000 /* Frame Transmission End Enable */
 182#define IER_TFSERRE     0x00200000 /* Transmit Frame Sync Error Enable */
 183#define IER_TFOVFE      0x00100000 /* Transmit FIFO Overflow Enable */
 184#define IER_TFUDFE      0x00080000 /* Transmit FIFO Underflow Enable */
 185#define IER_RDMAE       0x00008000 /* Receive Data DMA Transfer Req. Enable */
 186#define IER_RFFULE      0x00002000 /* Receive FIFO Full Enable */
 187#define IER_RDREQE      0x00001000 /* Receive Data Transfer Request Enable */
 188#define IER_REOFE       0x00000080 /* Frame Reception End Enable */
 189#define IER_RFSERRE     0x00000020 /* Receive Frame Sync Error Enable */
 190#define IER_RFUDFE      0x00000010 /* Receive FIFO Underflow Enable */
 191#define IER_RFOVFE      0x00000008 /* Receive FIFO Overflow Enable */
 192
 193
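     /*
      * Register access helpers: TSCR and RSCR are 16-bit registers, all
      * other MSIOF registers are accessed as 32-bit words.
      */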
 194static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
 195{
 196        switch (reg_offs) {
 197        case TSCR:
 198        case RSCR:
 199                return ioread16(p->mapbase + reg_offs);
 200        default:
 201                return ioread32(p->mapbase + reg_offs);
 202        }
 203}
 204
 205static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
 206                           u32 value)
 207{
 208        switch (reg_offs) {
 209        case TSCR:
 210        case RSCR:
 211                iowrite16(value, p->mapbase + reg_offs);
 212                break;
 213        default:
 214                iowrite32(value, p->mapbase + reg_offs);
 215                break;
 216        }
 217}
 218
 219static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
 220                                    u32 clr, u32 set)
 221{
 222        u32 mask = clr | set;
 223        u32 data;
 224        int k;
 225
 226        data = sh_msiof_read(p, CTR);
 227        data &= ~clr;
 228        data |= set;
 229        sh_msiof_write(p, CTR, data);
 230
 231        for (k = 100; k > 0; k--) {
 232                if ((sh_msiof_read(p, CTR) & mask) == set)
 233                        break;
 234
 235                udelay(10);
 236        }
 237
 238        return k > 0 ? 0 : -ETIMEDOUT;
 239}
 240
 241static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
 242{
 243        struct sh_msiof_spi_priv *p = data;
 244
 245        /* just disable the interrupt and wake up */
 246        sh_msiof_write(p, IER, 0);
 247        complete(&p->done);
 248
 249        return IRQ_HANDLED;
 250}
 251
 252static struct {
 253        unsigned short div;
 254        unsigned short brdv;
 255} const sh_msiof_spi_div_table[] = {
 256        { 1,    SCR_BRDV_DIV_1 },
 257        { 2,    SCR_BRDV_DIV_2 },
 258        { 4,    SCR_BRDV_DIV_4 },
 259        { 8,    SCR_BRDV_DIV_8 },
 260        { 16,   SCR_BRDV_DIV_16 },
 261        { 32,   SCR_BRDV_DIV_32 },
 262};
 263
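     /*
      * Program the transmit clock select register (and the receive one
      * when the controller does not carry SPI_MASTER_MUST_TX).  The
      * requested divider, never smaller than the per-SoC minimum, is split
      * into a prescaler (BRPS, 1-32) and a division ratio (BRDV).  For
      * example, a 66 MHz parent clock and a 5 MHz request give div = 14,
      * which maps to BRDV = 1/2 and BRPS = 7, i.e. roughly 4.7 MHz.
      */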
 264static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
 265                                      unsigned long parent_rate, u32 spi_hz)
 266{
 267        unsigned long div = 1024;
 268        u32 brps, scr;
 269        size_t k;
 270
 271        if (!WARN_ON(!spi_hz || !parent_rate))
 272                div = DIV_ROUND_UP(parent_rate, spi_hz);
 273
 274        div = max_t(unsigned long, div, p->min_div);
 275
 276        for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
 277                brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
  278                /* SCR_BRDV_DIV_1 is valid only with a prescaler of 1 or 2 */
 279                if (sh_msiof_spi_div_table[k].div == 1 && brps > 2)
 280                        continue;
  281                if (brps <= 32) /* max of brps is 32 */
 282                        break;
 283        }
 284
 285        k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
 286        brps = min_t(int, brps, 32);
 287
 288        scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
 289        sh_msiof_write(p, TSCR, scr);
 290        if (!(p->master->flags & SPI_MASTER_MUST_TX))
 291                sh_msiof_write(p, RSCR, scr);
 292}
 293
 294static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
 295{
 296        /*
 297         * DTDL/SYNCDL bit      : p->info->dtdl or p->info->syncdl
 298         * b'000                : 0
 299         * b'001                : 100
 300         * b'010                : 200
 301         * b'011 (SYNCDL only)  : 300
 302         * b'101                : 50
 303         * b'110                : 150
 304         */
 305        if (dtdl_or_syncdl % 100)
 306                return dtdl_or_syncdl / 100 + 5;
 307        else
 308                return dtdl_or_syncdl / 100;
 309}
 310
 311static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
 312{
 313        u32 val;
 314
 315        if (!p->info)
 316                return 0;
 317
  318        /* check that DTDL and SYNCDL are within the allowed range */
 319        if (p->info->dtdl > 200 || p->info->syncdl > 300) {
 320                dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
 321                return 0;
 322        }
 323
  324        /* check that the sum of DTDL and SYNCDL is a multiple of 100 */
  325        if ((p->info->dtdl + p->info->syncdl) % 100) {
  326                dev_warn(&p->pdev->dev, "the sum of DTDL and SYNCDL must be a multiple of 100\n");
 327                return 0;
 328        }
 329
 330        val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
 331        val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
 332
 333        return val;
 334}
 335
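     /*
      * Program the pin and frame configuration: SPI sync mode, chip select
      * polarity, bit order and DTDL/SYNCDL delays in TMDR1/RMDR1, plus
      * clock polarity, sampling edges and the TX idle level in CTR.  In
      * master mode, @ss selects which native SYNC/SS pin carries the chip
      * select.
      */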
 336static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
 337                                      u32 cpol, u32 cpha,
 338                                      u32 tx_hi_z, u32 lsb_first, u32 cs_high)
 339{
 340        u32 tmp;
 341        int edge;
 342
 343        /*
 344         * CPOL CPHA     TSCKIZ RSCKIZ TEDG REDG
 345         *    0    0         10     10    1    1
 346         *    0    1         10     10    0    0
 347         *    1    0         11     11    0    0
 348         *    1    1         11     11    1    1
 349         */
 350        tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
 351        tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
 352        tmp |= lsb_first << MDR1_BITLSB_SHIFT;
 353        tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
 354        if (spi_controller_is_slave(p->master)) {
 355                sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
 356        } else {
 357                sh_msiof_write(p, TMDR1,
 358                               tmp | MDR1_TRMD | TMDR1_PCON |
 359                               (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
 360        }
 361        if (p->master->flags & SPI_MASTER_MUST_TX) {
 362                /* These bits are reserved if RX needs TX */
 363                tmp &= ~0x0000ffff;
 364        }
 365        sh_msiof_write(p, RMDR1, tmp);
 366
 367        tmp = 0;
 368        tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
 369        tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
 370
 371        edge = cpol ^ !cpha;
 372
 373        tmp |= edge << CTR_TEDG_SHIFT;
 374        tmp |= edge << CTR_REDG_SHIFT;
 375        tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
 376        sh_msiof_write(p, CTR, tmp);
 377}
 378
 379static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
 380                                       const void *tx_buf, void *rx_buf,
 381                                       u32 bits, u32 words)
 382{
 383        u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
 384
 385        if (tx_buf || (p->master->flags & SPI_MASTER_MUST_TX))
 386                sh_msiof_write(p, TMDR2, dr2);
 387        else
 388                sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
 389
 390        if (rx_buf)
 391                sh_msiof_write(p, RMDR2, dr2);
 392}
 393
 394static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 395{
 396        sh_msiof_write(p, STR, sh_msiof_read(p, STR));
 397}
 398
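     /*
      * PIO FIFO accessors, one per word size and buffer alignment.  Data
      * is left-justified in the 32-bit FIFO registers, so TX words are
      * shifted up and RX words shifted down by fs = 32 - bits_per_word.
      * The _u variants handle unaligned buffers, the _s32 variants
      * byte-swap 32-bit words.
      */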
 399static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
 400                                      const void *tx_buf, int words, int fs)
 401{
 402        const u8 *buf_8 = tx_buf;
 403        int k;
 404
 405        for (k = 0; k < words; k++)
 406                sh_msiof_write(p, TFDR, buf_8[k] << fs);
 407}
 408
 409static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
 410                                       const void *tx_buf, int words, int fs)
 411{
 412        const u16 *buf_16 = tx_buf;
 413        int k;
 414
 415        for (k = 0; k < words; k++)
 416                sh_msiof_write(p, TFDR, buf_16[k] << fs);
 417}
 418
 419static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
 420                                        const void *tx_buf, int words, int fs)
 421{
 422        const u16 *buf_16 = tx_buf;
 423        int k;
 424
 425        for (k = 0; k < words; k++)
 426                sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
 427}
 428
 429static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
 430                                       const void *tx_buf, int words, int fs)
 431{
 432        const u32 *buf_32 = tx_buf;
 433        int k;
 434
 435        for (k = 0; k < words; k++)
 436                sh_msiof_write(p, TFDR, buf_32[k] << fs);
 437}
 438
 439static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
 440                                        const void *tx_buf, int words, int fs)
 441{
 442        const u32 *buf_32 = tx_buf;
 443        int k;
 444
 445        for (k = 0; k < words; k++)
 446                sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
 447}
 448
 449static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
 450                                        const void *tx_buf, int words, int fs)
 451{
 452        const u32 *buf_32 = tx_buf;
 453        int k;
 454
 455        for (k = 0; k < words; k++)
 456                sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
 457}
 458
 459static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
 460                                         const void *tx_buf, int words, int fs)
 461{
 462        const u32 *buf_32 = tx_buf;
 463        int k;
 464
 465        for (k = 0; k < words; k++)
 466                sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
 467}
 468
 469static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
 470                                     void *rx_buf, int words, int fs)
 471{
 472        u8 *buf_8 = rx_buf;
 473        int k;
 474
 475        for (k = 0; k < words; k++)
 476                buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
 477}
 478
 479static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
 480                                      void *rx_buf, int words, int fs)
 481{
 482        u16 *buf_16 = rx_buf;
 483        int k;
 484
 485        for (k = 0; k < words; k++)
 486                buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
 487}
 488
 489static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
 490                                       void *rx_buf, int words, int fs)
 491{
 492        u16 *buf_16 = rx_buf;
 493        int k;
 494
 495        for (k = 0; k < words; k++)
 496                put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
 497}
 498
 499static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
 500                                      void *rx_buf, int words, int fs)
 501{
 502        u32 *buf_32 = rx_buf;
 503        int k;
 504
 505        for (k = 0; k < words; k++)
 506                buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
 507}
 508
 509static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
 510                                       void *rx_buf, int words, int fs)
 511{
 512        u32 *buf_32 = rx_buf;
 513        int k;
 514
 515        for (k = 0; k < words; k++)
 516                put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
 517}
 518
 519static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
 520                                       void *rx_buf, int words, int fs)
 521{
 522        u32 *buf_32 = rx_buf;
 523        int k;
 524
 525        for (k = 0; k < words; k++)
 526                buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
 527}
 528
 529static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
 530                                       void *rx_buf, int words, int fs)
 531{
 532        u32 *buf_32 = rx_buf;
 533        int k;
 534
 535        for (k = 0; k < words; k++)
 536                put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
 537}
 538
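     /*
      * Per-device setup: prefer a GPIO chip select when one is available
      * (from DT, or via spi->controller_data on legacy boards); otherwise
      * record the native chip select polarity in TMDR1 before the first
      * transfer, and again only if the polarity changes.
      */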
 539static int sh_msiof_spi_setup(struct spi_device *spi)
 540{
 541        struct device_node      *np = spi->master->dev.of_node;
 542        struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
 543        u32 clr, set, tmp;
 544
 545        if (!np) {
  546                /*
  547                 * Use spi->controller_data for CS (same strategy as spi_gpio),
  548                 * if any; otherwise let the hardware control CS.
  549                 */
 550                spi->cs_gpio = (uintptr_t)spi->controller_data;
 551        }
 552
 553        if (gpio_is_valid(spi->cs_gpio)) {
 554                gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
 555                return 0;
 556        }
 557
 558        if (spi_controller_is_slave(p->master))
 559                return 0;
 560
 561        if (p->native_cs_inited &&
 562            (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH)))
 563                return 0;
 564
 565        /* Configure native chip select mode/polarity early */
 566        clr = MDR1_SYNCMD_MASK;
 567        set = MDR1_TRMD | TMDR1_PCON | MDR1_SYNCMD_SPI;
 568        if (spi->mode & SPI_CS_HIGH)
 569                clr |= BIT(MDR1_SYNCAC_SHIFT);
 570        else
 571                set |= BIT(MDR1_SYNCAC_SHIFT);
 572        pm_runtime_get_sync(&p->pdev->dev);
 573        tmp = sh_msiof_read(p, TMDR1) & ~clr;
 574        sh_msiof_write(p, TMDR1, tmp | set);
 575        pm_runtime_put(&p->pdev->dev);
 576        p->native_cs_high = spi->mode & SPI_CS_HIGH;
 577        p->native_cs_inited = true;
 578        return 0;
 579}
 580
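     /*
      * Runs before the chip select is asserted: program the pin
      * configuration for this device.  When a GPIO chip select is used,
      * point the hardware at an unused native SS so no real device sees
      * the sync signal.
      */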
 581static int sh_msiof_prepare_message(struct spi_master *master,
 582                                    struct spi_message *msg)
 583{
 584        struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
 585        const struct spi_device *spi = msg->spi;
 586        u32 ss, cs_high;
 587
 588        /* Configure pins before asserting CS */
 589        if (gpio_is_valid(spi->cs_gpio)) {
 590                ss = p->unused_ss;
 591                cs_high = p->native_cs_high;
 592        } else {
 593                ss = spi->chip_select;
 594                cs_high = !!(spi->mode & SPI_CS_HIGH);
 595        }
 596        sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
 597                                  !!(spi->mode & SPI_CPHA),
 598                                  !!(spi->mode & SPI_3WIRE),
 599                                  !!(spi->mode & SPI_LSB_FIRST), cs_high);
 600        return 0;
 601}
 602
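     /*
      * Bring the transfer engine up (clock, receiver, transmitter, then
      * frame sync) and, in sh_msiof_spi_stop(), tear it down in the
      * opposite order.  In slave mode the clock and frame sync are driven
      * by the remote master and are left untouched.
      */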
 603static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
 604{
 605        bool slave = spi_controller_is_slave(p->master);
 606        int ret = 0;
 607
 608        /* setup clock and rx/tx signals */
 609        if (!slave)
 610                ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
 611        if (rx_buf && !ret)
 612                ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
 613        if (!ret)
 614                ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
 615
 616        /* start by setting frame bit */
 617        if (!ret && !slave)
 618                ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
 619
 620        return ret;
 621}
 622
 623static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
 624{
 625        bool slave = spi_controller_is_slave(p->master);
 626        int ret = 0;
 627
 628        /* shut down frame, rx/tx and clock signals */
 629        if (!slave)
 630                ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
 631        if (!ret)
 632                ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
 633        if (rx_buf && !ret)
 634                ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
 635        if (!ret && !slave)
 636                ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
 637
 638        return ret;
 639}
 640
 641static int sh_msiof_slave_abort(struct spi_master *master)
 642{
 643        struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
 644
 645        p->slave_aborted = true;
 646        complete(&p->done);
 647        return 0;
 648}
 649
 650static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p)
 651{
 652        if (spi_controller_is_slave(p->master)) {
 653                if (wait_for_completion_interruptible(&p->done) ||
 654                    p->slave_aborted) {
 655                        dev_dbg(&p->pdev->dev, "interrupted\n");
 656                        return -EINTR;
 657                }
 658        } else {
 659                if (!wait_for_completion_timeout(&p->done, HZ)) {
 660                        dev_err(&p->pdev->dev, "timeout\n");
 661                        return -ETIMEDOUT;
 662                }
 663        }
 664
 665        return 0;
 666}
 667
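     /*
      * One PIO transfer of at most a FIFO's worth of words: prime the TX
      * FIFO, start the engine, wait for the frame end interrupt, then
      * drain the RX FIFO.  Returns the number of words transferred or a
      * negative error code.
      */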
 668static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
 669                                  void (*tx_fifo)(struct sh_msiof_spi_priv *,
 670                                                  const void *, int, int),
 671                                  void (*rx_fifo)(struct sh_msiof_spi_priv *,
 672                                                  void *, int, int),
 673                                  const void *tx_buf, void *rx_buf,
 674                                  int words, int bits)
 675{
 676        int fifo_shift;
 677        int ret;
 678
 679        /* limit maximum word transfer to rx/tx fifo size */
 680        if (tx_buf)
 681                words = min_t(int, words, p->tx_fifo_size);
 682        if (rx_buf)
 683                words = min_t(int, words, p->rx_fifo_size);
 684
 685        /* the fifo contents need shifting */
 686        fifo_shift = 32 - bits;
 687
 688        /* default FIFO watermarks for PIO */
 689        sh_msiof_write(p, FCTR, 0);
 690
 691        /* setup msiof transfer mode registers */
 692        sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
 693        sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);
 694
 695        /* write tx fifo */
 696        if (tx_buf)
 697                tx_fifo(p, tx_buf, words, fifo_shift);
 698
 699        reinit_completion(&p->done);
 700        p->slave_aborted = false;
 701
 702        ret = sh_msiof_spi_start(p, rx_buf);
 703        if (ret) {
 704                dev_err(&p->pdev->dev, "failed to start hardware\n");
 705                goto stop_ier;
 706        }
 707
 708        /* wait for tx fifo to be emptied / rx fifo to be filled */
 709        ret = sh_msiof_wait_for_completion(p);
 710        if (ret)
 711                goto stop_reset;
 712
 713        /* read rx fifo */
 714        if (rx_buf)
 715                rx_fifo(p, rx_buf, words, fifo_shift);
 716
 717        /* clear status bits */
 718        sh_msiof_reset_str(p);
 719
 720        ret = sh_msiof_spi_stop(p, rx_buf);
 721        if (ret) {
 722                dev_err(&p->pdev->dev, "failed to shut down hardware\n");
 723                return ret;
 724        }
 725
 726        return words;
 727
 728stop_reset:
 729        sh_msiof_reset_str(p);
 730        sh_msiof_spi_stop(p, rx_buf);
 731stop_ier:
 732        sh_msiof_write(p, IER, 0);
 733        return ret;
 734}
 735
 736static void sh_msiof_dma_complete(void *arg)
 737{
 738        struct sh_msiof_spi_priv *p = arg;
 739
 740        sh_msiof_write(p, IER, 0);
 741        complete(&p->done);
 742}
 743
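     /*
      * One DMA transfer through the pre-mapped bounce pages.  The
      * descriptors are prepared and submitted before the hardware is
      * touched, so a failure here (-EAGAIN) still allows a fallback to
      * PIO.  For TX-only transfers an extra wait on TEOF ensures the FIFO
      * has drained before the engine is stopped.
      */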
 744static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
 745                             void *rx, unsigned int len)
 746{
 747        u32 ier_bits = 0;
 748        struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
 749        dma_cookie_t cookie;
 750        int ret;
 751
 752        /* First prepare and submit the DMA request(s), as this may fail */
 753        if (rx) {
 754                ier_bits |= IER_RDREQE | IER_RDMAE;
 755                desc_rx = dmaengine_prep_slave_single(p->master->dma_rx,
 756                                        p->rx_dma_addr, len, DMA_DEV_TO_MEM,
 757                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 758                if (!desc_rx)
 759                        return -EAGAIN;
 760
 761                desc_rx->callback = sh_msiof_dma_complete;
 762                desc_rx->callback_param = p;
 763                cookie = dmaengine_submit(desc_rx);
 764                if (dma_submit_error(cookie))
 765                        return cookie;
 766        }
 767
 768        if (tx) {
 769                ier_bits |= IER_TDREQE | IER_TDMAE;
 770                dma_sync_single_for_device(p->master->dma_tx->device->dev,
 771                                           p->tx_dma_addr, len, DMA_TO_DEVICE);
 772                desc_tx = dmaengine_prep_slave_single(p->master->dma_tx,
 773                                        p->tx_dma_addr, len, DMA_MEM_TO_DEV,
 774                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 775                if (!desc_tx) {
 776                        ret = -EAGAIN;
 777                        goto no_dma_tx;
 778                }
 779
 780                if (rx) {
 781                        /* No callback */
 782                        desc_tx->callback = NULL;
 783                } else {
 784                        desc_tx->callback = sh_msiof_dma_complete;
 785                        desc_tx->callback_param = p;
 786                }
 787                cookie = dmaengine_submit(desc_tx);
 788                if (dma_submit_error(cookie)) {
 789                        ret = cookie;
 790                        goto no_dma_tx;
 791                }
 792        }
 793
 794        /* 1 stage FIFO watermarks for DMA */
 795        sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
 796
 797        /* setup msiof transfer mode registers (32-bit words) */
 798        sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
 799
 800        sh_msiof_write(p, IER, ier_bits);
 801
 802        reinit_completion(&p->done);
 803        p->slave_aborted = false;
 804
 805        /* Now start DMA */
 806        if (rx)
 807                dma_async_issue_pending(p->master->dma_rx);
 808        if (tx)
 809                dma_async_issue_pending(p->master->dma_tx);
 810
 811        ret = sh_msiof_spi_start(p, rx);
 812        if (ret) {
 813                dev_err(&p->pdev->dev, "failed to start hardware\n");
 814                goto stop_dma;
 815        }
 816
 817        /* wait for tx/rx DMA completion */
 818        ret = sh_msiof_wait_for_completion(p);
 819        if (ret)
 820                goto stop_reset;
 821
 822        if (!rx) {
 823                reinit_completion(&p->done);
 824                sh_msiof_write(p, IER, IER_TEOFE);
 825
 826                /* wait for tx fifo to be emptied */
 827                ret = sh_msiof_wait_for_completion(p);
 828                if (ret)
 829                        goto stop_reset;
 830        }
 831
 832        /* clear status bits */
 833        sh_msiof_reset_str(p);
 834
 835        ret = sh_msiof_spi_stop(p, rx);
 836        if (ret) {
 837                dev_err(&p->pdev->dev, "failed to shut down hardware\n");
 838                return ret;
 839        }
 840
 841        if (rx)
 842                dma_sync_single_for_cpu(p->master->dma_rx->device->dev,
 843                                        p->rx_dma_addr, len,
 844                                        DMA_FROM_DEVICE);
 845
 846        return 0;
 847
 848stop_reset:
 849        sh_msiof_reset_str(p);
 850        sh_msiof_spi_stop(p, rx);
 851stop_dma:
 852        if (tx)
 853                dmaengine_terminate_all(p->master->dma_tx);
 854no_dma_tx:
 855        if (rx)
 856                dmaengine_terminate_all(p->master->dma_rx);
 857        sh_msiof_write(p, IER, 0);
 858        return ret;
 859}
 860
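     /*
      * The DMA path always moves 32-bit FIFO words, so 8-bit and 16-bit
      * transfers are repacked through these helpers: byte swapping for
      * 8-bit data, halfword swapping for 16-bit data, and a plain copy
      * for 32-bit data.
      */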
 861static void copy_bswap32(u32 *dst, const u32 *src, unsigned int words)
 862{
 863        /* src or dst can be unaligned, but not both */
 864        if ((unsigned long)src & 3) {
 865                while (words--) {
 866                        *dst++ = swab32(get_unaligned(src));
 867                        src++;
 868                }
 869        } else if ((unsigned long)dst & 3) {
 870                while (words--) {
 871                        put_unaligned(swab32(*src++), dst);
 872                        dst++;
 873                }
 874        } else {
 875                while (words--)
 876                        *dst++ = swab32(*src++);
 877        }
 878}
 879
 880static void copy_wswap32(u32 *dst, const u32 *src, unsigned int words)
 881{
 882        /* src or dst can be unaligned, but not both */
 883        if ((unsigned long)src & 3) {
 884                while (words--) {
 885                        *dst++ = swahw32(get_unaligned(src));
 886                        src++;
 887                }
 888        } else if ((unsigned long)dst & 3) {
 889                while (words--) {
 890                        put_unaligned(swahw32(*src++), dst);
 891                        dst++;
 892                }
 893        } else {
 894                while (words--)
 895                        *dst++ = swahw32(*src++);
 896        }
 897}
 898
 899static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
 900{
 901        memcpy(dst, src, words * 4);
 902}
 903
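     /*
      * Main transfer handler: use DMA in FIFO-sized chunks for transfers
      * longer than 15 bytes when a DMA channel is available, and PIO
      * otherwise.  In the PIO case, 8-bit data is packed into 32-bit words
      * when the length allows, cutting the number of FIFO accesses by a
      * factor of four.
      */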
 904static int sh_msiof_transfer_one(struct spi_master *master,
 905                                 struct spi_device *spi,
 906                                 struct spi_transfer *t)
 907{
 908        struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
 909        void (*copy32)(u32 *, const u32 *, unsigned int);
 910        void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
 911        void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
 912        const void *tx_buf = t->tx_buf;
 913        void *rx_buf = t->rx_buf;
 914        unsigned int len = t->len;
 915        unsigned int bits = t->bits_per_word;
 916        unsigned int bytes_per_word;
 917        unsigned int words;
 918        int n;
 919        bool swab;
 920        int ret;
 921
  922        /* setup clocks (the MSIOF clock is already enabled via runtime PM) */
 923        if (!spi_controller_is_slave(p->master))
 924                sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
 925
 926        while (master->dma_tx && len > 15) {
  927                /*
  928                 * DMA supports 32-bit words only, hence pack 8-bit and 16-bit
  929                 * words into 32-bit words, swapping bytes or halfwords as needed.
  930                 */
 931                unsigned int l = 0;
 932
 933                if (tx_buf)
 934                        l = min(len, p->tx_fifo_size * 4);
 935                if (rx_buf)
 936                        l = min(len, p->rx_fifo_size * 4);
 937
 938                if (bits <= 8) {
 939                        if (l & 3)
 940                                break;
 941                        copy32 = copy_bswap32;
 942                } else if (bits <= 16) {
 943                        if (l & 3)
 944                                break;
 945                        copy32 = copy_wswap32;
 946                } else {
 947                        copy32 = copy_plain32;
 948                }
 949
 950                if (tx_buf)
 951                        copy32(p->tx_dma_page, tx_buf, l / 4);
 952
 953                ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
 954                if (ret == -EAGAIN) {
 955                        dev_warn_once(&p->pdev->dev,
 956                                "DMA not available, falling back to PIO\n");
 957                        break;
 958                }
 959                if (ret)
 960                        return ret;
 961
 962                if (rx_buf) {
 963                        copy32(rx_buf, p->rx_dma_page, l / 4);
 964                        rx_buf += l;
 965                }
 966                if (tx_buf)
 967                        tx_buf += l;
 968
 969                len -= l;
 970                if (!len)
 971                        return 0;
 972        }
 973
 974        if (bits <= 8 && len > 15 && !(len & 3)) {
 975                bits = 32;
 976                swab = true;
 977        } else {
 978                swab = false;
 979        }
 980
 981        /* setup bytes per word and fifo read/write functions */
 982        if (bits <= 8) {
 983                bytes_per_word = 1;
 984                tx_fifo = sh_msiof_spi_write_fifo_8;
 985                rx_fifo = sh_msiof_spi_read_fifo_8;
 986        } else if (bits <= 16) {
 987                bytes_per_word = 2;
 988                if ((unsigned long)tx_buf & 0x01)
 989                        tx_fifo = sh_msiof_spi_write_fifo_16u;
 990                else
 991                        tx_fifo = sh_msiof_spi_write_fifo_16;
 992
 993                if ((unsigned long)rx_buf & 0x01)
 994                        rx_fifo = sh_msiof_spi_read_fifo_16u;
 995                else
 996                        rx_fifo = sh_msiof_spi_read_fifo_16;
 997        } else if (swab) {
 998                bytes_per_word = 4;
 999                if ((unsigned long)tx_buf & 0x03)
1000                        tx_fifo = sh_msiof_spi_write_fifo_s32u;
1001                else
1002                        tx_fifo = sh_msiof_spi_write_fifo_s32;
1003
1004                if ((unsigned long)rx_buf & 0x03)
1005                        rx_fifo = sh_msiof_spi_read_fifo_s32u;
1006                else
1007                        rx_fifo = sh_msiof_spi_read_fifo_s32;
1008        } else {
1009                bytes_per_word = 4;
1010                if ((unsigned long)tx_buf & 0x03)
1011                        tx_fifo = sh_msiof_spi_write_fifo_32u;
1012                else
1013                        tx_fifo = sh_msiof_spi_write_fifo_32;
1014
1015                if ((unsigned long)rx_buf & 0x03)
1016                        rx_fifo = sh_msiof_spi_read_fifo_32u;
1017                else
1018                        rx_fifo = sh_msiof_spi_read_fifo_32;
1019        }
1020
1021        /* transfer in fifo sized chunks */
1022        words = len / bytes_per_word;
1023
1024        while (words > 0) {
1025                n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, tx_buf, rx_buf,
1026                                           words, bits);
1027                if (n < 0)
1028                        return n;
1029
1030                if (tx_buf)
1031                        tx_buf += n * bytes_per_word;
1032                if (rx_buf)
1033                        rx_buf += n * bytes_per_word;
1034                words -= n;
1035        }
1036
1037        return 0;
1038}
1039
1040static const struct sh_msiof_chipdata sh_data = {
1041        .tx_fifo_size = 64,
1042        .rx_fifo_size = 64,
1043        .master_flags = 0,
1044        .min_div = 1,
1045};
1046
1047static const struct sh_msiof_chipdata rcar_gen2_data = {
1048        .tx_fifo_size = 64,
1049        .rx_fifo_size = 64,
1050        .master_flags = SPI_MASTER_MUST_TX,
1051        .min_div = 1,
1052};
1053
1054static const struct sh_msiof_chipdata rcar_gen3_data = {
1055        .tx_fifo_size = 64,
1056        .rx_fifo_size = 64,
1057        .master_flags = SPI_MASTER_MUST_TX,
1058        .min_div = 2,
1059};
1060
1061static const struct of_device_id sh_msiof_match[] = {
1062        { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
1063        { .compatible = "renesas,msiof-r8a7743",   .data = &rcar_gen2_data },
1064        { .compatible = "renesas,msiof-r8a7745",   .data = &rcar_gen2_data },
1065        { .compatible = "renesas,msiof-r8a7790",   .data = &rcar_gen2_data },
1066        { .compatible = "renesas,msiof-r8a7791",   .data = &rcar_gen2_data },
1067        { .compatible = "renesas,msiof-r8a7792",   .data = &rcar_gen2_data },
1068        { .compatible = "renesas,msiof-r8a7793",   .data = &rcar_gen2_data },
1069        { .compatible = "renesas,msiof-r8a7794",   .data = &rcar_gen2_data },
1070        { .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
1071        { .compatible = "renesas,msiof-r8a7796",   .data = &rcar_gen3_data },
1072        { .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
1073        { .compatible = "renesas,sh-msiof",        .data = &sh_data }, /* Deprecated */
1074        {},
1075};
1076MODULE_DEVICE_TABLE(of, sh_msiof_match);
1077
1078#ifdef CONFIG_OF
1079static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
1080{
1081        struct sh_msiof_spi_info *info;
1082        struct device_node *np = dev->of_node;
1083        u32 num_cs = 1;
1084
1085        info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
1086        if (!info)
1087                return NULL;
1088
1089        info->mode = of_property_read_bool(np, "spi-slave") ? MSIOF_SPI_SLAVE
1090                                                            : MSIOF_SPI_MASTER;
1091
1092        /* Parse the MSIOF properties */
1093        if (info->mode == MSIOF_SPI_MASTER)
1094                of_property_read_u32(np, "num-cs", &num_cs);
1095        of_property_read_u32(np, "renesas,tx-fifo-size",
1096                                        &info->tx_fifo_override);
1097        of_property_read_u32(np, "renesas,rx-fifo-size",
1098                                        &info->rx_fifo_override);
1099        of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
1100        of_property_read_u32(np, "renesas,syncdl", &info->syncdl);
1101
1102        info->num_chipselect = num_cs;
1103
1104        return info;
1105}
1106#else
1107static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
1108{
1109        return NULL;
1110}
1111#endif
1112
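     /*
      * Count the chip selects backed by GPIOs.  Every slot without a GPIO
      * must map to one of the (at most three) native SS signals, and when
      * GPIOs are in use one native SS must remain unused so that
      * sh_msiof_prepare_message() can park the sync signal on it.
      */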
1113static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
1114{
1115        struct device *dev = &p->pdev->dev;
1116        unsigned int used_ss_mask = 0;
1117        unsigned int cs_gpios = 0;
1118        unsigned int num_cs, i;
1119        int ret;
1120
1121        ret = gpiod_count(dev, "cs");
1122        if (ret <= 0)
1123                return 0;
1124
1125        num_cs = max_t(unsigned int, ret, p->master->num_chipselect);
1126        for (i = 0; i < num_cs; i++) {
1127                struct gpio_desc *gpiod;
1128
1129                gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
1130                if (!IS_ERR(gpiod)) {
1131                        cs_gpios++;
1132                        continue;
1133                }
1134
1135                if (PTR_ERR(gpiod) != -ENOENT)
1136                        return PTR_ERR(gpiod);
1137
1138                if (i >= MAX_SS) {
1139                        dev_err(dev, "Invalid native chip select %d\n", i);
1140                        return -EINVAL;
1141                }
1142                used_ss_mask |= BIT(i);
1143        }
1144        p->unused_ss = ffz(used_ss_mask);
1145        if (cs_gpios && p->unused_ss >= MAX_SS) {
1146                dev_err(dev, "No unused native chip select available\n");
1147                return -EINVAL;
1148        }
1149        return 0;
1150}
1151
1152static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
1153        enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
1154{
1155        dma_cap_mask_t mask;
1156        struct dma_chan *chan;
1157        struct dma_slave_config cfg;
1158        int ret;
1159
1160        dma_cap_zero(mask);
1161        dma_cap_set(DMA_SLAVE, mask);
1162
1163        chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
1164                                (void *)(unsigned long)id, dev,
1165                                dir == DMA_MEM_TO_DEV ? "tx" : "rx");
1166        if (!chan) {
1167                dev_warn(dev, "dma_request_slave_channel_compat failed\n");
1168                return NULL;
1169        }
1170
1171        memset(&cfg, 0, sizeof(cfg));
1172        cfg.direction = dir;
1173        if (dir == DMA_MEM_TO_DEV) {
1174                cfg.dst_addr = port_addr;
1175                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1176        } else {
1177                cfg.src_addr = port_addr;
1178                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1179        }
1180
1181        ret = dmaengine_slave_config(chan, &cfg);
1182        if (ret) {
1183                dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
1184                dma_release_channel(chan);
1185                return NULL;
1186        }
1187
1188        return chan;
1189}
1190
1191static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
1192{
1193        struct platform_device *pdev = p->pdev;
1194        struct device *dev = &pdev->dev;
1195        const struct sh_msiof_spi_info *info = dev_get_platdata(dev);
1196        unsigned int dma_tx_id, dma_rx_id;
1197        const struct resource *res;
1198        struct spi_master *master;
1199        struct device *tx_dev, *rx_dev;
1200
1201        if (dev->of_node) {
1202                /* In the OF case we will get the slave IDs from the DT */
1203                dma_tx_id = 0;
1204                dma_rx_id = 0;
1205        } else if (info && info->dma_tx_id && info->dma_rx_id) {
1206                dma_tx_id = info->dma_tx_id;
1207                dma_rx_id = info->dma_rx_id;
1208        } else {
 1209                /* No DMA configuration available; this is not an error */
1210                return 0;
1211        }
1212
1213        /* The DMA engine uses the second register set, if present */
1214        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1215        if (!res)
1216                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1217
1218        master = p->master;
1219        master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
1220                                                   dma_tx_id,
1221                                                   res->start + TFDR);
1222        if (!master->dma_tx)
1223                return -ENODEV;
1224
1225        master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
1226                                                   dma_rx_id,
1227                                                   res->start + RFDR);
1228        if (!master->dma_rx)
1229                goto free_tx_chan;
1230
1231        p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
1232        if (!p->tx_dma_page)
1233                goto free_rx_chan;
1234
1235        p->rx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
1236        if (!p->rx_dma_page)
1237                goto free_tx_page;
1238
1239        tx_dev = master->dma_tx->device->dev;
1240        p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
1241                                        DMA_TO_DEVICE);
1242        if (dma_mapping_error(tx_dev, p->tx_dma_addr))
1243                goto free_rx_page;
1244
1245        rx_dev = master->dma_rx->device->dev;
1246        p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
1247                                        DMA_FROM_DEVICE);
1248        if (dma_mapping_error(rx_dev, p->rx_dma_addr))
1249                goto unmap_tx_page;
1250
1251        dev_info(dev, "DMA available");
1252        return 0;
1253
1254unmap_tx_page:
1255        dma_unmap_single(tx_dev, p->tx_dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
1256free_rx_page:
1257        free_page((unsigned long)p->rx_dma_page);
1258free_tx_page:
1259        free_page((unsigned long)p->tx_dma_page);
1260free_rx_chan:
1261        dma_release_channel(master->dma_rx);
1262free_tx_chan:
1263        dma_release_channel(master->dma_tx);
1264        master->dma_tx = NULL;
1265        return -ENODEV;
1266}
1267
1268static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
1269{
1270        struct spi_master *master = p->master;
1271
1272        if (!master->dma_tx)
1273                return;
1274
1275        dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr,
1276                         PAGE_SIZE, DMA_FROM_DEVICE);
1277        dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr,
1278                         PAGE_SIZE, DMA_TO_DEVICE);
1279        free_page((unsigned long)p->rx_dma_page);
1280        free_page((unsigned long)p->tx_dma_page);
1281        dma_release_channel(master->dma_rx);
1282        dma_release_channel(master->dma_tx);
1283}
1284
1285static int sh_msiof_spi_probe(struct platform_device *pdev)
1286{
1287        struct resource *r;
1288        struct spi_master *master;
1289        const struct sh_msiof_chipdata *chipdata;
1290        struct sh_msiof_spi_info *info;
1291        struct sh_msiof_spi_priv *p;
1292        int i;
1293        int ret;
1294
1295        chipdata = of_device_get_match_data(&pdev->dev);
1296        if (chipdata) {
1297                info = sh_msiof_spi_parse_dt(&pdev->dev);
1298        } else {
1299                chipdata = (const void *)pdev->id_entry->driver_data;
1300                info = dev_get_platdata(&pdev->dev);
1301        }
1302
1303        if (!info) {
1304                dev_err(&pdev->dev, "failed to obtain device info\n");
1305                return -ENXIO;
1306        }
1307
1308        if (info->mode == MSIOF_SPI_SLAVE)
1309                master = spi_alloc_slave(&pdev->dev,
1310                                         sizeof(struct sh_msiof_spi_priv));
1311        else
1312                master = spi_alloc_master(&pdev->dev,
1313                                          sizeof(struct sh_msiof_spi_priv));
1314        if (master == NULL)
1315                return -ENOMEM;
1316
1317        p = spi_master_get_devdata(master);
1318
1319        platform_set_drvdata(pdev, p);
1320        p->master = master;
1321        p->info = info;
1322        p->min_div = chipdata->min_div;
1323
1324        init_completion(&p->done);
1325
1326        p->clk = devm_clk_get(&pdev->dev, NULL);
1327        if (IS_ERR(p->clk)) {
1328                dev_err(&pdev->dev, "cannot get clock\n");
1329                ret = PTR_ERR(p->clk);
1330                goto err1;
1331        }
1332
1333        i = platform_get_irq(pdev, 0);
1334        if (i < 0) {
1335                dev_err(&pdev->dev, "cannot get platform IRQ\n");
1336                ret = -ENOENT;
1337                goto err1;
1338        }
1339
1340        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1341        p->mapbase = devm_ioremap_resource(&pdev->dev, r);
1342        if (IS_ERR(p->mapbase)) {
1343                ret = PTR_ERR(p->mapbase);
1344                goto err1;
1345        }
1346
1347        ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
1348                               dev_name(&pdev->dev), p);
1349        if (ret) {
1350                dev_err(&pdev->dev, "unable to request irq\n");
1351                goto err1;
1352        }
1353
1354        p->pdev = pdev;
1355        pm_runtime_enable(&pdev->dev);
1356
1357        /* Platform data may override FIFO sizes */
1358        p->tx_fifo_size = chipdata->tx_fifo_size;
1359        p->rx_fifo_size = chipdata->rx_fifo_size;
1360        if (p->info->tx_fifo_override)
1361                p->tx_fifo_size = p->info->tx_fifo_override;
1362        if (p->info->rx_fifo_override)
1363                p->rx_fifo_size = p->info->rx_fifo_override;
1364
1365        /* Setup GPIO chip selects */
1366        master->num_chipselect = p->info->num_chipselect;
1367        ret = sh_msiof_get_cs_gpios(p);
1368        if (ret)
1369                goto err1;
1370
1371        /* init master code */
1372        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1373        master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
1374        master->flags = chipdata->master_flags;
1375        master->bus_num = pdev->id;
1376        master->dev.of_node = pdev->dev.of_node;
1377        master->setup = sh_msiof_spi_setup;
1378        master->prepare_message = sh_msiof_prepare_message;
1379        master->slave_abort = sh_msiof_slave_abort;
1380        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
1381        master->auto_runtime_pm = true;
1382        master->transfer_one = sh_msiof_transfer_one;
1383
1384        ret = sh_msiof_request_dma(p);
1385        if (ret < 0)
1386                dev_warn(&pdev->dev, "DMA not available, using PIO\n");
1387
1388        ret = devm_spi_register_master(&pdev->dev, master);
1389        if (ret < 0) {
1390                dev_err(&pdev->dev, "spi_register_master error.\n");
1391                goto err2;
1392        }
1393
1394        return 0;
1395
1396 err2:
1397        sh_msiof_release_dma(p);
1398        pm_runtime_disable(&pdev->dev);
1399 err1:
1400        spi_master_put(master);
1401        return ret;
1402}
1403
1404static int sh_msiof_spi_remove(struct platform_device *pdev)
1405{
1406        struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
1407
1408        sh_msiof_release_dma(p);
1409        pm_runtime_disable(&pdev->dev);
1410        return 0;
1411}
1412
1413static const struct platform_device_id spi_driver_ids[] = {
1414        { "spi_sh_msiof",       (kernel_ulong_t)&sh_data },
1415        {},
1416};
1417MODULE_DEVICE_TABLE(platform, spi_driver_ids);
1418
1419static struct platform_driver sh_msiof_spi_drv = {
1420        .probe          = sh_msiof_spi_probe,
1421        .remove         = sh_msiof_spi_remove,
1422        .id_table       = spi_driver_ids,
1423        .driver         = {
1424                .name           = "spi_sh_msiof",
1425                .of_match_table = of_match_ptr(sh_msiof_match),
1426        },
1427};
1428module_platform_driver(sh_msiof_spi_drv);
1429
1430MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver");
1431MODULE_AUTHOR("Magnus Damm");
1432MODULE_LICENSE("GPL v2");
1433MODULE_ALIAS("platform:spi_sh_msiof");
1434