linux/drivers/spi/spi-sh-msiof.c
/*
 * SuperH MSIOF SPI Master Interface
 *
 * Copyright (c) 2009 Magnus Damm
 * Copyright (C) 2014 Glider bvba
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>

#include <linux/spi/sh_msiof.h>
#include <linux/spi/spi.h>

#include <asm/unaligned.h>


struct sh_msiof_chipdata {
        u16 tx_fifo_size;
        u16 rx_fifo_size;
        u16 master_flags;
};

struct sh_msiof_spi_priv {
        struct spi_master *master;
        void __iomem *mapbase;
        struct clk *clk;
        struct platform_device *pdev;
        struct sh_msiof_spi_info *info;
        struct completion done;
        unsigned int tx_fifo_size;
        unsigned int rx_fifo_size;
        void *tx_dma_page;
        void *rx_dma_page;
        dma_addr_t tx_dma_addr;
        dma_addr_t rx_dma_addr;
};

#define TMDR1   0x00    /* Transmit Mode Register 1 */
#define TMDR2   0x04    /* Transmit Mode Register 2 */
#define TMDR3   0x08    /* Transmit Mode Register 3 */
#define RMDR1   0x10    /* Receive Mode Register 1 */
#define RMDR2   0x14    /* Receive Mode Register 2 */
#define RMDR3   0x18    /* Receive Mode Register 3 */
#define TSCR    0x20    /* Transmit Clock Select Register */
#define RSCR    0x22    /* Receive Clock Select Register (SH, A1, APE6) */
#define CTR     0x28    /* Control Register */
#define FCTR    0x30    /* FIFO Control Register */
#define STR     0x40    /* Status Register */
#define IER     0x44    /* Interrupt Enable Register */
#define TDR1    0x48    /* Transmit Control Data Register 1 (SH, A1) */
#define TDR2    0x4c    /* Transmit Control Data Register 2 (SH, A1) */
#define TFDR    0x50    /* Transmit FIFO Data Register */
#define RDR1    0x58    /* Receive Control Data Register 1 (SH, A1) */
#define RDR2    0x5c    /* Receive Control Data Register 2 (SH, A1) */
#define RFDR    0x60    /* Receive FIFO Data Register */

/* TMDR1 and RMDR1 */
#define MDR1_TRMD        0x80000000 /* Transfer Mode (1 = Master mode) */
#define MDR1_SYNCMD_MASK 0x30000000 /* SYNC Mode */
#define MDR1_SYNCMD_SPI  0x20000000 /*   Level mode/SPI */
#define MDR1_SYNCMD_LR   0x30000000 /*   L/R mode */
#define MDR1_SYNCAC_SHIFT        25 /* Sync Polarity (1 = Active-low) */
#define MDR1_BITLSB_SHIFT        24 /* MSB/LSB First (1 = LSB first) */
#define MDR1_DTDL_SHIFT          20 /* Data Pin Bit Delay for MSIOF_SYNC */
#define MDR1_SYNCDL_SHIFT        16 /* Frame Sync Signal Timing Delay */
#define MDR1_FLD_MASK    0x0000000c /* Frame Sync Signal Interval (0-3) */
#define MDR1_FLD_SHIFT            2
#define MDR1_XXSTP       0x00000001 /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
#define TMDR1_PCON       0x40000000 /* Transfer Signal Connection */

/* TMDR2 and RMDR2 */
#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
#define MDR2_WDLEN1(i)  (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
#define MDR2_GRPMASK1   0x00000001 /* Group Output Mask 1 (SH, A1) */

/* TSCR and RSCR */
#define SCR_BRPS_MASK       0x1f00 /* Prescaler Setting (1-32) */
#define SCR_BRPS(i)     (((i) - 1) << 8)
#define SCR_BRDV_MASK       0x0007 /* Baud Rate Generator's Division Ratio */
#define SCR_BRDV_DIV_2      0x0000
#define SCR_BRDV_DIV_4      0x0001
#define SCR_BRDV_DIV_8      0x0002
#define SCR_BRDV_DIV_16     0x0003
#define SCR_BRDV_DIV_32     0x0004
#define SCR_BRDV_DIV_1      0x0007

/* CTR */
#define CTR_TSCKIZ_MASK 0xc0000000 /* Transmit Clock I/O Polarity Select */
#define CTR_TSCKIZ_SCK  0x80000000 /*   Disable SCK when TX disabled */
#define CTR_TSCKIZ_POL_SHIFT    30 /*   Transmit Clock Polarity */
#define CTR_RSCKIZ_MASK 0x30000000 /* Receive Clock Polarity Select */
#define CTR_RSCKIZ_SCK  0x20000000 /*   Must match CTR_TSCKIZ_SCK */
#define CTR_RSCKIZ_POL_SHIFT    28 /*   Receive Clock Polarity */
#define CTR_TEDG_SHIFT          27 /* Transmit Timing (1 = falling edge) */
#define CTR_REDG_SHIFT          26 /* Receive Timing (1 = falling edge) */
#define CTR_TXDIZ_MASK  0x00c00000 /* Pin Output When TX is Disabled */
#define CTR_TXDIZ_LOW   0x00000000 /*   0 */
#define CTR_TXDIZ_HIGH  0x00400000 /*   1 */
#define CTR_TXDIZ_HIZ   0x00800000 /*   High-impedance */
#define CTR_TSCKE       0x00008000 /* Transmit Serial Clock Output Enable */
#define CTR_TFSE        0x00004000 /* Transmit Frame Sync Signal Output Enable */
#define CTR_TXE         0x00000200 /* Transmit Enable */
#define CTR_RXE         0x00000100 /* Receive Enable */

/* FCTR */
#define FCTR_TFWM_MASK  0xe0000000 /* Transmit FIFO Watermark */
#define FCTR_TFWM_64    0x00000000 /*  Transfer Request when 64 empty stages */
#define FCTR_TFWM_32    0x20000000 /*  Transfer Request when 32 empty stages */
#define FCTR_TFWM_24    0x40000000 /*  Transfer Request when 24 empty stages */
#define FCTR_TFWM_16    0x60000000 /*  Transfer Request when 16 empty stages */
#define FCTR_TFWM_12    0x80000000 /*  Transfer Request when 12 empty stages */
#define FCTR_TFWM_8     0xa0000000 /*  Transfer Request when 8 empty stages */
#define FCTR_TFWM_4     0xc0000000 /*  Transfer Request when 4 empty stages */
#define FCTR_TFWM_1     0xe0000000 /*  Transfer Request when 1 empty stage */
#define FCTR_TFUA_MASK  0x07f00000 /* Transmit FIFO Usable Area */
#define FCTR_TFUA_SHIFT         20
#define FCTR_TFUA(i)    ((i) << FCTR_TFUA_SHIFT)
#define FCTR_RFWM_MASK  0x0000e000 /* Receive FIFO Watermark */
#define FCTR_RFWM_1     0x00000000 /*  Transfer Request when 1 valid stage */
#define FCTR_RFWM_4     0x00002000 /*  Transfer Request when 4 valid stages */
#define FCTR_RFWM_8     0x00004000 /*  Transfer Request when 8 valid stages */
#define FCTR_RFWM_16    0x00006000 /*  Transfer Request when 16 valid stages */
#define FCTR_RFWM_32    0x00008000 /*  Transfer Request when 32 valid stages */
#define FCTR_RFWM_64    0x0000a000 /*  Transfer Request when 64 valid stages */
#define FCTR_RFWM_128   0x0000c000 /*  Transfer Request when 128 valid stages */
#define FCTR_RFWM_256   0x0000e000 /*  Transfer Request when 256 valid stages */
#define FCTR_RFUA_MASK  0x00001ff0 /* Receive FIFO Usable Area (0x40 = full) */
#define FCTR_RFUA_SHIFT          4
#define FCTR_RFUA(i)    ((i) << FCTR_RFUA_SHIFT)

/* STR */
#define STR_TFEMP       0x20000000 /* Transmit FIFO Empty */
#define STR_TDREQ       0x10000000 /* Transmit Data Transfer Request */
#define STR_TEOF        0x00800000 /* Frame Transmission End */
#define STR_TFSERR      0x00200000 /* Transmit Frame Synchronization Error */
#define STR_TFOVF       0x00100000 /* Transmit FIFO Overflow */
#define STR_TFUDF       0x00080000 /* Transmit FIFO Underflow */
#define STR_RFFUL       0x00002000 /* Receive FIFO Full */
#define STR_RDREQ       0x00001000 /* Receive Data Transfer Request */
#define STR_REOF        0x00000080 /* Frame Reception End */
#define STR_RFSERR      0x00000020 /* Receive Frame Synchronization Error */
#define STR_RFUDF       0x00000010 /* Receive FIFO Underflow */
#define STR_RFOVF       0x00000008 /* Receive FIFO Overflow */

/* IER */
#define IER_TDMAE       0x80000000 /* Transmit Data DMA Transfer Req. Enable */
#define IER_TFEMPE      0x20000000 /* Transmit FIFO Empty Enable */
#define IER_TDREQE      0x10000000 /* Transmit Data Transfer Request Enable */
#define IER_TEOFE       0x00800000 /* Frame Transmission End Enable */
#define IER_TFSERRE     0x00200000 /* Transmit Frame Sync Error Enable */
#define IER_TFOVFE      0x00100000 /* Transmit FIFO Overflow Enable */
#define IER_TFUDFE      0x00080000 /* Transmit FIFO Underflow Enable */
#define IER_RDMAE       0x00008000 /* Receive Data DMA Transfer Req. Enable */
#define IER_RFFULE      0x00002000 /* Receive FIFO Full Enable */
#define IER_RDREQE      0x00001000 /* Receive Data Transfer Request Enable */
#define IER_REOFE       0x00000080 /* Frame Reception End Enable */
#define IER_RFSERRE     0x00000020 /* Receive Frame Sync Error Enable */
#define IER_RFUDFE      0x00000010 /* Receive FIFO Underflow Enable */
#define IER_RFOVFE      0x00000008 /* Receive FIFO Overflow Enable */


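/*
 * Register access helpers: TSCR and RSCR are 16-bit registers, all other
 * MSIOF registers are accessed as 32-bit quantities.
 */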
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
        switch (reg_offs) {
        case TSCR:
        case RSCR:
                return ioread16(p->mapbase + reg_offs);
        default:
                return ioread32(p->mapbase + reg_offs);
        }
}

static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
                           u32 value)
{
        switch (reg_offs) {
        case TSCR:
        case RSCR:
                iowrite16(value, p->mapbase + reg_offs);
                break;
        default:
                iowrite32(value, p->mapbase + reg_offs);
                break;
        }
}

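/*
 * Update the given bits in CTR and busy-wait (up to 100 x 10 us) until the
 * controller reflects the new values.  Returns 0 on success or -ETIMEDOUT
 * if the hardware did not follow in time.
 */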
static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
                                    u32 clr, u32 set)
{
        u32 mask = clr | set;
        u32 data;
        int k;

        data = sh_msiof_read(p, CTR);
        data &= ~clr;
        data |= set;
        sh_msiof_write(p, CTR, data);

        for (k = 100; k > 0; k--) {
                if ((sh_msiof_read(p, CTR) & mask) == set)
                        break;

                udelay(10);
        }

        return k > 0 ? 0 : -ETIMEDOUT;
}

static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
{
        struct sh_msiof_spi_priv *p = data;

        /* just disable the interrupt and wake up */
        sh_msiof_write(p, IER, 0);
        complete(&p->done);

        return IRQ_HANDLED;
}

static struct {
        unsigned short div;
        unsigned short brdv;
} const sh_msiof_spi_div_table[] = {
        { 1,    SCR_BRDV_DIV_1 },
        { 2,    SCR_BRDV_DIV_2 },
        { 4,    SCR_BRDV_DIV_4 },
        { 8,    SCR_BRDV_DIV_8 },
        { 16,   SCR_BRDV_DIV_16 },
        { 32,   SCR_BRDV_DIV_32 },
};

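/*
 * Program TSCR (and RSCR, unless the controller must always transmit) so
 * that the bit clock approximates spi_hz: the overall divider is split into
 * a prescaler (BRPS, 1-32) and the baud rate generator's division ratio
 * (BRDV) taken from the table above.
 */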
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
                                      unsigned long parent_rate, u32 spi_hz)
{
        unsigned long div = 1024;
        u32 brps, scr;
        size_t k;

        if (!WARN_ON(!spi_hz || !parent_rate))
                div = DIV_ROUND_UP(parent_rate, spi_hz);

        for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
                brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
                /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
                if (sh_msiof_spi_div_table[k].div == 1 && brps > 2)
                        continue;
                if (brps <= 32) /* max of brdv is 32 */
                        break;
        }

        k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);

        scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
        sh_msiof_write(p, TSCR, scr);
        if (!(p->master->flags & SPI_MASTER_MUST_TX))
                sh_msiof_write(p, RSCR, scr);
}

static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
{
        /*
         * DTDL/SYNCDL bit      : p->info->dtdl or p->info->syncdl
         * b'000                : 0
         * b'001                : 100
         * b'010                : 200
         * b'011 (SYNCDL only)  : 300
         * b'101                : 50
         * b'110                : 150
         */
        if (dtdl_or_syncdl % 100)
                return dtdl_or_syncdl / 100 + 5;
        else
                return dtdl_or_syncdl / 100;
}

static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
{
        u32 val;

        if (!p->info)
                return 0;

        /* check if DTDL and SYNCDL are allowed values */
        if (p->info->dtdl > 200 || p->info->syncdl > 300) {
                dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
                return 0;
        }

        /* check if the sum of DTDL and SYNCDL is a multiple of 100 */
        if ((p->info->dtdl + p->info->syncdl) % 100) {
                dev_warn(&p->pdev->dev, "the sum of DTDL and SYNCDL must be a multiple of 100\n");
                return 0;
        }

        val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
        val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;

        return val;
}

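/*
 * Configure TMDR1/RMDR1 and CTR according to the SPI mode bits: clock
 * polarity and phase, sync (chip select) polarity, bit order, and whether
 * the TX data pin is driven low or left high-impedance while transmission
 * is disabled.
 */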
static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
                                      u32 cpol, u32 cpha,
                                      u32 tx_hi_z, u32 lsb_first, u32 cs_high)
{
        u32 tmp;
        int edge;

        /*
         * CPOL CPHA     TSCKIZ RSCKIZ TEDG REDG
         *    0    0         10     10    1    1
         *    0    1         10     10    0    0
         *    1    0         11     11    0    0
         *    1    1         11     11    1    1
         */
        tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
        tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
        tmp |= lsb_first << MDR1_BITLSB_SHIFT;
        tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
        sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
        if (p->master->flags & SPI_MASTER_MUST_TX) {
                /* These bits are reserved if RX needs TX */
                tmp &= ~0x0000ffff;
        }
        sh_msiof_write(p, RMDR1, tmp);

        tmp = 0;
        tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
        tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;

        edge = cpol ^ !cpha;

        tmp |= edge << CTR_TEDG_SHIFT;
        tmp |= edge << CTR_REDG_SHIFT;
        tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
        sh_msiof_write(p, CTR, tmp);
}

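/*
 * Program the bits-per-word and word count for one transfer.  The TX group
 * is masked out (MDR2_GRPMASK1) when there is no transmit buffer and the
 * controller does not require a TX companion to every RX.
 */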
static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
                                       const void *tx_buf, void *rx_buf,
                                       u32 bits, u32 words)
{
        u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);

        if (tx_buf || (p->master->flags & SPI_MASTER_MUST_TX))
                sh_msiof_write(p, TMDR2, dr2);
        else
                sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);

        if (rx_buf)
                sh_msiof_write(p, RMDR2, dr2);
}

static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
        sh_msiof_write(p, STR, sh_msiof_read(p, STR));
}

static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
                                      const void *tx_buf, int words, int fs)
{
        const u8 *buf_8 = tx_buf;
        int k;

        for (k = 0; k < words; k++)
                sh_msiof_write(p, TFDR, buf_8[k] << fs);
}

static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
                                       const void *tx_buf, int words, int fs)
{
        const u16 *buf_16 = tx_buf;
        int k;

        for (k = 0; k < words; k++)
                sh_msiof_write(p, TFDR, buf_16[k] << fs);
}

static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
                                        const void *tx_buf, int words, int fs)
{
        const u16 *buf_16 = tx_buf;
        int k;

        for (k = 0; k < words; k++)
                sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
}

static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
                                       const void *tx_buf, int words, int fs)
{
        const u32 *buf_32 = tx_buf;
        int k;

        for (k = 0; k < words; k++)
                sh_msiof_write(p, TFDR, buf_32[k] << fs);
}

static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
                                        const void *tx_buf, int words, int fs)
{
        const u32 *buf_32 = tx_buf;
        int k;

        for (k = 0; k < words; k++)
                sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
}

static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
                                        const void *tx_buf, int words, int fs)
{
        const u32 *buf_32 = tx_buf;
        int k;

        for (k = 0; k < words; k++)
                sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
}

static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
                                         const void *tx_buf, int words, int fs)
{
        const u32 *buf_32 = tx_buf;
        int k;

        for (k = 0; k < words; k++)
                sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
}

static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
                                     void *rx_buf, int words, int fs)
{
        u8 *buf_8 = rx_buf;
        int k;

        for (k = 0; k < words; k++)
                buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
}

static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
                                      void *rx_buf, int words, int fs)
{
        u16 *buf_16 = rx_buf;
        int k;

        for (k = 0; k < words; k++)
                buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
}

static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
                                       void *rx_buf, int words, int fs)
{
        u16 *buf_16 = rx_buf;
        int k;

        for (k = 0; k < words; k++)
                put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
}

static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
                                      void *rx_buf, int words, int fs)
{
        u32 *buf_32 = rx_buf;
        int k;

        for (k = 0; k < words; k++)
                buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
}

static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
                                       void *rx_buf, int words, int fs)
{
        u32 *buf_32 = rx_buf;
        int k;

        for (k = 0; k < words; k++)
                put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
}

static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
                                       void *rx_buf, int words, int fs)
{
        u32 *buf_32 = rx_buf;
        int k;

        for (k = 0; k < words; k++)
                buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
}

static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
                                       void *rx_buf, int words, int fs)
{
        u32 *buf_32 = rx_buf;
        int k;

        for (k = 0; k < words; k++)
                put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
}

static int sh_msiof_spi_setup(struct spi_device *spi)
{
        struct device_node      *np = spi->master->dev.of_node;
        struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);

        pm_runtime_get_sync(&p->pdev->dev);

        if (!np) {
                /*
                 * Use spi->controller_data for CS (same strategy as spi_gpio),
                 * if any; otherwise let the hardware control CS.
                 */
                spi->cs_gpio = (uintptr_t)spi->controller_data;
        }

        /* Configure pins before deasserting CS */
        sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
                                  !!(spi->mode & SPI_CPHA),
                                  !!(spi->mode & SPI_3WIRE),
                                  !!(spi->mode & SPI_LSB_FIRST),
                                  !!(spi->mode & SPI_CS_HIGH));

        if (spi->cs_gpio >= 0)
                gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));


        pm_runtime_put(&p->pdev->dev);

        return 0;
}

static int sh_msiof_prepare_message(struct spi_master *master,
                                    struct spi_message *msg)
{
        struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
        const struct spi_device *spi = msg->spi;

        /* Configure pins before asserting CS */
        sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
                                  !!(spi->mode & SPI_CPHA),
                                  !!(spi->mode & SPI_3WIRE),
                                  !!(spi->mode & SPI_LSB_FIRST),
                                  !!(spi->mode & SPI_CS_HIGH));
        return 0;
}

static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
{
        int ret;

        /* setup clock and rx/tx signals */
        ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
        if (rx_buf && !ret)
                ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
        if (!ret)
                ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);

        /* start by setting frame bit */
        if (!ret)
                ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);

        return ret;
}

static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
{
        int ret;

        /* shut down frame, rx/tx and clock signals */
        ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
        if (!ret)
                ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
        if (rx_buf && !ret)
                ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
        if (!ret)
                ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);

        return ret;
}

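/*
 * Perform one PIO chunk, limited to the FIFO size: load the TX FIFO, enable
 * the end-of-frame interrupts, start the transfer, wait for the completion
 * raised from the interrupt handler, then drain the RX FIFO.  Returns the
 * number of words transferred, or a negative error code.
 */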
static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
                                  void (*tx_fifo)(struct sh_msiof_spi_priv *,
                                                  const void *, int, int),
                                  void (*rx_fifo)(struct sh_msiof_spi_priv *,
                                                  void *, int, int),
                                  const void *tx_buf, void *rx_buf,
                                  int words, int bits)
{
        int fifo_shift;
        int ret;

        /* limit maximum word transfer to rx/tx fifo size */
        if (tx_buf)
                words = min_t(int, words, p->tx_fifo_size);
        if (rx_buf)
                words = min_t(int, words, p->rx_fifo_size);

        /* the fifo contents need shifting */
        fifo_shift = 32 - bits;

        /* default FIFO watermarks for PIO */
        sh_msiof_write(p, FCTR, 0);

        /* setup msiof transfer mode registers */
        sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
        sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);

        /* write tx fifo */
        if (tx_buf)
                tx_fifo(p, tx_buf, words, fifo_shift);

        reinit_completion(&p->done);

        ret = sh_msiof_spi_start(p, rx_buf);
        if (ret) {
                dev_err(&p->pdev->dev, "failed to start hardware\n");
                goto stop_ier;
        }

        /* wait for tx fifo to be emptied / rx fifo to be filled */
        if (!wait_for_completion_timeout(&p->done, HZ)) {
                dev_err(&p->pdev->dev, "PIO timeout\n");
                ret = -ETIMEDOUT;
                goto stop_reset;
        }

        /* read rx fifo */
        if (rx_buf)
                rx_fifo(p, rx_buf, words, fifo_shift);

        /* clear status bits */
        sh_msiof_reset_str(p);

        ret = sh_msiof_spi_stop(p, rx_buf);
        if (ret) {
                dev_err(&p->pdev->dev, "failed to shut down hardware\n");
                return ret;
        }

        return words;

stop_reset:
        sh_msiof_reset_str(p);
        sh_msiof_spi_stop(p, rx_buf);
stop_ier:
        sh_msiof_write(p, IER, 0);
        return ret;
}

static void sh_msiof_dma_complete(void *arg)
{
        struct sh_msiof_spi_priv *p = arg;

        sh_msiof_write(p, IER, 0);
        complete(&p->done);
}

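/*
 * Perform one DMA transfer through the pre-mapped bounce pages.  The DMA
 * descriptors are prepared and submitted first, since that may fail with
 * -EAGAIN (the caller then falls back to PIO).  FIFO watermarks are set to
 * one stage and the transfer is always done in 32-bit words.
 */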
static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
                             void *rx, unsigned int len)
{
        u32 ier_bits = 0;
        struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
        dma_cookie_t cookie;
        int ret;

        /* First prepare and submit the DMA request(s), as this may fail */
        if (rx) {
                ier_bits |= IER_RDREQE | IER_RDMAE;
                desc_rx = dmaengine_prep_slave_single(p->master->dma_rx,
                                        p->rx_dma_addr, len, DMA_FROM_DEVICE,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (!desc_rx)
                        return -EAGAIN;

                desc_rx->callback = sh_msiof_dma_complete;
                desc_rx->callback_param = p;
                cookie = dmaengine_submit(desc_rx);
                if (dma_submit_error(cookie))
                        return cookie;
        }

        if (tx) {
                ier_bits |= IER_TDREQE | IER_TDMAE;
                dma_sync_single_for_device(p->master->dma_tx->device->dev,
                                           p->tx_dma_addr, len, DMA_TO_DEVICE);
                desc_tx = dmaengine_prep_slave_single(p->master->dma_tx,
                                        p->tx_dma_addr, len, DMA_TO_DEVICE,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (!desc_tx) {
                        ret = -EAGAIN;
                        goto no_dma_tx;
                }

                if (rx) {
                        /* No callback */
                        desc_tx->callback = NULL;
                } else {
                        desc_tx->callback = sh_msiof_dma_complete;
                        desc_tx->callback_param = p;
                }
                cookie = dmaengine_submit(desc_tx);
                if (dma_submit_error(cookie)) {
                        ret = cookie;
                        goto no_dma_tx;
                }
        }

        /* 1 stage FIFO watermarks for DMA */
        sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);

        /* setup msiof transfer mode registers (32-bit words) */
        sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);

        sh_msiof_write(p, IER, ier_bits);

        reinit_completion(&p->done);

        /* Now start DMA */
        if (rx)
                dma_async_issue_pending(p->master->dma_rx);
        if (tx)
                dma_async_issue_pending(p->master->dma_tx);

        ret = sh_msiof_spi_start(p, rx);
        if (ret) {
                dev_err(&p->pdev->dev, "failed to start hardware\n");
                goto stop_dma;
        }

        /* wait for tx fifo to be emptied / rx fifo to be filled */
        if (!wait_for_completion_timeout(&p->done, HZ)) {
                dev_err(&p->pdev->dev, "DMA timeout\n");
                ret = -ETIMEDOUT;
                goto stop_reset;
        }

        /* clear status bits */
        sh_msiof_reset_str(p);

        ret = sh_msiof_spi_stop(p, rx);
        if (ret) {
                dev_err(&p->pdev->dev, "failed to shut down hardware\n");
                return ret;
        }

        if (rx)
                dma_sync_single_for_cpu(p->master->dma_rx->device->dev,
                                        p->rx_dma_addr, len,
                                        DMA_FROM_DEVICE);

        return 0;

stop_reset:
        sh_msiof_reset_str(p);
        sh_msiof_spi_stop(p, rx);
stop_dma:
        if (tx)
                dmaengine_terminate_all(p->master->dma_tx);
no_dma_tx:
        if (rx)
                dmaengine_terminate_all(p->master->dma_rx);
        sh_msiof_write(p, IER, 0);
        return ret;
}

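/*
 * Helpers used to pack data into, and unpack data from, the 32-bit DMA
 * bounce pages: byte swapping for 8-bit words, half-word swapping for
 * 16-bit words, and a plain copy for 32-bit words.
 */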
static void copy_bswap32(u32 *dst, const u32 *src, unsigned int words)
{
        /* src or dst can be unaligned, but not both */
        if ((unsigned long)src & 3) {
                while (words--) {
                        *dst++ = swab32(get_unaligned(src));
                        src++;
                }
        } else if ((unsigned long)dst & 3) {
                while (words--) {
                        put_unaligned(swab32(*src++), dst);
                        dst++;
                }
        } else {
                while (words--)
                        *dst++ = swab32(*src++);
        }
}

static void copy_wswap32(u32 *dst, const u32 *src, unsigned int words)
{
        /* src or dst can be unaligned, but not both */
        if ((unsigned long)src & 3) {
                while (words--) {
                        *dst++ = swahw32(get_unaligned(src));
                        src++;
                }
        } else if ((unsigned long)dst & 3) {
                while (words--) {
                        put_unaligned(swahw32(*src++), dst);
                        dst++;
                }
        } else {
                while (words--)
                        *dst++ = swahw32(*src++);
        }
}

static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
{
        memcpy(dst, src, words * 4);
}

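/*
 * Transfer one spi_transfer: program the bit clock, use DMA via the bounce
 * pages for transfers larger than 15 bytes when a DMA channel is available
 * (falling back to PIO on -EAGAIN), and otherwise transfer in FIFO-sized
 * chunks with the FIFO accessors matching the word size and alignment.
 */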
static int sh_msiof_transfer_one(struct spi_master *master,
                                 struct spi_device *spi,
                                 struct spi_transfer *t)
{
        struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
        void (*copy32)(u32 *, const u32 *, unsigned int);
        void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
        void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
        const void *tx_buf = t->tx_buf;
        void *rx_buf = t->rx_buf;
        unsigned int len = t->len;
        unsigned int bits = t->bits_per_word;
        unsigned int bytes_per_word;
        unsigned int words;
        int n;
        bool swab;
        int ret;

        /* setup clocks (clock already enabled in chipselect()) */
        sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);

        while (master->dma_tx && len > 15) {
                /*
                 * DMA supports 32-bit words only, hence pack 8-bit and
                 * 16-bit words with byte and half-word swapping,
                 * respectively.
                 */
                unsigned int l = 0;

                if (tx_buf)
                        l = min(len, p->tx_fifo_size * 4);
                if (rx_buf)
                        l = min(len, p->rx_fifo_size * 4);

                if (bits <= 8) {
                        if (l & 3)
                                break;
                        copy32 = copy_bswap32;
                } else if (bits <= 16) {
                        if (l & 1)
                                break;
                        copy32 = copy_wswap32;
                } else {
                        copy32 = copy_plain32;
                }

                if (tx_buf)
                        copy32(p->tx_dma_page, tx_buf, l / 4);

                ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
                if (ret == -EAGAIN) {
                        pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
                                     dev_driver_string(&p->pdev->dev),
                                     dev_name(&p->pdev->dev));
                        break;
                }
                if (ret)
                        return ret;

                if (rx_buf) {
                        copy32(rx_buf, p->rx_dma_page, l / 4);
                        rx_buf += l;
                }
                if (tx_buf)
                        tx_buf += l;

                len -= l;
                if (!len)
                        return 0;
        }

        if (bits <= 8 && len > 15 && !(len & 3)) {
                bits = 32;
                swab = true;
        } else {
                swab = false;
        }

        /* setup bytes per word and fifo read/write functions */
        if (bits <= 8) {
                bytes_per_word = 1;
                tx_fifo = sh_msiof_spi_write_fifo_8;
                rx_fifo = sh_msiof_spi_read_fifo_8;
        } else if (bits <= 16) {
                bytes_per_word = 2;
                if ((unsigned long)tx_buf & 0x01)
                        tx_fifo = sh_msiof_spi_write_fifo_16u;
                else
                        tx_fifo = sh_msiof_spi_write_fifo_16;

                if ((unsigned long)rx_buf & 0x01)
                        rx_fifo = sh_msiof_spi_read_fifo_16u;
                else
                        rx_fifo = sh_msiof_spi_read_fifo_16;
        } else if (swab) {
                bytes_per_word = 4;
                if ((unsigned long)tx_buf & 0x03)
                        tx_fifo = sh_msiof_spi_write_fifo_s32u;
                else
                        tx_fifo = sh_msiof_spi_write_fifo_s32;

                if ((unsigned long)rx_buf & 0x03)
                        rx_fifo = sh_msiof_spi_read_fifo_s32u;
                else
                        rx_fifo = sh_msiof_spi_read_fifo_s32;
        } else {
                bytes_per_word = 4;
                if ((unsigned long)tx_buf & 0x03)
                        tx_fifo = sh_msiof_spi_write_fifo_32u;
                else
                        tx_fifo = sh_msiof_spi_write_fifo_32;

                if ((unsigned long)rx_buf & 0x03)
                        rx_fifo = sh_msiof_spi_read_fifo_32u;
                else
                        rx_fifo = sh_msiof_spi_read_fifo_32;
        }

        /* transfer in fifo sized chunks */
        words = len / bytes_per_word;

        while (words > 0) {
                n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, tx_buf, rx_buf,
                                           words, bits);
                if (n < 0)
                        return n;

                if (tx_buf)
                        tx_buf += n * bytes_per_word;
                if (rx_buf)
                        rx_buf += n * bytes_per_word;
                words -= n;
        }

        return 0;
}

static const struct sh_msiof_chipdata sh_data = {
        .tx_fifo_size = 64,
        .rx_fifo_size = 64,
        .master_flags = 0,
};

static const struct sh_msiof_chipdata r8a779x_data = {
        .tx_fifo_size = 64,
        .rx_fifo_size = 64,
        .master_flags = SPI_MASTER_MUST_TX,
};

static const struct of_device_id sh_msiof_match[] = {
        { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
        { .compatible = "renesas,msiof-r8a7790",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7791",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7792",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7793",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7794",   .data = &r8a779x_data },
        { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7796",   .data = &r8a779x_data },
        { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data },
        { .compatible = "renesas,sh-msiof",        .data = &sh_data }, /* Deprecated */
        {},
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);

#ifdef CONFIG_OF
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
        struct sh_msiof_spi_info *info;
        struct device_node *np = dev->of_node;
        u32 num_cs = 1;

        info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
        if (!info)
                return NULL;

        /* Parse the MSIOF properties */
        of_property_read_u32(np, "num-cs", &num_cs);
        of_property_read_u32(np, "renesas,tx-fifo-size",
                                        &info->tx_fifo_override);
        of_property_read_u32(np, "renesas,rx-fifo-size",
                                        &info->rx_fifo_override);
        of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
        of_property_read_u32(np, "renesas,syncdl", &info->syncdl);

        info->num_chipselect = num_cs;

        return info;
}
#else
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
        return NULL;
}
#endif

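/*
 * Request a DMA slave channel (via DT or the legacy shdma filter) and
 * configure it for 32-bit accesses to the TX or RX FIFO data register.
 * Returns NULL when no usable channel is available.
 */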
static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
        enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;
        struct dma_slave_config cfg;
        int ret;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
                                (void *)(unsigned long)id, dev,
                                dir == DMA_MEM_TO_DEV ? "tx" : "rx");
        if (!chan) {
                dev_warn(dev, "dma_request_slave_channel_compat failed\n");
                return NULL;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.direction = dir;
        if (dir == DMA_MEM_TO_DEV) {
                cfg.dst_addr = port_addr;
                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        } else {
                cfg.src_addr = port_addr;
                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        }

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret) {
                dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
                dma_release_channel(chan);
                return NULL;
        }

        return chan;
}

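/*
 * Set up optional DMA support: request TX and RX channels, then allocate
 * and map one GFP_DMA bounce page per direction.  Returning an error here
 * only means the driver will operate in PIO mode.
 */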
static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
{
        struct platform_device *pdev = p->pdev;
        struct device *dev = &pdev->dev;
        const struct sh_msiof_spi_info *info = dev_get_platdata(dev);
        unsigned int dma_tx_id, dma_rx_id;
        const struct resource *res;
        struct spi_master *master;
        struct device *tx_dev, *rx_dev;

        if (dev->of_node) {
                /* In the OF case we will get the slave IDs from the DT */
                dma_tx_id = 0;
                dma_rx_id = 0;
        } else if (info && info->dma_tx_id && info->dma_rx_id) {
                dma_tx_id = info->dma_tx_id;
                dma_rx_id = info->dma_rx_id;
        } else {
                /* No DMA configuration provided; run in PIO mode (not an error) */
                return 0;
        }

        /* The DMA engine uses the second register set, if present */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res)
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        master = p->master;
        master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
                                                   dma_tx_id,
                                                   res->start + TFDR);
        if (!master->dma_tx)
                return -ENODEV;

        master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
                                                   dma_rx_id,
                                                   res->start + RFDR);
        if (!master->dma_rx)
                goto free_tx_chan;

        p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
        if (!p->tx_dma_page)
                goto free_rx_chan;

        p->rx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
        if (!p->rx_dma_page)
                goto free_tx_page;

        tx_dev = master->dma_tx->device->dev;
        p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
                                        DMA_TO_DEVICE);
        if (dma_mapping_error(tx_dev, p->tx_dma_addr))
                goto free_rx_page;

        rx_dev = master->dma_rx->device->dev;
        p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
                                        DMA_FROM_DEVICE);
        if (dma_mapping_error(rx_dev, p->rx_dma_addr))
                goto unmap_tx_page;

        dev_info(dev, "DMA available");
        return 0;

unmap_tx_page:
        dma_unmap_single(tx_dev, p->tx_dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
free_rx_page:
        free_page((unsigned long)p->rx_dma_page);
free_tx_page:
        free_page((unsigned long)p->tx_dma_page);
free_rx_chan:
        dma_release_channel(master->dma_rx);
free_tx_chan:
        dma_release_channel(master->dma_tx);
        master->dma_tx = NULL;
        return -ENODEV;
}

static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
{
        struct spi_master *master = p->master;
        struct device *dev;

        if (!master->dma_tx)
                return;

        dev = &p->pdev->dev;
        dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr,
                         PAGE_SIZE, DMA_FROM_DEVICE);
        dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr,
                         PAGE_SIZE, DMA_TO_DEVICE);
        free_page((unsigned long)p->rx_dma_page);
        free_page((unsigned long)p->tx_dma_page);
        dma_release_channel(master->dma_rx);
        dma_release_channel(master->dma_tx);
}

static int sh_msiof_spi_probe(struct platform_device *pdev)
{
        struct resource *r;
        struct spi_master *master;
        const struct sh_msiof_chipdata *chipdata;
        const struct of_device_id *of_id;
        struct sh_msiof_spi_priv *p;
        int i;
        int ret;

        master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv));
        if (master == NULL) {
                dev_err(&pdev->dev, "failed to allocate spi master\n");
                return -ENOMEM;
        }

        p = spi_master_get_devdata(master);

        platform_set_drvdata(pdev, p);
        p->master = master;

        of_id = of_match_device(sh_msiof_match, &pdev->dev);
        if (of_id) {
                chipdata = of_id->data;
                p->info = sh_msiof_spi_parse_dt(&pdev->dev);
        } else {
                chipdata = (const void *)pdev->id_entry->driver_data;
                p->info = dev_get_platdata(&pdev->dev);
        }

        if (!p->info) {
                dev_err(&pdev->dev, "failed to obtain device info\n");
                ret = -ENXIO;
                goto err1;
        }

        init_completion(&p->done);

        p->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(p->clk)) {
                dev_err(&pdev->dev, "cannot get clock\n");
                ret = PTR_ERR(p->clk);
                goto err1;
        }

        i = platform_get_irq(pdev, 0);
        if (i < 0) {
                dev_err(&pdev->dev, "cannot get platform IRQ\n");
                ret = -ENOENT;
                goto err1;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        p->mapbase = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(p->mapbase)) {
                ret = PTR_ERR(p->mapbase);
                goto err1;
        }

        ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
                               dev_name(&pdev->dev), p);
        if (ret) {
                dev_err(&pdev->dev, "unable to request irq\n");
                goto err1;
        }

        p->pdev = pdev;
        pm_runtime_enable(&pdev->dev);

        /* Platform data may override FIFO sizes */
        p->tx_fifo_size = chipdata->tx_fifo_size;
        p->rx_fifo_size = chipdata->rx_fifo_size;
        if (p->info->tx_fifo_override)
                p->tx_fifo_size = p->info->tx_fifo_override;
        if (p->info->rx_fifo_override)
                p->rx_fifo_size = p->info->rx_fifo_override;

        /* init master code */
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
        master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
        master->flags = chipdata->master_flags;
        master->bus_num = pdev->id;
        master->dev.of_node = pdev->dev.of_node;
        master->num_chipselect = p->info->num_chipselect;
        master->setup = sh_msiof_spi_setup;
        master->prepare_message = sh_msiof_prepare_message;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
        master->auto_runtime_pm = true;
        master->transfer_one = sh_msiof_transfer_one;

        ret = sh_msiof_request_dma(p);
        if (ret < 0)
                dev_warn(&pdev->dev, "DMA not available, using PIO\n");

        ret = devm_spi_register_master(&pdev->dev, master);
        if (ret < 0) {
                dev_err(&pdev->dev, "spi_register_master error.\n");
                goto err2;
        }

        return 0;

 err2:
        sh_msiof_release_dma(p);
        pm_runtime_disable(&pdev->dev);
 err1:
        spi_master_put(master);
        return ret;
}

static int sh_msiof_spi_remove(struct platform_device *pdev)
{
        struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);

        sh_msiof_release_dma(p);
        pm_runtime_disable(&pdev->dev);
        return 0;
}

static const struct platform_device_id spi_driver_ids[] = {
        { "spi_sh_msiof",       (kernel_ulong_t)&sh_data },
        {},
};
MODULE_DEVICE_TABLE(platform, spi_driver_ids);

static struct platform_driver sh_msiof_spi_drv = {
        .probe          = sh_msiof_spi_probe,
        .remove         = sh_msiof_spi_remove,
        .id_table       = spi_driver_ids,
        .driver         = {
                .name           = "spi_sh_msiof",
                .of_match_table = of_match_ptr(sh_msiof_match),
        },
};
module_platform_driver(sh_msiof_spi_drv);

MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_sh_msiof");