linux/drivers/spi/spi-sh-msiof.c
   1/*
   2 * SuperH MSIOF SPI Master Interface
   3 *
   4 * Copyright (c) 2009 Magnus Damm
   5 * Copyright (C) 2014 Renesas Electronics Corporation
   6 * Copyright (C) 2014-2017 Glider bvba
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 *
  12 */
  13
  14#include <linux/bitmap.h>
  15#include <linux/clk.h>
  16#include <linux/completion.h>
  17#include <linux/delay.h>
  18#include <linux/dma-mapping.h>
  19#include <linux/dmaengine.h>
  20#include <linux/err.h>
  21#include <linux/gpio.h>
  22#include <linux/interrupt.h>
  23#include <linux/io.h>
  24#include <linux/kernel.h>
  25#include <linux/module.h>
  26#include <linux/of.h>
  27#include <linux/of_device.h>
  28#include <linux/platform_device.h>
  29#include <linux/pm_runtime.h>
  30#include <linux/sh_dma.h>
  31
  32#include <linux/spi/sh_msiof.h>
  33#include <linux/spi/spi.h>
  34
  35#include <asm/unaligned.h>
  36
  37struct sh_msiof_chipdata {
  38        u16 tx_fifo_size;
  39        u16 rx_fifo_size;
  40        u16 master_flags;
  41};
  42
  43struct sh_msiof_spi_priv {
  44        struct spi_master *master;
  45        void __iomem *mapbase;
  46        struct clk *clk;
  47        struct platform_device *pdev;
  48        struct sh_msiof_spi_info *info;
  49        struct completion done;
  50        unsigned int tx_fifo_size;
  51        unsigned int rx_fifo_size;
  52        void *tx_dma_page;
  53        void *rx_dma_page;
  54        dma_addr_t tx_dma_addr;
  55        dma_addr_t rx_dma_addr;
  56        bool slave_aborted;
  57};
  58
  59#define TMDR1   0x00    /* Transmit Mode Register 1 */
  60#define TMDR2   0x04    /* Transmit Mode Register 2 */
  61#define TMDR3   0x08    /* Transmit Mode Register 3 */
  62#define RMDR1   0x10    /* Receive Mode Register 1 */
  63#define RMDR2   0x14    /* Receive Mode Register 2 */
  64#define RMDR3   0x18    /* Receive Mode Register 3 */
  65#define TSCR    0x20    /* Transmit Clock Select Register */
  66#define RSCR    0x22    /* Receive Clock Select Register (SH, A1, APE6) */
  67#define CTR     0x28    /* Control Register */
  68#define FCTR    0x30    /* FIFO Control Register */
  69#define STR     0x40    /* Status Register */
  70#define IER     0x44    /* Interrupt Enable Register */
  71#define TDR1    0x48    /* Transmit Control Data Register 1 (SH, A1) */
  72#define TDR2    0x4c    /* Transmit Control Data Register 2 (SH, A1) */
  73#define TFDR    0x50    /* Transmit FIFO Data Register */
  74#define RDR1    0x58    /* Receive Control Data Register 1 (SH, A1) */
  75#define RDR2    0x5c    /* Receive Control Data Register 2 (SH, A1) */
  76#define RFDR    0x60    /* Receive FIFO Data Register */
  77
  78/* TMDR1 and RMDR1 */
  79#define MDR1_TRMD        0x80000000 /* Transfer Mode (1 = Master mode) */
  80#define MDR1_SYNCMD_MASK 0x30000000 /* SYNC Mode */
  81#define MDR1_SYNCMD_SPI  0x20000000 /*   Level mode/SPI */
  82#define MDR1_SYNCMD_LR   0x30000000 /*   L/R mode */
  83#define MDR1_SYNCAC_SHIFT        25 /* Sync Polarity (1 = Active-low) */
  84#define MDR1_BITLSB_SHIFT        24 /* MSB/LSB First (1 = LSB first) */
  85#define MDR1_DTDL_SHIFT          20 /* Data Pin Bit Delay for MSIOF_SYNC */
  86#define MDR1_SYNCDL_SHIFT        16 /* Frame Sync Signal Timing Delay */
  87#define MDR1_FLD_MASK    0x0000000c /* Frame Sync Signal Interval (0-3) */
  88#define MDR1_FLD_SHIFT            2
  89#define MDR1_XXSTP       0x00000001 /* Transmission/Reception Stop on FIFO */
  90/* TMDR1 */
  91#define TMDR1_PCON       0x40000000 /* Transfer Signal Connection */
  92
  93/* TMDR2 and RMDR2 */
  94#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
   95#define MDR2_WDLEN1(i)  (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
  96#define MDR2_GRPMASK1   0x00000001 /* Group Output Mask 1 (SH, A1) */
  97
  98/* TSCR and RSCR */
  99#define SCR_BRPS_MASK       0x1f00 /* Prescaler Setting (1-32) */
 100#define SCR_BRPS(i)     (((i) - 1) << 8)
 101#define SCR_BRDV_MASK       0x0007 /* Baud Rate Generator's Division Ratio */
 102#define SCR_BRDV_DIV_2      0x0000
 103#define SCR_BRDV_DIV_4      0x0001
 104#define SCR_BRDV_DIV_8      0x0002
 105#define SCR_BRDV_DIV_16     0x0003
 106#define SCR_BRDV_DIV_32     0x0004
 107#define SCR_BRDV_DIV_1      0x0007
 108
 109/* CTR */
 110#define CTR_TSCKIZ_MASK 0xc0000000 /* Transmit Clock I/O Polarity Select */
 111#define CTR_TSCKIZ_SCK  0x80000000 /*   Disable SCK when TX disabled */
 112#define CTR_TSCKIZ_POL_SHIFT    30 /*   Transmit Clock Polarity */
 113#define CTR_RSCKIZ_MASK 0x30000000 /* Receive Clock Polarity Select */
 114#define CTR_RSCKIZ_SCK  0x20000000 /*   Must match CTR_TSCKIZ_SCK */
 115#define CTR_RSCKIZ_POL_SHIFT    28 /*   Receive Clock Polarity */
 116#define CTR_TEDG_SHIFT          27 /* Transmit Timing (1 = falling edge) */
 117#define CTR_REDG_SHIFT          26 /* Receive Timing (1 = falling edge) */
 118#define CTR_TXDIZ_MASK  0x00c00000 /* Pin Output When TX is Disabled */
 119#define CTR_TXDIZ_LOW   0x00000000 /*   0 */
 120#define CTR_TXDIZ_HIGH  0x00400000 /*   1 */
 121#define CTR_TXDIZ_HIZ   0x00800000 /*   High-impedance */
 122#define CTR_TSCKE       0x00008000 /* Transmit Serial Clock Output Enable */
 123#define CTR_TFSE        0x00004000 /* Transmit Frame Sync Signal Output Enable */
 124#define CTR_TXE         0x00000200 /* Transmit Enable */
 125#define CTR_RXE         0x00000100 /* Receive Enable */
 126
 127/* FCTR */
 128#define FCTR_TFWM_MASK  0xe0000000 /* Transmit FIFO Watermark */
 129#define FCTR_TFWM_64    0x00000000 /*  Transfer Request when 64 empty stages */
 130#define FCTR_TFWM_32    0x20000000 /*  Transfer Request when 32 empty stages */
 131#define FCTR_TFWM_24    0x40000000 /*  Transfer Request when 24 empty stages */
 132#define FCTR_TFWM_16    0x60000000 /*  Transfer Request when 16 empty stages */
 133#define FCTR_TFWM_12    0x80000000 /*  Transfer Request when 12 empty stages */
 134#define FCTR_TFWM_8     0xa0000000 /*  Transfer Request when 8 empty stages */
 135#define FCTR_TFWM_4     0xc0000000 /*  Transfer Request when 4 empty stages */
 136#define FCTR_TFWM_1     0xe0000000 /*  Transfer Request when 1 empty stage */
 137#define FCTR_TFUA_MASK  0x07f00000 /* Transmit FIFO Usable Area */
 138#define FCTR_TFUA_SHIFT         20
 139#define FCTR_TFUA(i)    ((i) << FCTR_TFUA_SHIFT)
 140#define FCTR_RFWM_MASK  0x0000e000 /* Receive FIFO Watermark */
  141#define FCTR_RFWM_1     0x00000000 /*  Transfer Request when 1 valid stage */
 142#define FCTR_RFWM_4     0x00002000 /*  Transfer Request when 4 valid stages */
 143#define FCTR_RFWM_8     0x00004000 /*  Transfer Request when 8 valid stages */
 144#define FCTR_RFWM_16    0x00006000 /*  Transfer Request when 16 valid stages */
 145#define FCTR_RFWM_32    0x00008000 /*  Transfer Request when 32 valid stages */
 146#define FCTR_RFWM_64    0x0000a000 /*  Transfer Request when 64 valid stages */
 147#define FCTR_RFWM_128   0x0000c000 /*  Transfer Request when 128 valid stages */
 148#define FCTR_RFWM_256   0x0000e000 /*  Transfer Request when 256 valid stages */
 149#define FCTR_RFUA_MASK  0x00001ff0 /* Receive FIFO Usable Area (0x40 = full) */
 150#define FCTR_RFUA_SHIFT          4
 151#define FCTR_RFUA(i)    ((i) << FCTR_RFUA_SHIFT)
 152
 153/* STR */
 154#define STR_TFEMP       0x20000000 /* Transmit FIFO Empty */
 155#define STR_TDREQ       0x10000000 /* Transmit Data Transfer Request */
 156#define STR_TEOF        0x00800000 /* Frame Transmission End */
 157#define STR_TFSERR      0x00200000 /* Transmit Frame Synchronization Error */
 158#define STR_TFOVF       0x00100000 /* Transmit FIFO Overflow */
 159#define STR_TFUDF       0x00080000 /* Transmit FIFO Underflow */
 160#define STR_RFFUL       0x00002000 /* Receive FIFO Full */
 161#define STR_RDREQ       0x00001000 /* Receive Data Transfer Request */
 162#define STR_REOF        0x00000080 /* Frame Reception End */
 163#define STR_RFSERR      0x00000020 /* Receive Frame Synchronization Error */
 164#define STR_RFUDF       0x00000010 /* Receive FIFO Underflow */
 165#define STR_RFOVF       0x00000008 /* Receive FIFO Overflow */
 166
 167/* IER */
 168#define IER_TDMAE       0x80000000 /* Transmit Data DMA Transfer Req. Enable */
 169#define IER_TFEMPE      0x20000000 /* Transmit FIFO Empty Enable */
 170#define IER_TDREQE      0x10000000 /* Transmit Data Transfer Request Enable */
 171#define IER_TEOFE       0x00800000 /* Frame Transmission End Enable */
 172#define IER_TFSERRE     0x00200000 /* Transmit Frame Sync Error Enable */
 173#define IER_TFOVFE      0x00100000 /* Transmit FIFO Overflow Enable */
 174#define IER_TFUDFE      0x00080000 /* Transmit FIFO Underflow Enable */
 175#define IER_RDMAE       0x00008000 /* Receive Data DMA Transfer Req. Enable */
 176#define IER_RFFULE      0x00002000 /* Receive FIFO Full Enable */
 177#define IER_RDREQE      0x00001000 /* Receive Data Transfer Request Enable */
 178#define IER_REOFE       0x00000080 /* Frame Reception End Enable */
 179#define IER_RFSERRE     0x00000020 /* Receive Frame Sync Error Enable */
 180#define IER_RFUDFE      0x00000010 /* Receive FIFO Underflow Enable */
 181#define IER_RFOVFE      0x00000008 /* Receive FIFO Overflow Enable */
 182
 183
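     /*
      * Register access helpers: TSCR and RSCR are accessed as 16-bit
      * registers, all other MSIOF registers as 32-bit ones.
      */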
 184static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
 185{
 186        switch (reg_offs) {
 187        case TSCR:
 188        case RSCR:
 189                return ioread16(p->mapbase + reg_offs);
 190        default:
 191                return ioread32(p->mapbase + reg_offs);
 192        }
 193}
 194
 195static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
 196                           u32 value)
 197{
 198        switch (reg_offs) {
 199        case TSCR:
 200        case RSCR:
 201                iowrite16(value, p->mapbase + reg_offs);
 202                break;
 203        default:
 204                iowrite32(value, p->mapbase + reg_offs);
 205                break;
 206        }
 207}
 208
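     /*
      * Set/clear bits in CTR and poll (up to 100 x 10 us) until the register
      * reads back the requested value; used for the TSCKE/TFSE/TXE/RXE
      * enables, which the hardware may take some time to reflect.
      */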
 209static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
 210                                    u32 clr, u32 set)
 211{
 212        u32 mask = clr | set;
 213        u32 data;
 214        int k;
 215
 216        data = sh_msiof_read(p, CTR);
 217        data &= ~clr;
 218        data |= set;
 219        sh_msiof_write(p, CTR, data);
 220
 221        for (k = 100; k > 0; k--) {
 222                if ((sh_msiof_read(p, CTR) & mask) == set)
 223                        break;
 224
 225                udelay(10);
 226        }
 227
 228        return k > 0 ? 0 : -ETIMEDOUT;
 229}
 230
 231static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
 232{
 233        struct sh_msiof_spi_priv *p = data;
 234
 235        /* just disable the interrupt and wake up */
 236        sh_msiof_write(p, IER, 0);
 237        complete(&p->done);
 238
 239        return IRQ_HANDLED;
 240}
 241
 242static struct {
 243        unsigned short div;
 244        unsigned short brdv;
 245} const sh_msiof_spi_div_table[] = {
 246        { 1,    SCR_BRDV_DIV_1 },
 247        { 2,    SCR_BRDV_DIV_2 },
 248        { 4,    SCR_BRDV_DIV_4 },
 249        { 8,    SCR_BRDV_DIV_8 },
 250        { 16,   SCR_BRDV_DIV_16 },
 251        { 32,   SCR_BRDV_DIV_32 },
 252};
 253
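     /*
      * The transfer clock is parent_rate / (BRPS * BRDV); the loop below
      * picks the smallest BRDV divider for which the BRPS prescaler fits in
      * 1..32.  E.g. (assumed figures) parent_rate = 66 MHz and spi_hz = 1 MHz
      * give div = 66, served by BRDV = 1/4 and BRPS = 17 (66 MHz / 68).
      */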
 254static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
 255                                      unsigned long parent_rate, u32 spi_hz)
 256{
 257        unsigned long div = 1024;
 258        u32 brps, scr;
 259        size_t k;
 260
 261        if (!WARN_ON(!spi_hz || !parent_rate))
 262                div = DIV_ROUND_UP(parent_rate, spi_hz);
 263
 264        for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
 265                brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
  266                /* SCR_BRDV_DIV_1 is valid only if BRPS is 1 or 2 */
 267                if (sh_msiof_spi_div_table[k].div == 1 && brps > 2)
 268                        continue;
  269                if (brps <= 32) /* max of BRPS is 32 */
 270                        break;
 271        }
 272
 273        k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
 274
 275        scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
 276        sh_msiof_write(p, TSCR, scr);
 277        if (!(p->master->flags & SPI_MASTER_MUST_TX))
 278                sh_msiof_write(p, RSCR, scr);
 279}
 280
 281static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
 282{
 283        /*
 284         * DTDL/SYNCDL bit      : p->info->dtdl or p->info->syncdl
 285         * b'000                : 0
 286         * b'001                : 100
 287         * b'010                : 200
 288         * b'011 (SYNCDL only)  : 300
 289         * b'101                : 50
 290         * b'110                : 150
 291         */
 292        if (dtdl_or_syncdl % 100)
 293                return dtdl_or_syncdl / 100 + 5;
 294        else
 295                return dtdl_or_syncdl / 100;
 296}
 297
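     /*
      * DTDL and SYNCDL come from platform data / DT in hundredths of a clock
      * cycle (0, 50, 100, 150, 200, ...); their sum must be a whole number of
      * cycles, i.e. a multiple of 100.
      */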
 298static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
 299{
 300        u32 val;
 301
 302        if (!p->info)
 303                return 0;
 304
  305        /* check if DTDL and SYNCDL are allowed values */
 306        if (p->info->dtdl > 200 || p->info->syncdl > 300) {
 307                dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
 308                return 0;
 309        }
 310
  311        /* check if the sum of DTDL and SYNCDL is a whole number of clock cycles */
 312        if ((p->info->dtdl + p->info->syncdl) % 100) {
  313                dev_warn(&p->pdev->dev, "the sum of DTDL and SYNCDL must be a multiple of 100\n");
 314                return 0;
 315        }
 316
 317        val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
 318        val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
 319
 320        return val;
 321}
 322
 323static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
 324                                      u32 cpol, u32 cpha,
 325                                      u32 tx_hi_z, u32 lsb_first, u32 cs_high)
 326{
 327        u32 tmp;
 328        int edge;
 329
 330        /*
 331         * CPOL CPHA     TSCKIZ RSCKIZ TEDG REDG
 332         *    0    0         10     10    1    1
 333         *    0    1         10     10    0    0
 334         *    1    0         11     11    0    0
 335         *    1    1         11     11    1    1
 336         */
 337        tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
 338        tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
 339        tmp |= lsb_first << MDR1_BITLSB_SHIFT;
 340        tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
 341        if (spi_controller_is_slave(p->master))
 342                sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
 343        else
 344                sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
 345        if (p->master->flags & SPI_MASTER_MUST_TX) {
 346                /* These bits are reserved if RX needs TX */
 347                tmp &= ~0x0000ffff;
 348        }
 349        sh_msiof_write(p, RMDR1, tmp);
 350
 351        tmp = 0;
 352        tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
 353        tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
 354
 355        edge = cpol ^ !cpha;
 356
 357        tmp |= edge << CTR_TEDG_SHIFT;
 358        tmp |= edge << CTR_REDG_SHIFT;
 359        tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
 360        sh_msiof_write(p, CTR, tmp);
 361}
 362
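     /*
      * TMDR2/RMDR2 hold the word size (BITLEN1) and the number of words per
      * frame (WDLEN1); GRPMASK1 masks the transmit group output when there is
      * no TX data and the controller does not require TX.
      */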
 363static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
 364                                       const void *tx_buf, void *rx_buf,
 365                                       u32 bits, u32 words)
 366{
 367        u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
 368
 369        if (tx_buf || (p->master->flags & SPI_MASTER_MUST_TX))
 370                sh_msiof_write(p, TMDR2, dr2);
 371        else
 372                sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
 373
 374        if (rx_buf)
 375                sh_msiof_write(p, RMDR2, dr2);
 376}
 377
 378static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 379{
 380        sh_msiof_write(p, STR, sh_msiof_read(p, STR));
 381}
 382
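     /*
      * FIFO access helpers: one variant per word size (8/16/32 bits), "u"
      * variants for unaligned buffers, and "s" variants that byte-swap 32-bit
      * words (used when 8-bit data is transferred as packed 32-bit words).
      */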
 383static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
 384                                      const void *tx_buf, int words, int fs)
 385{
 386        const u8 *buf_8 = tx_buf;
 387        int k;
 388
 389        for (k = 0; k < words; k++)
 390                sh_msiof_write(p, TFDR, buf_8[k] << fs);
 391}
 392
 393static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
 394                                       const void *tx_buf, int words, int fs)
 395{
 396        const u16 *buf_16 = tx_buf;
 397        int k;
 398
 399        for (k = 0; k < words; k++)
 400                sh_msiof_write(p, TFDR, buf_16[k] << fs);
 401}
 402
 403static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
 404                                        const void *tx_buf, int words, int fs)
 405{
 406        const u16 *buf_16 = tx_buf;
 407        int k;
 408
 409        for (k = 0; k < words; k++)
 410                sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
 411}
 412
 413static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
 414                                       const void *tx_buf, int words, int fs)
 415{
 416        const u32 *buf_32 = tx_buf;
 417        int k;
 418
 419        for (k = 0; k < words; k++)
 420                sh_msiof_write(p, TFDR, buf_32[k] << fs);
 421}
 422
 423static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
 424                                        const void *tx_buf, int words, int fs)
 425{
 426        const u32 *buf_32 = tx_buf;
 427        int k;
 428
 429        for (k = 0; k < words; k++)
 430                sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
 431}
 432
 433static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
 434                                        const void *tx_buf, int words, int fs)
 435{
 436        const u32 *buf_32 = tx_buf;
 437        int k;
 438
 439        for (k = 0; k < words; k++)
 440                sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
 441}
 442
 443static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
 444                                         const void *tx_buf, int words, int fs)
 445{
 446        const u32 *buf_32 = tx_buf;
 447        int k;
 448
 449        for (k = 0; k < words; k++)
 450                sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
 451}
 452
 453static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
 454                                     void *rx_buf, int words, int fs)
 455{
 456        u8 *buf_8 = rx_buf;
 457        int k;
 458
 459        for (k = 0; k < words; k++)
 460                buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
 461}
 462
 463static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
 464                                      void *rx_buf, int words, int fs)
 465{
 466        u16 *buf_16 = rx_buf;
 467        int k;
 468
 469        for (k = 0; k < words; k++)
 470                buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
 471}
 472
 473static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
 474                                       void *rx_buf, int words, int fs)
 475{
 476        u16 *buf_16 = rx_buf;
 477        int k;
 478
 479        for (k = 0; k < words; k++)
 480                put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
 481}
 482
 483static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
 484                                      void *rx_buf, int words, int fs)
 485{
 486        u32 *buf_32 = rx_buf;
 487        int k;
 488
 489        for (k = 0; k < words; k++)
 490                buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
 491}
 492
 493static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
 494                                       void *rx_buf, int words, int fs)
 495{
 496        u32 *buf_32 = rx_buf;
 497        int k;
 498
 499        for (k = 0; k < words; k++)
 500                put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
 501}
 502
 503static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
 504                                       void *rx_buf, int words, int fs)
 505{
 506        u32 *buf_32 = rx_buf;
 507        int k;
 508
 509        for (k = 0; k < words; k++)
 510                buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
 511}
 512
 513static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
 514                                       void *rx_buf, int words, int fs)
 515{
 516        u32 *buf_32 = rx_buf;
 517        int k;
 518
 519        for (k = 0; k < words; k++)
 520                put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
 521}
 522
 523static int sh_msiof_spi_setup(struct spi_device *spi)
 524{
 525        struct device_node      *np = spi->master->dev.of_node;
 526        struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
 527
 528        pm_runtime_get_sync(&p->pdev->dev);
 529
 530        if (!np) {
 531                /*
 532                 * Use spi->controller_data for CS (same strategy as spi_gpio),
  533                 * if any; otherwise let the hardware control CS.
 534                 */
 535                spi->cs_gpio = (uintptr_t)spi->controller_data;
 536        }
 537
 538        /* Configure pins before deasserting CS */
 539        sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
 540                                  !!(spi->mode & SPI_CPHA),
 541                                  !!(spi->mode & SPI_3WIRE),
 542                                  !!(spi->mode & SPI_LSB_FIRST),
 543                                  !!(spi->mode & SPI_CS_HIGH));
 544
 545        if (spi->cs_gpio >= 0)
 546                gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
 547
 548
 549        pm_runtime_put(&p->pdev->dev);
 550
 551        return 0;
 552}
 553
 554static int sh_msiof_prepare_message(struct spi_master *master,
 555                                    struct spi_message *msg)
 556{
 557        struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
 558        const struct spi_device *spi = msg->spi;
 559
 560        /* Configure pins before asserting CS */
 561        sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
 562                                  !!(spi->mode & SPI_CPHA),
 563                                  !!(spi->mode & SPI_3WIRE),
 564                                  !!(spi->mode & SPI_LSB_FIRST),
 565                                  !!(spi->mode & SPI_CS_HIGH));
 566        return 0;
 567}
 568
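     /*
      * Transfer start/stop: in master mode the serial clock (TSCKE) and frame
      * sync (TFSE) outputs are switched around the RX/TX enables; in slave
      * mode both are supplied by the remote master, so only RXE/TXE change.
      */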
 569static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
 570{
 571        bool slave = spi_controller_is_slave(p->master);
 572        int ret = 0;
 573
 574        /* setup clock and rx/tx signals */
 575        if (!slave)
 576                ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
 577        if (rx_buf && !ret)
 578                ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
 579        if (!ret)
 580                ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
 581
 582        /* start by setting frame bit */
 583        if (!ret && !slave)
 584                ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
 585
 586        return ret;
 587}
 588
 589static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
 590{
 591        bool slave = spi_controller_is_slave(p->master);
 592        int ret = 0;
 593
 594        /* shut down frame, rx/tx and clock signals */
 595        if (!slave)
 596                ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
 597        if (!ret)
 598                ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
 599        if (rx_buf && !ret)
 600                ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
 601        if (!ret && !slave)
 602                ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
 603
 604        return ret;
 605}
 606
 607static int sh_msiof_slave_abort(struct spi_master *master)
 608{
 609        struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
 610
 611        p->slave_aborted = true;
 612        complete(&p->done);
 613        return 0;
 614}
 615
 616static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p)
 617{
 618        if (spi_controller_is_slave(p->master)) {
 619                if (wait_for_completion_interruptible(&p->done) ||
 620                    p->slave_aborted) {
 621                        dev_dbg(&p->pdev->dev, "interrupted\n");
 622                        return -EINTR;
 623                }
 624        } else {
 625                if (!wait_for_completion_timeout(&p->done, HZ)) {
 626                        dev_err(&p->pdev->dev, "timeout\n");
 627                        return -ETIMEDOUT;
 628                }
 629        }
 630
 631        return 0;
 632}
 633
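     /*
      * PIO transfer of at most one FIFO's worth of words: fill the TX FIFO,
      * start the transfer, wait for the frame-end interrupt, then drain the
      * RX FIFO.  Returns the number of words transferred, or a negative error.
      */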
 634static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
 635                                  void (*tx_fifo)(struct sh_msiof_spi_priv *,
 636                                                  const void *, int, int),
 637                                  void (*rx_fifo)(struct sh_msiof_spi_priv *,
 638                                                  void *, int, int),
 639                                  const void *tx_buf, void *rx_buf,
 640                                  int words, int bits)
 641{
 642        int fifo_shift;
 643        int ret;
 644
 645        /* limit maximum word transfer to rx/tx fifo size */
 646        if (tx_buf)
 647                words = min_t(int, words, p->tx_fifo_size);
 648        if (rx_buf)
 649                words = min_t(int, words, p->rx_fifo_size);
 650
  651        /* FIFO data is MSB-aligned, so shift words up by (32 - bits) */
 652        fifo_shift = 32 - bits;
 653
 654        /* default FIFO watermarks for PIO */
 655        sh_msiof_write(p, FCTR, 0);
 656
 657        /* setup msiof transfer mode registers */
 658        sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
 659        sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);
 660
 661        /* write tx fifo */
 662        if (tx_buf)
 663                tx_fifo(p, tx_buf, words, fifo_shift);
 664
 665        reinit_completion(&p->done);
 666        p->slave_aborted = false;
 667
 668        ret = sh_msiof_spi_start(p, rx_buf);
 669        if (ret) {
 670                dev_err(&p->pdev->dev, "failed to start hardware\n");
 671                goto stop_ier;
 672        }
 673
 674        /* wait for tx fifo to be emptied / rx fifo to be filled */
 675        ret = sh_msiof_wait_for_completion(p);
 676        if (ret)
 677                goto stop_reset;
 678
 679        /* read rx fifo */
 680        if (rx_buf)
 681                rx_fifo(p, rx_buf, words, fifo_shift);
 682
 683        /* clear status bits */
 684        sh_msiof_reset_str(p);
 685
 686        ret = sh_msiof_spi_stop(p, rx_buf);
 687        if (ret) {
 688                dev_err(&p->pdev->dev, "failed to shut down hardware\n");
 689                return ret;
 690        }
 691
 692        return words;
 693
 694stop_reset:
 695        sh_msiof_reset_str(p);
 696        sh_msiof_spi_stop(p, rx_buf);
 697stop_ier:
 698        sh_msiof_write(p, IER, 0);
 699        return ret;
 700}
 701
 702static void sh_msiof_dma_complete(void *arg)
 703{
 704        struct sh_msiof_spi_priv *p = arg;
 705
 706        sh_msiof_write(p, IER, 0);
 707        complete(&p->done);
 708}
 709
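     /*
      * DMA transfer of one chunk: data is staged in the pre-mapped
      * tx_dma_page/rx_dma_page bounce buffers, and completion is signalled by
      * the RX descriptor callback, or by the TX callback for TX-only
      * transfers.
      */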
 710static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
 711                             void *rx, unsigned int len)
 712{
 713        u32 ier_bits = 0;
 714        struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
 715        dma_cookie_t cookie;
 716        int ret;
 717
 718        /* First prepare and submit the DMA request(s), as this may fail */
 719        if (rx) {
 720                ier_bits |= IER_RDREQE | IER_RDMAE;
 721                desc_rx = dmaengine_prep_slave_single(p->master->dma_rx,
 722                                        p->rx_dma_addr, len, DMA_FROM_DEVICE,
 723                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 724                if (!desc_rx)
 725                        return -EAGAIN;
 726
 727                desc_rx->callback = sh_msiof_dma_complete;
 728                desc_rx->callback_param = p;
 729                cookie = dmaengine_submit(desc_rx);
 730                if (dma_submit_error(cookie))
 731                        return cookie;
 732        }
 733
 734        if (tx) {
 735                ier_bits |= IER_TDREQE | IER_TDMAE;
 736                dma_sync_single_for_device(p->master->dma_tx->device->dev,
 737                                           p->tx_dma_addr, len, DMA_TO_DEVICE);
 738                desc_tx = dmaengine_prep_slave_single(p->master->dma_tx,
 739                                        p->tx_dma_addr, len, DMA_TO_DEVICE,
 740                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 741                if (!desc_tx) {
 742                        ret = -EAGAIN;
 743                        goto no_dma_tx;
 744                }
 745
 746                if (rx) {
 747                        /* No callback */
 748                        desc_tx->callback = NULL;
 749                } else {
 750                        desc_tx->callback = sh_msiof_dma_complete;
 751                        desc_tx->callback_param = p;
 752                }
 753                cookie = dmaengine_submit(desc_tx);
 754                if (dma_submit_error(cookie)) {
 755                        ret = cookie;
 756                        goto no_dma_tx;
 757                }
 758        }
 759
 760        /* 1 stage FIFO watermarks for DMA */
 761        sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
 762
 763        /* setup msiof transfer mode registers (32-bit words) */
 764        sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
 765
 766        sh_msiof_write(p, IER, ier_bits);
 767
 768        reinit_completion(&p->done);
 769        p->slave_aborted = false;
 770
 771        /* Now start DMA */
 772        if (rx)
 773                dma_async_issue_pending(p->master->dma_rx);
 774        if (tx)
 775                dma_async_issue_pending(p->master->dma_tx);
 776
 777        ret = sh_msiof_spi_start(p, rx);
 778        if (ret) {
 779                dev_err(&p->pdev->dev, "failed to start hardware\n");
 780                goto stop_dma;
 781        }
 782
 783        /* wait for tx fifo to be emptied / rx fifo to be filled */
 784        ret = sh_msiof_wait_for_completion(p);
 785        if (ret)
 786                goto stop_reset;
 787
 788        /* clear status bits */
 789        sh_msiof_reset_str(p);
 790
 791        ret = sh_msiof_spi_stop(p, rx);
 792        if (ret) {
 793                dev_err(&p->pdev->dev, "failed to shut down hardware\n");
 794                return ret;
 795        }
 796
 797        if (rx)
 798                dma_sync_single_for_cpu(p->master->dma_rx->device->dev,
 799                                        p->rx_dma_addr, len,
 800                                        DMA_FROM_DEVICE);
 801
 802        return 0;
 803
 804stop_reset:
 805        sh_msiof_reset_str(p);
 806        sh_msiof_spi_stop(p, rx);
 807stop_dma:
 808        if (tx)
 809                dmaengine_terminate_all(p->master->dma_tx);
 810no_dma_tx:
 811        if (rx)
 812                dmaengine_terminate_all(p->master->dma_rx);
 813        sh_msiof_write(p, IER, 0);
 814        return ret;
 815}
 816
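     /*
      * Helpers for packing/unpacking the DMA bounce buffers: 8-bit data is
      * byte-swapped and 16-bit data is half-word-swapped so that byte order
      * on the wire matches the original buffer (the FIFO shifts 32-bit words
      * MSB first by default).
      */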
 817static void copy_bswap32(u32 *dst, const u32 *src, unsigned int words)
 818{
 819        /* src or dst can be unaligned, but not both */
 820        if ((unsigned long)src & 3) {
 821                while (words--) {
 822                        *dst++ = swab32(get_unaligned(src));
 823                        src++;
 824                }
 825        } else if ((unsigned long)dst & 3) {
 826                while (words--) {
 827                        put_unaligned(swab32(*src++), dst);
 828                        dst++;
 829                }
 830        } else {
 831                while (words--)
 832                        *dst++ = swab32(*src++);
 833        }
 834}
 835
 836static void copy_wswap32(u32 *dst, const u32 *src, unsigned int words)
 837{
 838        /* src or dst can be unaligned, but not both */
 839        if ((unsigned long)src & 3) {
 840                while (words--) {
 841                        *dst++ = swahw32(get_unaligned(src));
 842                        src++;
 843                }
 844        } else if ((unsigned long)dst & 3) {
 845                while (words--) {
 846                        put_unaligned(swahw32(*src++), dst);
 847                        dst++;
 848                }
 849        } else {
 850                while (words--)
 851                        *dst++ = swahw32(*src++);
 852        }
 853}
 854
 855static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
 856{
 857        memcpy(dst, src, words * 4);
 858}
 859
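     /*
      * transfer_one(): transfers longer than 15 bytes are first attempted via
      * DMA in FIFO-sized chunks, packing 8/16-bit words into 32-bit ones; any
      * remainder, or everything when DMA is unavailable, is done by PIO in
      * FIFO-sized chunks.
      */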
 860static int sh_msiof_transfer_one(struct spi_master *master,
 861                                 struct spi_device *spi,
 862                                 struct spi_transfer *t)
 863{
 864        struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
 865        void (*copy32)(u32 *, const u32 *, unsigned int);
 866        void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
 867        void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
 868        const void *tx_buf = t->tx_buf;
 869        void *rx_buf = t->rx_buf;
 870        unsigned int len = t->len;
 871        unsigned int bits = t->bits_per_word;
 872        unsigned int bytes_per_word;
 873        unsigned int words;
 874        int n;
 875        bool swab;
 876        int ret;
 877
  878        /* setup clocks (the module clock is already enabled via runtime PM) */
 879        if (!spi_controller_is_slave(p->master))
 880                sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
 881
 882        while (master->dma_tx && len > 15) {
 883                /*
 884                 *  DMA supports 32-bit words only, hence pack 8-bit and 16-bit
  885                 *  words, with byte and word swapping respectively.
 886                 */
 887                unsigned int l = 0;
 888
 889                if (tx_buf)
 890                        l = min(len, p->tx_fifo_size * 4);
 891                if (rx_buf)
 892                        l = min(len, p->rx_fifo_size * 4);
 893
 894                if (bits <= 8) {
 895                        if (l & 3)
 896                                break;
 897                        copy32 = copy_bswap32;
 898                } else if (bits <= 16) {
 899                        if (l & 1)
 900                                break;
 901                        copy32 = copy_wswap32;
 902                } else {
 903                        copy32 = copy_plain32;
 904                }
 905
 906                if (tx_buf)
 907                        copy32(p->tx_dma_page, tx_buf, l / 4);
 908
 909                ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
 910                if (ret == -EAGAIN) {
 911                        pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
 912                                     dev_driver_string(&p->pdev->dev),
 913                                     dev_name(&p->pdev->dev));
 914                        break;
 915                }
 916                if (ret)
 917                        return ret;
 918
 919                if (rx_buf) {
 920                        copy32(rx_buf, p->rx_dma_page, l / 4);
 921                        rx_buf += l;
 922                }
 923                if (tx_buf)
 924                        tx_buf += l;
 925
 926                len -= l;
 927                if (!len)
 928                        return 0;
 929        }
 930
 931        if (bits <= 8 && len > 15 && !(len & 3)) {
 932                bits = 32;
 933                swab = true;
 934        } else {
 935                swab = false;
 936        }
 937
 938        /* setup bytes per word and fifo read/write functions */
 939        if (bits <= 8) {
 940                bytes_per_word = 1;
 941                tx_fifo = sh_msiof_spi_write_fifo_8;
 942                rx_fifo = sh_msiof_spi_read_fifo_8;
 943        } else if (bits <= 16) {
 944                bytes_per_word = 2;
 945                if ((unsigned long)tx_buf & 0x01)
 946                        tx_fifo = sh_msiof_spi_write_fifo_16u;
 947                else
 948                        tx_fifo = sh_msiof_spi_write_fifo_16;
 949
 950                if ((unsigned long)rx_buf & 0x01)
 951                        rx_fifo = sh_msiof_spi_read_fifo_16u;
 952                else
 953                        rx_fifo = sh_msiof_spi_read_fifo_16;
 954        } else if (swab) {
 955                bytes_per_word = 4;
 956                if ((unsigned long)tx_buf & 0x03)
 957                        tx_fifo = sh_msiof_spi_write_fifo_s32u;
 958                else
 959                        tx_fifo = sh_msiof_spi_write_fifo_s32;
 960
 961                if ((unsigned long)rx_buf & 0x03)
 962                        rx_fifo = sh_msiof_spi_read_fifo_s32u;
 963                else
 964                        rx_fifo = sh_msiof_spi_read_fifo_s32;
 965        } else {
 966                bytes_per_word = 4;
 967                if ((unsigned long)tx_buf & 0x03)
 968                        tx_fifo = sh_msiof_spi_write_fifo_32u;
 969                else
 970                        tx_fifo = sh_msiof_spi_write_fifo_32;
 971
 972                if ((unsigned long)rx_buf & 0x03)
 973                        rx_fifo = sh_msiof_spi_read_fifo_32u;
 974                else
 975                        rx_fifo = sh_msiof_spi_read_fifo_32;
 976        }
 977
 978        /* transfer in fifo sized chunks */
 979        words = len / bytes_per_word;
 980
 981        while (words > 0) {
 982                n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, tx_buf, rx_buf,
 983                                           words, bits);
 984                if (n < 0)
 985                        return n;
 986
 987                if (tx_buf)
 988                        tx_buf += n * bytes_per_word;
 989                if (rx_buf)
 990                        rx_buf += n * bytes_per_word;
 991                words -= n;
 992        }
 993
 994        return 0;
 995}
 996
 997static const struct sh_msiof_chipdata sh_data = {
 998        .tx_fifo_size = 64,
 999        .rx_fifo_size = 64,
1000        .master_flags = 0,
1001};
1002
1003static const struct sh_msiof_chipdata r8a779x_data = {
1004        .tx_fifo_size = 64,
1005        .rx_fifo_size = 64,
1006        .master_flags = SPI_MASTER_MUST_TX,
1007};
1008
1009static const struct of_device_id sh_msiof_match[] = {
1010        { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
1011        { .compatible = "renesas,msiof-r8a7790",   .data = &r8a779x_data },
1012        { .compatible = "renesas,msiof-r8a7791",   .data = &r8a779x_data },
1013        { .compatible = "renesas,msiof-r8a7792",   .data = &r8a779x_data },
1014        { .compatible = "renesas,msiof-r8a7793",   .data = &r8a779x_data },
1015        { .compatible = "renesas,msiof-r8a7794",   .data = &r8a779x_data },
1016        { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data },
1017        { .compatible = "renesas,msiof-r8a7796",   .data = &r8a779x_data },
1018        { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data },
1019        { .compatible = "renesas,sh-msiof",        .data = &sh_data }, /* Deprecated */
1020        {},
1021};
1022MODULE_DEVICE_TABLE(of, sh_msiof_match);
1023
1024#ifdef CONFIG_OF
1025static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
1026{
1027        struct sh_msiof_spi_info *info;
1028        struct device_node *np = dev->of_node;
1029        u32 num_cs = 1;
1030
1031        info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
1032        if (!info)
1033                return NULL;
1034
1035        info->mode = of_property_read_bool(np, "spi-slave") ? MSIOF_SPI_SLAVE
1036                                                            : MSIOF_SPI_MASTER;
1037
1038        /* Parse the MSIOF properties */
1039        if (info->mode == MSIOF_SPI_MASTER)
1040                of_property_read_u32(np, "num-cs", &num_cs);
1041        of_property_read_u32(np, "renesas,tx-fifo-size",
1042                                        &info->tx_fifo_override);
1043        of_property_read_u32(np, "renesas,rx-fifo-size",
1044                                        &info->rx_fifo_override);
1045        of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
1046        of_property_read_u32(np, "renesas,syncdl", &info->syncdl);
1047
1048        info->num_chipselect = num_cs;
1049
1050        return info;
1051}
1052#else
1053static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
1054{
1055        return NULL;
1056}
1057#endif
1058
1059static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
1060        enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
1061{
1062        dma_cap_mask_t mask;
1063        struct dma_chan *chan;
1064        struct dma_slave_config cfg;
1065        int ret;
1066
1067        dma_cap_zero(mask);
1068        dma_cap_set(DMA_SLAVE, mask);
1069
1070        chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
1071                                (void *)(unsigned long)id, dev,
1072                                dir == DMA_MEM_TO_DEV ? "tx" : "rx");
1073        if (!chan) {
1074                dev_warn(dev, "dma_request_slave_channel_compat failed\n");
1075                return NULL;
1076        }
1077
1078        memset(&cfg, 0, sizeof(cfg));
1079        cfg.direction = dir;
1080        if (dir == DMA_MEM_TO_DEV) {
1081                cfg.dst_addr = port_addr;
1082                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1083        } else {
1084                cfg.src_addr = port_addr;
1085                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1086        }
1087
1088        ret = dmaengine_slave_config(chan, &cfg);
1089        if (ret) {
1090                dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
1091                dma_release_channel(chan);
1092                return NULL;
1093        }
1094
1095        return chan;
1096}
1097
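     /*
      * DMA setup: request one slave channel per direction and allocate and
      * map one bounce page per direction at probe time, so that individual
      * transfers only need dma_sync_single_for_*() calls.
      */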
1098static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
1099{
1100        struct platform_device *pdev = p->pdev;
1101        struct device *dev = &pdev->dev;
1102        const struct sh_msiof_spi_info *info = dev_get_platdata(dev);
1103        unsigned int dma_tx_id, dma_rx_id;
1104        const struct resource *res;
1105        struct spi_master *master;
1106        struct device *tx_dev, *rx_dev;
1107
1108        if (dev->of_node) {
1109                /* In the OF case we will get the slave IDs from the DT */
1110                dma_tx_id = 0;
1111                dma_rx_id = 0;
1112        } else if (info && info->dma_tx_id && info->dma_rx_id) {
1113                dma_tx_id = info->dma_tx_id;
1114                dma_rx_id = info->dma_rx_id;
1115        } else {
 1116                /* No DMA resources specified; use PIO, which is not an error */
1117                return 0;
1118        }
1119
1120        /* The DMA engine uses the second register set, if present */
1121        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1122        if (!res)
1123                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1124
1125        master = p->master;
1126        master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
1127                                                   dma_tx_id,
1128                                                   res->start + TFDR);
1129        if (!master->dma_tx)
1130                return -ENODEV;
1131
1132        master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
1133                                                   dma_rx_id,
1134                                                   res->start + RFDR);
1135        if (!master->dma_rx)
1136                goto free_tx_chan;
1137
1138        p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
1139        if (!p->tx_dma_page)
1140                goto free_rx_chan;
1141
1142        p->rx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
1143        if (!p->rx_dma_page)
1144                goto free_tx_page;
1145
1146        tx_dev = master->dma_tx->device->dev;
1147        p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
1148                                        DMA_TO_DEVICE);
1149        if (dma_mapping_error(tx_dev, p->tx_dma_addr))
1150                goto free_rx_page;
1151
1152        rx_dev = master->dma_rx->device->dev;
1153        p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
1154                                        DMA_FROM_DEVICE);
1155        if (dma_mapping_error(rx_dev, p->rx_dma_addr))
1156                goto unmap_tx_page;
1157
1158        dev_info(dev, "DMA available");
1159        return 0;
1160
1161unmap_tx_page:
1162        dma_unmap_single(tx_dev, p->tx_dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
1163free_rx_page:
1164        free_page((unsigned long)p->rx_dma_page);
1165free_tx_page:
1166        free_page((unsigned long)p->tx_dma_page);
1167free_rx_chan:
1168        dma_release_channel(master->dma_rx);
1169free_tx_chan:
1170        dma_release_channel(master->dma_tx);
1171        master->dma_tx = NULL;
1172        return -ENODEV;
1173}
1174
1175static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
1176{
1177        struct spi_master *master = p->master;
1178        struct device *dev;
1179
1180        if (!master->dma_tx)
1181                return;
1182
1183        dev = &p->pdev->dev;
1184        dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr,
1185                         PAGE_SIZE, DMA_FROM_DEVICE);
1186        dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr,
1187                         PAGE_SIZE, DMA_TO_DEVICE);
1188        free_page((unsigned long)p->rx_dma_page);
1189        free_page((unsigned long)p->tx_dma_page);
1190        dma_release_channel(master->dma_rx);
1191        dma_release_channel(master->dma_tx);
1192}
1193
1194static int sh_msiof_spi_probe(struct platform_device *pdev)
1195{
1196        struct resource *r;
1197        struct spi_master *master;
1198        const struct sh_msiof_chipdata *chipdata;
1199        const struct of_device_id *of_id;
1200        struct sh_msiof_spi_info *info;
1201        struct sh_msiof_spi_priv *p;
1202        int i;
1203        int ret;
1204
1205        of_id = of_match_device(sh_msiof_match, &pdev->dev);
1206        if (of_id) {
1207                chipdata = of_id->data;
1208                info = sh_msiof_spi_parse_dt(&pdev->dev);
1209        } else {
1210                chipdata = (const void *)pdev->id_entry->driver_data;
1211                info = dev_get_platdata(&pdev->dev);
1212        }
1213
1214        if (!info) {
1215                dev_err(&pdev->dev, "failed to obtain device info\n");
1216                return -ENXIO;
1217        }
1218
1219        if (info->mode == MSIOF_SPI_SLAVE)
1220                master = spi_alloc_slave(&pdev->dev,
1221                                         sizeof(struct sh_msiof_spi_priv));
1222        else
1223                master = spi_alloc_master(&pdev->dev,
1224                                          sizeof(struct sh_msiof_spi_priv));
1225        if (master == NULL)
1226                return -ENOMEM;
1227
1228        p = spi_master_get_devdata(master);
1229
1230        platform_set_drvdata(pdev, p);
1231        p->master = master;
1232        p->info = info;
1233
1234        init_completion(&p->done);
1235
1236        p->clk = devm_clk_get(&pdev->dev, NULL);
1237        if (IS_ERR(p->clk)) {
1238                dev_err(&pdev->dev, "cannot get clock\n");
1239                ret = PTR_ERR(p->clk);
1240                goto err1;
1241        }
1242
1243        i = platform_get_irq(pdev, 0);
1244        if (i < 0) {
1245                dev_err(&pdev->dev, "cannot get platform IRQ\n");
1246                ret = -ENOENT;
1247                goto err1;
1248        }
1249
1250        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1251        p->mapbase = devm_ioremap_resource(&pdev->dev, r);
1252        if (IS_ERR(p->mapbase)) {
1253                ret = PTR_ERR(p->mapbase);
1254                goto err1;
1255        }
1256
1257        ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
1258                               dev_name(&pdev->dev), p);
1259        if (ret) {
1260                dev_err(&pdev->dev, "unable to request irq\n");
1261                goto err1;
1262        }
1263
1264        p->pdev = pdev;
1265        pm_runtime_enable(&pdev->dev);
1266
1267        /* Platform data may override FIFO sizes */
1268        p->tx_fifo_size = chipdata->tx_fifo_size;
1269        p->rx_fifo_size = chipdata->rx_fifo_size;
1270        if (p->info->tx_fifo_override)
1271                p->tx_fifo_size = p->info->tx_fifo_override;
1272        if (p->info->rx_fifo_override)
1273                p->rx_fifo_size = p->info->rx_fifo_override;
1274
1275        /* init master code */
1276        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1277        master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
1278        master->flags = chipdata->master_flags;
1279        master->bus_num = pdev->id;
1280        master->dev.of_node = pdev->dev.of_node;
1281        master->num_chipselect = p->info->num_chipselect;
1282        master->setup = sh_msiof_spi_setup;
1283        master->prepare_message = sh_msiof_prepare_message;
1284        master->slave_abort = sh_msiof_slave_abort;
1285        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
1286        master->auto_runtime_pm = true;
1287        master->transfer_one = sh_msiof_transfer_one;
1288
1289        ret = sh_msiof_request_dma(p);
1290        if (ret < 0)
1291                dev_warn(&pdev->dev, "DMA not available, using PIO\n");
1292
1293        ret = devm_spi_register_master(&pdev->dev, master);
1294        if (ret < 0) {
1295                dev_err(&pdev->dev, "spi_register_master error.\n");
1296                goto err2;
1297        }
1298
1299        return 0;
1300
1301 err2:
1302        sh_msiof_release_dma(p);
1303        pm_runtime_disable(&pdev->dev);
1304 err1:
1305        spi_master_put(master);
1306        return ret;
1307}
1308
1309static int sh_msiof_spi_remove(struct platform_device *pdev)
1310{
1311        struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
1312
1313        sh_msiof_release_dma(p);
1314        pm_runtime_disable(&pdev->dev);
1315        return 0;
1316}
1317
1318static const struct platform_device_id spi_driver_ids[] = {
1319        { "spi_sh_msiof",       (kernel_ulong_t)&sh_data },
1320        {},
1321};
1322MODULE_DEVICE_TABLE(platform, spi_driver_ids);
1323
1324static struct platform_driver sh_msiof_spi_drv = {
1325        .probe          = sh_msiof_spi_probe,
1326        .remove         = sh_msiof_spi_remove,
1327        .id_table       = spi_driver_ids,
1328        .driver         = {
1329                .name           = "spi_sh_msiof",
1330                .of_match_table = of_match_ptr(sh_msiof_match),
1331        },
1332};
1333module_platform_driver(sh_msiof_spi_drv);
1334
1335MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver");
1336MODULE_AUTHOR("Magnus Damm");
1337MODULE_LICENSE("GPL v2");
1338MODULE_ALIAS("platform:spi_sh_msiof");
1339