/* linux/drivers/mtd/spi-nor/cadence-quadspi.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Driver for Cadence QSPI Controller
   4 *
   5 * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
   6 */
   7#include <linux/clk.h>
   8#include <linux/completion.h>
   9#include <linux/delay.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/dmaengine.h>
  12#include <linux/err.h>
  13#include <linux/errno.h>
  14#include <linux/interrupt.h>
  15#include <linux/io.h>
  16#include <linux/iopoll.h>
  17#include <linux/jiffies.h>
  18#include <linux/kernel.h>
  19#include <linux/module.h>
  20#include <linux/mtd/mtd.h>
  21#include <linux/mtd/partitions.h>
  22#include <linux/mtd/spi-nor.h>
  23#include <linux/of_device.h>
  24#include <linux/of.h>
  25#include <linux/platform_device.h>
  26#include <linux/pm_runtime.h>
  27#include <linux/reset.h>
  28#include <linux/sched.h>
  29#include <linux/spi/spi.h>
  30#include <linux/timer.h>
  31
  32#define CQSPI_NAME                      "cadence-qspi"
  33#define CQSPI_MAX_CHIPSELECT            16
  34
  35/* Quirks */
  36#define CQSPI_NEEDS_WR_DELAY            BIT(0)
  37
  38/* Capabilities mask */
  39#define CQSPI_BASE_HWCAPS_MASK                                  \
  40        (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST |             \
  41        SNOR_HWCAPS_READ_1_1_2 | SNOR_HWCAPS_READ_1_1_4 |       \
  42        SNOR_HWCAPS_PP)
  43
  44struct cqspi_st;
  45
  46struct cqspi_flash_pdata {
  47        struct spi_nor  nor;
  48        struct cqspi_st *cqspi;
  49        u32             clk_rate;
  50        u32             read_delay;
  51        u32             tshsl_ns;
  52        u32             tsd2d_ns;
  53        u32             tchsh_ns;
  54        u32             tslch_ns;
  55        u8              inst_width;
  56        u8              addr_width;
  57        u8              data_width;
  58        u8              cs;
  59        bool            registered;
  60        bool            use_direct_mode;
  61};
  62
  63struct cqspi_st {
  64        struct platform_device  *pdev;
  65
  66        struct clk              *clk;
  67        unsigned int            sclk;
  68
  69        void __iomem            *iobase;
  70        void __iomem            *ahb_base;
  71        resource_size_t         ahb_size;
  72        struct completion       transfer_complete;
  73        struct mutex            bus_mutex;
  74
  75        struct dma_chan         *rx_chan;
  76        struct completion       rx_dma_complete;
  77        dma_addr_t              mmap_phys_base;
  78
  79        int                     current_cs;
  80        int                     current_page_size;
  81        int                     current_erase_size;
  82        int                     current_addr_width;
  83        unsigned long           master_ref_clk_hz;
  84        bool                    is_decoded_cs;
  85        u32                     fifo_depth;
  86        u32                     fifo_width;
  87        bool                    rclk_en;
  88        u32                     trigger_address;
  89        u32                     wr_delay;
  90        struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
  91};
  92
  93struct cqspi_driver_platdata {
  94        u32 hwcaps_mask;
  95        u8 quirks;
  96};
  97
  98/* Operation timeout value */
  99#define CQSPI_TIMEOUT_MS                        500
 100#define CQSPI_READ_TIMEOUT_MS                   10
 101
 102/* Instruction type */
 103#define CQSPI_INST_TYPE_SINGLE                  0
 104#define CQSPI_INST_TYPE_DUAL                    1
 105#define CQSPI_INST_TYPE_QUAD                    2
 106#define CQSPI_INST_TYPE_OCTAL                   3
 107
 108#define CQSPI_DUMMY_CLKS_PER_BYTE               8
 109#define CQSPI_DUMMY_BYTES_MAX                   4
 110#define CQSPI_DUMMY_CLKS_MAX                    31
 111
 112#define CQSPI_STIG_DATA_LEN_MAX                 8
 113
 114/* Register map */
 115#define CQSPI_REG_CONFIG                        0x00
 116#define CQSPI_REG_CONFIG_ENABLE_MASK            BIT(0)
 117#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL       BIT(7)
 118#define CQSPI_REG_CONFIG_DECODE_MASK            BIT(9)
 119#define CQSPI_REG_CONFIG_CHIPSELECT_LSB         10
 120#define CQSPI_REG_CONFIG_DMA_MASK               BIT(15)
 121#define CQSPI_REG_CONFIG_BAUD_LSB               19
 122#define CQSPI_REG_CONFIG_IDLE_LSB               31
 123#define CQSPI_REG_CONFIG_CHIPSELECT_MASK        0xF
 124#define CQSPI_REG_CONFIG_BAUD_MASK              0xF
 125
 126#define CQSPI_REG_RD_INSTR                      0x04
 127#define CQSPI_REG_RD_INSTR_OPCODE_LSB           0
 128#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB       8
 129#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB        12
 130#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB        16
 131#define CQSPI_REG_RD_INSTR_MODE_EN_LSB          20
 132#define CQSPI_REG_RD_INSTR_DUMMY_LSB            24
 133#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK      0x3
 134#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK       0x3
 135#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK       0x3
 136#define CQSPI_REG_RD_INSTR_DUMMY_MASK           0x1F
 137
 138#define CQSPI_REG_WR_INSTR                      0x08
 139#define CQSPI_REG_WR_INSTR_OPCODE_LSB           0
 140#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB        12
 141#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB        16
 142
 143#define CQSPI_REG_DELAY                         0x0C
 144#define CQSPI_REG_DELAY_TSLCH_LSB               0
 145#define CQSPI_REG_DELAY_TCHSH_LSB               8
 146#define CQSPI_REG_DELAY_TSD2D_LSB               16
 147#define CQSPI_REG_DELAY_TSHSL_LSB               24
 148#define CQSPI_REG_DELAY_TSLCH_MASK              0xFF
 149#define CQSPI_REG_DELAY_TCHSH_MASK              0xFF
 150#define CQSPI_REG_DELAY_TSD2D_MASK              0xFF
 151#define CQSPI_REG_DELAY_TSHSL_MASK              0xFF
 152
 153#define CQSPI_REG_READCAPTURE                   0x10
 154#define CQSPI_REG_READCAPTURE_BYPASS_LSB        0
 155#define CQSPI_REG_READCAPTURE_DELAY_LSB         1
 156#define CQSPI_REG_READCAPTURE_DELAY_MASK        0xF
 157
 158#define CQSPI_REG_SIZE                          0x14
 159#define CQSPI_REG_SIZE_ADDRESS_LSB              0
 160#define CQSPI_REG_SIZE_PAGE_LSB                 4
 161#define CQSPI_REG_SIZE_BLOCK_LSB                16
 162#define CQSPI_REG_SIZE_ADDRESS_MASK             0xF
 163#define CQSPI_REG_SIZE_PAGE_MASK                0xFFF
 164#define CQSPI_REG_SIZE_BLOCK_MASK               0x3F
 165
 166#define CQSPI_REG_SRAMPARTITION                 0x18
 167#define CQSPI_REG_INDIRECTTRIGGER               0x1C
 168
 169#define CQSPI_REG_DMA                           0x20
 170#define CQSPI_REG_DMA_SINGLE_LSB                0
 171#define CQSPI_REG_DMA_BURST_LSB                 8
 172#define CQSPI_REG_DMA_SINGLE_MASK               0xFF
 173#define CQSPI_REG_DMA_BURST_MASK                0xFF
 174
 175#define CQSPI_REG_REMAP                         0x24
 176#define CQSPI_REG_MODE_BIT                      0x28
 177
 178#define CQSPI_REG_SDRAMLEVEL                    0x2C
 179#define CQSPI_REG_SDRAMLEVEL_RD_LSB             0
 180#define CQSPI_REG_SDRAMLEVEL_WR_LSB             16
 181#define CQSPI_REG_SDRAMLEVEL_RD_MASK            0xFFFF
 182#define CQSPI_REG_SDRAMLEVEL_WR_MASK            0xFFFF
 183
 184#define CQSPI_REG_IRQSTATUS                     0x40
 185#define CQSPI_REG_IRQMASK                       0x44
 186
 187#define CQSPI_REG_INDIRECTRD                    0x60
 188#define CQSPI_REG_INDIRECTRD_START_MASK         BIT(0)
 189#define CQSPI_REG_INDIRECTRD_CANCEL_MASK        BIT(1)
 190#define CQSPI_REG_INDIRECTRD_DONE_MASK          BIT(5)
 191
 192#define CQSPI_REG_INDIRECTRDWATERMARK           0x64
 193#define CQSPI_REG_INDIRECTRDSTARTADDR           0x68
 194#define CQSPI_REG_INDIRECTRDBYTES               0x6C
 195
 196#define CQSPI_REG_CMDCTRL                       0x90
 197#define CQSPI_REG_CMDCTRL_EXECUTE_MASK          BIT(0)
 198#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK       BIT(1)
 199#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB          12
 200#define CQSPI_REG_CMDCTRL_WR_EN_LSB             15
 201#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB         16
 202#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB           19
 203#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB          20
 204#define CQSPI_REG_CMDCTRL_RD_EN_LSB             23
 205#define CQSPI_REG_CMDCTRL_OPCODE_LSB            24
 206#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK         0x7
 207#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK        0x3
 208#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK         0x7
 209
 210#define CQSPI_REG_INDIRECTWR                    0x70
 211#define CQSPI_REG_INDIRECTWR_START_MASK         BIT(0)
 212#define CQSPI_REG_INDIRECTWR_CANCEL_MASK        BIT(1)
 213#define CQSPI_REG_INDIRECTWR_DONE_MASK          BIT(5)
 214
 215#define CQSPI_REG_INDIRECTWRWATERMARK           0x74
 216#define CQSPI_REG_INDIRECTWRSTARTADDR           0x78
 217#define CQSPI_REG_INDIRECTWRBYTES               0x7C
 218
 219#define CQSPI_REG_CMDADDRESS                    0x94
 220#define CQSPI_REG_CMDREADDATALOWER              0xA0
 221#define CQSPI_REG_CMDREADDATAUPPER              0xA4
 222#define CQSPI_REG_CMDWRITEDATALOWER             0xA8
 223#define CQSPI_REG_CMDWRITEDATAUPPER             0xAC
 224
 225/* Interrupt status bits */
 226#define CQSPI_REG_IRQ_MODE_ERR                  BIT(0)
 227#define CQSPI_REG_IRQ_UNDERFLOW                 BIT(1)
 228#define CQSPI_REG_IRQ_IND_COMP                  BIT(2)
 229#define CQSPI_REG_IRQ_IND_RD_REJECT             BIT(3)
 230#define CQSPI_REG_IRQ_WR_PROTECTED_ERR          BIT(4)
 231#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR           BIT(5)
 232#define CQSPI_REG_IRQ_WATERMARK                 BIT(6)
 233#define CQSPI_REG_IRQ_IND_SRAM_FULL             BIT(12)
 234
 235#define CQSPI_IRQ_MASK_RD               (CQSPI_REG_IRQ_WATERMARK        | \
 236                                         CQSPI_REG_IRQ_IND_SRAM_FULL    | \
 237                                         CQSPI_REG_IRQ_IND_COMP)
 238
 239#define CQSPI_IRQ_MASK_WR               (CQSPI_REG_IRQ_IND_COMP         | \
 240                                         CQSPI_REG_IRQ_WATERMARK        | \
 241                                         CQSPI_REG_IRQ_UNDERFLOW)
 242
 243#define CQSPI_IRQ_STATUS_MASK           0x1FFFF
 244
/*
 * Poll @reg until every bit in @mask is set (@clr == false) or cleared
 * (@clr == true), sampling every 10us.
 *
 * Returns 0 on success, -ETIMEDOUT after CQSPI_TIMEOUT_MS.
 */
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
{
	u32 val;

	/* When waiting for bits to clear, invert the sampled value. */
	return readl_relaxed_poll_timeout(reg, val,
					  (((clr ? ~val : val) & mask) == mask),
					  10, CQSPI_TIMEOUT_MS * 1000);
}
 253
 254static bool cqspi_is_idle(struct cqspi_st *cqspi)
 255{
 256        u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
 257
 258        return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB);
 259}
 260
 261static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
 262{
 263        u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);
 264
 265        reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
 266        return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
 267}
 268
 269static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
 270{
 271        struct cqspi_st *cqspi = dev;
 272        unsigned int irq_status;
 273
 274        /* Read interrupt status */
 275        irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
 276
 277        /* Clear interrupt */
 278        writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
 279
 280        irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
 281
 282        if (irq_status)
 283                complete(&cqspi->transfer_complete);
 284
 285        return IRQ_HANDLED;
 286}
 287
 288static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
 289{
 290        struct cqspi_flash_pdata *f_pdata = nor->priv;
 291        u32 rdreg = 0;
 292
 293        rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
 294        rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
 295        rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
 296
 297        return rdreg;
 298}
 299
 300static int cqspi_wait_idle(struct cqspi_st *cqspi)
 301{
 302        const unsigned int poll_idle_retry = 3;
 303        unsigned int count = 0;
 304        unsigned long timeout;
 305
 306        timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
 307        while (1) {
 308                /*
 309                 * Read few times in succession to ensure the controller
 310                 * is indeed idle, that is, the bit does not transition
 311                 * low again.
 312                 */
 313                if (cqspi_is_idle(cqspi))
 314                        count++;
 315                else
 316                        count = 0;
 317
 318                if (count >= poll_idle_retry)
 319                        return 0;
 320
 321                if (time_after(jiffies, timeout)) {
 322                        /* Timeout, in busy mode. */
 323                        dev_err(&cqspi->pdev->dev,
 324                                "QSPI is still busy after %dms timeout.\n",
 325                                CQSPI_TIMEOUT_MS);
 326                        return -ETIMEDOUT;
 327                }
 328
 329                cpu_relax();
 330        }
 331}
 332
 333static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
 334{
 335        void __iomem *reg_base = cqspi->iobase;
 336        int ret;
 337
 338        /* Write the CMDCTRL without start execution. */
 339        writel(reg, reg_base + CQSPI_REG_CMDCTRL);
 340        /* Start execute */
 341        reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
 342        writel(reg, reg_base + CQSPI_REG_CMDCTRL);
 343
 344        /* Polling for completion. */
 345        ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
 346                                 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
 347        if (ret) {
 348                dev_err(&cqspi->pdev->dev,
 349                        "Flash command execution timed out.\n");
 350                return ret;
 351        }
 352
 353        /* Polling QSPI idle status. */
 354        return cqspi_wait_idle(cqspi);
 355}
 356
 357static int cqspi_command_read(struct spi_nor *nor,
 358                              const u8 *txbuf, const unsigned n_tx,
 359                              u8 *rxbuf, const unsigned n_rx)
 360{
 361        struct cqspi_flash_pdata *f_pdata = nor->priv;
 362        struct cqspi_st *cqspi = f_pdata->cqspi;
 363        void __iomem *reg_base = cqspi->iobase;
 364        unsigned int rdreg;
 365        unsigned int reg;
 366        unsigned int read_len;
 367        int status;
 368
 369        if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
 370                dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
 371                        n_rx, rxbuf);
 372                return -EINVAL;
 373        }
 374
 375        reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
 376
 377        rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
 378        writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
 379
 380        reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
 381
 382        /* 0 means 1 byte. */
 383        reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
 384                << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
 385        status = cqspi_exec_flash_cmd(cqspi, reg);
 386        if (status)
 387                return status;
 388
 389        reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
 390
 391        /* Put the read value into rx_buf */
 392        read_len = (n_rx > 4) ? 4 : n_rx;
 393        memcpy(rxbuf, &reg, read_len);
 394        rxbuf += read_len;
 395
 396        if (n_rx > 4) {
 397                reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
 398
 399                read_len = n_rx - read_len;
 400                memcpy(rxbuf, &reg, read_len);
 401        }
 402
 403        return 0;
 404}
 405
 406static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
 407                               const u8 *txbuf, const unsigned n_tx)
 408{
 409        struct cqspi_flash_pdata *f_pdata = nor->priv;
 410        struct cqspi_st *cqspi = f_pdata->cqspi;
 411        void __iomem *reg_base = cqspi->iobase;
 412        unsigned int reg;
 413        unsigned int data;
 414        u32 write_len;
 415        int ret;
 416
 417        if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
 418                dev_err(nor->dev,
 419                        "Invalid input argument, cmdlen %d txbuf 0x%p\n",
 420                        n_tx, txbuf);
 421                return -EINVAL;
 422        }
 423
 424        reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
 425        if (n_tx) {
 426                reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
 427                reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
 428                        << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
 429                data = 0;
 430                write_len = (n_tx > 4) ? 4 : n_tx;
 431                memcpy(&data, txbuf, write_len);
 432                txbuf += write_len;
 433                writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
 434
 435                if (n_tx > 4) {
 436                        data = 0;
 437                        write_len = n_tx - 4;
 438                        memcpy(&data, txbuf, write_len);
 439                        writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
 440                }
 441        }
 442        ret = cqspi_exec_flash_cmd(cqspi, reg);
 443        return ret;
 444}
 445
 446static int cqspi_command_write_addr(struct spi_nor *nor,
 447                                    const u8 opcode, const unsigned int addr)
 448{
 449        struct cqspi_flash_pdata *f_pdata = nor->priv;
 450        struct cqspi_st *cqspi = f_pdata->cqspi;
 451        void __iomem *reg_base = cqspi->iobase;
 452        unsigned int reg;
 453
 454        reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
 455        reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
 456        reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
 457                << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
 458
 459        writel(addr, reg_base + CQSPI_REG_CMDADDRESS);
 460
 461        return cqspi_exec_flash_cmd(cqspi, reg);
 462}
 463
/*
 * Program the read instruction register for nor->read_opcode (opcode,
 * lane widths, mode bit, dummy clocks) and set the address width in the
 * SIZE register.  Always returns 0.
 */
static int cqspi_read_setup(struct spi_nor *nor)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int dummy_clk = 0;
	unsigned int reg;

	reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	reg |= cqspi_calc_rdreg(nor, nor->read_opcode);

	/* Setup dummy clock cycles, capped at the register field maximum. */
	dummy_clk = nor->read_dummy;
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		dummy_clk = CQSPI_DUMMY_CLKS_MAX;

	/* A full byte (8+) of dummy clocks implies a mode-byte phase. */
	if (dummy_clk / 8) {
		reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
		/* Set mode bits high to ensure chip doesn't enter XIP */
		writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);

		/*
		 * Need to subtract the mode byte (8 clocks).  Quad
		 * instructions skip this subtraction — NOTE(review):
		 * presumably the mode byte is already excluded from
		 * read_dummy there; confirm against the flash datasheet.
		 */
		if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
			dummy_clk -= 8;

		if (dummy_clk)
			reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
			       << CQSPI_REG_RD_INSTR_DUMMY_LSB;
	}

	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/* Set address width (encoded minus one) in the SIZE register. */
	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (nor->addr_width - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}
 503
 504static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
 505                                       loff_t from_addr, const size_t n_rx)
 506{
 507        struct cqspi_flash_pdata *f_pdata = nor->priv;
 508        struct cqspi_st *cqspi = f_pdata->cqspi;
 509        void __iomem *reg_base = cqspi->iobase;
 510        void __iomem *ahb_base = cqspi->ahb_base;
 511        unsigned int remaining = n_rx;
 512        unsigned int mod_bytes = n_rx % 4;
 513        unsigned int bytes_to_read = 0;
 514        u8 *rxbuf_end = rxbuf + n_rx;
 515        int ret = 0;
 516
 517        writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
 518        writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
 519
 520        /* Clear all interrupts. */
 521        writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
 522
 523        writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
 524
 525        reinit_completion(&cqspi->transfer_complete);
 526        writel(CQSPI_REG_INDIRECTRD_START_MASK,
 527               reg_base + CQSPI_REG_INDIRECTRD);
 528
 529        while (remaining > 0) {
 530                if (!wait_for_completion_timeout(&cqspi->transfer_complete,
 531                                msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
 532                        ret = -ETIMEDOUT;
 533
 534                bytes_to_read = cqspi_get_rd_sram_level(cqspi);
 535
 536                if (ret && bytes_to_read == 0) {
 537                        dev_err(nor->dev, "Indirect read timeout, no bytes\n");
 538                        goto failrd;
 539                }
 540
 541                while (bytes_to_read != 0) {
 542                        unsigned int word_remain = round_down(remaining, 4);
 543
 544                        bytes_to_read *= cqspi->fifo_width;
 545                        bytes_to_read = bytes_to_read > remaining ?
 546                                        remaining : bytes_to_read;
 547                        bytes_to_read = round_down(bytes_to_read, 4);
 548                        /* Read 4 byte word chunks then single bytes */
 549                        if (bytes_to_read) {
 550                                ioread32_rep(ahb_base, rxbuf,
 551                                             (bytes_to_read / 4));
 552                        } else if (!word_remain && mod_bytes) {
 553                                unsigned int temp = ioread32(ahb_base);
 554
 555                                bytes_to_read = mod_bytes;
 556                                memcpy(rxbuf, &temp, min((unsigned int)
 557                                                         (rxbuf_end - rxbuf),
 558                                                         bytes_to_read));
 559                        }
 560                        rxbuf += bytes_to_read;
 561                        remaining -= bytes_to_read;
 562                        bytes_to_read = cqspi_get_rd_sram_level(cqspi);
 563                }
 564
 565                if (remaining > 0)
 566                        reinit_completion(&cqspi->transfer_complete);
 567        }
 568
 569        /* Check indirect done status */
 570        ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
 571                                 CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
 572        if (ret) {
 573                dev_err(nor->dev,
 574                        "Indirect read completion error (%i)\n", ret);
 575                goto failrd;
 576        }
 577
 578        /* Disable interrupt */
 579        writel(0, reg_base + CQSPI_REG_IRQMASK);
 580
 581        /* Clear indirect completion status */
 582        writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);
 583
 584        return 0;
 585
 586failrd:
 587        /* Disable interrupt */
 588        writel(0, reg_base + CQSPI_REG_IRQMASK);
 589
 590        /* Cancel the indirect read */
 591        writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
 592               reg_base + CQSPI_REG_INDIRECTRD);
 593        return ret;
 594}
 595
 596static int cqspi_write_setup(struct spi_nor *nor)
 597{
 598        unsigned int reg;
 599        struct cqspi_flash_pdata *f_pdata = nor->priv;
 600        struct cqspi_st *cqspi = f_pdata->cqspi;
 601        void __iomem *reg_base = cqspi->iobase;
 602
 603        /* Set opcode. */
 604        reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
 605        writel(reg, reg_base + CQSPI_REG_WR_INSTR);
 606        reg = cqspi_calc_rdreg(nor, nor->program_opcode);
 607        writel(reg, reg_base + CQSPI_REG_RD_INSTR);
 608
 609        reg = readl(reg_base + CQSPI_REG_SIZE);
 610        reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
 611        reg |= (nor->addr_width - 1);
 612        writel(reg, reg_base + CQSPI_REG_SIZE);
 613        return 0;
 614}
 615
/*
 * Write @n_tx bytes from @txbuf to flash offset @to_addr via the
 * indirect write engine, feeding the controller FIFO through the AHB
 * window one flash page at a time.
 *
 * Returns 0 on success or a negative errno.
 */
static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
					const u8 *txbuf, const size_t n_tx)
{
	const unsigned int page_size = nor->page_size;
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;

	writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	/*
	 * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
	 * Controller programming sequence, couple of cycles of
	 * QSPI_REF_CLK delay is required for the above bit to
	 * be internally synchronized by the QSPI module. Provide 5
	 * cycles of delay.
	 */
	if (cqspi->wr_delay)
		ndelay(cqspi->wr_delay);

	while (remaining > 0) {
		size_t write_words, mod_bytes;

		/* Feed at most one flash page per completion interval. */
		write_bytes = remaining > page_size ? page_size : remaining;
		write_words = write_bytes / 4;
		mod_bytes = write_bytes % 4;
		/* Write 4 bytes at a time then single bytes. */
		if (write_words) {
			iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
			txbuf += (write_words * 4);
		}
		if (mod_bytes) {
			/* Pad the final partial word with 0xFF (erased). */
			unsigned int temp = 0xFFFFFFFF;

			memcpy(&temp, txbuf, mod_bytes);
			iowrite32(temp, cqspi->ahb_base);
			txbuf += mod_bytes;
		}

		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
					msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
			dev_err(nor->dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		remaining -= write_bytes;

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
	if (ret) {
		dev_err(nor->dev,
			"Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}
 708
 709static void cqspi_chipselect(struct spi_nor *nor)
 710{
 711        struct cqspi_flash_pdata *f_pdata = nor->priv;
 712        struct cqspi_st *cqspi = f_pdata->cqspi;
 713        void __iomem *reg_base = cqspi->iobase;
 714        unsigned int chip_select = f_pdata->cs;
 715        unsigned int reg;
 716
 717        reg = readl(reg_base + CQSPI_REG_CONFIG);
 718        if (cqspi->is_decoded_cs) {
 719                reg |= CQSPI_REG_CONFIG_DECODE_MASK;
 720        } else {
 721                reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;
 722
 723                /* Convert CS if without decoder.
 724                 * CS0 to 4b'1110
 725                 * CS1 to 4b'1101
 726                 * CS2 to 4b'1011
 727                 * CS3 to 4b'0111
 728                 */
 729                chip_select = 0xF & ~(1 << chip_select);
 730        }
 731
 732        reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
 733                 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
 734        reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
 735            << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
 736        writel(reg, reg_base + CQSPI_REG_CONFIG);
 737}
 738
 739static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
 740{
 741        struct cqspi_flash_pdata *f_pdata = nor->priv;
 742        struct cqspi_st *cqspi = f_pdata->cqspi;
 743        void __iomem *iobase = cqspi->iobase;
 744        unsigned int reg;
 745
 746        /* configure page size and block size. */
 747        reg = readl(iobase + CQSPI_REG_SIZE);
 748        reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
 749        reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
 750        reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
 751        reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
 752        reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
 753        reg |= (nor->addr_width - 1);
 754        writel(reg, iobase + CQSPI_REG_SIZE);
 755
 756        /* configure the chip select */
 757        cqspi_chipselect(nor);
 758
 759        /* Store the new configuration of the controller */
 760        cqspi->current_page_size = nor->page_size;
 761        cqspi->current_erase_size = nor->mtd.erasesize;
 762        cqspi->current_addr_width = nor->addr_width;
 763}
 764
 765static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
 766                                           const unsigned int ns_val)
 767{
 768        unsigned int ticks;
 769
 770        ticks = ref_clk_hz / 1000;      /* kHz */
 771        ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);
 772
 773        return ticks;
 774}
 775
 776static void cqspi_delay(struct spi_nor *nor)
 777{
 778        struct cqspi_flash_pdata *f_pdata = nor->priv;
 779        struct cqspi_st *cqspi = f_pdata->cqspi;
 780        void __iomem *iobase = cqspi->iobase;
 781        const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
 782        unsigned int tshsl, tchsh, tslch, tsd2d;
 783        unsigned int reg;
 784        unsigned int tsclk;
 785
 786        /* calculate the number of ref ticks for one sclk tick */
 787        tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);
 788
 789        tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
 790        /* this particular value must be at least one sclk */
 791        if (tshsl < tsclk)
 792                tshsl = tsclk;
 793
 794        tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
 795        tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
 796        tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);
 797
 798        reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
 799               << CQSPI_REG_DELAY_TSHSL_LSB;
 800        reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
 801                << CQSPI_REG_DELAY_TCHSH_LSB;
 802        reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
 803                << CQSPI_REG_DELAY_TSLCH_LSB;
 804        reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
 805                << CQSPI_REG_DELAY_TSD2D_LSB;
 806        writel(reg, iobase + CQSPI_REG_DELAY);
 807}
 808
 809static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
 810{
 811        const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
 812        void __iomem *reg_base = cqspi->iobase;
 813        u32 reg, div;
 814
 815        /* Recalculate the baudrate divisor based on QSPI specification. */
 816        div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
 817
 818        reg = readl(reg_base + CQSPI_REG_CONFIG);
 819        reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
 820        reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
 821        writel(reg, reg_base + CQSPI_REG_CONFIG);
 822}
 823
 824static void cqspi_readdata_capture(struct cqspi_st *cqspi,
 825                                   const bool bypass,
 826                                   const unsigned int delay)
 827{
 828        void __iomem *reg_base = cqspi->iobase;
 829        unsigned int reg;
 830
 831        reg = readl(reg_base + CQSPI_REG_READCAPTURE);
 832
 833        if (bypass)
 834                reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
 835        else
 836                reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
 837
 838        reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
 839                 << CQSPI_REG_READCAPTURE_DELAY_LSB);
 840
 841        reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
 842                << CQSPI_REG_READCAPTURE_DELAY_LSB;
 843
 844        writel(reg, reg_base + CQSPI_REG_READCAPTURE);
 845}
 846
 847static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
 848{
 849        void __iomem *reg_base = cqspi->iobase;
 850        unsigned int reg;
 851
 852        reg = readl(reg_base + CQSPI_REG_CONFIG);
 853
 854        if (enable)
 855                reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
 856        else
 857                reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
 858
 859        writel(reg, reg_base + CQSPI_REG_CONFIG);
 860}
 861
 862static void cqspi_configure(struct spi_nor *nor)
 863{
 864        struct cqspi_flash_pdata *f_pdata = nor->priv;
 865        struct cqspi_st *cqspi = f_pdata->cqspi;
 866        const unsigned int sclk = f_pdata->clk_rate;
 867        int switch_cs = (cqspi->current_cs != f_pdata->cs);
 868        int switch_ck = (cqspi->sclk != sclk);
 869
 870        if ((cqspi->current_page_size != nor->page_size) ||
 871            (cqspi->current_erase_size != nor->mtd.erasesize) ||
 872            (cqspi->current_addr_width != nor->addr_width))
 873                switch_cs = 1;
 874
 875        if (switch_cs || switch_ck)
 876                cqspi_controller_enable(cqspi, 0);
 877
 878        /* Switch chip select. */
 879        if (switch_cs) {
 880                cqspi->current_cs = f_pdata->cs;
 881                cqspi_configure_cs_and_sizes(nor);
 882        }
 883
 884        /* Setup baudrate divisor and delays */
 885        if (switch_ck) {
 886                cqspi->sclk = sclk;
 887                cqspi_config_baudrate_div(cqspi);
 888                cqspi_delay(nor);
 889                cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
 890                                       f_pdata->read_delay);
 891        }
 892
 893        if (switch_cs || switch_ck)
 894                cqspi_controller_enable(cqspi, 1);
 895}
 896
 897static int cqspi_set_protocol(struct spi_nor *nor, const int read)
 898{
 899        struct cqspi_flash_pdata *f_pdata = nor->priv;
 900
 901        f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
 902        f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
 903        f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
 904
 905        if (read) {
 906                switch (nor->read_proto) {
 907                case SNOR_PROTO_1_1_1:
 908                        f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
 909                        break;
 910                case SNOR_PROTO_1_1_2:
 911                        f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
 912                        break;
 913                case SNOR_PROTO_1_1_4:
 914                        f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
 915                        break;
 916                case SNOR_PROTO_1_1_8:
 917                        f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
 918                        break;
 919                default:
 920                        return -EINVAL;
 921                }
 922        }
 923
 924        cqspi_configure(nor);
 925
 926        return 0;
 927}
 928
 929static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
 930                           size_t len, const u_char *buf)
 931{
 932        struct cqspi_flash_pdata *f_pdata = nor->priv;
 933        struct cqspi_st *cqspi = f_pdata->cqspi;
 934        int ret;
 935
 936        ret = cqspi_set_protocol(nor, 0);
 937        if (ret)
 938                return ret;
 939
 940        ret = cqspi_write_setup(nor);
 941        if (ret)
 942                return ret;
 943
 944        if (f_pdata->use_direct_mode) {
 945                memcpy_toio(cqspi->ahb_base + to, buf, len);
 946                ret = cqspi_wait_idle(cqspi);
 947        } else {
 948                ret = cqspi_indirect_write_execute(nor, to, buf, len);
 949        }
 950        if (ret)
 951                return ret;
 952
 953        return len;
 954}
 955
 956static void cqspi_rx_dma_callback(void *param)
 957{
 958        struct cqspi_st *cqspi = param;
 959
 960        complete(&cqspi->rx_dma_complete);
 961}
 962
 963static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
 964                                     loff_t from, size_t len)
 965{
 966        struct cqspi_flash_pdata *f_pdata = nor->priv;
 967        struct cqspi_st *cqspi = f_pdata->cqspi;
 968        enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 969        dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
 970        int ret = 0;
 971        struct dma_async_tx_descriptor *tx;
 972        dma_cookie_t cookie;
 973        dma_addr_t dma_dst;
 974
 975        if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
 976                memcpy_fromio(buf, cqspi->ahb_base + from, len);
 977                return 0;
 978        }
 979
 980        dma_dst = dma_map_single(nor->dev, buf, len, DMA_FROM_DEVICE);
 981        if (dma_mapping_error(nor->dev, dma_dst)) {
 982                dev_err(nor->dev, "dma mapping failed\n");
 983                return -ENOMEM;
 984        }
 985        tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
 986                                       len, flags);
 987        if (!tx) {
 988                dev_err(nor->dev, "device_prep_dma_memcpy error\n");
 989                ret = -EIO;
 990                goto err_unmap;
 991        }
 992
 993        tx->callback = cqspi_rx_dma_callback;
 994        tx->callback_param = cqspi;
 995        cookie = tx->tx_submit(tx);
 996        reinit_completion(&cqspi->rx_dma_complete);
 997
 998        ret = dma_submit_error(cookie);
 999        if (ret) {
1000                dev_err(nor->dev, "dma_submit_error %d\n", cookie);
1001                ret = -EIO;
1002                goto err_unmap;
1003        }
1004
1005        dma_async_issue_pending(cqspi->rx_chan);
1006        if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
1007                                         msecs_to_jiffies(len))) {
1008                dmaengine_terminate_sync(cqspi->rx_chan);
1009                dev_err(nor->dev, "DMA wait_for_completion_timeout\n");
1010                ret = -ETIMEDOUT;
1011                goto err_unmap;
1012        }
1013
1014err_unmap:
1015        dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
1016
1017        return ret;
1018}
1019
1020static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
1021                          size_t len, u_char *buf)
1022{
1023        struct cqspi_flash_pdata *f_pdata = nor->priv;
1024        int ret;
1025
1026        ret = cqspi_set_protocol(nor, 1);
1027        if (ret)
1028                return ret;
1029
1030        ret = cqspi_read_setup(nor);
1031        if (ret)
1032                return ret;
1033
1034        if (f_pdata->use_direct_mode)
1035                ret = cqspi_direct_read_execute(nor, buf, from, len);
1036        else
1037                ret = cqspi_indirect_read_execute(nor, buf, from, len);
1038        if (ret)
1039                return ret;
1040
1041        return len;
1042}
1043
1044static int cqspi_erase(struct spi_nor *nor, loff_t offs)
1045{
1046        int ret;
1047
1048        ret = cqspi_set_protocol(nor, 0);
1049        if (ret)
1050                return ret;
1051
1052        /* Send write enable, then erase commands. */
1053        ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
1054        if (ret)
1055                return ret;
1056
1057        /* Set up command buffer. */
1058        ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
1059        if (ret)
1060                return ret;
1061
1062        return 0;
1063}
1064
1065static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
1066{
1067        struct cqspi_flash_pdata *f_pdata = nor->priv;
1068        struct cqspi_st *cqspi = f_pdata->cqspi;
1069
1070        mutex_lock(&cqspi->bus_mutex);
1071
1072        return 0;
1073}
1074
1075static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
1076{
1077        struct cqspi_flash_pdata *f_pdata = nor->priv;
1078        struct cqspi_st *cqspi = f_pdata->cqspi;
1079
1080        mutex_unlock(&cqspi->bus_mutex);
1081}
1082
1083static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
1084{
1085        int ret;
1086
1087        ret = cqspi_set_protocol(nor, 0);
1088        if (!ret)
1089                ret = cqspi_command_read(nor, &opcode, 1, buf, len);
1090
1091        return ret;
1092}
1093
1094static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
1095{
1096        int ret;
1097
1098        ret = cqspi_set_protocol(nor, 0);
1099        if (!ret)
1100                ret = cqspi_command_write(nor, opcode, buf, len);
1101
1102        return ret;
1103}
1104
1105static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
1106                                    struct cqspi_flash_pdata *f_pdata,
1107                                    struct device_node *np)
1108{
1109        if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
1110                dev_err(&pdev->dev, "couldn't determine read-delay\n");
1111                return -ENXIO;
1112        }
1113
1114        if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
1115                dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
1116                return -ENXIO;
1117        }
1118
1119        if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
1120                dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
1121                return -ENXIO;
1122        }
1123
1124        if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
1125                dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
1126                return -ENXIO;
1127        }
1128
1129        if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
1130                dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
1131                return -ENXIO;
1132        }
1133
1134        if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
1135                dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
1136                return -ENXIO;
1137        }
1138
1139        return 0;
1140}
1141
1142static int cqspi_of_get_pdata(struct platform_device *pdev)
1143{
1144        struct device_node *np = pdev->dev.of_node;
1145        struct cqspi_st *cqspi = platform_get_drvdata(pdev);
1146
1147        cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
1148
1149        if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
1150                dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
1151                return -ENXIO;
1152        }
1153
1154        if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
1155                dev_err(&pdev->dev, "couldn't determine fifo-width\n");
1156                return -ENXIO;
1157        }
1158
1159        if (of_property_read_u32(np, "cdns,trigger-address",
1160                                 &cqspi->trigger_address)) {
1161                dev_err(&pdev->dev, "couldn't determine trigger-address\n");
1162                return -ENXIO;
1163        }
1164
1165        cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
1166
1167        return 0;
1168}
1169
/*
 * One-time controller initialization, performed with the controller
 * disabled: clear address remapping, mask all interrupts, split the SRAM
 * FIFO 1:1 between reads and writes, program the indirect trigger address
 * and watermarks, and enable the direct access controller.
 */
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
        u32 reg;

        cqspi_controller_enable(cqspi, 0);

        /* Configure the remap address register, no remap */
        writel(0, cqspi->iobase + CQSPI_REG_REMAP);

        /* Disable all interrupts. */
        writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

        /* Configure the SRAM split to 1:1 . */
        writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);

        /* Load indirect trigger address. */
        writel(cqspi->trigger_address,
               cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

        /* Program read watermark -- 1/2 of the FIFO. */
        writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
               cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
        /* Program write watermark -- 1/8 of the FIFO. */
        writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
               cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);

        /* Enable Direct Access Controller */
        reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
        reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
        writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

        cqspi_controller_enable(cqspi, 1);
}
1203
/*
 * Try to acquire any memcpy-capable DMA channel to accelerate direct
 * (memory-mapped) reads. Failure is non-fatal: rx_chan is left NULL and
 * cqspi_direct_read_execute() falls back to a CPU memcpy_fromio().
 */
static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        cqspi->rx_chan = dma_request_chan_by_mask(&mask);
        if (IS_ERR(cqspi->rx_chan)) {
                dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
                cqspi->rx_chan = NULL;
        }
        init_completion(&cqspi->rx_dma_complete);
}
1218
1219static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
1220{
1221        struct platform_device *pdev = cqspi->pdev;
1222        struct device *dev = &pdev->dev;
1223        const struct cqspi_driver_platdata *ddata;
1224        struct spi_nor_hwcaps hwcaps;
1225        struct cqspi_flash_pdata *f_pdata;
1226        struct spi_nor *nor;
1227        struct mtd_info *mtd;
1228        unsigned int cs;
1229        int i, ret;
1230
1231        ddata = of_device_get_match_data(dev);
1232        if (!ddata) {
1233                dev_err(dev, "Couldn't find driver data\n");
1234                return -EINVAL;
1235        }
1236        hwcaps.mask = ddata->hwcaps_mask;
1237
1238        /* Get flash device data */
1239        for_each_available_child_of_node(dev->of_node, np) {
1240                ret = of_property_read_u32(np, "reg", &cs);
1241                if (ret) {
1242                        dev_err(dev, "Couldn't determine chip select.\n");
1243                        goto err;
1244                }
1245
1246                if (cs >= CQSPI_MAX_CHIPSELECT) {
1247                        ret = -EINVAL;
1248                        dev_err(dev, "Chip select %d out of range.\n", cs);
1249                        goto err;
1250                }
1251
1252                f_pdata = &cqspi->f_pdata[cs];
1253                f_pdata->cqspi = cqspi;
1254                f_pdata->cs = cs;
1255
1256                ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
1257                if (ret)
1258                        goto err;
1259
1260                nor = &f_pdata->nor;
1261                mtd = &nor->mtd;
1262
1263                mtd->priv = nor;
1264
1265                nor->dev = dev;
1266                spi_nor_set_flash_node(nor, np);
1267                nor->priv = f_pdata;
1268
1269                nor->read_reg = cqspi_read_reg;
1270                nor->write_reg = cqspi_write_reg;
1271                nor->read = cqspi_read;
1272                nor->write = cqspi_write;
1273                nor->erase = cqspi_erase;
1274                nor->prepare = cqspi_prep;
1275                nor->unprepare = cqspi_unprep;
1276
1277                mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
1278                                           dev_name(dev), cs);
1279                if (!mtd->name) {
1280                        ret = -ENOMEM;
1281                        goto err;
1282                }
1283
1284                ret = spi_nor_scan(nor, NULL, &hwcaps);
1285                if (ret)
1286                        goto err;
1287
1288                ret = mtd_device_register(mtd, NULL, 0);
1289                if (ret)
1290                        goto err;
1291
1292                f_pdata->registered = true;
1293
1294                if (mtd->size <= cqspi->ahb_size) {
1295                        f_pdata->use_direct_mode = true;
1296                        dev_dbg(nor->dev, "using direct mode for %s\n",
1297                                mtd->name);
1298
1299                        if (!cqspi->rx_chan)
1300                                cqspi_request_mmap_dma(cqspi);
1301                }
1302        }
1303
1304        return 0;
1305
1306err:
1307        for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
1308                if (cqspi->f_pdata[i].registered)
1309                        mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
1310        return ret;
1311}
1312
1313static int cqspi_probe(struct platform_device *pdev)
1314{
1315        struct device_node *np = pdev->dev.of_node;
1316        struct device *dev = &pdev->dev;
1317        struct cqspi_st *cqspi;
1318        struct resource *res;
1319        struct resource *res_ahb;
1320        struct reset_control *rstc, *rstc_ocp;
1321        const struct cqspi_driver_platdata *ddata;
1322        int ret;
1323        int irq;
1324
1325        cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
1326        if (!cqspi)
1327                return -ENOMEM;
1328
1329        mutex_init(&cqspi->bus_mutex);
1330        cqspi->pdev = pdev;
1331        platform_set_drvdata(pdev, cqspi);
1332
1333        /* Obtain configuration from OF. */
1334        ret = cqspi_of_get_pdata(pdev);
1335        if (ret) {
1336                dev_err(dev, "Cannot get mandatory OF data.\n");
1337                return -ENODEV;
1338        }
1339
1340        /* Obtain QSPI clock. */
1341        cqspi->clk = devm_clk_get(dev, NULL);
1342        if (IS_ERR(cqspi->clk)) {
1343                dev_err(dev, "Cannot claim QSPI clock.\n");
1344                return PTR_ERR(cqspi->clk);
1345        }
1346
1347        /* Obtain and remap controller address. */
1348        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1349        cqspi->iobase = devm_ioremap_resource(dev, res);
1350        if (IS_ERR(cqspi->iobase)) {
1351                dev_err(dev, "Cannot remap controller address.\n");
1352                return PTR_ERR(cqspi->iobase);
1353        }
1354
1355        /* Obtain and remap AHB address. */
1356        res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1357        cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
1358        if (IS_ERR(cqspi->ahb_base)) {
1359                dev_err(dev, "Cannot remap AHB address.\n");
1360                return PTR_ERR(cqspi->ahb_base);
1361        }
1362        cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
1363        cqspi->ahb_size = resource_size(res_ahb);
1364
1365        init_completion(&cqspi->transfer_complete);
1366
1367        /* Obtain IRQ line. */
1368        irq = platform_get_irq(pdev, 0);
1369        if (irq < 0) {
1370                dev_err(dev, "Cannot obtain IRQ.\n");
1371                return -ENXIO;
1372        }
1373
1374        pm_runtime_enable(dev);
1375        ret = pm_runtime_get_sync(dev);
1376        if (ret < 0) {
1377                pm_runtime_put_noidle(dev);
1378                return ret;
1379        }
1380
1381        ret = clk_prepare_enable(cqspi->clk);
1382        if (ret) {
1383                dev_err(dev, "Cannot enable QSPI clock.\n");
1384                goto probe_clk_failed;
1385        }
1386
1387        /* Obtain QSPI reset control */
1388        rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
1389        if (IS_ERR(rstc)) {
1390                dev_err(dev, "Cannot get QSPI reset.\n");
1391                return PTR_ERR(rstc);
1392        }
1393
1394        rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
1395        if (IS_ERR(rstc_ocp)) {
1396                dev_err(dev, "Cannot get QSPI OCP reset.\n");
1397                return PTR_ERR(rstc_ocp);
1398        }
1399
1400        reset_control_assert(rstc);
1401        reset_control_deassert(rstc);
1402
1403        reset_control_assert(rstc_ocp);
1404        reset_control_deassert(rstc_ocp);
1405
1406        cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
1407        ddata  = of_device_get_match_data(dev);
1408        if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY))
1409                cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
1410                                                   cqspi->master_ref_clk_hz);
1411
1412        ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
1413                               pdev->name, cqspi);
1414        if (ret) {
1415                dev_err(dev, "Cannot request IRQ.\n");
1416                goto probe_irq_failed;
1417        }
1418
1419        cqspi_wait_idle(cqspi);
1420        cqspi_controller_init(cqspi);
1421        cqspi->current_cs = -1;
1422        cqspi->sclk = 0;
1423
1424        ret = cqspi_setup_flash(cqspi, np);
1425        if (ret) {
1426                dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
1427                goto probe_setup_failed;
1428        }
1429
1430        return ret;
1431probe_setup_failed:
1432        cqspi_controller_enable(cqspi, 0);
1433probe_irq_failed:
1434        clk_disable_unprepare(cqspi->clk);
1435probe_clk_failed:
1436        pm_runtime_put_sync(dev);
1437        pm_runtime_disable(dev);
1438        return ret;
1439}
1440
1441static int cqspi_remove(struct platform_device *pdev)
1442{
1443        struct cqspi_st *cqspi = platform_get_drvdata(pdev);
1444        int i;
1445
1446        for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
1447                if (cqspi->f_pdata[i].registered)
1448                        mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
1449
1450        cqspi_controller_enable(cqspi, 0);
1451
1452        if (cqspi->rx_chan)
1453                dma_release_channel(cqspi->rx_chan);
1454
1455        clk_disable_unprepare(cqspi->clk);
1456
1457        pm_runtime_put_sync(&pdev->dev);
1458        pm_runtime_disable(&pdev->dev);
1459
1460        return 0;
1461}
1462
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: quiesce the controller by clearing its enable bit.
 * NOTE(review): the configuration is not reprogrammed on resume, so this
 * assumes register contents survive the sleep state — confirm for targets
 * that power-gate the controller.
 */
static int cqspi_suspend(struct device *dev)
{
        struct cqspi_st *cqspi = dev_get_drvdata(dev);

        cqspi_controller_enable(cqspi, 0);
        return 0;
}

/* System resume: re-enable the controller with its pre-suspend setup. */
static int cqspi_resume(struct device *dev)
{
        struct cqspi_st *cqspi = dev_get_drvdata(dev);

        cqspi_controller_enable(cqspi, 1);
        return 0;
}

static const struct dev_pm_ops cqspi__dev_pm_ops = {
        .suspend = cqspi_suspend,
        .resume = cqspi_resume,
};

#define CQSPI_DEV_PM_OPS        (&cqspi__dev_pm_ops)
#else
/* No PM-sleep support configured: register no PM callbacks. */
#define CQSPI_DEV_PM_OPS        NULL
#endif
1489
/* Generic Cadence QSPI: base read/write capabilities, no quirks. */
static const struct cqspi_driver_platdata cdns_qspi = {
        .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
};

/* TI K2G integration: needs the post-write delay quirk. */
static const struct cqspi_driver_platdata k2g_qspi = {
        .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
        .quirks = CQSPI_NEEDS_WR_DELAY,
};

/* TI AM654 OSPI: adds octal (1-1-8) reads on top of the base set. */
static const struct cqspi_driver_platdata am654_ospi = {
        .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK | SNOR_HWCAPS_READ_1_1_8,
        .quirks = CQSPI_NEEDS_WR_DELAY,
};
1503
/* DT match table: maps each compatible to its capability/quirk data. */
static const struct of_device_id cqspi_dt_ids[] = {
        {
                .compatible = "cdns,qspi-nor",
                .data = &cdns_qspi,
        },
        {
                .compatible = "ti,k2g-qspi",
                .data = &k2g_qspi,
        },
        {
                .compatible = "ti,am654-ospi",
                .data = &am654_ospi,
        },
        { /* end of table */ }
};
1519
1520MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
1521
/* Platform driver glue: probe/remove, PM ops and the OF match table. */
static struct platform_driver cqspi_platform_driver = {
        .probe = cqspi_probe,
        .remove = cqspi_remove,
        .driver = {
                .name = CQSPI_NAME,
                .pm = CQSPI_DEV_PM_OPS,
                .of_match_table = cqspi_dt_ids,
        },
};
1531
1532module_platform_driver(cqspi_platform_driver);
1533
1534MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
1535MODULE_LICENSE("GPL v2");
1536MODULE_ALIAS("platform:" CQSPI_NAME);
1537MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
1538MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
1539