/* drivers/spi/spi-stm32-qspi.c - STM32 QUADSPI controller driver */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
   4 * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
   5 */
   6#include <linux/bitfield.h>
   7#include <linux/clk.h>
   8#include <linux/dmaengine.h>
   9#include <linux/dma-mapping.h>
  10#include <linux/errno.h>
  11#include <linux/io.h>
  12#include <linux/iopoll.h>
  13#include <linux/interrupt.h>
  14#include <linux/module.h>
  15#include <linux/mutex.h>
  16#include <linux/of.h>
  17#include <linux/of_device.h>
  18#include <linux/pinctrl/consumer.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/platform_device.h>
  21#include <linux/reset.h>
  22#include <linux/sizes.h>
  23#include <linux/spi/spi-mem.h>
  24
  25#define QSPI_CR                 0x00
  26#define CR_EN                   BIT(0)
  27#define CR_ABORT                BIT(1)
  28#define CR_DMAEN                BIT(2)
  29#define CR_TCEN                 BIT(3)
  30#define CR_SSHIFT               BIT(4)
  31#define CR_DFM                  BIT(6)
  32#define CR_FSEL                 BIT(7)
  33#define CR_FTHRES_SHIFT         8
  34#define CR_TEIE                 BIT(16)
  35#define CR_TCIE                 BIT(17)
  36#define CR_FTIE                 BIT(18)
  37#define CR_SMIE                 BIT(19)
  38#define CR_TOIE                 BIT(20)
  39#define CR_PRESC_MASK           GENMASK(31, 24)
  40
  41#define QSPI_DCR                0x04
  42#define DCR_FSIZE_MASK          GENMASK(20, 16)
  43
  44#define QSPI_SR                 0x08
  45#define SR_TEF                  BIT(0)
  46#define SR_TCF                  BIT(1)
  47#define SR_FTF                  BIT(2)
  48#define SR_SMF                  BIT(3)
  49#define SR_TOF                  BIT(4)
  50#define SR_BUSY                 BIT(5)
  51#define SR_FLEVEL_MASK          GENMASK(13, 8)
  52
  53#define QSPI_FCR                0x0c
  54#define FCR_CTEF                BIT(0)
  55#define FCR_CTCF                BIT(1)
  56
  57#define QSPI_DLR                0x10
  58
  59#define QSPI_CCR                0x14
  60#define CCR_INST_MASK           GENMASK(7, 0)
  61#define CCR_IMODE_MASK          GENMASK(9, 8)
  62#define CCR_ADMODE_MASK         GENMASK(11, 10)
  63#define CCR_ADSIZE_MASK         GENMASK(13, 12)
  64#define CCR_DCYC_MASK           GENMASK(22, 18)
  65#define CCR_DMODE_MASK          GENMASK(25, 24)
  66#define CCR_FMODE_MASK          GENMASK(27, 26)
  67#define CCR_FMODE_INDW          (0U << 26)
  68#define CCR_FMODE_INDR          (1U << 26)
  69#define CCR_FMODE_APM           (2U << 26)
  70#define CCR_FMODE_MM            (3U << 26)
  71#define CCR_BUSWIDTH_0          0x0
  72#define CCR_BUSWIDTH_1          0x1
  73#define CCR_BUSWIDTH_2          0x2
  74#define CCR_BUSWIDTH_4          0x3
  75
  76#define QSPI_AR                 0x18
  77#define QSPI_ABR                0x1c
  78#define QSPI_DR                 0x20
  79#define QSPI_PSMKR              0x24
  80#define QSPI_PSMAR              0x28
  81#define QSPI_PIR                0x2c
  82#define QSPI_LPTR               0x30
  83
  84#define STM32_QSPI_MAX_MMAP_SZ  SZ_256M
  85#define STM32_QSPI_MAX_NORCHIP  2
  86
  87#define STM32_FIFO_TIMEOUT_US 30000
  88#define STM32_BUSY_TIMEOUT_US 100000
  89#define STM32_ABT_TIMEOUT_US 100000
  90#define STM32_COMP_TIMEOUT_MS 1000
  91#define STM32_AUTOSUSPEND_DELAY -1
  92
/* per-chip-select state: one instance per flash bank */
struct stm32_qspi_flash {
	struct stm32_qspi *qspi;	/* back-pointer to the owning controller */
	u32 cs;				/* chip select: 0 = bank 1, 1 = bank 2 (drives CR_FSEL) */
	u32 presc;			/* baud prescaler programmed into CR_PRESC */
};
  98
/* controller state shared by both flash banks */
struct stm32_qspi {
	struct device *dev;
	struct spi_controller *ctrl;
	phys_addr_t phys_base;		/* register base; used to build DMA slave addresses */
	void __iomem *io_base;		/* mapped control registers ("qspi" resource) */
	void __iomem *mm_base;		/* mapped memory-mapped flash window ("qspi_mm") */
	resource_size_t mm_size;	/* size of the mm window (capped at 256M in probe) */
	struct clk *clk;
	u32 clk_rate;			/* cached clock rate for prescaler computation */
	struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
	struct completion data_completion;	/* completed from the TCF/TEF irq handler */
	u32 fmode;			/* current CCR functional mode (INDR/INDW/MM) */

	struct dma_chan *dma_chtx;	/* optional TX DMA channel, NULL when absent */
	struct dma_chan *dma_chrx;	/* optional RX DMA channel, NULL when absent */
	struct completion dma_completion;	/* completed from the DMA callback */

	u32 cr_reg;			/* CR shadow, restored on system resume */
	u32 dcr_reg;			/* DCR shadow, restored on system resume */

	/*
	 * to protect device configuration, could be different between
	 * 2 flash access (bk1, bk2)
	 */
	struct mutex lock;
};
 125
 126static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
 127{
 128        struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
 129        u32 cr, sr;
 130
 131        sr = readl_relaxed(qspi->io_base + QSPI_SR);
 132
 133        if (sr & (SR_TEF | SR_TCF)) {
 134                /* disable irq */
 135                cr = readl_relaxed(qspi->io_base + QSPI_CR);
 136                cr &= ~CR_TCIE & ~CR_TEIE;
 137                writel_relaxed(cr, qspi->io_base + QSPI_CR);
 138                complete(&qspi->data_completion);
 139        }
 140
 141        return IRQ_HANDLED;
 142}
 143
/* pop one byte from the QSPI data register into *val */
static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
{
	*val = readb_relaxed(addr);
}
 148
/* push one byte from *val into the QSPI data register */
static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
{
	writeb_relaxed(*val, addr);
}
 153
 154static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
 155                              const struct spi_mem_op *op)
 156{
 157        void (*tx_fifo)(u8 *val, void __iomem *addr);
 158        u32 len = op->data.nbytes, sr;
 159        u8 *buf;
 160        int ret;
 161
 162        if (op->data.dir == SPI_MEM_DATA_IN) {
 163                tx_fifo = stm32_qspi_read_fifo;
 164                buf = op->data.buf.in;
 165
 166        } else {
 167                tx_fifo = stm32_qspi_write_fifo;
 168                buf = (u8 *)op->data.buf.out;
 169        }
 170
 171        while (len--) {
 172                ret = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR,
 173                                                        sr, (sr & SR_FTF), 1,
 174                                                        STM32_FIFO_TIMEOUT_US);
 175                if (ret) {
 176                        dev_err(qspi->dev, "fifo timeout (len:%d stat:%#x)\n",
 177                                len, sr);
 178                        return ret;
 179                }
 180                tx_fifo(buf++, qspi->io_base + QSPI_DR);
 181        }
 182
 183        return 0;
 184}
 185
/*
 * Memory-mapped read: the AHB window at mm_base mirrors the flash
 * contents, so the data is simply copied out of it at op->addr.val.
 */
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
			    const struct spi_mem_op *op)
{
	memcpy_fromio(op->data.buf.in, qspi->mm_base + op->addr.val,
		      op->data.nbytes);
	return 0;
}
 193
 194static void stm32_qspi_dma_callback(void *arg)
 195{
 196        struct completion *dma_completion = arg;
 197
 198        complete(dma_completion);
 199}
 200
/*
 * DMA transfer of op->data through the QSPI data register.
 *
 * The buffer is mapped by the SPI core, a slave_sg descriptor is armed,
 * and CR_DMAEN hands the FIFO handshake over to the DMA controller.
 * Returns 0 on success or a negative errno; on failure the caller falls
 * back to PIO (see stm32_qspi_tx()).
 */
static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
			     const struct spi_mem_op *op)
{
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction dma_dir;
	struct dma_chan *dma_ch;
	struct sg_table sgt;
	dma_cookie_t cookie;
	u32 cr, t_out;
	int err;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_dir = DMA_DEV_TO_MEM;
		dma_ch = qspi->dma_chrx;
	} else {
		dma_dir = DMA_MEM_TO_DEV;
		dma_ch = qspi->dma_chtx;
	}

	/*
	 * spi_map_buf return -EINVAL if the buffer is not DMA-able
	 * (DMA-able: in vmalloc | kmap | virt_addr_valid)
	 */
	err = spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt);
	if (err)
		return err;

	desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
				       dma_dir, DMA_PREP_INTERRUPT);
	if (!desc) {
		err = -ENOMEM;
		goto out_unmap;
	}

	/* snapshot CR so DMAEN can be set and cleared around the transfer */
	cr = readl_relaxed(qspi->io_base + QSPI_CR);

	reinit_completion(&qspi->dma_completion);
	desc->callback = stm32_qspi_dma_callback;
	desc->callback_param = &qspi->dma_completion;
	cookie = dmaengine_submit(desc);
	err = dma_submit_error(cookie);
	if (err)
		goto out;

	dma_async_issue_pending(dma_ch);

	/* enable the QSPI->DMA handshake only once the channel is armed */
	writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);

	/* scale the timeout with the number of scatterlist entries */
	t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
	if (!wait_for_completion_timeout(&qspi->dma_completion,
					 msecs_to_jiffies(t_out)))
		err = -ETIMEDOUT;

	if (err)
		dmaengine_terminate_all(dma_ch);

out:
	writel_relaxed(cr & ~CR_DMAEN, qspi->io_base + QSPI_CR);
out_unmap:
	spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt);

	return err;
}
 264
 265static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
 266{
 267        if (!op->data.nbytes)
 268                return 0;
 269
 270        if (qspi->fmode == CCR_FMODE_MM)
 271                return stm32_qspi_tx_mm(qspi, op);
 272        else if (((op->data.dir == SPI_MEM_DATA_IN && qspi->dma_chrx) ||
 273                 (op->data.dir == SPI_MEM_DATA_OUT && qspi->dma_chtx)) &&
 274                  op->data.nbytes > 4)
 275                if (!stm32_qspi_tx_dma(qspi, op))
 276                        return 0;
 277
 278        return stm32_qspi_tx_poll(qspi, op);
 279}
 280
/* poll the BUSY flag down; returns 0 or -ETIMEDOUT after 100 ms */
static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
{
	u32 sr;

	return readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr,
						 !(sr & SR_BUSY), 1,
						 STM32_BUSY_TIMEOUT_US);
}
 289
/*
 * Wait for the end of the current indirect-mode command.
 *
 * When data was transferred, arm the TCF/TEF interrupts and sleep on
 * data_completion (completed from stm32_qspi_irq()) - unless TCF is
 * already set, in which case the transfer already finished.  The status
 * flags are then cleared and, if no error occurred, BUSY is polled down.
 *
 * Returns 0 on success, -ETIMEDOUT on completion timeout or busy
 * timeout, -EIO when the transfer error flag (TEF) was raised.
 */
static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
			       const struct spi_mem_op *op)
{
	u32 cr, sr;
	int err = 0;

	/* commands without data only need the BUSY flag to drop */
	if (!op->data.nbytes)
		goto wait_nobusy;

	/* transfer may already be complete: skip the irq wait */
	if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
		goto out;

	reinit_completion(&qspi->data_completion);
	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);

	if (!wait_for_completion_timeout(&qspi->data_completion,
				msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
		err = -ETIMEDOUT;
	} else {
		sr = readl_relaxed(qspi->io_base + QSPI_SR);
		if (sr & SR_TEF)
			err = -EIO;
	}

out:
	/* clear flags */
	writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
wait_nobusy:
	if (!err)
		err = stm32_qspi_wait_nobusy(qspi);

	return err;
}
 324
 325static int stm32_qspi_get_mode(struct stm32_qspi *qspi, u8 buswidth)
 326{
 327        if (buswidth == 4)
 328                return CCR_BUSWIDTH_4;
 329
 330        return buswidth;
 331}
 332
/*
 * Issue one spi-mem operation in the mode selected by qspi->fmode.
 *
 * Programs the per-flash prescaler and bank select, the data length,
 * the command configuration register and (in indirect modes) the
 * address, then moves the data and waits for completion.  On any error,
 * and unconditionally after a memory-mapped access, the current
 * operation is aborted via CR_ABORT.
 *
 * Must be called with qspi->lock held (callers do so).
 */
static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	struct stm32_qspi_flash *flash = &qspi->flash[mem->spi->chip_select];
	u32 ccr, cr;
	int timeout, err = 0;

	dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.val, op->data.nbytes);

	/* the controller must be idle before reprogramming it */
	err = stm32_qspi_wait_nobusy(qspi);
	if (err)
		goto abort;

	/* select the target bank and its clock prescaler */
	cr = readl_relaxed(qspi->io_base + QSPI_CR);
	cr &= ~CR_PRESC_MASK & ~CR_FSEL;
	cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
	cr |= FIELD_PREP(CR_FSEL, flash->cs);
	writel_relaxed(cr, qspi->io_base + QSPI_CR);

	/* DLR holds length - 1 */
	if (op->data.nbytes)
		writel_relaxed(op->data.nbytes - 1,
			       qspi->io_base + QSPI_DLR);

	ccr = qspi->fmode;
	ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode);
	ccr |= FIELD_PREP(CCR_IMODE_MASK,
			  stm32_qspi_get_mode(qspi, op->cmd.buswidth));

	if (op->addr.nbytes) {
		ccr |= FIELD_PREP(CCR_ADMODE_MASK,
				  stm32_qspi_get_mode(qspi, op->addr.buswidth));
		/* ADSIZE holds address byte count - 1 */
		ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
	}

	/* dummy bytes are expressed in clock cycles on the given bus width */
	if (op->dummy.buswidth && op->dummy.nbytes)
		ccr |= FIELD_PREP(CCR_DCYC_MASK,
				  op->dummy.nbytes * 8 / op->dummy.buswidth);

	if (op->data.nbytes) {
		ccr |= FIELD_PREP(CCR_DMODE_MASK,
				  stm32_qspi_get_mode(qspi, op->data.buswidth));
	}

	/* writing CCR starts the command */
	writel_relaxed(ccr, qspi->io_base + QSPI_CCR);

	/* in MM mode the address comes from the AHB access, not QSPI_AR */
	if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
		writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);

	err = stm32_qspi_tx(qspi, op);

	/*
	 * Abort in:
	 * -error case
	 * -read memory map: prefetching must be stopped if we read the last
	 *  byte of device (device size - fifo size). like device size is not
	 *  knows, the prefetching is always stop.
	 */
	if (err || qspi->fmode == CCR_FMODE_MM)
		goto abort;

	/* wait end of tx in indirect mode */
	err = stm32_qspi_wait_cmd(qspi, op);
	if (err)
		goto abort;

	return 0;

abort:
	cr = readl_relaxed(qspi->io_base + QSPI_CR) | CR_ABORT;
	writel_relaxed(cr, qspi->io_base + QSPI_CR);

	/* wait clear of abort bit by hw */
	timeout = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_CR,
						    cr, !(cr & CR_ABORT), 1,
						    STM32_ABT_TIMEOUT_US);

	/* abort completion also raises TCF: clear it */
	writel_relaxed(FCR_CTCF, qspi->io_base + QSPI_FCR);

	if (err || timeout)
		dev_err(qspi->dev, "%s err:%d abort timeout:%d\n",
			__func__, err, timeout);

	return err;
}
 420
 421static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 422{
 423        struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
 424        int ret;
 425
 426        ret = pm_runtime_get_sync(qspi->dev);
 427        if (ret < 0) {
 428                pm_runtime_put_noidle(qspi->dev);
 429                return ret;
 430        }
 431
 432        mutex_lock(&qspi->lock);
 433        if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
 434                qspi->fmode = CCR_FMODE_INDR;
 435        else
 436                qspi->fmode = CCR_FMODE_INDW;
 437
 438        ret = stm32_qspi_send(mem, op);
 439        mutex_unlock(&qspi->lock);
 440
 441        pm_runtime_mark_last_busy(qspi->dev);
 442        pm_runtime_put_autosuspend(qspi->dev);
 443
 444        return ret;
 445}
 446
 447static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc)
 448{
 449        struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
 450
 451        if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
 452                return -EOPNOTSUPP;
 453
 454        /* should never happen, as mm_base == null is an error probe exit condition */
 455        if (!qspi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
 456                return -EOPNOTSUPP;
 457
 458        if (!qspi->mm_size)
 459                return -EOPNOTSUPP;
 460
 461        return 0;
 462}
 463
/*
 * spi-mem dirmap_read hook: read @len bytes at @offs within the mapping
 * into @buf, preferring the memory-mapped window and falling back to an
 * indirect read when the range does not fit in the window.
 *
 * Returns the number of bytes read or a negative errno.
 */
static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
	struct spi_mem_op op;
	u32 addr_max;
	int ret;

	ret = pm_runtime_get_sync(qspi->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(qspi->dev);
		return ret;
	}

	mutex_lock(&qspi->lock);
	/* make a local copy of desc op_tmpl and complete dirmap rdesc
	 * spi_mem_op template with offs, len and *buf in  order to get
	 * all needed transfer information into struct spi_mem_op
	 */
	memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
	dev_dbg(qspi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);

	op.data.nbytes = len;
	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;

	/*
	 * use the mm window only when the range ends strictly inside it
	 * (the +1 appears to keep controller prefetch in-window -
	 * NOTE(review): confirm against the QUADSPI reference manual);
	 * otherwise do an indirect read
	 */
	addr_max = op.addr.val + op.data.nbytes + 1;
	if (addr_max < qspi->mm_size && op.addr.buswidth)
		qspi->fmode = CCR_FMODE_MM;
	else
		qspi->fmode = CCR_FMODE_INDR;

	ret = stm32_qspi_send(desc->mem, &op);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return ret ?: len;
}
 504
/*
 * spi setup hook: compute the baud prescaler for spi->max_speed_hz,
 * record per-chip-select state, and program the controller enable /
 * FIFO-threshold and flash-size registers.
 *
 * Returns 0 on success, -EBUSY while the controller is in use,
 * -EINVAL for a zero max speed, or a runtime-PM error.
 */
static int stm32_qspi_setup(struct spi_device *spi)
{
	struct spi_controller *ctrl = spi->master;
	struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
	struct stm32_qspi_flash *flash;
	u32 presc;
	int ret;

	if (ctrl->busy)
		return -EBUSY;

	if (!spi->max_speed_hz)
		return -EINVAL;

	ret = pm_runtime_get_sync(qspi->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(qspi->dev);
		return ret;
	}

	/* round up so the actual clock never exceeds max_speed_hz */
	presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;

	flash = &qspi->flash[spi->chip_select];
	flash->qspi = qspi;
	flash->cs = spi->chip_select;
	flash->presc = presc;

	mutex_lock(&qspi->lock);
	/* FIFO threshold of 4 bytes, sample shift, controller enable */
	qspi->cr_reg = 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
	writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);

	/* set dcr fsize to max address */
	qspi->dcr_reg = DCR_FSIZE_MASK;
	writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
	mutex_unlock(&qspi->lock);

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);

	return 0;
}
 546
/*
 * Request the optional "rx"/"tx" DMA channels and configure them for
 * byte-wide transfers against the QSPI data register.
 *
 * A missing or unconfigurable channel is not fatal: the corresponding
 * pointer stays NULL and the driver falls back to PIO.  Only
 * -EPROBE_DEFER is propagated so probe can be retried once the DMA
 * controller shows up.
 */
static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
	struct dma_slave_config dma_cfg;
	struct device *dev = qspi->dev;
	int ret = 0;

	memset(&dma_cfg, 0, sizeof(dma_cfg));

	dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma_cfg.src_addr = qspi->phys_base + QSPI_DR;
	dma_cfg.dst_addr = qspi->phys_base + QSPI_DR;
	dma_cfg.src_maxburst = 4;
	dma_cfg.dst_maxburst = 4;

	qspi->dma_chrx = dma_request_chan(dev, "rx");
	if (IS_ERR(qspi->dma_chrx)) {
		ret = PTR_ERR(qspi->dma_chrx);
		qspi->dma_chrx = NULL;
		/* don't even try "tx" when the DMA provider isn't ready yet */
		if (ret == -EPROBE_DEFER)
			goto out;
	} else {
		if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
			dev_err(dev, "dma rx config failed\n");
			dma_release_channel(qspi->dma_chrx);
			qspi->dma_chrx = NULL;
		}
	}

	qspi->dma_chtx = dma_request_chan(dev, "tx");
	if (IS_ERR(qspi->dma_chtx)) {
		ret = PTR_ERR(qspi->dma_chtx);
		qspi->dma_chtx = NULL;
	} else {
		if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
			dev_err(dev, "dma tx config failed\n");
			dma_release_channel(qspi->dma_chtx);
			qspi->dma_chtx = NULL;
		}
	}

out:
	init_completion(&qspi->dma_completion);

	/* any failure other than probe deferral degrades to PIO silently */
	if (ret != -EPROBE_DEFER)
		ret = 0;

	return ret;
}
 596
/* release whichever optional DMA channels were successfully requested */
static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
{
	if (qspi->dma_chtx)
		dma_release_channel(qspi->dma_chtx);
	if (qspi->dma_chrx)
		dma_release_channel(qspi->dma_chrx);
}
 604
/*
 * no special host constraint, so use default spi_mem_default_supports_op
 * to check supported mode.
 */
static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
	.exec_op	= stm32_qspi_exec_op,
	.dirmap_create	= stm32_qspi_dirmap_create,
	.dirmap_read	= stm32_qspi_dirmap_read,
};
 614
/*
 * Probe: map the "qspi" register bank and "qspi_mm" flash window,
 * request the irq and clock, reset the block, set up optional DMA, and
 * register the spi-mem controller with runtime PM enabled
 * (autosuspend delay of -1 keeps the device suspended when idle).
 *
 * Error unwinding runs in reverse acquisition order via the goto chain.
 */
static int stm32_qspi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *ctrl;
	struct reset_control *rstc;
	struct stm32_qspi *qspi;
	struct resource *res;
	int ret, irq;

	ctrl = spi_alloc_master(dev, sizeof(*qspi));
	if (!ctrl)
		return -ENOMEM;

	qspi = spi_controller_get_devdata(ctrl);
	qspi->ctrl = ctrl;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
	qspi->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(qspi->io_base)) {
		ret = PTR_ERR(qspi->io_base);
		goto err_master_put;
	}

	/* physical base is needed later to program DMA slave addresses */
	qspi->phys_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
	qspi->mm_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(qspi->mm_base)) {
		ret = PTR_ERR(qspi->mm_base);
		goto err_master_put;
	}

	qspi->mm_size = resource_size(res);
	if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) {
		ret = -EINVAL;
		goto err_master_put;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_master_put;
	}

	ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
			       dev_name(dev), qspi);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_master_put;
	}

	init_completion(&qspi->data_completion);

	qspi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(qspi->clk)) {
		ret = PTR_ERR(qspi->clk);
		goto err_master_put;
	}

	qspi->clk_rate = clk_get_rate(qspi->clk);
	if (!qspi->clk_rate) {
		ret = -EINVAL;
		goto err_master_put;
	}

	ret = clk_prepare_enable(qspi->clk);
	if (ret) {
		dev_err(dev, "can not enable the clock\n");
		goto err_master_put;
	}

	/* the reset line is optional; only probe deferral is fatal */
	rstc = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		if (ret == -EPROBE_DEFER)
			goto err_clk_disable;
	} else {
		reset_control_assert(rstc);
		udelay(2);
		reset_control_deassert(rstc);
	}

	qspi->dev = dev;
	platform_set_drvdata(pdev, qspi);
	ret = stm32_qspi_dma_setup(qspi);
	if (ret)
		goto err_dma_free;

	mutex_init(&qspi->lock);

	ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
		| SPI_TX_DUAL | SPI_TX_QUAD;
	ctrl->setup = stm32_qspi_setup;
	ctrl->bus_num = -1;
	ctrl->mem_ops = &stm32_qspi_mem_ops;
	ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
	ctrl->dev.of_node = dev->of_node;

	/* device is active here; the put below lets it autosuspend */
	pm_runtime_set_autosuspend_delay(dev, STM32_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_noresume(dev);

	ret = devm_spi_register_master(dev, ctrl);
	if (ret)
		goto err_pm_runtime_free;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

err_pm_runtime_free:
	pm_runtime_get_sync(qspi->dev);
	/* disable qspi */
	writel_relaxed(0, qspi->io_base + QSPI_CR);
	mutex_destroy(&qspi->lock);
	pm_runtime_put_noidle(qspi->dev);
	pm_runtime_disable(qspi->dev);
	pm_runtime_set_suspended(qspi->dev);
	pm_runtime_dont_use_autosuspend(qspi->dev);
err_dma_free:
	stm32_qspi_dma_free(qspi);
err_clk_disable:
	clk_disable_unprepare(qspi->clk);
err_master_put:
	spi_master_put(qspi->ctrl);

	return ret;
}
 746
/*
 * Remove: resume the device so the register write reaches hardware,
 * disable the controller, release DMA, and tear down runtime PM and
 * the clock in reverse probe order.
 */
static int stm32_qspi_remove(struct platform_device *pdev)
{
	struct stm32_qspi *qspi = platform_get_drvdata(pdev);

	/* make sure the clock is running before touching registers */
	pm_runtime_get_sync(qspi->dev);
	/* disable qspi */
	writel_relaxed(0, qspi->io_base + QSPI_CR);
	stm32_qspi_dma_free(qspi);
	mutex_destroy(&qspi->lock);
	pm_runtime_put_noidle(qspi->dev);
	pm_runtime_disable(qspi->dev);
	pm_runtime_set_suspended(qspi->dev);
	pm_runtime_dont_use_autosuspend(qspi->dev);
	clk_disable_unprepare(qspi->clk);

	return 0;
}
 764
/* runtime suspend: gate the kernel clock while the controller is idle */
static int __maybe_unused stm32_qspi_runtime_suspend(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);

	clk_disable_unprepare(qspi->clk);

	return 0;
}
 773
/* runtime resume: re-enable the kernel clock */
static int __maybe_unused stm32_qspi_runtime_resume(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);

	return clk_prepare_enable(qspi->clk);
}
 780
/* system suspend: park the pins, then force a runtime suspend */
static int __maybe_unused stm32_qspi_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);

	return pm_runtime_force_suspend(dev);
}
 787
/*
 * System resume: undo the forced runtime suspend, restore the pin
 * state, then reprogram the CR/DCR shadows saved by stm32_qspi_setup()
 * (register contents are lost across suspend).
 */
static int __maybe_unused stm32_qspi_resume(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_default_state(dev);

	/* keep the device active while restoring registers */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
	writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}
 813
/* runtime PM gates the clock; system sleep additionally handles pinctrl */
static const struct dev_pm_ops stm32_qspi_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_qspi_runtime_suspend,
			   stm32_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend, stm32_qspi_resume)
};
 819
/* devicetree match table */
static const struct of_device_id stm32_qspi_match[] = {
	{.compatible = "st,stm32f469-qspi"},
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(of, stm32_qspi_match);
 825
/* platform driver glue */
static struct platform_driver stm32_qspi_driver = {
	.probe	= stm32_qspi_probe,
	.remove	= stm32_qspi_remove,
	.driver	= {
		.name = "stm32-qspi",
		.of_match_table = stm32_qspi_match,
		.pm = &stm32_qspi_pm_ops,
	},
};
module_platform_driver(stm32_qspi_driver);

MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
MODULE_LICENSE("GPL v2");
 840