linux/drivers/spi/spi-stm32-qspi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>

#define QSPI_CR                 0x00
#define CR_EN                   BIT(0)
#define CR_ABORT                BIT(1)
#define CR_DMAEN                BIT(2)
#define CR_TCEN                 BIT(3)
#define CR_SSHIFT               BIT(4)
#define CR_DFM                  BIT(6)
#define CR_FSEL                 BIT(7)
#define CR_FTHRES_SHIFT         8
#define CR_TEIE                 BIT(16)
#define CR_TCIE                 BIT(17)
#define CR_FTIE                 BIT(18)
#define CR_SMIE                 BIT(19)
#define CR_TOIE                 BIT(20)
#define CR_APMS                 BIT(22)
#define CR_PRESC_MASK           GENMASK(31, 24)

#define QSPI_DCR                0x04
#define DCR_FSIZE_MASK          GENMASK(20, 16)

#define QSPI_SR                 0x08
#define SR_TEF                  BIT(0)
#define SR_TCF                  BIT(1)
#define SR_FTF                  BIT(2)
#define SR_SMF                  BIT(3)
#define SR_TOF                  BIT(4)
#define SR_BUSY                 BIT(5)
#define SR_FLEVEL_MASK          GENMASK(13, 8)

#define QSPI_FCR                0x0c
#define FCR_CTEF                BIT(0)
#define FCR_CTCF                BIT(1)
#define FCR_CSMF                BIT(3)

#define QSPI_DLR                0x10

#define QSPI_CCR                0x14
#define CCR_INST_MASK           GENMASK(7, 0)
#define CCR_IMODE_MASK          GENMASK(9, 8)
#define CCR_ADMODE_MASK         GENMASK(11, 10)
#define CCR_ADSIZE_MASK         GENMASK(13, 12)
#define CCR_DCYC_MASK           GENMASK(22, 18)
#define CCR_DMODE_MASK          GENMASK(25, 24)
#define CCR_FMODE_MASK          GENMASK(27, 26)
#define CCR_FMODE_INDW          (0U << 26)
#define CCR_FMODE_INDR          (1U << 26)
#define CCR_FMODE_APM           (2U << 26)
#define CCR_FMODE_MM            (3U << 26)
#define CCR_BUSWIDTH_0          0x0
#define CCR_BUSWIDTH_1          0x1
#define CCR_BUSWIDTH_2          0x2
#define CCR_BUSWIDTH_4          0x3

#define QSPI_AR                 0x18
#define QSPI_ABR                0x1c
#define QSPI_DR                 0x20
#define QSPI_PSMKR              0x24
#define QSPI_PSMAR              0x28
#define QSPI_PIR                0x2c
#define QSPI_LPTR               0x30

#define STM32_QSPI_MAX_MMAP_SZ  SZ_256M
#define STM32_QSPI_MAX_NORCHIP  2

#define STM32_FIFO_TIMEOUT_US 30000
#define STM32_BUSY_TIMEOUT_US 100000
#define STM32_ABT_TIMEOUT_US 100000
#define STM32_COMP_TIMEOUT_MS 1000
#define STM32_AUTOSUSPEND_DELAY -1

struct stm32_qspi_flash {
        u32 cs;
        u32 presc;
};

struct stm32_qspi {
        struct device *dev;
        struct spi_controller *ctrl;
        phys_addr_t phys_base;
        void __iomem *io_base;
        void __iomem *mm_base;
        resource_size_t mm_size;
        struct clk *clk;
        u32 clk_rate;
        struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
        struct completion data_completion;
        struct completion match_completion;
        u32 fmode;

        struct dma_chan *dma_chtx;
        struct dma_chan *dma_chrx;
        struct completion dma_completion;

        u32 cr_reg;
        u32 dcr_reg;
        unsigned long status_timeout;

        /*
         * Protects the device configuration, which may differ between
         * accesses to the two flash banks (bk1, bk2).
         */
        struct mutex lock;
};

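/*
 * Interrupt handler: on a status match (CR_SMIE set and SR_SMF raised),
 * mask the match interrupt and complete match_completion; on transfer
 * complete or transfer error (SR_TCF/SR_TEF), mask both interrupts and
 * complete data_completion.
 */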
static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
{
        struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
        u32 cr, sr;

        cr = readl_relaxed(qspi->io_base + QSPI_CR);
        sr = readl_relaxed(qspi->io_base + QSPI_SR);

        if (cr & CR_SMIE && sr & SR_SMF) {
                /* disable irq */
                cr &= ~CR_SMIE;
                writel_relaxed(cr, qspi->io_base + QSPI_CR);
                complete(&qspi->match_completion);

                return IRQ_HANDLED;
        }

        if (sr & (SR_TEF | SR_TCF)) {
                /* disable irq */
                cr &= ~CR_TCIE & ~CR_TEIE;
                writel_relaxed(cr, qspi->io_base + QSPI_CR);
                complete(&qspi->data_completion);
        }

        return IRQ_HANDLED;
}

static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
{
        *val = readb_relaxed(addr);
}

static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
{
        writeb_relaxed(*val, addr);
}

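/*
 * Transfer op->data.nbytes bytes one at a time through the data register,
 * polling the FIFO threshold flag (SR_FTF) before each access.
 */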
static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
                              const struct spi_mem_op *op)
{
        void (*tx_fifo)(u8 *val, void __iomem *addr);
        u32 len = op->data.nbytes, sr;
        u8 *buf;
        int ret;

        if (op->data.dir == SPI_MEM_DATA_IN) {
                tx_fifo = stm32_qspi_read_fifo;
                buf = op->data.buf.in;

        } else {
                tx_fifo = stm32_qspi_write_fifo;
                buf = (u8 *)op->data.buf.out;
        }

        while (len--) {
                ret = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR,
                                                        sr, (sr & SR_FTF), 1,
                                                        STM32_FIFO_TIMEOUT_US);
                if (ret) {
                        dev_err(qspi->dev, "fifo timeout (len:%d stat:%#x)\n",
                                len, sr);
                        return ret;
                }
                tx_fifo(buf++, qspi->io_base + QSPI_DR);
        }

        return 0;
}

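/* Read the data directly from the memory-mapped region. */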
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
                            const struct spi_mem_op *op)
{
        memcpy_fromio(op->data.buf.in, qspi->mm_base + op->addr.val,
                      op->data.nbytes);
        return 0;
}

static void stm32_qspi_dma_callback(void *arg)
{
        struct completion *dma_completion = arg;

        complete(dma_completion);
}

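/*
 * Transfer the data phase with DMA: map the op buffer, submit a slave_sg
 * descriptor, enable CR_DMAEN and wait for the completion callback, with a
 * timeout scaled by the number of scatterlist entries.
 */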
static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
                             const struct spi_mem_op *op)
{
        struct dma_async_tx_descriptor *desc;
        enum dma_transfer_direction dma_dir;
        struct dma_chan *dma_ch;
        struct sg_table sgt;
        dma_cookie_t cookie;
        u32 cr, t_out;
        int err;

        if (op->data.dir == SPI_MEM_DATA_IN) {
                dma_dir = DMA_DEV_TO_MEM;
                dma_ch = qspi->dma_chrx;
        } else {
                dma_dir = DMA_MEM_TO_DEV;
                dma_ch = qspi->dma_chtx;
        }

        /*
         * spi_map_buf() returns -EINVAL if the buffer is not DMA-able
         * (DMA-able: in vmalloc | kmap | virt_addr_valid).
         */
        err = spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt);
        if (err)
                return err;

        desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
                                       dma_dir, DMA_PREP_INTERRUPT);
        if (!desc) {
                err = -ENOMEM;
                goto out_unmap;
        }

        cr = readl_relaxed(qspi->io_base + QSPI_CR);

        reinit_completion(&qspi->dma_completion);
        desc->callback = stm32_qspi_dma_callback;
        desc->callback_param = &qspi->dma_completion;
        cookie = dmaengine_submit(desc);
        err = dma_submit_error(cookie);
        if (err)
                goto out;

        dma_async_issue_pending(dma_ch);

        writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);

        t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
        if (!wait_for_completion_timeout(&qspi->dma_completion,
                                         msecs_to_jiffies(t_out)))
                err = -ETIMEDOUT;

        if (err)
                dmaengine_terminate_all(dma_ch);

out:
        writel_relaxed(cr & ~CR_DMAEN, qspi->io_base + QSPI_CR);
out_unmap:
        spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt);

        return err;
}

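/*
 * Dispatch the data phase: memory-mapped read, DMA when a channel is
 * available and the transfer is larger than 4 bytes, or polled FIFO
 * accesses otherwise (also the fallback when DMA fails).
 */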
static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
{
        if (!op->data.nbytes)
                return 0;

        if (qspi->fmode == CCR_FMODE_MM)
                return stm32_qspi_tx_mm(qspi, op);
        else if (((op->data.dir == SPI_MEM_DATA_IN && qspi->dma_chrx) ||
                 (op->data.dir == SPI_MEM_DATA_OUT && qspi->dma_chtx)) &&
                  op->data.nbytes > 4)
                if (!stm32_qspi_tx_dma(qspi, op))
                        return 0;

        return stm32_qspi_tx_poll(qspi, op);
}

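/* Poll the status register until the BUSY flag is cleared or a timeout expires. */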
static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
{
        u32 sr;

        return readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr,
                                                 !(sr & SR_BUSY), 1,
                                                 STM32_BUSY_TIMEOUT_US);
}

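/*
 * Wait for the end of an indirect-mode transfer: enable the transfer
 * complete/error interrupts and wait on data_completion, then clear the
 * flags and wait for the controller to leave the busy state.
 */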
static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
                               const struct spi_mem_op *op)
{
        u32 cr, sr;
        int err = 0;

        if ((readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) ||
            qspi->fmode == CCR_FMODE_APM)
                goto out;

        reinit_completion(&qspi->data_completion);
        cr = readl_relaxed(qspi->io_base + QSPI_CR);
        writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);

        if (!wait_for_completion_timeout(&qspi->data_completion,
                                msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
                err = -ETIMEDOUT;
        } else {
                sr = readl_relaxed(qspi->io_base + QSPI_SR);
                if (sr & SR_TEF)
                        err = -EIO;
        }

out:
        /* clear flags */
        writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
        if (!err)
                err = stm32_qspi_wait_nobusy(qspi);

        return err;
}

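/*
 * Automatic-polling mode: enable the status-match interrupt and wait on
 * match_completion for up to the timeout requested by the caller.
 */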
static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi,
                                       const struct spi_mem_op *op)
{
        u32 cr;

        reinit_completion(&qspi->match_completion);
        cr = readl_relaxed(qspi->io_base + QSPI_CR);
        writel_relaxed(cr | CR_SMIE, qspi->io_base + QSPI_CR);

        if (!wait_for_completion_timeout(&qspi->match_completion,
                                msecs_to_jiffies(qspi->status_timeout)))
                return -ETIMEDOUT;

        writel_relaxed(FCR_CSMF, qspi->io_base + QSPI_FCR);

        return 0;
}

static int stm32_qspi_get_mode(struct stm32_qspi *qspi, u8 buswidth)
{
        if (buswidth == 4)
                return CCR_BUSWIDTH_4;

        return buswidth;
}

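/*
 * Program the controller for one spi-mem operation (prescaler, flash
 * selection, data length and the CCR instruction/address/dummy/data fields),
 * run the data phase and abort the transfer on error or after a
 * memory-mapped read.
 */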
static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
{
        struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
        struct stm32_qspi_flash *flash = &qspi->flash[mem->spi->chip_select];
        u32 ccr, cr;
        int timeout, err = 0, err_poll_status = 0;

        dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
                op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
                op->dummy.buswidth, op->data.buswidth,
                op->addr.val, op->data.nbytes);

        cr = readl_relaxed(qspi->io_base + QSPI_CR);
        cr &= ~CR_PRESC_MASK & ~CR_FSEL;
        cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
        cr |= FIELD_PREP(CR_FSEL, flash->cs);
        writel_relaxed(cr, qspi->io_base + QSPI_CR);

        if (op->data.nbytes)
                writel_relaxed(op->data.nbytes - 1,
                               qspi->io_base + QSPI_DLR);

        ccr = qspi->fmode;
        ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode);
        ccr |= FIELD_PREP(CCR_IMODE_MASK,
                          stm32_qspi_get_mode(qspi, op->cmd.buswidth));

        if (op->addr.nbytes) {
                ccr |= FIELD_PREP(CCR_ADMODE_MASK,
                                  stm32_qspi_get_mode(qspi, op->addr.buswidth));
                ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
        }

        if (op->dummy.nbytes)
                ccr |= FIELD_PREP(CCR_DCYC_MASK,
                                  op->dummy.nbytes * 8 / op->dummy.buswidth);

        if (op->data.nbytes) {
                ccr |= FIELD_PREP(CCR_DMODE_MASK,
                                  stm32_qspi_get_mode(qspi, op->data.buswidth));
        }

        writel_relaxed(ccr, qspi->io_base + QSPI_CCR);

        if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
                writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);

        if (qspi->fmode == CCR_FMODE_APM)
                err_poll_status = stm32_qspi_wait_poll_status(qspi, op);

        err = stm32_qspi_tx(qspi, op);

        /*
         * Abort the transfer:
         * - on error
         * - after a memory-mapped read: prefetching must be stopped if we
         *   read the last bytes of the device (device size - fifo size).
         *   As the device size is not known, prefetching is always stopped.
         */
        if (err || err_poll_status || qspi->fmode == CCR_FMODE_MM)
                goto abort;

        /* wait for the end of the transfer in indirect mode */
        err = stm32_qspi_wait_cmd(qspi, op);
        if (err)
                goto abort;

        return 0;

abort:
        cr = readl_relaxed(qspi->io_base + QSPI_CR) | CR_ABORT;
        writel_relaxed(cr, qspi->io_base + QSPI_CR);

        /* wait for the abort bit to be cleared by hardware */
        timeout = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_CR,
                                                    cr, !(cr & CR_ABORT), 1,
                                                    STM32_ABT_TIMEOUT_US);

        writel_relaxed(FCR_CTCF | FCR_CSMF, qspi->io_base + QSPI_FCR);

        if (err || err_poll_status || timeout)
                dev_err(qspi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
                        __func__, err, err_poll_status, timeout);

        return err;
}

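/*
 * spi-mem poll_status handler: program the polling mask and match registers,
 * then run the operation in automatic-polling mode.
 */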
static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *op,
                                  u16 mask, u16 match,
                                  unsigned long initial_delay_us,
                                  unsigned long polling_rate_us,
                                  unsigned long timeout_ms)
{
        struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
        int ret;

        if (!spi_mem_supports_op(mem, op))
                return -EOPNOTSUPP;

        ret = pm_runtime_resume_and_get(qspi->dev);
        if (ret < 0)
                return ret;

        mutex_lock(&qspi->lock);

        writel_relaxed(mask, qspi->io_base + QSPI_PSMKR);
        writel_relaxed(match, qspi->io_base + QSPI_PSMAR);
        qspi->fmode = CCR_FMODE_APM;
        qspi->status_timeout = timeout_ms;

        ret = stm32_qspi_send(mem, op);
        mutex_unlock(&qspi->lock);

        pm_runtime_mark_last_busy(qspi->dev);
        pm_runtime_put_autosuspend(qspi->dev);

        return ret;
}

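/* spi-mem exec_op handler: run the operation in indirect read or write mode. */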
static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
        struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
        int ret;

        ret = pm_runtime_resume_and_get(qspi->dev);
        if (ret < 0)
                return ret;

        mutex_lock(&qspi->lock);
        if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
                qspi->fmode = CCR_FMODE_INDR;
        else
                qspi->fmode = CCR_FMODE_INDW;

        ret = stm32_qspi_send(mem, op);
        mutex_unlock(&qspi->lock);

        pm_runtime_mark_last_busy(qspi->dev);
        pm_runtime_put_autosuspend(qspi->dev);

        return ret;
}

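/* Only read direct mappings backed by the memory-mapped region are supported. */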
static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
        struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);

        if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
                return -EOPNOTSUPP;

        /* should never happen, as mm_base == NULL is a probe error exit condition */
        if (!qspi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
                return -EOPNOTSUPP;

        if (!qspi->mm_size)
                return -EOPNOTSUPP;

        return 0;
}

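/*
 * Direct-mapping read: use memory-mapped mode when the whole access fits in
 * the mapped region, otherwise fall back to an indirect read.
 */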
static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
                                      u64 offs, size_t len, void *buf)
{
        struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
        struct spi_mem_op op;
        u32 addr_max;
        int ret;

        ret = pm_runtime_resume_and_get(qspi->dev);
        if (ret < 0)
                return ret;

        mutex_lock(&qspi->lock);
        /*
         * Make a local copy of desc->info.op_tmpl and complete the dirmap
         * spi_mem_op template with offs, len and *buf so that all needed
         * transfer information ends up in the struct spi_mem_op.
         */
        memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
        dev_dbg(qspi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);

        op.data.nbytes = len;
        op.addr.val = desc->info.offset + offs;
        op.data.buf.in = buf;

        addr_max = op.addr.val + op.data.nbytes + 1;
        if (addr_max < qspi->mm_size && op.addr.buswidth)
                qspi->fmode = CCR_FMODE_MM;
        else
                qspi->fmode = CCR_FMODE_INDR;

        ret = stm32_qspi_send(desc->mem, &op);
        mutex_unlock(&qspi->lock);

        pm_runtime_mark_last_busy(qspi->dev);
        pm_runtime_put_autosuspend(qspi->dev);

        return ret ?: len;
}

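/*
 * Per-device setup: compute the clock prescaler for the requested maximum
 * frequency and program the base CR/DCR configuration.
 */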
static int stm32_qspi_setup(struct spi_device *spi)
{
        struct spi_controller *ctrl = spi->master;
        struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
        struct stm32_qspi_flash *flash;
        u32 presc;
        int ret;

        if (ctrl->busy)
                return -EBUSY;

        if (!spi->max_speed_hz)
                return -EINVAL;

        ret = pm_runtime_resume_and_get(qspi->dev);
        if (ret < 0)
                return ret;

        presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;

        flash = &qspi->flash[spi->chip_select];
        flash->cs = spi->chip_select;
        flash->presc = presc;

        mutex_lock(&qspi->lock);
        qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
        writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);

        /* set dcr fsize to max address */
        qspi->dcr_reg = DCR_FSIZE_MASK;
        writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
        mutex_unlock(&qspi->lock);

        pm_runtime_mark_last_busy(qspi->dev);
        pm_runtime_put_autosuspend(qspi->dev);

        return 0;
}

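/*
 * Request and configure the optional rx/tx DMA channels. The driver still
 * works in polled mode when a channel is missing; only -EPROBE_DEFER is
 * propagated to the caller.
 */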
static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
        struct dma_slave_config dma_cfg;
        struct device *dev = qspi->dev;
        int ret = 0;

        memset(&dma_cfg, 0, sizeof(dma_cfg));

        dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        dma_cfg.src_addr = qspi->phys_base + QSPI_DR;
        dma_cfg.dst_addr = qspi->phys_base + QSPI_DR;
        dma_cfg.src_maxburst = 4;
        dma_cfg.dst_maxburst = 4;

        qspi->dma_chrx = dma_request_chan(dev, "rx");
        if (IS_ERR(qspi->dma_chrx)) {
                ret = PTR_ERR(qspi->dma_chrx);
                qspi->dma_chrx = NULL;
                if (ret == -EPROBE_DEFER)
                        goto out;
        } else {
                if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
                        dev_err(dev, "dma rx config failed\n");
                        dma_release_channel(qspi->dma_chrx);
                        qspi->dma_chrx = NULL;
                }
        }

        qspi->dma_chtx = dma_request_chan(dev, "tx");
        if (IS_ERR(qspi->dma_chtx)) {
                ret = PTR_ERR(qspi->dma_chtx);
                qspi->dma_chtx = NULL;
        } else {
                if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
                        dev_err(dev, "dma tx config failed\n");
                        dma_release_channel(qspi->dma_chtx);
                        qspi->dma_chtx = NULL;
                }
        }

out:
        init_completion(&qspi->dma_completion);

        if (ret != -EPROBE_DEFER)
                ret = 0;

        return ret;
}

static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
{
        if (qspi->dma_chtx)
                dma_release_channel(qspi->dma_chtx);
        if (qspi->dma_chrx)
                dma_release_channel(qspi->dma_chrx);
}

/*
 * No special host constraints, so use the default spi_mem_default_supports_op()
 * to check the supported modes.
 */
static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
        .exec_op        = stm32_qspi_exec_op,
        .dirmap_create  = stm32_qspi_dirmap_create,
        .dirmap_read    = stm32_qspi_dirmap_read,
        .poll_status    = stm32_qspi_poll_status,
};

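/*
 * Probe: map the register and memory-mapped regions, request the IRQ, clock,
 * reset and DMA channels, then register the spi-mem controller.
 */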
static int stm32_qspi_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct spi_controller *ctrl;
        struct reset_control *rstc;
        struct stm32_qspi *qspi;
        struct resource *res;
        int ret, irq;

        ctrl = devm_spi_alloc_master(dev, sizeof(*qspi));
        if (!ctrl)
                return -ENOMEM;

        qspi = spi_controller_get_devdata(ctrl);
        qspi->ctrl = ctrl;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
        qspi->io_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(qspi->io_base))
                return PTR_ERR(qspi->io_base);

        qspi->phys_base = res->start;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
        qspi->mm_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(qspi->mm_base))
                return PTR_ERR(qspi->mm_base);

        qspi->mm_size = resource_size(res);
        if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
                return -EINVAL;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
                               dev_name(dev), qspi);
        if (ret) {
                dev_err(dev, "failed to request irq\n");
                return ret;
        }

        init_completion(&qspi->data_completion);
        init_completion(&qspi->match_completion);

        qspi->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(qspi->clk))
                return PTR_ERR(qspi->clk);

        qspi->clk_rate = clk_get_rate(qspi->clk);
        if (!qspi->clk_rate)
                return -EINVAL;

        ret = clk_prepare_enable(qspi->clk);
        if (ret) {
                dev_err(dev, "can not enable the clock\n");
                return ret;
        }

        rstc = devm_reset_control_get_exclusive(dev, NULL);
        if (IS_ERR(rstc)) {
                ret = PTR_ERR(rstc);
                if (ret == -EPROBE_DEFER)
                        goto err_clk_disable;
        } else {
                reset_control_assert(rstc);
                udelay(2);
                reset_control_deassert(rstc);
        }

        qspi->dev = dev;
        platform_set_drvdata(pdev, qspi);
        ret = stm32_qspi_dma_setup(qspi);
        if (ret)
                goto err_dma_free;

        mutex_init(&qspi->lock);

        ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
                | SPI_TX_DUAL | SPI_TX_QUAD;
        ctrl->setup = stm32_qspi_setup;
        ctrl->bus_num = -1;
        ctrl->mem_ops = &stm32_qspi_mem_ops;
        ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
        ctrl->dev.of_node = dev->of_node;

        pm_runtime_set_autosuspend_delay(dev, STM32_AUTOSUSPEND_DELAY);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
        pm_runtime_get_noresume(dev);

        ret = spi_register_master(ctrl);
        if (ret)
                goto err_pm_runtime_free;

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);

        return 0;

err_pm_runtime_free:
        pm_runtime_get_sync(qspi->dev);
        /* disable qspi */
        writel_relaxed(0, qspi->io_base + QSPI_CR);
        mutex_destroy(&qspi->lock);
        pm_runtime_put_noidle(qspi->dev);
        pm_runtime_disable(qspi->dev);
        pm_runtime_set_suspended(qspi->dev);
        pm_runtime_dont_use_autosuspend(qspi->dev);
err_dma_free:
        stm32_qspi_dma_free(qspi);
err_clk_disable:
        clk_disable_unprepare(qspi->clk);

        return ret;
}

static int stm32_qspi_remove(struct platform_device *pdev)
{
        struct stm32_qspi *qspi = platform_get_drvdata(pdev);

        pm_runtime_get_sync(qspi->dev);
        spi_unregister_master(qspi->ctrl);
        /* disable qspi */
        writel_relaxed(0, qspi->io_base + QSPI_CR);
        stm32_qspi_dma_free(qspi);
        mutex_destroy(&qspi->lock);
        pm_runtime_put_noidle(qspi->dev);
        pm_runtime_disable(qspi->dev);
        pm_runtime_set_suspended(qspi->dev);
        pm_runtime_dont_use_autosuspend(qspi->dev);
        clk_disable_unprepare(qspi->clk);

        return 0;
}

static int __maybe_unused stm32_qspi_runtime_suspend(struct device *dev)
{
        struct stm32_qspi *qspi = dev_get_drvdata(dev);

        clk_disable_unprepare(qspi->clk);

        return 0;
}

static int __maybe_unused stm32_qspi_runtime_resume(struct device *dev)
{
        struct stm32_qspi *qspi = dev_get_drvdata(dev);

        return clk_prepare_enable(qspi->clk);
}

static int __maybe_unused stm32_qspi_suspend(struct device *dev)
{
        pinctrl_pm_select_sleep_state(dev);

        return pm_runtime_force_suspend(dev);
}

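/*
 * System resume: restore the pinctrl default state and re-program the CR/DCR
 * values saved at setup time, as the registers may have been reset.
 */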
static int __maybe_unused stm32_qspi_resume(struct device *dev)
{
        struct stm32_qspi *qspi = dev_get_drvdata(dev);
        int ret;

        ret = pm_runtime_force_resume(dev);
        if (ret < 0)
                return ret;

        pinctrl_pm_select_default_state(dev);

        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;

        writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
        writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);

        return 0;
}

static const struct dev_pm_ops stm32_qspi_pm_ops = {
        SET_RUNTIME_PM_OPS(stm32_qspi_runtime_suspend,
                           stm32_qspi_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend, stm32_qspi_resume)
};

static const struct of_device_id stm32_qspi_match[] = {
        {.compatible = "st,stm32f469-qspi"},
        {}
};
MODULE_DEVICE_TABLE(of, stm32_qspi_match);

static struct platform_driver stm32_qspi_driver = {
        .probe  = stm32_qspi_probe,
        .remove = stm32_qspi_remove,
        .driver = {
                .name = "stm32-qspi",
                .of_match_table = stm32_qspi_match,
                .pm = &stm32_qspi_pm_ops,
        },
};
module_platform_driver(stm32_qspi_driver);

MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
MODULE_LICENSE("GPL v2");