// SPDX-License-Identifier: GPL-2.0
//
// Mediatek SPI NOR controller driver
//
// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>

#define DRIVER_NAME "mtk-spi-nor"

#define MTK_NOR_REG_CMD                 0x00
#define MTK_NOR_CMD_WRITE               BIT(4)
#define MTK_NOR_CMD_PROGRAM             BIT(2)
#define MTK_NOR_CMD_READ                BIT(0)
#define MTK_NOR_CMD_MASK                GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT             0x04
#define MTK_NOR_PRG_CNT_MAX             56
#define MTK_NOR_REG_RDATA               0x0c

#define MTK_NOR_REG_RADR0               0x10
#define MTK_NOR_REG_RADR(n)             (MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3               0xc8

#define MTK_NOR_REG_WDATA               0x1c

#define MTK_NOR_REG_PRGDATA0            0x20
#define MTK_NOR_REG_PRGDATA(n)          (MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX         5

#define MTK_NOR_REG_SHIFT0              0x38
#define MTK_NOR_REG_SHIFT(n)            (MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX           9

#define MTK_NOR_REG_CFG1                0x60
#define MTK_NOR_FAST_READ               BIT(0)

#define MTK_NOR_REG_CFG2                0x64
#define MTK_NOR_WR_CUSTOM_OP_EN         BIT(4)
#define MTK_NOR_WR_BUF_EN               BIT(0)

#define MTK_NOR_REG_PP_DATA             0x98

#define MTK_NOR_REG_IRQ_STAT            0xa8
#define MTK_NOR_REG_IRQ_EN              0xac
#define MTK_NOR_IRQ_DMA                 BIT(7)
#define MTK_NOR_IRQ_MASK                GENMASK(7, 0)

#define MTK_NOR_REG_CFG3                0xb4
#define MTK_NOR_DISABLE_WREN            BIT(7)
#define MTK_NOR_DISABLE_SR_POLL         BIT(5)

#define MTK_NOR_REG_WP                  0xc4
#define MTK_NOR_ENABLE_SF_CMD           0x30

#define MTK_NOR_REG_BUSCFG              0xcc
#define MTK_NOR_4B_ADDR                 BIT(4)
#define MTK_NOR_QUAD_ADDR               BIT(3)
#define MTK_NOR_QUAD_READ               BIT(2)
#define MTK_NOR_DUAL_ADDR               BIT(1)
#define MTK_NOR_DUAL_READ               BIT(0)
#define MTK_NOR_BUS_MODE_MASK           GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL             0x718
#define MTK_NOR_DMA_START               BIT(0)

#define MTK_NOR_REG_DMA_FADR            0x71c
#define MTK_NOR_REG_DMA_DADR            0x720
#define MTK_NOR_REG_DMA_END_DADR        0x724
#define MTK_NOR_REG_DMA_DADR_HB         0x738
#define MTK_NOR_REG_DMA_END_DADR_HB     0x73c

#define MTK_NOR_PRG_MAX_SIZE            6
// DMA read source/destination addresses have to be 16-byte aligned,
#define MTK_NOR_DMA_ALIGN               16
#define MTK_NOR_DMA_ALIGN_MASK          (MTK_NOR_DMA_ALIGN - 1)
// so a bounce buffer is used when the destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE         PAGE_SIZE

// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE                 128

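// Convert a count of SPI clock cycles into an upper-bound delay in
// microseconds, using the SPI clock rate cached in sp->spi_freq (Hz).
// For example, at 26 MHz, CLK_TO_US(sp, 48) = DIV_ROUND_UP(48, 26) = 2us.
// The poll intervals and timeouts below are derived from this bound.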
#define CLK_TO_US(sp, clkcnt)           DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)

struct mtk_nor {
        struct spi_controller *ctlr;
        struct device *dev;
        void __iomem *base;
        u8 *buffer;
        dma_addr_t buffer_dma;
        struct clk *spi_clk;
        struct clk *ctlr_clk;
        unsigned int spi_freq;
        bool wbuf_en;
        bool has_irq;
        bool high_dma;
        struct completion op_done;
};

static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
{
        u32 val = readl(sp->base + reg);

        val &= ~clr;
        val |= set;
        writel(val, sp->base + reg);
}

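// Trigger a command by setting its bit in MTK_NOR_REG_CMD, then poll
// until the controller clears the bit again. "clk" is the expected
// transfer length in SPI clock cycles and only scales the timeout.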
static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
{
        ulong delay = CLK_TO_US(sp, clk);
        u32 reg;
        int ret;

        writel(cmd, sp->base + MTK_NOR_REG_CMD);
        ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
                                 delay / 3, (delay + 1) * 200);
        if (ret < 0)
                dev_err(sp->dev, "command %u timeout.\n", cmd);
        return ret;
}

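// Latch the flash address for a direct read/program op: the three low
// bytes go into RADR0..RADR2 and, for 4-byte ops, the top byte into
// RADR3 along with the 4B-address bus flag.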
static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
{
        u32 addr = op->addr.val;
        int i;

        for (i = 0; i < 3; i++) {
                writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
                addr >>= 8;
        }
        if (op->addr.nbytes == 4) {
                writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
                mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
        } else {
                mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
        }
}

static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
        return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
}

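// Check whether an op matches one of the controller's fixed read modes.
// The dummy-cycle counts are dictated by hardware: 8 for 1-1-2/1-1-4
// reads, 4 for 1-2-2, 6 for 1-4-4, and 0/8 for plain/fast single-lane
// reads (opcodes 0x03/0x0b).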
static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
        int dummy = 0;

        if (op->dummy.buswidth)
                dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;

        if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
                if (op->addr.buswidth == 1)
                        return dummy == 8;
                else if (op->addr.buswidth == 2)
                        return dummy == 4;
                else if (op->addr.buswidth == 4)
                        return dummy == 6;
        } else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
                if (op->cmd.opcode == 0x03)
                        return dummy == 0;
                else if (op->cmd.opcode == 0x0b)
                        return dummy == 8;
        }
        return false;
}

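// Check whether an op fits the PRG shift-register path: single-lane
// only, TX bytes bounded by the PRGDATA registers, RX bytes by the
// SHIFT registers, and the whole transaction by the 56-bit PRG counter.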
static bool mtk_nor_match_prg(const struct spi_mem_op *op)
{
        int tx_len, rx_len, prg_len, prg_left;

        // prg mode is spi-only.
        if ((op->cmd.buswidth > 1) || (op->addr.buswidth > 1) ||
            (op->dummy.buswidth > 1) || (op->data.buswidth > 1))
                return false;

        tx_len = op->cmd.nbytes + op->addr.nbytes;

        if (op->data.dir == SPI_MEM_DATA_OUT) {
                // count dummy bytes only if we need to write data after it
                tx_len += op->dummy.nbytes;

                // leave at least one byte for data
                if (tx_len > MTK_NOR_REG_PRGDATA_MAX)
                        return false;

                // if there's no addr, meaning adjust_op_size is impossible,
                // check data length as well.
                if ((!op->addr.nbytes) &&
                    (tx_len + op->data.nbytes > MTK_NOR_REG_PRGDATA_MAX + 1))
                        return false;
        } else if (op->data.dir == SPI_MEM_DATA_IN) {
                if (tx_len > MTK_NOR_REG_PRGDATA_MAX + 1)
                        return false;

                rx_len = op->data.nbytes;
                prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
                if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
                        prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
                if (rx_len > prg_left) {
                        if (!op->addr.nbytes)
                                return false;
                        rx_len = prg_left;
                }

                prg_len = tx_len + op->dummy.nbytes + rx_len;
                if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
                        return false;
        } else {
                prg_len = tx_len + op->dummy.nbytes;
                if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
                        return false;
        }
        return true;
}

static void mtk_nor_adj_prg_size(struct spi_mem_op *op)
{
        int tx_len, tx_left, prg_left;

        tx_len = op->cmd.nbytes + op->addr.nbytes;
        if (op->data.dir == SPI_MEM_DATA_OUT) {
                tx_len += op->dummy.nbytes;
                tx_left = MTK_NOR_REG_PRGDATA_MAX + 1 - tx_len;
                if (op->data.nbytes > tx_left)
                        op->data.nbytes = tx_left;
        } else if (op->data.dir == SPI_MEM_DATA_IN) {
                prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
                if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
                        prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
                if (op->data.nbytes > prg_left)
                        op->data.nbytes = prg_left;
        }
}

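// Clamp op sizes to what each path can take: DMA reads are capped at
// 4 MiB and trimmed to 16-byte-aligned lengths (falling back to a
// single PIO byte or one bounce buffer), page programs to the 128-byte
// write buffer, and everything else to the PRG register limits.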
static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
        struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);

        if (!op->data.nbytes)
                return 0;

        if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
                if ((op->data.dir == SPI_MEM_DATA_IN) &&
                    mtk_nor_match_read(op)) {
                        // limit size to prevent timeout calculation overflow
                        if (op->data.nbytes > 0x400000)
                                op->data.nbytes = 0x400000;

                        if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
                            (op->data.nbytes < MTK_NOR_DMA_ALIGN))
                                op->data.nbytes = 1;
                        else if (!need_bounce(sp, op))
                                op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
                        else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
                                op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
                        return 0;
                } else if (op->data.dir == SPI_MEM_DATA_OUT) {
                        if (op->data.nbytes >= MTK_NOR_PP_SIZE)
                                op->data.nbytes = MTK_NOR_PP_SIZE;
                        else
                                op->data.nbytes = 1;
                        return 0;
                }
        }

        mtk_nor_adj_prg_size(op);
        return 0;
}

static bool mtk_nor_supports_op(struct spi_mem *mem,
                                const struct spi_mem_op *op)
{
        if (!spi_mem_default_supports_op(mem, op))
                return false;

        if (op->cmd.buswidth != 1)
                return false;

        if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
                switch (op->data.dir) {
                case SPI_MEM_DATA_IN:
                        if (mtk_nor_match_read(op))
                                return true;
                        break;
                case SPI_MEM_DATA_OUT:
                        if ((op->addr.buswidth == 1) &&
                            (op->dummy.nbytes == 0) &&
                            (op->data.buswidth == 1))
                                return true;
                        break;
                default:
                        break;
                }
        }

        return mtk_nor_match_prg(op);
}

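// Program the read bus mode: dual/quad data (and optionally address)
// lanes are selected in BUSCFG with the opcode latched into a dedicated
// PRGDATA slot; single-lane reads only toggle FAST_READ for opcode 0x0b.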
static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
{
        u32 reg = 0;

        if (op->addr.nbytes == 4)
                reg |= MTK_NOR_4B_ADDR;

        if (op->data.buswidth == 4) {
                reg |= MTK_NOR_QUAD_READ;
                writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
                if (op->addr.buswidth == 4)
                        reg |= MTK_NOR_QUAD_ADDR;
        } else if (op->data.buswidth == 2) {
                reg |= MTK_NOR_DUAL_READ;
                writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
                if (op->addr.buswidth == 2)
                        reg |= MTK_NOR_DUAL_ADDR;
        } else {
                if (op->cmd.opcode == 0x0b)
                        mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
                else
                        mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
        }
        mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}

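// Run one DMA read: program the flash source address and destination
// window (plus the high 32 bits on parts with a DMA mask above 32
// bits), start the engine, then wait for the DMA interrupt if one is
// available, or poll MTK_NOR_REG_DMA_CTL otherwise.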
static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
                            dma_addr_t dma_addr)
{
        int ret = 0;
        ulong delay;
        u32 reg;

        writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
        writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
        writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);

        if (sp->high_dma) {
                writel(upper_32_bits(dma_addr),
                       sp->base + MTK_NOR_REG_DMA_DADR_HB);
                writel(upper_32_bits(dma_addr + length),
                       sp->base + MTK_NOR_REG_DMA_END_DADR_HB);
        }

        if (sp->has_irq) {
                reinit_completion(&sp->op_done);
                mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
        }

        mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);

        delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);

        if (sp->has_irq) {
                if (!wait_for_completion_timeout(&sp->op_done,
                                                 (delay + 1) * 100))
                        ret = -ETIMEDOUT;
        } else {
                ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
                                         !(reg & MTK_NOR_DMA_START), delay / 3,
                                         (delay + 1) * 100);
        }

        if (ret < 0)
                dev_err(sp->dev, "dma read timeout.\n");

        return ret;
}

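// DMA into the preallocated bounce buffer when the caller's buffer
// isn't 16-byte aligned: round the read length up to the DMA alignment
// and copy back only the bytes that were requested.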
static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
        unsigned int rdlen;
        int ret;

        if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
                rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
        else
                rdlen = op->data.nbytes;

        ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma);

        if (!ret)
                memcpy(op->data.buf.in, sp->buffer, op->data.nbytes);

        return ret;
}

static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op)
{
        int ret;
        dma_addr_t dma_addr;

        if (need_bounce(sp, op))
                return mtk_nor_read_bounce(sp, op);

        dma_addr = dma_map_single(sp->dev, op->data.buf.in,
                                  op->data.nbytes, DMA_FROM_DEVICE);

        if (dma_mapping_error(sp->dev, dma_addr))
                return -EINVAL;

        ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr);

        dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE);

        return ret;
}

static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
{
        u8 *buf = op->data.buf.in;
        int ret;

        ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
        if (!ret)
                buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
        return ret;
}

static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
{
        int ret;
        u32 val;

        if (sp->wbuf_en)
                return 0;

        val = readl(sp->base + MTK_NOR_REG_CFG2);
        writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
        ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
                                 val & MTK_NOR_WR_BUF_EN, 0, 10000);
        if (!ret)
                sp->wbuf_en = true;
        return ret;
}

static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
{
        int ret;
        u32 val;

        if (!sp->wbuf_en)
                return 0;
        val = readl(sp->base + MTK_NOR_REG_CFG2);
        writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
        ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
                                 !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
        if (!ret)
                sp->wbuf_en = false;
        return ret;
}

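// Buffered page program: pack the payload little-endian into 32-bit
// writes to PP_DATA, then issue the write command. exec_op takes this
// path only when the length is exactly MTK_NOR_PP_SIZE.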
static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
{
        const u8 *buf = op->data.buf.out;
        u32 val;
        int ret, i;

        ret = mtk_nor_write_buffer_enable(sp);
        if (ret < 0)
                return ret;

        for (i = 0; i < op->data.nbytes; i += 4) {
                val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
                      buf[i];
                writel(val, sp->base + MTK_NOR_REG_PP_DATA);
        }
        return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
                                (op->data.nbytes + 5) * BITS_PER_BYTE);
}

static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
                                 const struct spi_mem_op *op)
{
        const u8 *buf = op->data.buf.out;
        int ret;

        ret = mtk_nor_write_buffer_disable(sp);
        if (ret < 0)
                return ret;
        writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
        return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}

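// Execute an arbitrary op through the PRG shift registers: TX bytes
// (opcode, address, dummy, data) are laid out MSB-first from PRGDATA5
// downward, the total length is programmed in bits, and RX bytes are
// read back from the SHIFT registers in reverse order.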
static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
{
        int rx_len = 0;
        int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
        int tx_len, prg_len;
        int i, ret;
        void __iomem *reg;
        u8 bufbyte;

        tx_len = op->cmd.nbytes + op->addr.nbytes;

        // count dummy bytes only if we need to write data after it
        if (op->data.dir == SPI_MEM_DATA_OUT)
                tx_len += op->dummy.nbytes + op->data.nbytes;
        else if (op->data.dir == SPI_MEM_DATA_IN)
                rx_len = op->data.nbytes;

        prg_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes +
                  op->data.nbytes;

        // an invalid op may reach here if the caller calls exec_op without
        // adjust_op_size. return -EINVAL instead of -ENOTSUPP so that
        // spi-mem won't try this op again with generic spi transfers.
        if ((tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) ||
            (rx_len > MTK_NOR_REG_SHIFT_MAX + 1) ||
            (prg_len > MTK_NOR_PRG_CNT_MAX / 8))
                return -EINVAL;

        // fill tx data
        for (i = op->cmd.nbytes; i > 0; i--, reg_offset--) {
                reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
                bufbyte = (op->cmd.opcode >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
                writeb(bufbyte, reg);
        }

        for (i = op->addr.nbytes; i > 0; i--, reg_offset--) {
                reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
                bufbyte = (op->addr.val >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
                writeb(bufbyte, reg);
        }

        if (op->data.dir == SPI_MEM_DATA_OUT) {
                for (i = 0; i < op->dummy.nbytes; i++, reg_offset--) {
                        reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
                        writeb(0, reg);
                }

                for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
                        reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
                        writeb(((const u8 *)(op->data.buf.out))[i], reg);
                }
        }

        for (; reg_offset >= 0; reg_offset--) {
                reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
                writeb(0, reg);
        }

        // trigger op
        writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
        ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
                               prg_len * BITS_PER_BYTE);
        if (ret)
                return ret;

        // fetch read data
        reg_offset = 0;
        if (op->data.dir == SPI_MEM_DATA_IN) {
                for (i = op->data.nbytes - 1; i >= 0; i--, reg_offset++) {
                        reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
                        ((u8 *)(op->data.buf.in))[i] = readb(reg);
                }
        }

        return 0;
}

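// Dispatch an op: addressed writes go to the page-program paths,
// addressed reads matching a hardware read mode go through PIO (single
// byte) or DMA, and everything else falls back to the PRG registers.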
static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
        struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
        int ret;

        if ((op->data.nbytes == 0) ||
            ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
                return mtk_nor_spi_mem_prg(sp, op);

        if (op->data.dir == SPI_MEM_DATA_OUT) {
                mtk_nor_set_addr(sp, op);
                writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
                if (op->data.nbytes == MTK_NOR_PP_SIZE)
                        return mtk_nor_pp_buffered(sp, op);
                return mtk_nor_pp_unbuffered(sp, op);
        }

        if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
                ret = mtk_nor_write_buffer_disable(sp);
                if (ret < 0)
                        return ret;
                mtk_nor_setup_bus(sp, op);
                if (op->data.nbytes == 1) {
                        mtk_nor_set_addr(sp, op);
                        return mtk_nor_read_pio(sp, op);
                } else {
                        return mtk_nor_read_dma(sp, op);
                }
        }

        return mtk_nor_spi_mem_prg(sp, op);
}

static int mtk_nor_setup(struct spi_device *spi)
{
        struct mtk_nor *sp = spi_controller_get_devdata(spi->master);

        if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
                dev_err(&spi->dev, "spi clock should be %u Hz.\n",
                        sp->spi_freq);
                return -EINVAL;
        }
        spi->max_speed_hz = sp->spi_freq;

        return 0;
}

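// Fallback for generic SPI messages that don't go through mem_ops:
// shift the whole message, TX and RX, through the PRG registers in a
// single transaction, which is why max_message_size is capped at
// MTK_NOR_PRG_MAX_SIZE bytes.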
static int mtk_nor_transfer_one_message(struct spi_controller *master,
                                        struct spi_message *m)
{
        struct mtk_nor *sp = spi_controller_get_devdata(master);
        struct spi_transfer *t = NULL;
        unsigned long trx_len = 0;
        int stat = 0;
        int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
        void __iomem *reg;
        const u8 *txbuf;
        u8 *rxbuf;
        int i;

        list_for_each_entry(t, &m->transfers, transfer_list) {
                txbuf = t->tx_buf;
                for (i = 0; i < t->len; i++, reg_offset--) {
                        reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
                        if (txbuf)
                                writeb(txbuf[i], reg);
                        else
                                writeb(0, reg);
                }
                trx_len += t->len;
        }

        writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

        stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
                                trx_len * BITS_PER_BYTE);
        if (stat < 0)
                goto msg_done;

        reg_offset = trx_len - 1;
        list_for_each_entry(t, &m->transfers, transfer_list) {
                rxbuf = t->rx_buf;
                for (i = 0; i < t->len; i++, reg_offset--) {
                        reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
                        if (rxbuf)
                                rxbuf[i] = readb(reg);
                }
        }

        m->actual_length = trx_len;
msg_done:
        m->status = stat;
        spi_finalize_current_message(master);

        return 0;
}

static void mtk_nor_disable_clk(struct mtk_nor *sp)
{
        clk_disable_unprepare(sp->spi_clk);
        clk_disable_unprepare(sp->ctlr_clk);
}

static int mtk_nor_enable_clk(struct mtk_nor *sp)
{
        int ret;

        ret = clk_prepare_enable(sp->spi_clk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(sp->ctlr_clk);
        if (ret) {
                clk_disable_unprepare(sp->spi_clk);
                return ret;
        }

        return 0;
}

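// One-time controller setup: mask and acknowledge all interrupts,
// unlock serial-flash commands via the WP register, enable custom write
// ops, and leave WREN issuing and status polling to software.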
static void mtk_nor_init(struct mtk_nor *sp)
{
        writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
        writel(MTK_NOR_IRQ_MASK, sp->base + MTK_NOR_REG_IRQ_STAT);

        writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
        mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
        mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
                    MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
}

static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
{
        struct mtk_nor *sp = data;
        u32 irq_status, irq_enabled;

        irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
        irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
        // write status back to clear interrupt
        writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);

        if (!(irq_status & irq_enabled))
                return IRQ_NONE;

        if (irq_status & MTK_NOR_IRQ_DMA) {
                complete(&sp->op_done);
                writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
        }

        return IRQ_HANDLED;
}

static size_t mtk_max_msg_size(struct spi_device *spi)
{
        return MTK_NOR_PRG_MAX_SIZE;
}

static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
        .adjust_op_size = mtk_nor_adjust_op_size,
        .supports_op = mtk_nor_supports_op,
        .exec_op = mtk_nor_exec_op
};

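// The match data encodes the DMA address width in bits: 36 on MT8192,
// 32 on MT8173-class parts (consumed as dma_bits in probe below).
// An illustrative devicetree node; the unit address, reg values and
// clock phandles are placeholders, while the compatible string and the
// "spi"/"sf" clock names come from this driver:
//
//	nor_flash: spi@1100d000 {
//		compatible = "mediatek,mt8173-nor";
//		reg = <0 0x1100d000 0 0xe0>;
//		clocks = <&pericfg CLK_PERI_SPI>,
//			 <&topckgen CLK_TOP_SPINFI_IFR_SEL>;
//		clock-names = "spi", "sf";
//	};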
static const struct of_device_id mtk_nor_match[] = {
        { .compatible = "mediatek,mt8192-nor", .data = (void *)36 },
        { .compatible = "mediatek,mt8173-nor", .data = (void *)32 },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);

static int mtk_nor_probe(struct platform_device *pdev)
{
        struct spi_controller *ctlr;
        struct mtk_nor *sp;
        void __iomem *base;
        struct clk *spi_clk, *ctlr_clk;
        int ret, irq;
        unsigned long dma_bits;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);

        spi_clk = devm_clk_get(&pdev->dev, "spi");
        if (IS_ERR(spi_clk))
                return PTR_ERR(spi_clk);

        ctlr_clk = devm_clk_get(&pdev->dev, "sf");
        if (IS_ERR(ctlr_clk))
                return PTR_ERR(ctlr_clk);

        dma_bits = (unsigned long)of_device_get_match_data(&pdev->dev);
        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits))) {
                dev_err(&pdev->dev, "failed to set dma mask(%lu)\n", dma_bits);
                return -EINVAL;
        }

        ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp));
        if (!ctlr) {
                dev_err(&pdev->dev, "failed to allocate spi controller\n");
                return -ENOMEM;
        }

        ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
        ctlr->dev.of_node = pdev->dev.of_node;
        ctlr->max_message_size = mtk_max_msg_size;
        ctlr->mem_ops = &mtk_nor_mem_ops;
        ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
        ctlr->num_chipselect = 1;
        ctlr->setup = mtk_nor_setup;
        ctlr->transfer_one_message = mtk_nor_transfer_one_message;
        ctlr->auto_runtime_pm = true;

        dev_set_drvdata(&pdev->dev, ctlr);

        sp = spi_controller_get_devdata(ctlr);
        sp->base = base;
        sp->has_irq = false;
        sp->wbuf_en = false;
        sp->ctlr = ctlr;
        sp->dev = &pdev->dev;
        sp->spi_clk = spi_clk;
        sp->ctlr_clk = ctlr_clk;
        sp->high_dma = (dma_bits > 32);
        sp->buffer = dmam_alloc_coherent(&pdev->dev,
                                MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
                                &sp->buffer_dma, GFP_KERNEL);
        if (!sp->buffer)
                return -ENOMEM;

        if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) {
                dev_err(sp->dev, "misaligned allocation of internal buffer.\n");
                return -ENOMEM;
        }

        ret = mtk_nor_enable_clk(sp);
        if (ret < 0)
                return ret;

        sp->spi_freq = clk_get_rate(sp->spi_clk);

        mtk_nor_init(sp);

        irq = platform_get_irq_optional(pdev, 0);

        if (irq < 0) {
                dev_warn(sp->dev, "IRQ not available.");
        } else {
                ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
                                       pdev->name, sp);
                if (ret < 0) {
                        dev_warn(sp->dev, "failed to request IRQ.");
                } else {
                        init_completion(&sp->op_done);
                        sp->has_irq = true;
                }
        }

        pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_noresume(&pdev->dev);

        ret = devm_spi_register_controller(&pdev->dev, ctlr);
        if (ret < 0)
                goto err_probe;

        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_put_autosuspend(&pdev->dev);

        dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);

        return 0;

err_probe:
        pm_runtime_disable(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
        pm_runtime_dont_use_autosuspend(&pdev->dev);

        mtk_nor_disable_clk(sp);

        return ret;
}

static int mtk_nor_remove(struct platform_device *pdev)
{
        struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
        struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

        pm_runtime_disable(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
        pm_runtime_dont_use_autosuspend(&pdev->dev);

        mtk_nor_disable_clk(sp);

        return 0;
}

static int __maybe_unused mtk_nor_runtime_suspend(struct device *dev)
{
        struct spi_controller *ctlr = dev_get_drvdata(dev);
        struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

        mtk_nor_disable_clk(sp);

        return 0;
}

static int __maybe_unused mtk_nor_runtime_resume(struct device *dev)
{
        struct spi_controller *ctlr = dev_get_drvdata(dev);
        struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

        return mtk_nor_enable_clk(sp);
}

static int __maybe_unused mtk_nor_suspend(struct device *dev)
{
        return pm_runtime_force_suspend(dev);
}

static int __maybe_unused mtk_nor_resume(struct device *dev)
{
        return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops mtk_nor_pm_ops = {
        SET_RUNTIME_PM_OPS(mtk_nor_runtime_suspend,
                           mtk_nor_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend, mtk_nor_resume)
};

static struct platform_driver mtk_nor_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = mtk_nor_match,
                .pm = &mtk_nor_pm_ops,
        },
        .probe = mtk_nor_probe,
        .remove = mtk_nor_remove,
};

module_platform_driver(mtk_nor_driver);

MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);