// linux/drivers/spi/spi-mtk-nor.c
   1// SPDX-License-Identifier: GPL-2.0
   2//
   3// Mediatek SPI NOR controller driver
   4//
   5// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>
   6
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
  20
#define DRIVER_NAME "mtk-spi-nor"

// Command register: software sets one of the CMD bits to start an
// operation; the controller clears the bit again when it completes.
#define MTK_NOR_REG_CMD                 0x00
#define MTK_NOR_CMD_WRITE               BIT(4)
#define MTK_NOR_CMD_PROGRAM             BIT(2)
#define MTK_NOR_CMD_READ                BIT(0)
#define MTK_NOR_CMD_MASK                GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT             0x04
#define MTK_NOR_REG_RDATA               0x0c

// Read address bytes 0-2 occupy consecutive registers; byte 3 (for 4-byte
// addressing) lives at a separate offset.
#define MTK_NOR_REG_RADR0               0x10
#define MTK_NOR_REG_RADR(n)             (MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3               0xc8

#define MTK_NOR_REG_WDATA               0x1c

// TX shift registers for the generic PRG path, loaded from index MAX down.
#define MTK_NOR_REG_PRGDATA0            0x20
#define MTK_NOR_REG_PRGDATA(n)          (MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX         5

// RX shift registers for the generic PRG path.
#define MTK_NOR_REG_SHIFT0              0x38
#define MTK_NOR_REG_SHIFT(n)            (MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX           9

#define MTK_NOR_REG_CFG1                0x60
#define MTK_NOR_FAST_READ               BIT(0)

#define MTK_NOR_REG_CFG2                0x64
#define MTK_NOR_WR_CUSTOM_OP_EN         BIT(4)
#define MTK_NOR_WR_BUF_EN               BIT(0)

#define MTK_NOR_REG_PP_DATA             0x98

#define MTK_NOR_REG_IRQ_STAT            0xa8
#define MTK_NOR_REG_IRQ_EN              0xac
#define MTK_NOR_IRQ_DMA                 BIT(7)
#define MTK_NOR_IRQ_MASK                GENMASK(7, 0)

#define MTK_NOR_REG_CFG3                0xb4
#define MTK_NOR_DISABLE_WREN            BIT(7)
#define MTK_NOR_DISABLE_SR_POLL         BIT(5)

#define MTK_NOR_REG_WP                  0xc4
#define MTK_NOR_ENABLE_SF_CMD           0x30

// Bus mode configuration: address width and dual/quad read selection.
#define MTK_NOR_REG_BUSCFG              0xcc
#define MTK_NOR_4B_ADDR                 BIT(4)
#define MTK_NOR_QUAD_ADDR               BIT(3)
#define MTK_NOR_QUAD_READ               BIT(2)
#define MTK_NOR_DUAL_ADDR               BIT(1)
#define MTK_NOR_DUAL_READ               BIT(0)
#define MTK_NOR_BUS_MODE_MASK           GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL             0x718
#define MTK_NOR_DMA_START               BIT(0)

#define MTK_NOR_REG_DMA_FADR            0x71c
#define MTK_NOR_REG_DMA_DADR            0x720
#define MTK_NOR_REG_DMA_END_DADR        0x724

#define MTK_NOR_PRG_MAX_SIZE            6
// Reading DMA src/dst addresses have to be 16-byte aligned
#define MTK_NOR_DMA_ALIGN               16
#define MTK_NOR_DMA_ALIGN_MASK          (MTK_NOR_DMA_ALIGN - 1)
// and we allocate a bounce buffer if destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE         PAGE_SIZE

// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE                 128

// Convert a serial-clock cycle count to microseconds.
// NOTE(review): "sp" is expanded unparenthesized, and (clkcnt) * 1000000
// can overflow ulong on 32-bit for page-sized DMA reads — confirm callers
// keep clkcnt small enough, or widen the arithmetic.
#define CLK_TO_US(sp, clkcnt)           ((clkcnt) * 1000000 / sp->spi_freq)
  93
// Per-device driver state.
struct mtk_nor {
        struct spi_controller *ctlr;    // controller registered for this device
        struct device *dev;             // platform device; logging + DMA mapping
        void __iomem *base;             // mapped controller register block
        u8 *buffer;                     // 16-byte-aligned bounce buffer for DMA reads
        struct clk *spi_clk;            // "spi" clock; its rate is spi_freq
        struct clk *ctlr_clk;           // "sf" controller gate clock
        unsigned int spi_freq;          // cached clk_get_rate(spi_clk), in Hz
        bool wbuf_en;                   // cached state of MTK_NOR_WR_BUF_EN in CFG2
        bool has_irq;                   // DMA-done interrupt was requested successfully
        struct completion op_done;      // completed by the IRQ handler on DMA done
};
 106
 107static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
 108{
 109        u32 val = readl(sp->base + reg);
 110
 111        val &= ~clr;
 112        val |= set;
 113        writel(val, sp->base + reg);
 114}
 115
// Kick the command bits in @cmd and busy-wait for the controller to clear
// them again.  @clk is the expected transfer length in serial clock cycles
// and is only used to scale the poll interval and timeout.
// Returns 0 on success or a negative errno from readl_poll_timeout().
static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
{
        ulong delay = CLK_TO_US(sp, clk);
        u32 reg;
        int ret;

        writel(cmd, sp->base + MTK_NOR_REG_CMD);
        // Poll every ~delay/3 us; give up after ~200x the nominal time.
        ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
                                 delay / 3, (delay + 1) * 200);
        if (ret < 0)
                dev_err(sp->dev, "command %u timeout.\n", cmd);
        return ret;
}
 129
 130static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
 131{
 132        u32 addr = op->addr.val;
 133        int i;
 134
 135        for (i = 0; i < 3; i++) {
 136                writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
 137                addr >>= 8;
 138        }
 139        if (op->addr.nbytes == 4) {
 140                writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
 141                mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
 142        } else {
 143                mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
 144        }
 145}
 146
 147static bool mtk_nor_match_read(const struct spi_mem_op *op)
 148{
 149        int dummy = 0;
 150
 151        if (op->dummy.buswidth)
 152                dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;
 153
 154        if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
 155                if (op->addr.buswidth == 1)
 156                        return dummy == 8;
 157                else if (op->addr.buswidth == 2)
 158                        return dummy == 4;
 159                else if (op->addr.buswidth == 4)
 160                        return dummy == 6;
 161        } else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
 162                if (op->cmd.opcode == 0x03)
 163                        return dummy == 0;
 164                else if (op->cmd.opcode == 0x0b)
 165                        return dummy == 8;
 166        }
 167        return false;
 168}
 169
// spi-mem adjust_op_size hook: clamp op->data.nbytes to what one hardware
// operation can move.
// - Supported reads go through DMA, which needs a 16-byte-aligned flash
//   address; unaligned starts and sub-16-byte reads fall back to 1-byte
//   PIO, and reads into an unaligned buffer are capped at the bounce
//   buffer size.
// - Writes use either the 128-byte page-program buffer or 1-byte program.
// - Anything else goes through the 6-byte PRG shift-register path.
static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
        size_t len;

        if (!op->data.nbytes)
                return 0;

        if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
                if ((op->data.dir == SPI_MEM_DATA_IN) &&
                    mtk_nor_match_read(op)) {
                        // Unaligned flash offset or tiny read: 1-byte PIO.
                        if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
                            (op->data.nbytes < MTK_NOR_DMA_ALIGN))
                                op->data.nbytes = 1;
                        // Aligned destination: round length down to 16 bytes.
                        else if (!((ulong)(op->data.buf.in) &
                                   MTK_NOR_DMA_ALIGN_MASK))
                                op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
                        // Unaligned destination: bounce-buffer limited.
                        else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
                                op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
                        return 0;
                } else if (op->data.dir == SPI_MEM_DATA_OUT) {
                        if (op->data.nbytes >= MTK_NOR_PP_SIZE)
                                op->data.nbytes = MTK_NOR_PP_SIZE;
                        else
                                op->data.nbytes = 1;
                        return 0;
                }
        }

        // Generic path: opcode + address + dummy + data must all fit in the
        // MTK_NOR_PRG_MAX_SIZE shift registers.
        len = MTK_NOR_PRG_MAX_SIZE - sizeof(op->cmd.opcode) - op->addr.nbytes -
              op->dummy.nbytes;
        if (op->data.nbytes > len)
                op->data.nbytes = len;

        return 0;
}
 205
// spi-mem supports_op hook.  Commands must be 1-bit wide.  Addressed reads
// are supported when they match a native read mode; addressed writes only
// in plain 1-1-1 mode with no dummy cycles.  Everything else must fit in
// the PRG shift registers (with at least one slot left when there is a
// data phase).
static bool mtk_nor_supports_op(struct spi_mem *mem,
                                const struct spi_mem_op *op)
{
        size_t len;

        if (op->cmd.buswidth != 1)
                return false;

        if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
                if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op))
                        return true;
                else if (op->data.dir == SPI_MEM_DATA_OUT)
                        return (op->addr.buswidth == 1) &&
                               (op->dummy.buswidth == 0) &&
                               (op->data.buswidth == 1);
        }
        len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
        if ((len > MTK_NOR_PRG_MAX_SIZE) ||
            ((op->data.nbytes) && (len == MTK_NOR_PRG_MAX_SIZE)))
                return false;
        return true;
}
 228
// Program BUSCFG (and CFG1 for single-bit reads) for the read mode used by
// @op.  For dual/quad reads the opcode is written to the PRGDATA register
// the controller fetches it from; for single-bit reads the FAST_READ bit
// selects between opcode 0x03 and 0x0b behavior.
static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
{
        u32 reg = 0;

        if (op->addr.nbytes == 4)
                reg |= MTK_NOR_4B_ADDR;

        if (op->data.buswidth == 4) {
                reg |= MTK_NOR_QUAD_READ;
                // Quad-read opcode is fetched from PRGDATA4.
                writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
                if (op->addr.buswidth == 4)
                        reg |= MTK_NOR_QUAD_ADDR;
        } else if (op->data.buswidth == 2) {
                reg |= MTK_NOR_DUAL_READ;
                // Dual-read opcode is fetched from PRGDATA3.
                writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
                if (op->addr.buswidth == 2)
                        reg |= MTK_NOR_DUAL_ADDR;
        } else {
                if (op->cmd.opcode == 0x0b)
                        mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
                else
                        mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
        }
        // Replace the whole bus-mode field with the newly computed value.
        mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}
 254
 255static int mtk_nor_read_dma(struct mtk_nor *sp, u32 from, unsigned int length,
 256                            u8 *buffer)
 257{
 258        int ret = 0;
 259        ulong delay;
 260        u32 reg;
 261        dma_addr_t dma_addr;
 262
 263        dma_addr = dma_map_single(sp->dev, buffer, length, DMA_FROM_DEVICE);
 264        if (dma_mapping_error(sp->dev, dma_addr)) {
 265                dev_err(sp->dev, "failed to map dma buffer.\n");
 266                return -EINVAL;
 267        }
 268
 269        writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
 270        writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
 271        writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);
 272
 273        if (sp->has_irq) {
 274                reinit_completion(&sp->op_done);
 275                mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
 276        }
 277
 278        mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);
 279
 280        delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);
 281
 282        if (sp->has_irq) {
 283                if (!wait_for_completion_timeout(&sp->op_done,
 284                                                 (delay + 1) * 100))
 285                        ret = -ETIMEDOUT;
 286        } else {
 287                ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
 288                                         !(reg & MTK_NOR_DMA_START), delay / 3,
 289                                         (delay + 1) * 100);
 290        }
 291
 292        dma_unmap_single(sp->dev, dma_addr, length, DMA_FROM_DEVICE);
 293        if (ret < 0)
 294                dev_err(sp->dev, "dma read timeout.\n");
 295
 296        return ret;
 297}
 298
 299static int mtk_nor_read_bounce(struct mtk_nor *sp, u32 from,
 300                               unsigned int length, u8 *buffer)
 301{
 302        unsigned int rdlen;
 303        int ret;
 304
 305        if (length & MTK_NOR_DMA_ALIGN_MASK)
 306                rdlen = (length + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
 307        else
 308                rdlen = length;
 309
 310        ret = mtk_nor_read_dma(sp, from, rdlen, sp->buffer);
 311        if (ret)
 312                return ret;
 313
 314        memcpy(buffer, sp->buffer, length);
 315        return 0;
 316}
 317
 318static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
 319{
 320        u8 *buf = op->data.buf.in;
 321        int ret;
 322
 323        ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
 324        if (!ret)
 325                buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
 326        return ret;
 327}
 328
 329static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
 330{
 331        int ret;
 332        u32 val;
 333
 334        if (sp->wbuf_en)
 335                return 0;
 336
 337        val = readl(sp->base + MTK_NOR_REG_CFG2);
 338        writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
 339        ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
 340                                 val & MTK_NOR_WR_BUF_EN, 0, 10000);
 341        if (!ret)
 342                sp->wbuf_en = true;
 343        return ret;
 344}
 345
 346static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
 347{
 348        int ret;
 349        u32 val;
 350
 351        if (!sp->wbuf_en)
 352                return 0;
 353        val = readl(sp->base + MTK_NOR_REG_CFG2);
 354        writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
 355        ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
 356                                 !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
 357        if (!ret)
 358                sp->wbuf_en = false;
 359        return ret;
 360}
 361
// Page program through the controller's write buffer.  The exec_op path
// only calls this with op->data.nbytes == MTK_NOR_PP_SIZE (128), so the
// 4-byte packing loop never reads past the data buffer.
static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
{
        const u8 *buf = op->data.buf.out;
        u32 val;
        int ret, i;

        ret = mtk_nor_write_buffer_enable(sp);
        if (ret < 0)
                return ret;

        // Pack four data bytes little-endian into each PP_DATA word.
        for (i = 0; i < op->data.nbytes; i += 4) {
                val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
                      buf[i];
                writel(val, sp->base + MTK_NOR_REG_PP_DATA);
        }
        // +5 bytes accounts for the opcode/address phase on the wire.
        return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
                                (op->data.nbytes + 5) * BITS_PER_BYTE);
}
 380
 381static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
 382                                 const struct spi_mem_op *op)
 383{
 384        const u8 *buf = op->data.buf.out;
 385        int ret;
 386
 387        ret = mtk_nor_write_buffer_disable(sp);
 388        if (ret < 0)
 389                return ret;
 390        writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
 391        return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
 392}
 393
 394int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 395{
 396        struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
 397        int ret;
 398
 399        if ((op->data.nbytes == 0) ||
 400            ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
 401                return -ENOTSUPP;
 402
 403        if (op->data.dir == SPI_MEM_DATA_OUT) {
 404                mtk_nor_set_addr(sp, op);
 405                writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
 406                if (op->data.nbytes == MTK_NOR_PP_SIZE)
 407                        return mtk_nor_pp_buffered(sp, op);
 408                return mtk_nor_pp_unbuffered(sp, op);
 409        }
 410
 411        if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
 412                ret = mtk_nor_write_buffer_disable(sp);
 413                if (ret < 0)
 414                        return ret;
 415                mtk_nor_setup_bus(sp, op);
 416                if (op->data.nbytes == 1) {
 417                        mtk_nor_set_addr(sp, op);
 418                        return mtk_nor_read_pio(sp, op);
 419                } else if (((ulong)(op->data.buf.in) &
 420                            MTK_NOR_DMA_ALIGN_MASK)) {
 421                        return mtk_nor_read_bounce(sp, op->addr.val,
 422                                                   op->data.nbytes,
 423                                                   op->data.buf.in);
 424                } else {
 425                        return mtk_nor_read_dma(sp, op->addr.val,
 426                                                op->data.nbytes,
 427                                                op->data.buf.in);
 428                }
 429        }
 430
 431        return -ENOTSUPP;
 432}
 433
 434static int mtk_nor_setup(struct spi_device *spi)
 435{
 436        struct mtk_nor *sp = spi_controller_get_devdata(spi->master);
 437
 438        if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
 439                dev_err(&spi->dev, "spi clock should be %u Hz.\n",
 440                        sp->spi_freq);
 441                return -EINVAL;
 442        }
 443        spi->max_speed_hz = sp->spi_freq;
 444
 445        return 0;
 446}
 447
// Generic-message fallback for ops exec_op rejected: shift up to
// MTK_NOR_PRG_MAX_SIZE bytes out of the PRGDATA registers and collect the
// same number of bytes back from the SHIFT registers.  TX bytes are loaded
// from PRGDATA(MAX) downwards; RX bytes are read from SHIFT(len-1)
// downwards after MTK_NOR_CMD_PROGRAM completes.
static int mtk_nor_transfer_one_message(struct spi_controller *master,
                                        struct spi_message *m)
{
        struct mtk_nor *sp = spi_controller_get_devdata(master);
        struct spi_transfer *t = NULL;
        unsigned long trx_len = 0;
        int stat = 0;
        int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
        void __iomem *reg;
        const u8 *txbuf;
        u8 *rxbuf;
        int i;

        // Load TX bytes (zero-padding transfers with no tx_buf) into the
        // shift-out registers, highest register first.
        list_for_each_entry(t, &m->transfers, transfer_list) {
                txbuf = t->tx_buf;
                for (i = 0; i < t->len; i++, reg_offset--) {
                        reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
                        if (txbuf)
                                writeb(txbuf[i], reg);
                        else
                                writeb(0, reg);
                }
                trx_len += t->len;
        }

        // Program the total bit count, then run the PROGRAM command.
        writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

        stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
                                trx_len * BITS_PER_BYTE);
        if (stat < 0)
                goto msg_done;

        // Collect RX bytes from the shift-in registers, highest first.
        reg_offset = trx_len - 1;
        list_for_each_entry(t, &m->transfers, transfer_list) {
                rxbuf = t->rx_buf;
                for (i = 0; i < t->len; i++, reg_offset--) {
                        reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
                        if (rxbuf)
                                rxbuf[i] = readb(reg);
                }
        }

        m->actual_length = trx_len;
msg_done:
        m->status = stat;
        spi_finalize_current_message(master);

        return 0;
}
 497
// Gate both controller clocks (reverse order of mtk_nor_enable_clk()).
static void mtk_nor_disable_clk(struct mtk_nor *sp)
{
        clk_disable_unprepare(sp->spi_clk);
        clk_disable_unprepare(sp->ctlr_clk);
}
 503
 504static int mtk_nor_enable_clk(struct mtk_nor *sp)
 505{
 506        int ret;
 507
 508        ret = clk_prepare_enable(sp->spi_clk);
 509        if (ret)
 510                return ret;
 511
 512        ret = clk_prepare_enable(sp->ctlr_clk);
 513        if (ret) {
 514                clk_disable_unprepare(sp->spi_clk);
 515                return ret;
 516        }
 517
 518        return 0;
 519}
 520
// One-time hardware initialization: enable clocks, cache the SPI clock
// rate, unlock serial-flash commands and enable custom write ops, and turn
// off automatic WREN/status polling so the SPI-NOR layer controls them.
static int mtk_nor_init(struct mtk_nor *sp)
{
        int ret;

        ret = mtk_nor_enable_clk(sp);
        if (ret)
                return ret;

        sp->spi_freq = clk_get_rate(sp->spi_clk);

        writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
        mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
        mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
                    MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);

        // ret is 0 here (checked above).
        return ret;
}
 538
// Interrupt handler: ack all pending status bits, then complete the DMA
// waiter and mask further interrupts when the DMA-done bit fired.
static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
{
        struct mtk_nor *sp = data;
        u32 irq_status, irq_enabled;

        irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
        irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
        // write status back to clear interrupt
        writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);

        // Not ours if nothing we enabled is pending.
        if (!(irq_status & irq_enabled))
                return IRQ_NONE;

        if (irq_status & MTK_NOR_IRQ_DMA) {
                complete(&sp->op_done);
                // Mask until the next DMA read re-enables it.
                writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
        }

        return IRQ_HANDLED;
}
 559
// Generic messages are limited by the 6-byte PRG shift-register path.
static size_t mtk_max_msg_size(struct spi_device *spi)
{
        return MTK_NOR_PRG_MAX_SIZE;
}
 564
// spi-mem operations implemented by this controller.
static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
        .adjust_op_size = mtk_nor_adjust_op_size,
        .supports_op = mtk_nor_supports_op,
        .exec_op = mtk_nor_exec_op
};
 570
// Device-tree match table.
static const struct of_device_id mtk_nor_match[] = {
        { .compatible = "mediatek,mt8173-nor" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);
 576
 577static int mtk_nor_probe(struct platform_device *pdev)
 578{
 579        struct spi_controller *ctlr;
 580        struct mtk_nor *sp;
 581        void __iomem *base;
 582        u8 *buffer;
 583        struct clk *spi_clk, *ctlr_clk;
 584        int ret, irq;
 585
 586        base = devm_platform_ioremap_resource(pdev, 0);
 587        if (IS_ERR(base))
 588                return PTR_ERR(base);
 589
 590        spi_clk = devm_clk_get(&pdev->dev, "spi");
 591        if (IS_ERR(spi_clk))
 592                return PTR_ERR(spi_clk);
 593
 594        ctlr_clk = devm_clk_get(&pdev->dev, "sf");
 595        if (IS_ERR(ctlr_clk))
 596                return PTR_ERR(ctlr_clk);
 597
 598        buffer = devm_kmalloc(&pdev->dev,
 599                              MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
 600                              GFP_KERNEL);
 601        if (!buffer)
 602                return -ENOMEM;
 603
 604        if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK)
 605                buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) &
 606                                ~MTK_NOR_DMA_ALIGN_MASK);
 607
 608        ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp));
 609        if (!ctlr) {
 610                dev_err(&pdev->dev, "failed to allocate spi controller\n");
 611                return -ENOMEM;
 612        }
 613
 614        ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
 615        ctlr->dev.of_node = pdev->dev.of_node;
 616        ctlr->max_message_size = mtk_max_msg_size;
 617        ctlr->mem_ops = &mtk_nor_mem_ops;
 618        ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
 619        ctlr->num_chipselect = 1;
 620        ctlr->setup = mtk_nor_setup;
 621        ctlr->transfer_one_message = mtk_nor_transfer_one_message;
 622
 623        dev_set_drvdata(&pdev->dev, ctlr);
 624
 625        sp = spi_controller_get_devdata(ctlr);
 626        sp->base = base;
 627        sp->buffer = buffer;
 628        sp->has_irq = false;
 629        sp->wbuf_en = false;
 630        sp->ctlr = ctlr;
 631        sp->dev = &pdev->dev;
 632        sp->spi_clk = spi_clk;
 633        sp->ctlr_clk = ctlr_clk;
 634
 635        irq = platform_get_irq_optional(pdev, 0);
 636        if (irq < 0) {
 637                dev_warn(sp->dev, "IRQ not available.");
 638        } else {
 639                writel(MTK_NOR_IRQ_MASK, base + MTK_NOR_REG_IRQ_STAT);
 640                writel(0, base + MTK_NOR_REG_IRQ_EN);
 641                ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
 642                                       pdev->name, sp);
 643                if (ret < 0) {
 644                        dev_warn(sp->dev, "failed to request IRQ.");
 645                } else {
 646                        init_completion(&sp->op_done);
 647                        sp->has_irq = true;
 648                }
 649        }
 650
 651        ret = mtk_nor_init(sp);
 652        if (ret < 0) {
 653                kfree(ctlr);
 654                return ret;
 655        }
 656
 657        dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);
 658
 659        return devm_spi_register_controller(&pdev->dev, ctlr);
 660}
 661
// Remove: gate the clocks enabled in mtk_nor_init().
// NOTE(review): the devm-registered controller is only unregistered after
// this returns, while the clocks are already gated here — confirm no
// register access can happen during that window.
static int mtk_nor_remove(struct platform_device *pdev)
{
        struct spi_controller *ctlr;
        struct mtk_nor *sp;

        ctlr = dev_get_drvdata(&pdev->dev);
        sp = spi_controller_get_devdata(ctlr);

        mtk_nor_disable_clk(sp);

        return 0;
}
 674
// Platform driver glue and module metadata.
static struct platform_driver mtk_nor_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = mtk_nor_match,
        },
        .probe = mtk_nor_probe,
        .remove = mtk_nor_remove,
};

module_platform_driver(mtk_nor_driver);

MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);