uboot/drivers/spi/mtk_snor.c
// SPDX-License-Identifier: GPL-2.0
//
// Mediatek SPI-NOR controller driver
//
// Copyright (C) 2020 SkyLake Huang <SkyLake.Huang@mediatek.com>
//
// Some parts are based on the Linux driver drivers/spi/spi-mtk-nor.c

#include <clk.h>
#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/pinctrl.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <spi.h>
#include <spi-mem.h>
#include <stdbool.h>
#include <watchdog.h>

#define DRIVER_NAME "mtk-spi-nor"

#define MTK_NOR_REG_CMD 0x00
#define MTK_NOR_CMD_WRSR BIT(5)
#define MTK_NOR_CMD_WRITE BIT(4)
#define MTK_NOR_CMD_PROGRAM BIT(2)
#define MTK_NOR_CMD_RDSR BIT(1)
#define MTK_NOR_CMD_READ BIT(0)
#define MTK_NOR_CMD_MASK GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT 0x04
#define MTK_NOR_REG_RDSR 0x08
#define MTK_NOR_REG_RDATA 0x0c

#define MTK_NOR_REG_RADR0 0x10
#define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3 0xc8

#define MTK_NOR_REG_WDATA 0x1c

#define MTK_NOR_REG_PRGDATA0 0x20
#define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX 5

#define MTK_NOR_REG_SHIFT0 0x38
#define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX 9

#define MTK_NOR_REG_CFG1 0x60
#define MTK_NOR_FAST_READ BIT(0)

#define MTK_NOR_REG_CFG2 0x64
#define MTK_NOR_WR_CUSTOM_OP_EN BIT(4)
#define MTK_NOR_WR_BUF_EN BIT(0)

#define MTK_NOR_REG_PP_DATA 0x98

#define MTK_NOR_REG_IRQ_STAT 0xa8
#define MTK_NOR_REG_IRQ_EN 0xac
#define MTK_NOR_IRQ_DMA BIT(7)
#define MTK_NOR_IRQ_WRSR BIT(5)
#define MTK_NOR_IRQ_MASK GENMASK(7, 0)

#define MTK_NOR_REG_CFG3 0xb4
#define MTK_NOR_DISABLE_WREN BIT(7)
#define MTK_NOR_DISABLE_SR_POLL BIT(5)

#define MTK_NOR_REG_WP 0xc4
#define MTK_NOR_ENABLE_SF_CMD 0x30

#define MTK_NOR_REG_BUSCFG 0xcc
#define MTK_NOR_4B_ADDR BIT(4)
#define MTK_NOR_QUAD_ADDR BIT(3)
#define MTK_NOR_QUAD_READ BIT(2)
#define MTK_NOR_DUAL_ADDR BIT(1)
#define MTK_NOR_DUAL_READ BIT(0)
#define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL 0x718
#define MTK_NOR_DMA_START BIT(0)

#define MTK_NOR_REG_DMA_FADR 0x71c
#define MTK_NOR_REG_DMA_DADR 0x720
#define MTK_NOR_REG_DMA_END_DADR 0x724

#define MTK_NOR_PRG_MAX_SIZE 6
// DMA read src/dst addresses have to be 16-byte aligned,
#define MTK_NOR_DMA_ALIGN 16
#define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1)
// so we allocate a bounce buffer if the destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE

// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE 128

#define CLK_TO_US(priv, clkcnt) DIV_ROUND_UP(clkcnt, (priv)->spi_freq / 1000000)

#define MTK_NOR_UNLOCK_ALL 0x0

struct mtk_snor_priv {
        struct device *dev;
        void __iomem *base;
        u8 *buffer;
        struct clk spi_clk;
        struct clk ctlr_clk;
        unsigned int spi_freq;
        bool wbuf_en;
};

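/* Read-modify-write helper for controller registers. */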
static inline void mtk_snor_rmw(struct mtk_snor_priv *priv, u32 reg, u32 set,
                                u32 clr)
{
        u32 val = readl(priv->base + reg);

        val &= ~clr;
        val |= set;
        writel(val, priv->base + reg);
}

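/*
 * Trigger a command by setting its bit in the CMD register, then poll until
 * the controller clears the bit again. The poll timeout is scaled from the
 * number of SPI clock cycles (clk) the operation is expected to take.
 */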
static inline int mtk_snor_cmd_exec(struct mtk_snor_priv *priv, u32 cmd,
                                    ulong clk)
{
        unsigned long long delay = CLK_TO_US(priv, clk);
        u32 reg;
        int ret;

        writel(cmd, priv->base + MTK_NOR_REG_CMD);
        delay = (delay + 1) * 200;
        ret = readl_poll_timeout(priv->base + MTK_NOR_REG_CMD, reg,
                                 !(reg & cmd), delay);
        if (ret < 0)
                dev_err(priv->dev, "command %u timeout.\n", cmd);
        return ret;
}

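/*
 * Program the flash address for the next operation. The low three address
 * bytes go into RADR0..RADR2; a fourth byte goes into the separate RADR3
 * register and additionally requires the 4-byte address mode bit in BUSCFG.
 */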
static void mtk_snor_set_addr(struct mtk_snor_priv *priv,
                              const struct spi_mem_op *op)
{
        u32 addr = op->addr.val;
        int i;

        for (i = 0; i < 3; i++) {
                writeb(addr & 0xff, priv->base + MTK_NOR_REG_RADR(i));
                addr >>= 8;
        }
        if (op->addr.nbytes == 4) {
                writeb(addr & 0xff, priv->base + MTK_NOR_REG_RADR3);
                mtk_snor_rmw(priv, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
        } else {
                mtk_snor_rmw(priv, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
        }
}

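/* True if the destination buffer is not DMA-aligned and needs bouncing. */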
static bool need_bounce(const struct spi_mem_op *op)
{
        return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
}

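/*
 * Clamp a spi-mem operation to what the controller handles in one step:
 * DMA reads are capped at 4 MiB (so the timeout math cannot overflow) and
 * must stay 16-byte aligned, unaligned or short reads degrade to a single
 * byte or to the bounce buffer, and writes are capped at the 128-byte
 * page-program buffer.
 */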
static int mtk_snor_adjust_op_size(struct spi_slave *slave,
                                   struct spi_mem_op *op)
{
        if (!op->data.nbytes)
                return 0;

        if (op->addr.nbytes == 3 || op->addr.nbytes == 4) {
                if (op->data.dir == SPI_MEM_DATA_IN) {
                        // limit size to prevent timeout calculation overflow
                        if (op->data.nbytes > 0x400000)
                                op->data.nbytes = 0x400000;
                        if (op->addr.val & MTK_NOR_DMA_ALIGN_MASK ||
                            op->data.nbytes < MTK_NOR_DMA_ALIGN)
                                op->data.nbytes = 1;
                        else if (!need_bounce(op))
                                op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
                        else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
                                op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
                        return 0;
                } else if (op->data.dir == SPI_MEM_DATA_OUT) {
                        if (op->data.nbytes >= MTK_NOR_PP_SIZE)
                                op->data.nbytes = MTK_NOR_PP_SIZE;
                        else
                                op->data.nbytes = 1;
                        return 0;
                }
        }

        return 0;
}

static bool mtk_snor_supports_op(struct spi_slave *slave,
                                 const struct spi_mem_op *op)
{
        /* This controller only supports 1-1-1 write mode */
        if (op->data.dir == SPI_MEM_DATA_OUT &&
            (op->cmd.buswidth != 1 || op->data.buswidth != 1))
                return false;

        return true;
}

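/*
 * Configure the read bus width (single/dual/quad) and address mode for the
 * next transfer. Dual/quad opcodes are loaded into dedicated PRGDATA slots;
 * in single mode, opcode 0x0b selects the controller's fast-read path.
 */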
static void mtk_snor_setup_bus(struct mtk_snor_priv *priv,
                               const struct spi_mem_op *op)
{
        u32 reg = 0;

        if (op->addr.nbytes == 4)
                reg |= MTK_NOR_4B_ADDR;

        if (op->data.buswidth == 4) {
                reg |= MTK_NOR_QUAD_READ;
                writeb(op->cmd.opcode, priv->base + MTK_NOR_REG_PRGDATA(4));
                if (op->addr.buswidth == 4)
                        reg |= MTK_NOR_QUAD_ADDR;
        } else if (op->data.buswidth == 2) {
                reg |= MTK_NOR_DUAL_READ;
                writeb(op->cmd.opcode, priv->base + MTK_NOR_REG_PRGDATA(3));
                if (op->addr.buswidth == 2)
                        reg |= MTK_NOR_DUAL_ADDR;
        } else {
                if (op->cmd.opcode == 0x0b)
                        mtk_snor_rmw(priv, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ,
                                     0);
                else
                        mtk_snor_rmw(priv, MTK_NOR_REG_CFG1, 0,
                                     MTK_NOR_FAST_READ);
        }
        mtk_snor_rmw(priv, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}

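/*
 * Run one DMA read of @length bytes from flash offset @from to @dma_addr,
 * polling until the controller clears the DMA start bit.
 */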
static int mtk_snor_dma_exec(struct mtk_snor_priv *priv, u32 from,
                             unsigned int length, dma_addr_t dma_addr)
{
        int ret = 0;
        ulong delay;
        u32 reg;

        writel(from, priv->base + MTK_NOR_REG_DMA_FADR);
        writel(dma_addr, priv->base + MTK_NOR_REG_DMA_DADR);
        writel(dma_addr + length, priv->base + MTK_NOR_REG_DMA_END_DADR);

        mtk_snor_rmw(priv, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);

        delay = CLK_TO_US(priv, (length + 5) * BITS_PER_BYTE);

        delay = (delay + 1) * 100;
        ret = readl_poll_timeout(priv->base + MTK_NOR_REG_DMA_CTL, reg,
                                 !(reg & MTK_NOR_DMA_START), delay);

        if (ret < 0)
                dev_err(priv->dev, "dma read timeout.\n");

        return ret;
}

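/*
 * DMA into the driver's aligned bounce buffer, rounding the length up to
 * the DMA alignment, then copy the requested bytes to the caller's buffer.
 */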
static int mtk_snor_read_bounce(struct mtk_snor_priv *priv,
                                const struct spi_mem_op *op)
{
        unsigned int rdlen;
        int ret;

        if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
                rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) &
                        ~MTK_NOR_DMA_ALIGN_MASK;
        else
                rdlen = op->data.nbytes;

        ret = mtk_snor_dma_exec(priv, op->addr.val, rdlen,
                                (dma_addr_t)priv->buffer);

        if (!ret)
                memcpy(op->data.buf.in, priv->buffer, op->data.nbytes);

        return ret;
}

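/* DMA read directly into the caller's buffer, or bounce if it is unaligned. */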
static int mtk_snor_read_dma(struct mtk_snor_priv *priv,
                             const struct spi_mem_op *op)
{
        int ret;
        dma_addr_t dma_addr;

        if (need_bounce(op))
                return mtk_snor_read_bounce(priv, op);

        dma_addr = dma_map_single(op->data.buf.in, op->data.nbytes,
                                  DMA_FROM_DEVICE);

        if (dma_mapping_error(priv->dev, dma_addr))
                return -EINVAL;

        ret = mtk_snor_dma_exec(priv, op->addr.val, op->data.nbytes, dma_addr);

        dma_unmap_single(dma_addr, op->data.nbytes, DMA_FROM_DEVICE);

        return ret;
}

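/* PIO read of a single byte; the timeout covers the 6-byte command sequence. */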
static int mtk_snor_read_pio(struct mtk_snor_priv *priv,
                             const struct spi_mem_op *op)
{
        u8 *buf = op->data.buf.in;
        int ret;

        ret = mtk_snor_cmd_exec(priv, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
        if (!ret)
                buf[0] = readb(priv->base + MTK_NOR_REG_RDATA);
        return ret;
}

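/*
 * The controller's 128-byte write buffer must be enabled for buffered page
 * program and disabled for everything else; these helpers poll CFG2 and
 * cache the current state in wbuf_en to skip redundant polls.
 */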
static int mtk_snor_write_buffer_enable(struct mtk_snor_priv *priv)
{
        int ret;
        u32 val;

        if (priv->wbuf_en)
                return 0;

        val = readl(priv->base + MTK_NOR_REG_CFG2);
        writel(val | MTK_NOR_WR_BUF_EN, priv->base + MTK_NOR_REG_CFG2);
        ret = readl_poll_timeout(priv->base + MTK_NOR_REG_CFG2, val,
                                 val & MTK_NOR_WR_BUF_EN, 10000);
        if (!ret)
                priv->wbuf_en = true;
        return ret;
}

static int mtk_snor_write_buffer_disable(struct mtk_snor_priv *priv)
{
        int ret;
        u32 val;

        if (!priv->wbuf_en)
                return 0;
        val = readl(priv->base + MTK_NOR_REG_CFG2);
        writel(val & ~MTK_NOR_WR_BUF_EN, priv->base + MTK_NOR_REG_CFG2);
        ret = readl_poll_timeout(priv->base + MTK_NOR_REG_CFG2, val,
                                 !(val & MTK_NOR_WR_BUF_EN), 10000);
        if (!ret)
                priv->wbuf_en = false;
        return ret;
}

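/* Page program through the write buffer, 4 data bytes per register write. */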
static int mtk_snor_pp_buffered(struct mtk_snor_priv *priv,
                                const struct spi_mem_op *op)
{
        const u8 *buf = op->data.buf.out;
        u32 val;
        int ret, i;

        ret = mtk_snor_write_buffer_enable(priv);
        if (ret < 0)
                return ret;

        for (i = 0; i < op->data.nbytes; i += 4) {
                val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
                      buf[i];
                writel(val, priv->base + MTK_NOR_REG_PP_DATA);
        }
        ret = mtk_snor_cmd_exec(priv, MTK_NOR_CMD_WRITE,
                                (op->data.nbytes + 5) * BITS_PER_BYTE);
        if (ret < 0)
                return ret;
        return mtk_snor_write_buffer_disable(priv);
}

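/* Unbuffered page program: a single data byte written through WDATA. */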
static int mtk_snor_pp_unbuffered(struct mtk_snor_priv *priv,
                                  const struct spi_mem_op *op)
{
        const u8 *buf = op->data.buf.out;
        int ret;

        ret = mtk_snor_write_buffer_disable(priv);
        if (ret < 0)
                return ret;
        writeb(buf[0], priv->base + MTK_NOR_REG_WDATA);
        return mtk_snor_cmd_exec(priv, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}

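/*
 * Generic programmed-IO transfer for control operations (no address or no
 * data phase): pack opcode, address, dummy and data bytes into the PRGDATA
 * registers in transmit order, execute the PROGRAM command, then read any
 * received bytes back out of the SHIFT registers.
 */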
static int mtk_snor_cmd_program(struct mtk_snor_priv *priv,
                                const struct spi_mem_op *op)
{
        u32 tx_len = 0;
        u32 trx_len = 0;
        int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
        void __iomem *reg;
        u8 *txbuf;
        int tx_cnt = 0;
        u8 *rxbuf = op->data.buf.in;
        int i = 0;
        int ret;

        tx_len = 1 + op->addr.nbytes + op->dummy.nbytes;
        trx_len = tx_len + op->data.nbytes;
        if (op->data.dir == SPI_MEM_DATA_OUT)
                tx_len += op->data.nbytes;

        txbuf = kmalloc_array(tx_len, sizeof(u8), GFP_KERNEL);
        if (!txbuf)
                return -ENOMEM;
        memset(txbuf, 0x0, tx_len * sizeof(u8));

        /* Join all bytes to be transferred */
        txbuf[tx_cnt] = op->cmd.opcode;
        tx_cnt++;
        for (i = op->addr.nbytes; i > 0; i--, tx_cnt++)
                txbuf[tx_cnt] = ((u8 *)&op->addr.val)[i - 1];
        for (i = op->dummy.nbytes; i > 0; i--, tx_cnt++)
                txbuf[tx_cnt] = 0x0;
        if (op->data.dir == SPI_MEM_DATA_OUT)
                for (i = op->data.nbytes; i > 0; i--, tx_cnt++)
                        txbuf[tx_cnt] = ((u8 *)op->data.buf.out)[i - 1];

        for (i = MTK_NOR_REG_PRGDATA_MAX; i >= 0; i--)
                writeb(0, priv->base + MTK_NOR_REG_PRGDATA(i));

        for (i = 0; i < tx_len; i++, reg_offset--)
                writeb(txbuf[i], priv->base + MTK_NOR_REG_PRGDATA(reg_offset));

        kfree(txbuf);

        writel(trx_len * BITS_PER_BYTE, priv->base + MTK_NOR_REG_PRG_CNT);

        ret = mtk_snor_cmd_exec(priv, MTK_NOR_CMD_PROGRAM,
                                trx_len * BITS_PER_BYTE);
        if (ret < 0)
                return ret;

        reg_offset = op->data.nbytes - 1;
        for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
                reg = priv->base + MTK_NOR_REG_SHIFT(reg_offset);
                rxbuf[i] = readb(reg);
        }

        return 0;
}

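/*
 * Dispatch a spi-mem operation: control ops go through the PRGDATA engine,
 * writes through page program (buffered or single-byte), and reads through
 * PIO for one byte or DMA for everything else.
 */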
static int mtk_snor_exec_op(struct spi_slave *slave,
                            const struct spi_mem_op *op)
{
        struct udevice *bus = dev_get_parent(slave->dev);
        struct mtk_snor_priv *priv = dev_get_priv(bus);
        int ret;

        if (op->data.dir == SPI_MEM_NO_DATA || op->addr.nbytes == 0) {
                return mtk_snor_cmd_program(priv, op);
        } else if (op->data.dir == SPI_MEM_DATA_OUT) {
                mtk_snor_set_addr(priv, op);
                writeb(op->cmd.opcode, priv->base + MTK_NOR_REG_PRGDATA0);
                if (op->data.nbytes == MTK_NOR_PP_SIZE)
                        return mtk_snor_pp_buffered(priv, op);
                return mtk_snor_pp_unbuffered(priv, op);
        } else if (op->data.dir == SPI_MEM_DATA_IN) {
                ret = mtk_snor_write_buffer_disable(priv);
                if (ret < 0)
                        return ret;
                mtk_snor_setup_bus(priv, op);
                if (op->data.nbytes == 1) {
                        mtk_snor_set_addr(priv, op);
                        return mtk_snor_read_pio(priv, op);
                } else {
                        return mtk_snor_read_dma(priv, op);
                }
        }

        return -ENOTSUPP;
}

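/*
 * Controller init: map registers, grab the "spi" and "sf" clocks, carve out
 * a DMA-aligned bounce buffer, enable custom command mode and unlock the
 * flash.
 */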
static int mtk_snor_probe(struct udevice *bus)
{
        struct mtk_snor_priv *priv = dev_get_priv(bus);
        fdt_addr_t addr;
        u8 *buffer;
        int ret;
        u32 reg;

        addr = devfdt_get_addr(bus);
        if (addr == FDT_ADDR_T_NONE)
                return -EINVAL;
        priv->base = (void __iomem *)addr;

        ret = clk_get_by_name(bus, "spi", &priv->spi_clk);
        if (ret < 0)
                return ret;

        ret = clk_get_by_name(bus, "sf", &priv->ctlr_clk);
        if (ret < 0)
                return ret;

        buffer = devm_kmalloc(bus, MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
                              GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
        if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK)
                buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) &
                                ~MTK_NOR_DMA_ALIGN_MASK);
        priv->buffer = buffer;

        clk_enable(&priv->spi_clk);
        clk_enable(&priv->ctlr_clk);

        priv->spi_freq = clk_get_rate(&priv->spi_clk);
        printf("spi frequency: %u Hz\n", priv->spi_freq);

        /* With this setting, we issue one command at a time to accommodate
         * the SPI-MEM framework.
         */
        writel(MTK_NOR_ENABLE_SF_CMD, priv->base + MTK_NOR_REG_WP);
        mtk_snor_rmw(priv, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
        mtk_snor_rmw(priv, MTK_NOR_REG_CFG3,
                     MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);

        /* Unlock all blocks using the write status command. SPI-MEM hasn't
         * implemented an unlock procedure for MXIC devices, so this may be
         * removed later.
         */
        writel(2 * BITS_PER_BYTE, priv->base + MTK_NOR_REG_PRG_CNT);
        writel(MTK_NOR_UNLOCK_ALL, priv->base + MTK_NOR_REG_PRGDATA(5));
        writel(MTK_NOR_IRQ_WRSR, priv->base + MTK_NOR_REG_IRQ_EN);
        writel(MTK_NOR_CMD_WRSR, priv->base + MTK_NOR_REG_CMD);
        ret = readl_poll_timeout(priv->base + MTK_NOR_REG_IRQ_STAT, reg,
                                 !(reg & MTK_NOR_IRQ_WRSR),
                                 ((3 * BITS_PER_BYTE) + 1) * 200);

        return 0;
}

static int mtk_snor_set_speed(struct udevice *bus, uint speed)
{
        /* The MTK SNOR controller does not have a bus clock divider.
         * The maximum bus clock is set up in the device tree.
         */
        return 0;
}

static int mtk_snor_set_mode(struct udevice *bus, uint mode)
{
        /* The bus mode is configured per transfer in mtk_snor_setup_bus(). */
        return 0;
}

static const struct spi_controller_mem_ops mtk_snor_mem_ops = {
        .adjust_op_size = mtk_snor_adjust_op_size,
        .supports_op = mtk_snor_supports_op,
        .exec_op = mtk_snor_exec_op,
};

static const struct dm_spi_ops mtk_snor_ops = {
        .mem_ops = &mtk_snor_mem_ops,
        .set_speed = mtk_snor_set_speed,
        .set_mode = mtk_snor_set_mode,
};

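/*
 * Illustrative device tree node (hypothetical base address; the register
 * range and clock phandles are board-specific, only the compatible string
 * and the "spi"/"sf" clock names are fixed by this driver):
 *
 *	snor: snor@11014000 {
 *		compatible = "mediatek,mtk-snor";
 *		reg = <0x11014000 0x1000>;
 *		clocks = <&clk_spi>, <&clk_sf>;
 *		clock-names = "spi", "sf";
 *	};
 */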
static const struct udevice_id mtk_snor_ids[] = {
        { .compatible = "mediatek,mtk-snor" },
        {}
};

U_BOOT_DRIVER(mtk_snor) = {
        .name = "mtk_snor",
        .id = UCLASS_SPI,
        .of_match = mtk_snor_ids,
        .ops = &mtk_snor_ops,
        .priv_auto = sizeof(struct mtk_snor_priv),
        .probe = mtk_snor_probe,
};