linux/drivers/mtd/nand/raw/intel-nand-controller.c
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2020 Intel Corporation. */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand.h>

#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/units.h>
#include <asm/unaligned.h>

#define EBU_CLC                 0x000
#define EBU_CLC_RST             0x00000000u

#define EBU_ADDR_SEL(n)         (0x020 + (n) * 4)
/* 5 address bits (26:22) are included for comparison in ADDR_SELx */
#define EBU_ADDR_MASK(x)        ((x) << 4)
#define EBU_ADDR_SEL_REGEN      0x1

#define EBU_BUSCON(n)           (0x060 + (n) * 4)
#define EBU_BUSCON_CMULT_V4     0x1
#define EBU_BUSCON_RECOVC(n)    ((n) << 2)
#define EBU_BUSCON_HOLDC(n)     ((n) << 4)
#define EBU_BUSCON_WAITRDC(n)   ((n) << 6)
#define EBU_BUSCON_WAITWRC(n)   ((n) << 8)
#define EBU_BUSCON_BCGEN_CS     0x0
#define EBU_BUSCON_SETUP_EN     BIT(22)
#define EBU_BUSCON_ALEC         0xC000

#define EBU_CON                 0x0B0
#define EBU_CON_NANDM_EN        BIT(0)
#define EBU_CON_NANDM_DIS       0x0
#define EBU_CON_CSMUX_E_EN      BIT(1)
#define EBU_CON_ALE_P_LOW       BIT(2)
#define EBU_CON_CLE_P_LOW       BIT(3)
#define EBU_CON_CS_P_LOW        BIT(4)
#define EBU_CON_SE_P_LOW        BIT(5)
#define EBU_CON_WP_P_LOW        BIT(6)
#define EBU_CON_PRE_P_LOW       BIT(7)
#define EBU_CON_IN_CS_S(n)      ((n) << 8)
#define EBU_CON_OUT_CS_S(n)     ((n) << 10)
#define EBU_CON_LAT_EN_CS_P     ((0x3D) << 18)

#define EBU_WAIT                0x0B4
#define EBU_WAIT_RDBY           BIT(0)
#define EBU_WAIT_WR_C           BIT(3)

#define HSNAND_CTL1             0x110
#define HSNAND_CTL1_ADDR_SHIFT  24

#define HSNAND_CTL2             0x114
#define HSNAND_CTL2_ADDR_SHIFT  8
#define HSNAND_CTL2_CYC_N_V5    (0x2 << 16)

#define HSNAND_INT_MSK_CTL      0x124
#define HSNAND_INT_MSK_CTL_WR_C BIT(4)

#define HSNAND_INT_STA          0x128
#define HSNAND_INT_STA_WR_C     BIT(4)

#define HSNAND_CTL              0x130
#define HSNAND_CTL_ENABLE_ECC   BIT(0)
#define HSNAND_CTL_GO           BIT(2)
#define HSNAND_CTL_CE_SEL_CS(n) BIT(3 + (n))
#define HSNAND_CTL_RW_READ      0x0
#define HSNAND_CTL_RW_WRITE     BIT(10)
#define HSNAND_CTL_ECC_OFF_V8TH BIT(11)
#define HSNAND_CTL_CKFF_EN      0x0
#define HSNAND_CTL_MSG_EN       BIT(17)

#define HSNAND_PARA0            0x13c
#define HSNAND_PARA0_PAGE_V8192 0x3
#define HSNAND_PARA0_PIB_V256   (0x3 << 4)
#define HSNAND_PARA0_BYP_EN_NP  0x0
#define HSNAND_PARA0_BYP_DEC_NP 0x0
#define HSNAND_PARA0_TYPE_ONFI  BIT(18)
#define HSNAND_PARA0_ADEP_EN    BIT(21)

#define HSNAND_CMSG_0           0x150
#define HSNAND_CMSG_1           0x154

#define HSNAND_ALE_OFFS         BIT(2)
#define HSNAND_CLE_OFFS         BIT(3)
#define HSNAND_CS_OFFS          BIT(4)

#define HSNAND_ECC_OFFSET       0x008

#define NAND_DATA_IFACE_CHECK_ONLY      -1

#define MAX_CS  2

#define USEC_PER_SEC    1000000L

struct ebu_nand_cs {
        void __iomem *chipaddr;
        dma_addr_t nand_pa;
        u32 addr_sel;
};

struct ebu_nand_controller {
        struct nand_controller controller;
        struct nand_chip chip;
        struct device *dev;
        void __iomem *ebu;
        void __iomem *hsnand;
        struct dma_chan *dma_tx;
        struct dma_chan *dma_rx;
        struct completion dma_access_complete;
        unsigned long clk_rate;
        struct clk *clk;
        u32 nd_para0;
        u8 cs_num;
        struct ebu_nand_cs cs[MAX_CS];
};

static inline struct ebu_nand_controller *nand_to_ebu(struct nand_chip *chip)
{
        return container_of(chip, struct ebu_nand_controller, chip);
}

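/*
 * Poll the EBU_WAIT register until the NAND R/B line reports ready or the
 * pending write cycle has completed. The last argument is handed to
 * readl_poll_timeout() as a timeout in microseconds, so callers convert
 * milliseconds accordingly.
 */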
static int ebu_nand_waitrdy(struct nand_chip *chip, int timeout_ms)
{
        struct ebu_nand_controller *ctrl = nand_to_ebu(chip);
        u32 status;

        return readl_poll_timeout(ctrl->ebu + EBU_WAIT, status,
                                  (status & EBU_WAIT_RDBY) ||
                                  (status & EBU_WAIT_WR_C), 20, timeout_ms);
}

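/*
 * Single-byte bus accessors: the ALE, CLE and CS lines are encoded as
 * address offsets within the EBU chip-select window, so a plain readb()
 * or writeb() at the right offset generates the matching NAND bus cycle.
 */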
static u8 ebu_nand_readb(struct nand_chip *chip)
{
        struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
        u8 cs_num = ebu_host->cs_num;
        u8 val;

        val = readb(ebu_host->cs[cs_num].chipaddr + HSNAND_CS_OFFS);
        ebu_nand_waitrdy(chip, 1000);
        return val;
}

static void ebu_nand_writeb(struct nand_chip *chip, u32 offset, u8 value)
{
        struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
        u8 cs_num = ebu_host->cs_num;

        writeb(value, ebu_host->cs[cs_num].chipaddr + offset);
        ebu_nand_waitrdy(chip, 1000);
}

static void ebu_read_buf(struct nand_chip *chip, u_char *buf, unsigned int len)
{
        int i;

        for (i = 0; i < len; i++)
                buf[i] = ebu_nand_readb(chip);
}

static void ebu_write_buf(struct nand_chip *chip, const u_char *buf, int len)
{
        int i;

        for (i = 0; i < len; i++)
                ebu_nand_writeb(chip, HSNAND_CS_OFFS, buf[i]);
}

static void ebu_nand_disable(struct nand_chip *chip)
{
        struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);

        writel(0, ebu_host->ebu + EBU_CON);
}

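/*
 * Enable NAND mode on the EBU and set up the control-signal polarities
 * and chip-select routing for the selected chip.
 */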
static void ebu_select_chip(struct nand_chip *chip)
{
        struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
        void __iomem *nand_con = ebu_host->ebu + EBU_CON;
        u32 cs = ebu_host->cs_num;

        writel(EBU_CON_NANDM_EN | EBU_CON_CSMUX_E_EN | EBU_CON_CS_P_LOW |
               EBU_CON_SE_P_LOW | EBU_CON_WP_P_LOW | EBU_CON_PRE_P_LOW |
               EBU_CON_IN_CS_S(cs) | EBU_CON_OUT_CS_S(cs) |
               EBU_CON_LAT_EN_CS_P, nand_con);
}

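/*
 * Convert the SDR timings into EBU bus clock cycles. The clock rate is
 * taken in MHz, so USEC_PER_SEC / rate yields the bus clock period in
 * picoseconds, the same unit used by the nand_sdr_timings fields.
 */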
static int ebu_nand_set_timings(struct nand_chip *chip, int csline,
                                const struct nand_interface_config *conf)
{
        struct ebu_nand_controller *ctrl = nand_to_ebu(chip);
        unsigned int rate = clk_get_rate(ctrl->clk) / HZ_PER_MHZ;
        unsigned int period = DIV_ROUND_UP(USEC_PER_SEC, rate);
        const struct nand_sdr_timings *timings;
        u32 trecov, thold, twrwait, trdwait;
        u32 reg = 0;

        timings = nand_get_sdr_timings(conf);
        if (IS_ERR(timings))
                return PTR_ERR(timings);

        if (csline == NAND_DATA_IFACE_CHECK_ONLY)
                return 0;

        trecov = DIV_ROUND_UP(max(timings->tREA_max, timings->tREH_min),
                              period);
        reg |= EBU_BUSCON_RECOVC(trecov);

        thold = DIV_ROUND_UP(max(timings->tDH_min, timings->tDS_min), period);
        reg |= EBU_BUSCON_HOLDC(thold);

        trdwait = DIV_ROUND_UP(max(timings->tRC_min, timings->tREH_min),
                               period);
        reg |= EBU_BUSCON_WAITRDC(trdwait);

        twrwait = DIV_ROUND_UP(max(timings->tWC_min, timings->tWH_min), period);
        reg |= EBU_BUSCON_WAITWRC(twrwait);

        reg |= EBU_BUSCON_CMULT_V4 | EBU_BUSCON_BCGEN_CS | EBU_BUSCON_ALEC |
                EBU_BUSCON_SETUP_EN;

        writel(reg, ctrl->ebu + EBU_BUSCON(ctrl->cs_num));

        return 0;
}

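/*
 * OOB layout used with the BCH engine: the ECC bytes start at a fixed
 * offset of 8 bytes into the OOB area, and everything after them is free
 * space for the upper layers.
 */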
static int ebu_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
                                  struct mtd_oob_region *oobregion)
{
        struct nand_chip *chip = mtd_to_nand(mtd);

        if (section)
                return -ERANGE;

        oobregion->offset = HSNAND_ECC_OFFSET;
        oobregion->length = chip->ecc.total;

        return 0;
}

static int ebu_nand_ooblayout_free(struct mtd_info *mtd, int section,
                                   struct mtd_oob_region *oobregion)
{
        struct nand_chip *chip = mtd_to_nand(mtd);

        if (section)
                return -ERANGE;

        oobregion->offset = chip->ecc.total + HSNAND_ECC_OFFSET;
        oobregion->length = mtd->oobsize - oobregion->offset;

        return 0;
}

static const struct mtd_ooblayout_ops ebu_nand_ooblayout_ops = {
        .ecc = ebu_nand_ooblayout_ecc,
        .free = ebu_nand_ooblayout_free,
};

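/* DMA completion callbacks: tear down the transfer and wake up the waiter. */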
static void ebu_dma_rx_callback(void *cookie)
{
        struct ebu_nand_controller *ebu_host = cookie;

        dmaengine_terminate_async(ebu_host->dma_rx);

        complete(&ebu_host->dma_access_complete);
}

static void ebu_dma_tx_callback(void *cookie)
{
        struct ebu_nand_controller *ebu_host = cookie;

        dmaengine_terminate_async(ebu_host->dma_tx);

        complete(&ebu_host->dma_access_complete);
}

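/*
 * Run one DMA transfer between memory and the HSNAND engine and wait for
 * it to complete. @dir is a dma_transfer_direction value (DMA_DEV_TO_MEM
 * for reads, DMA_MEM_TO_DEV for writes); its numeric value lines up with
 * the matching dma_data_direction, which the single mapping relies on.
 */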
static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
                         const u8 *buf, u32 len)
{
        struct dma_async_tx_descriptor *tx;
        struct completion *dma_completion;
        dma_async_tx_callback callback;
        struct dma_chan *chan;
        dma_cookie_t cookie;
        unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
        dma_addr_t buf_dma;
        int ret;
        u32 timeout;

        if (dir == DMA_DEV_TO_MEM) {
                chan = ebu_host->dma_rx;
                dma_completion = &ebu_host->dma_access_complete;
                callback = ebu_dma_rx_callback;
        } else {
                chan = ebu_host->dma_tx;
                dma_completion = &ebu_host->dma_access_complete;
                callback = ebu_dma_tx_callback;
        }

        buf_dma = dma_map_single(chan->device->dev, (void *)buf, len, dir);
        if (dma_mapping_error(chan->device->dev, buf_dma)) {
                dev_err(ebu_host->dev, "Failed to map DMA buffer\n");
                return -EIO;
        }

        tx = dmaengine_prep_slave_single(chan, buf_dma, len, dir, flags);
        if (!tx) {
                ret = -ENXIO;
                goto err_unmap;
        }

        tx->callback = callback;
        tx->callback_param = ebu_host;
        cookie = dmaengine_submit(tx);

        ret = dma_submit_error(cookie);
        if (ret) {
                dev_err(ebu_host->dev, "dma_submit_error %d\n", cookie);
                ret = -EIO;
                goto err_unmap;
        }

        init_completion(dma_completion);
        dma_async_issue_pending(chan);

        /* Wait for the DMA transfer to finish. */
        timeout = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
        if (!timeout) {
                dev_err(ebu_host->dev, "I/O error in DMA (status %d)\n",
                        dmaengine_tx_status(chan, cookie, NULL));
                dmaengine_terminate_sync(chan);
                ret = -ETIMEDOUT;
                goto err_unmap;
        }

        dma_unmap_single(chan->device->dev, buf_dma, len, dir);

        return 0;

err_unmap:
        dma_unmap_single(chan->device->dev, buf_dma, len, dir);

        return ret;
}

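/*
 * Program the HSNAND engine with the page address and the operation
 * parameters, then kick off a read (cmd == NAND_CMD_READ0) or a write
 * transfer.
 */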
static void ebu_nand_trigger(struct ebu_nand_controller *ebu_host,
                             int page, u32 cmd)
{
        unsigned int val;

        val = cmd | (page & 0xFF) << HSNAND_CTL1_ADDR_SHIFT;
        writel(val, ebu_host->hsnand + HSNAND_CTL1);
        val = (page & 0xFFFF00) >> 8 | HSNAND_CTL2_CYC_N_V5;
        writel(val, ebu_host->hsnand + HSNAND_CTL2);

        writel(ebu_host->nd_para0, ebu_host->hsnand + HSNAND_PARA0);

        /* clear first, will update later */
        writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_0);
        writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_1);

        writel(HSNAND_INT_MSK_CTL_WR_C,
               ebu_host->hsnand + HSNAND_INT_MSK_CTL);

        if (!cmd)
                val = HSNAND_CTL_RW_READ;
        else
                val = HSNAND_CTL_RW_WRITE;

        writel(HSNAND_CTL_MSG_EN | HSNAND_CTL_CKFF_EN |
               HSNAND_CTL_ECC_OFF_V8TH | HSNAND_CTL_CE_SEL_CS(ebu_host->cs_num) |
               HSNAND_CTL_ENABLE_ECC | HSNAND_CTL_GO | val,
               ebu_host->hsnand + HSNAND_CTL);
}

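/* Read one page with hardware ECC, transferring the data by DMA. */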
static int ebu_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
                                    int oob_required, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
        int ret, reg_data;

        ebu_nand_trigger(ebu_host, page, NAND_CMD_READ0);

        ret = ebu_dma_start(ebu_host, DMA_DEV_TO_MEM, buf, mtd->writesize);
        if (ret)
                return ret;

        if (oob_required)
                chip->ecc.read_oob(chip, page);

        reg_data = readl(ebu_host->hsnand + HSNAND_CTL);
        reg_data &= ~HSNAND_CTL_GO;
        writel(reg_data, ebu_host->hsnand + HSNAND_CTL);

        return 0;
}

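/*
 * Write one page with hardware ECC: DMA the data out, optionally stage
 * the first eight OOB bytes in the CMSG registers, then wait for the
 * engine to signal write completion.
 */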
static int ebu_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
                                     int oob_required, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
        void __iomem *int_sta = ebu_host->hsnand + HSNAND_INT_STA;
        int reg_data, ret, val;
        u32 reg;

        ebu_nand_trigger(ebu_host, page, NAND_CMD_SEQIN);

        ret = ebu_dma_start(ebu_host, DMA_MEM_TO_DEV, buf, mtd->writesize);
        if (ret)
                return ret;

        if (oob_required) {
                reg = get_unaligned_le32(chip->oob_poi);
                writel(reg, ebu_host->hsnand + HSNAND_CMSG_0);

                reg = get_unaligned_le32(chip->oob_poi + 4);
                writel(reg, ebu_host->hsnand + HSNAND_CMSG_1);
        }

        ret = readl_poll_timeout_atomic(int_sta, val, !(val & HSNAND_INT_STA_WR_C),
                                        10, 1000);
        if (ret)
                return ret;

        reg_data = readl(ebu_host->hsnand + HSNAND_CTL);
        reg_data &= ~HSNAND_CTL_GO;
        writel(reg_data, ebu_host->hsnand + HSNAND_CTL);

        return 0;
}

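/*
 * ECC strengths supported by the BCH engine; the matching index is
 * programmed into the top bits (31:29) of HSNAND_PARA0.
 */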
static const u8 ecc_strength[] = { 1, 1, 4, 8, 24, 32, 40, 60, };

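/*
 * Pick a BCH mode matching the chip's ECC requirements and derive the
 * HSNAND_PARA0 value (page size, pages per block, ECC mode) used for
 * every page transfer.
 */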
static int ebu_nand_attach_chip(struct nand_chip *chip)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
        u32 ecc_steps, ecc_bytes, ecc_total, pagesize, pg_per_blk;
        u32 ecc_strength_ds = chip->ecc.strength;
        u32 ecc_size = chip->ecc.size;
        u32 writesize = mtd->writesize;
        u32 blocksize = mtd->erasesize;
        int bch_algo, start, val;

        /* Default to an ECC size of 512 */
        if (!chip->ecc.size)
                chip->ecc.size = 512;
        ecc_size = chip->ecc.size;

        switch (ecc_size) {
        case 512:
                start = 1;
                if (!ecc_strength_ds)
                        ecc_strength_ds = 4;
                break;
        case 1024:
                start = 4;
                if (!ecc_strength_ds)
                        ecc_strength_ds = 32;
                break;
        default:
                return -EINVAL;
        }

        /* BCH ECC algorithm settings: correctable bits per 512B/1024B step */
        bch_algo = round_up(start + 1, 4);
        for (val = start; val < bch_algo; val++) {
                if (ecc_strength_ds == ecc_strength[val])
                        break;
        }
        if (val == bch_algo)
                return -EINVAL;

        if (ecc_strength_ds == 8)
                ecc_bytes = 14;
        else
                ecc_bytes = DIV_ROUND_UP(ecc_strength_ds * fls(8 * ecc_size), 8);

        ecc_steps = writesize / ecc_size;
        ecc_total = ecc_steps * ecc_bytes;
        if ((ecc_total + 8) > mtd->oobsize)
                return -ERANGE;

        chip->ecc.total = ecc_total;
        pagesize = fls(writesize >> 11);
        if (pagesize > HSNAND_PARA0_PAGE_V8192)
                return -ERANGE;

        pg_per_blk = fls((blocksize / writesize) >> 6) / 8;
        if (pg_per_blk > HSNAND_PARA0_PIB_V256)
                return -ERANGE;

        ebu_host->nd_para0 = pagesize | pg_per_blk | HSNAND_PARA0_BYP_EN_NP |
                             HSNAND_PARA0_BYP_DEC_NP | HSNAND_PARA0_ADEP_EN |
                             HSNAND_PARA0_TYPE_ONFI | (val << 29);

        mtd_set_ooblayout(mtd, &ebu_nand_ooblayout_ops);
        chip->ecc.read_page = ebu_nand_read_page_hwecc;
        chip->ecc.write_page = ebu_nand_write_page_hwecc;

        return 0;
}

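/*
 * Execute a generic NAND operation by bit-banging command, address and
 * data cycles through the EBU chip-select window.
 */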
static int ebu_nand_exec_op(struct nand_chip *chip,
                            const struct nand_operation *op, bool check_only)
{
        const struct nand_op_instr *instr = NULL;
        unsigned int op_id;
        int i, timeout_ms, ret = 0;

        if (check_only)
                return 0;

        ebu_select_chip(chip);
        for (op_id = 0; op_id < op->ninstrs; op_id++) {
                instr = &op->instrs[op_id];

                switch (instr->type) {
                case NAND_OP_CMD_INSTR:
                        ebu_nand_writeb(chip, HSNAND_CLE_OFFS | HSNAND_CS_OFFS,
                                        instr->ctx.cmd.opcode);
                        break;

                case NAND_OP_ADDR_INSTR:
                        for (i = 0; i < instr->ctx.addr.naddrs; i++)
                                ebu_nand_writeb(chip,
                                                HSNAND_ALE_OFFS | HSNAND_CS_OFFS,
                                                instr->ctx.addr.addrs[i]);
                        break;

                case NAND_OP_DATA_IN_INSTR:
                        ebu_read_buf(chip, instr->ctx.data.buf.in,
                                     instr->ctx.data.len);
                        break;

                case NAND_OP_DATA_OUT_INSTR:
                        ebu_write_buf(chip, instr->ctx.data.buf.out,
                                      instr->ctx.data.len);
                        break;

                case NAND_OP_WAITRDY_INSTR:
                        timeout_ms = instr->ctx.waitrdy.timeout_ms * 1000;
                        ret = ebu_nand_waitrdy(chip, timeout_ms);
                        break;
                }
        }

        return ret;
}

static const struct nand_controller_ops ebu_nand_controller_ops = {
        .attach_chip = ebu_nand_attach_chip,
        .setup_interface = ebu_nand_set_timings,
        .exec_op = ebu_nand_exec_op,
};

static void ebu_dma_cleanup(struct ebu_nand_controller *ebu_host)
{
        if (ebu_host->dma_rx)
                dma_release_channel(ebu_host->dma_rx);

        if (ebu_host->dma_tx)
                dma_release_channel(ebu_host->dma_tx);
}

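/* Map the EBU/HSNAND register windows, set up DMA and register the chip. */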
static int ebu_nand_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct ebu_nand_controller *ebu_host;
        struct nand_chip *nand;
        struct mtd_info *mtd;
        struct resource *res;
        char *resname;
        int ret;
        u32 cs;

        ebu_host = devm_kzalloc(dev, sizeof(*ebu_host), GFP_KERNEL);
        if (!ebu_host)
                return -ENOMEM;

        ebu_host->dev = dev;
        nand_controller_init(&ebu_host->controller);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ebunand");
        ebu_host->ebu = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(ebu_host->ebu))
                return PTR_ERR(ebu_host->ebu);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsnand");
        ebu_host->hsnand = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(ebu_host->hsnand))
                return PTR_ERR(ebu_host->hsnand);

        ret = device_property_read_u32(dev, "reg", &cs);
        if (ret) {
                dev_err(dev, "failed to get chip select: %d\n", ret);
                return ret;
        }
        if (cs >= MAX_CS) {
                dev_err(dev, "got invalid chip select: %d\n", cs);
                return -EINVAL;
        }
        ebu_host->cs_num = cs;

        resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
        if (!resname)
                return -ENOMEM;
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
        if (!res)
                return -EINVAL;
        ebu_host->cs[cs].chipaddr = devm_ioremap_resource(dev, res);
        if (IS_ERR(ebu_host->cs[cs].chipaddr))
                return PTR_ERR(ebu_host->cs[cs].chipaddr);
        ebu_host->cs[cs].nand_pa = res->start;

        ebu_host->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(ebu_host->clk))
                return dev_err_probe(dev, PTR_ERR(ebu_host->clk),
                                     "failed to get clock\n");

        ret = clk_prepare_enable(ebu_host->clk);
        if (ret) {
                dev_err(dev, "failed to enable clock: %d\n", ret);
                return ret;
        }
        ebu_host->clk_rate = clk_get_rate(ebu_host->clk);

        ebu_host->dma_tx = dma_request_chan(dev, "tx");
        if (IS_ERR(ebu_host->dma_tx)) {
                ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
                                    "failed to request DMA tx channel\n");
                goto err_disable_unprepare_clk;
        }

        ebu_host->dma_rx = dma_request_chan(dev, "rx");
        if (IS_ERR(ebu_host->dma_rx)) {
                ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx),
                                    "failed to request DMA rx channel\n");
                ebu_host->dma_rx = NULL;
                goto err_cleanup_dma;
        }

        resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
        if (!resname) {
                ret = -ENOMEM;
                goto err_cleanup_dma;
        }
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
        if (!res) {
                ret = -EINVAL;
                goto err_cleanup_dma;
        }
        ebu_host->cs[cs].addr_sel = res->start;
        writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN,
               ebu_host->ebu + EBU_ADDR_SEL(cs));

        nand_set_flash_node(&ebu_host->chip, dev->of_node);

        mtd = nand_to_mtd(&ebu_host->chip);
        if (!mtd->name) {
                dev_err(ebu_host->dev, "NAND label property is mandatory\n");
                ret = -EINVAL;
                goto err_cleanup_dma;
        }

        mtd->dev.parent = dev;

        platform_set_drvdata(pdev, ebu_host);
        nand_set_controller_data(&ebu_host->chip, ebu_host);

        nand = &ebu_host->chip;
        nand->controller = &ebu_host->controller;
        nand->controller->ops = &ebu_nand_controller_ops;

        /* Scan for the NAND device */
        ret = nand_scan(&ebu_host->chip, 1);
        if (ret)
                goto err_cleanup_dma;

        ret = mtd_device_register(mtd, NULL, 0);
        if (ret)
                goto err_clean_nand;

        return 0;

err_clean_nand:
        nand_cleanup(&ebu_host->chip);
err_cleanup_dma:
        ebu_dma_cleanup(ebu_host);
err_disable_unprepare_clk:
        clk_disable_unprepare(ebu_host->clk);

        return ret;
}

static int ebu_nand_remove(struct platform_device *pdev)
{
        struct ebu_nand_controller *ebu_host = platform_get_drvdata(pdev);
        int ret;

        ret = mtd_device_unregister(nand_to_mtd(&ebu_host->chip));
        WARN_ON(ret);
        nand_cleanup(&ebu_host->chip);
        ebu_nand_disable(&ebu_host->chip);
        ebu_dma_cleanup(ebu_host);
        clk_disable_unprepare(ebu_host->clk);

        return 0;
}

static const struct of_device_id ebu_nand_match[] = {
        { .compatible = "intel,nand-controller" },
        { .compatible = "intel,lgm-ebunand" },
        {}
};
MODULE_DEVICE_TABLE(of, ebu_nand_match);

static struct platform_driver ebu_nand_driver = {
        .probe = ebu_nand_probe,
        .remove = ebu_nand_remove,
        .driver = {
                .name = "intel-nand-controller",
                .of_match_table = ebu_nand_match,
        },
};
module_platform_driver(ebu_nand_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
MODULE_DESCRIPTION("Intel's LGM External Bus NAND Controller driver");