linux/drivers/mtd/nand/raw/cadence-nand-controller.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Cadence NAND flash controller driver
 *
 * Copyright (C) 2019 Cadence
 *
 * Author: Piotr Sroka <piotrs@cadence.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_device.h>
#include <linux/iopoll.h>

/*
 * HPNFC can work in 3 modes:
 * -  PIO - can work in master or slave DMA
 * -  CDMA - needs master DMA for accessing command descriptors.
 * -  Generic mode - can use only slave DMA.
 * CDMA and PIO modes can be used to execute only base commands.
 * Generic mode can be used to execute any command
 * on NAND flash memory. The driver uses CDMA mode for
 * block erasing, page reading and page programming.
 * Generic mode is used for executing the rest of the commands.
 */

#define MAX_OOB_SIZE_PER_SECTOR 32
#define MAX_ADDRESS_CYC         6
#define MAX_ERASE_ADDRESS_CYC   3
#define MAX_DATA_SIZE           0xFFFC
#define DMA_DATA_SIZE_ALIGN     8

/* Register definition. */
/*
 * Command register 0.
 * Writing data to this register will initiate a new transaction
 * of the NF controller.
 */
#define CMD_REG0                        0x0000
/* Command type field mask. */
#define         CMD_REG0_CT             GENMASK(31, 30)
/* Command type CDMA. */
#define         CMD_REG0_CT_CDMA        0uL
/* Command type generic. */
#define         CMD_REG0_CT_GEN         3uL
/* Command thread number field mask. */
#define         CMD_REG0_TN             GENMASK(27, 24)

/* Command register 2. */
#define CMD_REG2                        0x0008
/* Command register 3. */
#define CMD_REG3                        0x000C
/* Pointer register to select which thread status will be selected. */
#define CMD_STATUS_PTR                  0x0010
/* Command status register for selected thread. */
#define CMD_STATUS                      0x0014

/* Interrupt status register. */
#define INTR_STATUS                     0x0110
#define         INTR_STATUS_SDMA_ERR    BIT(22)
#define         INTR_STATUS_SDMA_TRIGG  BIT(21)
#define         INTR_STATUS_UNSUPP_CMD  BIT(19)
#define         INTR_STATUS_DDMA_TERR   BIT(18)
#define         INTR_STATUS_CDMA_TERR   BIT(17)
#define         INTR_STATUS_CDMA_IDL    BIT(16)

/* Interrupt enable register. */
#define INTR_ENABLE                             0x0114
#define         INTR_ENABLE_INTR_EN             BIT(31)
#define         INTR_ENABLE_SDMA_ERR_EN         BIT(22)
#define         INTR_ENABLE_SDMA_TRIGG_EN       BIT(21)
#define         INTR_ENABLE_UNSUPP_CMD_EN       BIT(19)
#define         INTR_ENABLE_DDMA_TERR_EN        BIT(18)
#define         INTR_ENABLE_CDMA_TERR_EN        BIT(17)
#define         INTR_ENABLE_CDMA_IDLE_EN        BIT(16)

/* Controller internal state. */
#define CTRL_STATUS                             0x0118
#define         CTRL_STATUS_INIT_COMP           BIT(9)
#define         CTRL_STATUS_CTRL_BUSY           BIT(8)

/* Command Engine threads state. */
#define TRD_STATUS                              0x0120

/* Command Engine interrupt thread error status. */
#define TRD_ERR_INT_STATUS                      0x0128
/* Command Engine interrupt thread error enable. */
#define TRD_ERR_INT_STATUS_EN                   0x0130
/* Command Engine interrupt thread complete status. */
#define TRD_COMP_INT_STATUS                     0x0138

/*
 * Transfer config 0 register.
 * Configures data transfer parameters.
 */
#define TRAN_CFG_0                              0x0400
/* Offset value from the beginning of the page. */
#define         TRAN_CFG_0_OFFSET               GENMASK(31, 16)
/* Number of sectors to transfer within a single NF device's page. */
#define         TRAN_CFG_0_SEC_CNT              GENMASK(7, 0)

/*
 * Transfer config 1 register.
 * Configures data transfer parameters.
 */
#define TRAN_CFG_1                              0x0404
/* Size of last data sector. */
#define         TRAN_CFG_1_LAST_SEC_SIZE        GENMASK(31, 16)
/* Size of not-last data sector. */
#define         TRAN_CFG_1_SECTOR_SIZE          GENMASK(15, 0)

/* ECC engine configuration register 0. */
#define ECC_CONFIG_0                            0x0428
/* Correction strength. */
#define         ECC_CONFIG_0_CORR_STR           GENMASK(10, 8)
/* Enable erased pages detection mechanism. */
#define         ECC_CONFIG_0_ERASE_DET_EN       BIT(1)
/* Enable controller ECC check bits generation and correction. */
#define         ECC_CONFIG_0_ECC_EN             BIT(0)

/* ECC engine configuration register 1. */
#define ECC_CONFIG_1                            0x042C

/* Multiplane settings register. */
#define MULTIPLANE_CFG                          0x0434
/* Cache operation settings. */
#define CACHE_CFG                               0x0438

/* DMA settings register. */
#define DMA_SETINGS                             0x043C
/* Enable SDMA error report on access unprepared slave DMA interface. */
#define         DMA_SETINGS_SDMA_ERR_RSP        BIT(17)

/* Transferred data block size for the slave DMA module. */
#define SDMA_SIZE                               0x0440

/*
 * Thread number associated with transferred data block
 * for the slave DMA module.
 */
#define SDMA_TRD_NUM                            0x0444
/* Thread number mask. */
#define         SDMA_TRD_NUM_SDMA_TRD           GENMASK(2, 0)

#define CONTROL_DATA_CTRL                       0x0494
/* Control data size mask. */
#define         CONTROL_DATA_CTRL_SIZE          GENMASK(15, 0)

#define CTRL_VERSION                            0x800
#define         CTRL_VERSION_REV                GENMASK(7, 0)

/* Available hardware features of the controller. */
#define CTRL_FEATURES                           0x804
/* Support for NV-DDR2/3 work mode. */
#define         CTRL_FEATURES_NVDDR_2_3         BIT(28)
/* Support for NV-DDR work mode. */
#define         CTRL_FEATURES_NVDDR             BIT(27)
/* Support for asynchronous work mode. */
#define         CTRL_FEATURES_ASYNC             BIT(26)
/* Number of banks supported by the hardware. */
#define         CTRL_FEATURES_N_BANKS           GENMASK(25, 24)
/* Slave and Master DMA data width. */
#define         CTRL_FEATURES_DMA_DWITH64       BIT(21)
/* Availability of Control Data feature. */
#define         CTRL_FEATURES_CONTROL_DATA      BIT(10)

/* BCH Engine identification register 0 - correction strengths. */
#define BCH_CFG_0                               0x838
#define         BCH_CFG_0_CORR_CAP_0            GENMASK(7, 0)
#define         BCH_CFG_0_CORR_CAP_1            GENMASK(15, 8)
#define         BCH_CFG_0_CORR_CAP_2            GENMASK(23, 16)
#define         BCH_CFG_0_CORR_CAP_3            GENMASK(31, 24)

/* BCH Engine identification register 1 - correction strengths. */
#define BCH_CFG_1                               0x83C
#define         BCH_CFG_1_CORR_CAP_4            GENMASK(7, 0)
#define         BCH_CFG_1_CORR_CAP_5            GENMASK(15, 8)
#define         BCH_CFG_1_CORR_CAP_6            GENMASK(23, 16)
#define         BCH_CFG_1_CORR_CAP_7            GENMASK(31, 24)

/* BCH Engine identification register 2 - sector sizes. */
#define BCH_CFG_2                               0x840
#define         BCH_CFG_2_SECT_0                GENMASK(15, 0)
#define         BCH_CFG_2_SECT_1                GENMASK(31, 16)

/* BCH Engine identification register 3. */
#define BCH_CFG_3                               0x844

/* Ready/Busy# line status. */
#define RBN_SETINGS                             0x1004

/* Common settings. */
#define COMMON_SET                              0x1008
/* 16 bit device connected to the NAND Flash interface. */
#define         COMMON_SET_DEVICE_16BIT         BIT(8)

/* Skip_bytes registers. */
#define SKIP_BYTES_CONF                         0x100C
#define         SKIP_BYTES_MARKER_VALUE         GENMASK(31, 16)
#define         SKIP_BYTES_NUM_OF_BYTES         GENMASK(7, 0)

#define SKIP_BYTES_OFFSET                       0x1010
#define         SKIP_BYTES_OFFSET_VALUE         GENMASK(23, 0)

/* Timings configuration. */
#define ASYNC_TOGGLE_TIMINGS                    0x101c
#define         ASYNC_TOGGLE_TIMINGS_TRH        GENMASK(28, 24)
#define         ASYNC_TOGGLE_TIMINGS_TRP        GENMASK(20, 16)
#define         ASYNC_TOGGLE_TIMINGS_TWH        GENMASK(12, 8)
#define         ASYNC_TOGGLE_TIMINGS_TWP        GENMASK(4, 0)

#define TIMINGS0                                0x1024
#define         TIMINGS0_TADL                   GENMASK(31, 24)
#define         TIMINGS0_TCCS                   GENMASK(23, 16)
#define         TIMINGS0_TWHR                   GENMASK(15, 8)
#define         TIMINGS0_TRHW                   GENMASK(7, 0)

#define TIMINGS1                                0x1028
#define         TIMINGS1_TRHZ                   GENMASK(31, 24)
#define         TIMINGS1_TWB                    GENMASK(23, 16)
#define         TIMINGS1_TVDLY                  GENMASK(7, 0)

#define TIMINGS2                                0x102c
#define         TIMINGS2_TFEAT                  GENMASK(25, 16)
#define         TIMINGS2_CS_HOLD_TIME           GENMASK(13, 8)
#define         TIMINGS2_CS_SETUP_TIME          GENMASK(5, 0)

/* Configuration of the resynchronization of slave DLL of PHY. */
#define DLL_PHY_CTRL                            0x1034
#define         DLL_PHY_CTRL_DLL_RST_N          BIT(24)
#define         DLL_PHY_CTRL_EXTENDED_WR_MODE   BIT(17)
#define         DLL_PHY_CTRL_EXTENDED_RD_MODE   BIT(16)
#define         DLL_PHY_CTRL_RS_HIGH_WAIT_CNT   GENMASK(11, 8)
#define         DLL_PHY_CTRL_RS_IDLE_CNT        GENMASK(7, 0)

/* Register controlling DQ related timing. */
#define PHY_DQ_TIMING                           0x2000
/* Register controlling DQS related timing. */
#define PHY_DQS_TIMING                          0x2004
#define         PHY_DQS_TIMING_DQS_SEL_OE_END   GENMASK(3, 0)
#define         PHY_DQS_TIMING_PHONY_DQS_SEL    BIT(16)
#define         PHY_DQS_TIMING_USE_PHONY_DQS    BIT(20)

/* Register controlling the gate and loopback control related timing. */
#define PHY_GATE_LPBK_CTRL                      0x2008
#define         PHY_GATE_LPBK_CTRL_RDS          GENMASK(24, 19)

/* Register holds the control for the master DLL logic. */
#define PHY_DLL_MASTER_CTRL                     0x200C
#define         PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)

/* Register holds the control for the slave DLL logic. */
#define PHY_DLL_SLAVE_CTRL                      0x2010

/* This register handles the global control settings for the PHY. */
#define PHY_CTRL                                0x2080
#define         PHY_CTRL_SDR_DQS                BIT(14)
#define         PHY_CTRL_PHONY_DQS              GENMASK(9, 4)

/*
 * This register handles the global control settings
 * for the termination selects for reads.
 */
#define PHY_TSEL                                0x2084

/* Generic command layout. */
#define GCMD_LAY_CS                     GENMASK_ULL(11, 8)
/*
 * This bit informs the minicontroller if it has to wait for tWB
 * after sending the last CMD/ADDR/DATA in the sequence.
 */
#define GCMD_LAY_TWB                    BIT_ULL(6)
/* Type of generic instruction. */
#define GCMD_LAY_INSTR                  GENMASK_ULL(5, 0)

/* Generic CMD sequence type. */
#define         GCMD_LAY_INSTR_CMD      0
/* Generic ADDR sequence type. */
#define         GCMD_LAY_INSTR_ADDR     1
/* Generic data transfer sequence type. */
#define         GCMD_LAY_INSTR_DATA     2

/* Generic command CMD sequence - command opcode field. */
#define GCMD_LAY_INPUT_CMD              GENMASK_ULL(23, 16)

/* Generic command address sequence - address fields. */
#define GCMD_LAY_INPUT_ADDR             GENMASK_ULL(63, 16)
/* Generic command address sequence - address size. */
#define GCMD_LAY_INPUT_ADDR_SIZE        GENMASK_ULL(13, 11)

/* Transfer direction field of generic command data sequence. */
#define GCMD_DIR                        BIT_ULL(11)
/* Read transfer direction of generic command data sequence. */
#define         GCMD_DIR_READ           0
/* Write transfer direction of generic command data sequence. */
#define         GCMD_DIR_WRITE          1

/* ECC enabled flag of generic command data sequence - ECC enabled. */
#define GCMD_ECC_EN                     BIT_ULL(12)
/* Generic command data sequence - sector size. */
#define GCMD_SECT_SIZE                  GENMASK_ULL(31, 16)
/* Generic command data sequence - sector count. */
#define GCMD_SECT_CNT                   GENMASK_ULL(39, 32)
/* Generic command data sequence - last sector size. */
#define GCMD_LAST_SIZE                  GENMASK_ULL(55, 40)

/* CDMA descriptor fields. */
/* Erase command type of CDMA descriptor. */
#define CDMA_CT_ERASE           0x1000
/* Program page command type of CDMA descriptor. */
#define CDMA_CT_WR              0x2100
/* Read page command type of CDMA descriptor. */
#define CDMA_CT_RD              0x2200

/* Flash pointer memory shift. */
#define CDMA_CFPTR_MEM_SHIFT    24
/* Flash pointer memory mask. */
#define CDMA_CFPTR_MEM          GENMASK(26, 24)

/*
 * Command DMA descriptor flags. If set, an interrupt is issued after
 * the completion of descriptor processing.
 */
#define CDMA_CF_INT             BIT(8)
/*
 * Command DMA descriptor flags - the next descriptor
 * address field is valid and descriptor processing should continue.
 */
#define CDMA_CF_CONT            BIT(9)
/* DMA master flag of command DMA descriptor. */
#define CDMA_CF_DMA_MASTER      BIT(10)

/* Operation complete status of command descriptor. */
#define CDMA_CS_COMP            BIT(15)
/* Command descriptor status - operation fail. */
#define CDMA_CS_FAIL            BIT(14)
/* Command descriptor status - page erased. */
#define CDMA_CS_ERP             BIT(11)
/* Command descriptor status - timeout occurred. */
#define CDMA_CS_TOUT            BIT(10)
/*
 * Maximum amount of correction applied to one ECC sector.
 * It is part of command descriptor status.
 */
#define CDMA_CS_MAXERR          GENMASK(9, 2)
/* Command descriptor status - uncorrectable ECC error. */
#define CDMA_CS_UNCE            BIT(1)
/* Command descriptor status - descriptor error. */
#define CDMA_CS_ERR             BIT(0)

/* Status of operation - OK. */
#define STAT_OK                 0
/* Status of operation - FAIL. */
#define STAT_FAIL               2
/* Status of operation - uncorrectable ECC error. */
#define STAT_ECC_UNCORR         3
/* Status of operation - page erased. */
#define STAT_ERASED             5
/* Status of operation - correctable ECC error. */
#define STAT_ECC_CORR           6
/* Status of operation - unexpected state. */
#define STAT_UNKNOWN            7
/* Status of operation - operation is not completed yet. */
#define STAT_BUSY               0xFF

#define BCH_MAX_NUM_CORR_CAPS           8
#define BCH_MAX_NUM_SECTOR_SIZES        2

struct cadence_nand_timings {
        u32 async_toggle_timings;
        u32 timings0;
        u32 timings1;
        u32 timings2;
        u32 dll_phy_ctrl;
        u32 phy_ctrl;
        u32 phy_dqs_timing;
        u32 phy_gate_lpbk_ctrl;
};

/* Command DMA descriptor. */
struct cadence_nand_cdma_desc {
        /* Next descriptor address. */
        u64 next_pointer;

        /* Flash address is a 32-bit address comprising BANK and ROW ADDR. */
        u32 flash_pointer;
        /* Field appears in HPNFC version 13. */
        u16 bank;
        u16 rsvd0;

        /* Operation the controller needs to perform. */
        u16 command_type;
        u16 rsvd1;
        /* Flags for operation of this command. */
        u16 command_flags;
        u16 rsvd2;

        /* System/host memory address required for data DMA commands. */
        u64 memory_pointer;

        /* Status of operation. */
        u32 status;
        u32 rsvd3;

        /* Address pointer to sync buffer location. */
        u64 sync_flag_pointer;

        /* Controls the buffer sync mechanism. */
        u32 sync_arguments;
        u32 rsvd4;

        /* Control data pointer. */
        u64 ctrl_data_ptr;
};

/* Interrupt status. */
struct cadence_nand_irq_status {
        /* Thread operation complete status. */
        u32 trd_status;
        /* Thread operation error. */
        u32 trd_error;
        /* Controller status. */
        u32 status;
};

/* Cadence NAND flash controller capabilities taken from the driver data. */
struct cadence_nand_dt_devdata {
        /* Skew value of the output signals of the NAND Flash interface. */
        u32 if_skew;
        /* Indicates whether the slave DMA interface is connected to a DMA engine. */
        unsigned int has_dma:1;
};

/* Cadence NAND flash controller capabilities read from registers. */
struct cdns_nand_caps {
        /* Maximum number of banks supported by hardware. */
        u8 max_banks;
        /* Slave and Master DMA data width in bytes (4 or 8). */
        u8 data_dma_width;
        /* Control Data feature supported. */
        bool data_control_supp;
        /* Is PHY type DLL. */
        bool is_phy_type_dll;
};

struct cdns_nand_ctrl {
        struct device *dev;
        struct nand_controller controller;
        struct cadence_nand_cdma_desc *cdma_desc;
        /* IP capability. */
        const struct cadence_nand_dt_devdata *caps1;
        struct cdns_nand_caps caps2;
        u8 ctrl_rev;
        dma_addr_t dma_cdma_desc;
        u8 *buf;
        u32 buf_size;
        u8 curr_corr_str_idx;

        /* Register interface. */
        void __iomem *reg;

        struct {
                void __iomem *virt;
                dma_addr_t dma;
        } io;

        int irq;
        /* Interrupts that have happened. */
        struct cadence_nand_irq_status irq_status;
        /* Interrupts we are waiting for. */
        struct cadence_nand_irq_status irq_mask;
        struct completion complete;
        /* Protect irq_mask and irq_status. */
        spinlock_t irq_lock;

        int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
        struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
        struct nand_ecc_caps ecc_caps;

        int curr_trans_type;

        struct dma_chan *dmac;

        u32 nf_clk_rate;
        /*
         * Estimated Board delay. The value includes the total
         * round trip delay for the signals and is used for deciding on values
         * associated with data read capture.
         */
        u32 board_delay;

        struct nand_chip *selected_chip;

        unsigned long assigned_cs;
        struct list_head chips;
};

struct cdns_nand_chip {
        struct cadence_nand_timings timings;
        struct nand_chip chip;
        u8 nsels;
        struct list_head node;

        /*
         * Part of the OOB area of the NAND flash memory page.
         * This part is available for the user to read or write.
         */
        u32 avail_oob_size;

        /* Sector size. There are a few sectors per mtd->writesize. */
        u32 sector_size;
        u32 sector_count;

        /* Offset of BBM. */
        u8 bbm_offs;
        /* Number of bytes reserved for BBM. */
        u8 bbm_len;
        /* ECC strength index. */
        u8 corr_str_idx;

        u8 cs[];
};

struct ecc_info {
        int (*calc_ecc_bytes)(int step_size, int strength);
        int max_step_size;
};

static inline struct
cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
{
        return container_of(chip, struct cdns_nand_chip, chip);
}

static inline struct
cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
{
        return container_of(controller, struct cdns_nand_ctrl, controller);
}

static bool
cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
                        u32 buf_len)
{
        u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;

        return buf && virt_addr_valid(buf) &&
                likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
                likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
}
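
/*
 * Editorial example (not part of the upstream source): with
 * caps2.data_dma_width == 8, a buffer qualifies for direct DMA only if
 * it has a valid kernel virtual address, is 8-byte aligned and its
 * length is a multiple of DMA_DATA_SIZE_ALIGN (8 bytes). Buffers that
 * fail this check are bounced through cdns_ctrl->buf, as the page
 * write path below does.
 */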

static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
                                       u32 reg_offset, u32 timeout_us,
                                       u32 mask, bool is_clear)
{
        u32 val;
        int ret;

        ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
                                         val, !(val & mask) == is_clear,
                                         10, timeout_us);

        if (ret < 0) {
                dev_err(cdns_ctrl->dev,
                        "Timeout while waiting for reg %x, mask %x, is_clear %d\n",
                        reg_offset, mask, is_clear);
        }

        return ret;
}

static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
                                       bool enable)
{
        u32 reg;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);

        if (enable)
                reg |= ECC_CONFIG_0_ECC_EN;
        else
                reg &= ~ECC_CONFIG_0_ECC_EN;

        writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);

        return 0;
}

static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
                                          u8 corr_str_idx)
{
        u32 reg;

        if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
                return;

        reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
        reg &= ~ECC_CONFIG_0_CORR_STR;
        reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
        writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);

        cdns_ctrl->curr_corr_str_idx = corr_str_idx;
}

static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
                                             u8 strength)
{
        int i, corr_str_idx = -1;

        for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
                if (cdns_ctrl->ecc_strengths[i] == strength) {
                        corr_str_idx = i;
                        break;
                }
        }

        return corr_str_idx;
}
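
/*
 * Hypothetical usage example (editorial, not in the upstream source):
 * had the BCH registers reported ecc_strengths = {8, 16, 24, 40, 0, 0,
 * 0, 0}, cadence_nand_get_ecc_strength_idx(cdns_ctrl, 16) would return
 * index 1, while a strength the engine cannot provide yields -1.
 */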

static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
                                            u16 marker_value)
{
        u32 reg;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
        reg &= ~SKIP_BYTES_MARKER_VALUE;
        reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
                          marker_value);

        writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);

        return 0;
}

static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
                                            u8 num_of_bytes,
                                            u32 offset_value,
                                            int enable)
{
        u32 reg, skip_bytes_offset;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        if (!enable) {
                num_of_bytes = 0;
                offset_value = 0;
        }

        reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
        reg &= ~SKIP_BYTES_NUM_OF_BYTES;
        reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
                          num_of_bytes);
        skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
                                       offset_value);

        writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
        writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);

        return 0;
}

/* Function enables/disables hardware detection of erased data. */
static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
                                             bool enable,
                                             u8 bitflips_threshold)
{
        u32 reg;

        reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);

        if (enable)
                reg |= ECC_CONFIG_0_ERASE_DET_EN;
        else
                reg &= ~ECC_CONFIG_0_ERASE_DET_EN;

        writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
        writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
}

static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
                                           bool bit_bus16)
{
        u32 reg;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);

        if (!bit_bus16)
                reg &= ~COMMON_SET_DEVICE_16BIT;
        else
                reg |= COMMON_SET_DEVICE_16BIT;
        writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);

        return 0;
}

static void
cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
                             struct cadence_nand_irq_status *irq_status)
{
        writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
        writel_relaxed(irq_status->trd_status,
                       cdns_ctrl->reg + TRD_COMP_INT_STATUS);
        writel_relaxed(irq_status->trd_error,
                       cdns_ctrl->reg + TRD_ERR_INT_STATUS);
}

static void
cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
                             struct cadence_nand_irq_status *irq_status)
{
        irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
        irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
                                               + TRD_COMP_INT_STATUS);
        irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
                                              + TRD_ERR_INT_STATUS);
}

static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
                        struct cadence_nand_irq_status *irq_status)
{
        cadence_nand_read_int_status(cdns_ctrl, irq_status);

        return irq_status->status || irq_status->trd_status ||
                irq_status->trd_error;
}

static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
{
        unsigned long flags;

        spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
        memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
        memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
        spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
}

/*
 * This is the interrupt service routine. It handles all interrupts
 * sent to this device.
 */
static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
{
        struct cdns_nand_ctrl *cdns_ctrl = dev_id;
        struct cadence_nand_irq_status irq_status;
        irqreturn_t result = IRQ_NONE;

        spin_lock(&cdns_ctrl->irq_lock);

        if (irq_detected(cdns_ctrl, &irq_status)) {
                /* Handle interrupt. */
                /* First acknowledge it. */
                cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
                /* Status in the device context for someone to read. */
                cdns_ctrl->irq_status.status |= irq_status.status;
                cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
                cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
                /* Notify anyone who cares that it happened. */
                complete(&cdns_ctrl->complete);
                /* Tell the OS that we've handled this. */
                result = IRQ_HANDLED;
        }
        spin_unlock(&cdns_ctrl->irq_lock);

        return result;
}

static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
                                      struct cadence_nand_irq_status *irq_mask)
{
        writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
                       cdns_ctrl->reg + INTR_ENABLE);

        writel_relaxed(irq_mask->trd_error,
                       cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
}

static void
cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
                          struct cadence_nand_irq_status *irq_mask,
                          struct cadence_nand_irq_status *irq_status)
{
        unsigned long timeout = msecs_to_jiffies(10000);
        unsigned long time_left;

        time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
                                                timeout);

        *irq_status = cdns_ctrl->irq_status;
        if (time_left == 0) {
                /* Timeout error. */
                dev_err(cdns_ctrl->dev, "timeout occurred:\n");
                dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
                        irq_status->status, irq_mask->status);
                dev_err(cdns_ctrl->dev,
                        "\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
                        irq_status->trd_status, irq_mask->trd_status);
                dev_err(cdns_ctrl->dev,
                        "\ttrd_error = 0x%x, trd_error mask = 0x%x\n",
                        irq_status->trd_error, irq_mask->trd_error);
        }
}

/* Execute generic command on NAND controller. */
static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
                                         u8 chip_nr,
                                         u64 mini_ctrl_cmd)
{
        u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;

        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
        mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
        mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        cadence_nand_reset_irq(cdns_ctrl);

        writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
        writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);

        /* Select generic command. */
        reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
        /* Thread number. */
        reg |= FIELD_PREP(CMD_REG0_TN, 0);

        /* Issue command. */
        writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);

        return 0;
}
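
/*
 * Illustrative sketch (editorial addition, not part of the upstream
 * driver): encoding a single-byte NAND command such as RESET (0xFF)
 * with the GCMD_* layout fields and handing it to
 * cadence_nand_generic_cmd_send(). The helper name is hypothetical.
 */
static int __maybe_unused
cadence_nand_generic_reset(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr)
{
        u64 mini_ctrl_cmd = 0;

        /* CMD-type instruction; wait tWB after the command byte. */
        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR, GCMD_LAY_INSTR_CMD);
        mini_ctrl_cmd |= GCMD_LAY_TWB;
        /* The command opcode itself. */
        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD, NAND_CMD_RESET);

        return cadence_nand_generic_cmd_send(cdns_ctrl, chip_nr,
                                             mini_ctrl_cmd);
}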

/* Wait for data on slave DMA interface. */
static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
                                     u8 *out_sdma_trd,
                                     u32 *out_sdma_size)
{
        struct cadence_nand_irq_status irq_mask, irq_status;

        irq_mask.trd_status = 0;
        irq_mask.trd_error = 0;
        irq_mask.status = INTR_STATUS_SDMA_TRIGG
                | INTR_STATUS_SDMA_ERR
                | INTR_STATUS_UNSUPP_CMD;

        cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
        cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
        if (irq_status.status == 0) {
                dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
                return -ETIMEDOUT;
        }

        if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
                *out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
                *out_sdma_trd  = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
                *out_sdma_trd =
                        FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
        } else {
                dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
                        irq_status.status);
                return -EIO;
        }

        return 0;
}

static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
{
        u32 reg;

        reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);

        cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);

        if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
                cdns_ctrl->caps2.data_dma_width = 8;
        else
                cdns_ctrl->caps2.data_dma_width = 4;

        if (reg & CTRL_FEATURES_CONTROL_DATA)
                cdns_ctrl->caps2.data_control_supp = true;

        if (reg & (CTRL_FEATURES_NVDDR_2_3
                   | CTRL_FEATURES_NVDDR))
                cdns_ctrl->caps2.is_phy_type_dll = true;
}

/* Prepare CDMA descriptor. */
static void
cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
                               char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
                               dma_addr_t ctrl_data_ptr, u16 ctype)
{
        struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;

        memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));

        /* Set fields for one descriptor. */
        cdma_desc->flash_pointer = flash_ptr;
        if (cdns_ctrl->ctrl_rev >= 13)
                cdma_desc->bank = nf_mem;
        else
                cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);

        cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
        cdma_desc->command_flags |= CDMA_CF_INT;

        cdma_desc->memory_pointer = mem_ptr;
        cdma_desc->status = 0;
        cdma_desc->sync_flag_pointer = 0;
        cdma_desc->sync_arguments = 0;

        cdma_desc->command_type = ctype;
        cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
}
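
/*
 * Worked example (editorial, not in the upstream source): for
 * ctrl_rev < 13, bank 2 and row address 0x1234 share one word,
 * flash_pointer = (2 << CDMA_CFPTR_MEM_SHIFT) | 0x1234; from revision
 * 13 onwards the bank number is written to the dedicated 'bank' field
 * instead.
 */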

static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
                                        u32 desc_status)
{
        if (desc_status & CDMA_CS_ERP)
                return STAT_ERASED;

        if (desc_status & CDMA_CS_UNCE)
                return STAT_ECC_UNCORR;

        if (desc_status & CDMA_CS_ERR) {
                dev_err(cdns_ctrl->dev, "CDMA desc error flag detected.\n");
                return STAT_FAIL;
        }

        if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
                return STAT_ECC_CORR;

        return STAT_FAIL;
}

static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
{
        struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
        u8 status = STAT_BUSY;

        if (desc_ptr->status & CDMA_CS_FAIL) {
                status = cadence_nand_check_desc_error(cdns_ctrl,
                                                       desc_ptr->status);
                dev_err(cdns_ctrl->dev, "CDMA error %x\n", desc_ptr->status);
        } else if (desc_ptr->status & CDMA_CS_COMP) {
                /* Descriptor finished with no errors. */
                if (desc_ptr->command_flags & CDMA_CF_CONT) {
                        dev_info(cdns_ctrl->dev, "DMA unsupported flag is set");
                        status = STAT_UNKNOWN;
                } else {
                        /* Last descriptor. */
                        status = STAT_OK;
                }
        }

        return status;
}

static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
                                  u8 thread)
{
        u32 reg;
        int status;

        /* Wait for thread ready. */
        status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
                                             1000000,
                                             BIT(thread), true);
        if (status)
                return status;

        cadence_nand_reset_irq(cdns_ctrl);

        writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
                       cdns_ctrl->reg + CMD_REG2);
        writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);

        /* Select CDMA mode. */
        reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
        /* Thread number. */
        reg |= FIELD_PREP(CMD_REG0_TN, thread);
        /* Issue command. */
        writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);

        return 0;
}

/* Send CDMA command and wait for finish. */
static u32
cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
                                u8 thread)
{
        struct cadence_nand_irq_status irq_mask, irq_status = {0};
        int status;

        irq_mask.trd_status = BIT(thread);
        irq_mask.trd_error = BIT(thread);
        irq_mask.status = INTR_STATUS_CDMA_TERR;

        cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);

        status = cadence_nand_cdma_send(cdns_ctrl, thread);
        if (status)
                return status;

        cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);

        if (irq_status.status == 0 && irq_status.trd_status == 0 &&
            irq_status.trd_error == 0) {
                dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
                return -ETIMEDOUT;
        }
        if (irq_status.status & irq_mask.status) {
                dev_err(cdns_ctrl->dev, "CDMA command failed\n");
                return -EIO;
        }

        return 0;
}

/*
 * ECC size depends on configured ECC strength and on maximum supported
 * ECC step size.
 */
static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
{
        int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);

        return ALIGN(nbytes, 2);
}
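
/*
 * Worked example (editorial, not in the upstream source): for a
 * maximum step size of 1024 bytes and strength 8, fls(8 * 1024) = 14
 * is the number of bits needed to address any bit in the step, so
 * nbytes = DIV_ROUND_UP(14 * 8, 8) = 14 and ALIGN(14, 2) keeps the
 * result at 14 ECC bytes per sector.
 */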

#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
        static int \
        cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
                                                    int strength)\
        {\
                return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
        }

CADENCE_NAND_CALC_ECC_BYTES(256)
CADENCE_NAND_CALC_ECC_BYTES(512)
CADENCE_NAND_CALC_ECC_BYTES(1024)
CADENCE_NAND_CALC_ECC_BYTES(2048)
CADENCE_NAND_CALC_ECC_BYTES(4096)

/* Function reads BCH capabilities. */
static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
{
        struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
        int max_step_size = 0, nstrengths, i;
        u32 reg;

        reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
        cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
        cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
        cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
        cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);

        reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
        cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
        cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
        cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
        cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);

        reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
        cdns_ctrl->ecc_stepinfos[0].stepsize =
                FIELD_GET(BCH_CFG_2_SECT_0, reg);

        cdns_ctrl->ecc_stepinfos[1].stepsize =
                FIELD_GET(BCH_CFG_2_SECT_1, reg);

        nstrengths = 0;
        for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
                if (cdns_ctrl->ecc_strengths[i] != 0)
                        nstrengths++;
        }

        ecc_caps->nstepinfos = 0;
        for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
                /* ECC strengths are common for all step infos. */
                cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
                cdns_ctrl->ecc_stepinfos[i].strengths =
                        cdns_ctrl->ecc_strengths;

                if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
                        ecc_caps->nstepinfos++;

                if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
                        max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
        }
        ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];

        switch (max_step_size) {
        case 256:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
                break;
        case 512:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
                break;
        case 1024:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
                break;
        case 2048:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
                break;
        case 4096:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
                break;
        default:
                dev_err(cdns_ctrl->dev,
                        "Unsupported sector size (ECC step size) %d\n",
                        max_step_size);
                return -EIO;
        }

        return 0;
}

/* Hardware initialization. */
static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
{
        int status;
        u32 reg;

        status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                             1000000,
                                             CTRL_STATUS_INIT_COMP, false);
        if (status)
                return status;

        reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
        cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);

        dev_info(cdns_ctrl->dev,
                 "%s: cadence nand controller version reg %x\n",
                 __func__, reg);

        /* Disable cache and multiplane. */
        writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
        writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);

        /* Clear all interrupts. */
        writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);

        cadence_nand_get_caps(cdns_ctrl);
        cadence_nand_read_bch_caps(cdns_ctrl);

        /*
         * Set IO width access to 8 bit.
         * During SW device discovery, 8-bit width access is expected.
         */
        status = cadence_nand_set_access_width16(cdns_ctrl, false);

        return status;
}

#define TT_MAIN_OOB_AREAS       2
#define TT_RAW_PAGE             3
#define TT_BBM                  4
#define TT_MAIN_OOB_AREA_EXT    5

/* Prepare size of data to transfer. */
static void
cadence_nand_prepare_data_size(struct nand_chip *chip,
                               int transfer_type)
{
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
        struct mtd_info *mtd = nand_to_mtd(chip);
        u32 sec_size = 0, offset = 0, sec_cnt = 1;
        u32 last_sec_size = cdns_chip->sector_size;
        u32 data_ctrl_size = 0;
        u32 reg = 0;

        if (cdns_ctrl->curr_trans_type == transfer_type)
                return;

        switch (transfer_type) {
        case TT_MAIN_OOB_AREA_EXT:
                sec_cnt = cdns_chip->sector_count;
                sec_size = cdns_chip->sector_size;
                data_ctrl_size = cdns_chip->avail_oob_size;
                break;
        case TT_MAIN_OOB_AREAS:
                sec_cnt = cdns_chip->sector_count;
                last_sec_size = cdns_chip->sector_size
                        + cdns_chip->avail_oob_size;
                sec_size = cdns_chip->sector_size;
                break;
        case TT_RAW_PAGE:
                last_sec_size = mtd->writesize + mtd->oobsize;
                break;
        case TT_BBM:
                offset = mtd->writesize + cdns_chip->bbm_offs;
                last_sec_size = 8;
                break;
        }

        reg = 0;
        reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
        reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
        writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);

        reg = 0;
        reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
        reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
        writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);

        if (cdns_ctrl->caps2.data_control_supp) {
                reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
                reg &= ~CONTROL_DATA_CTRL_SIZE;
                reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
                writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
        }

        cdns_ctrl->curr_trans_type = transfer_type;
}
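
/*
 * Worked example (editorial, not in the upstream source): for a chip
 * with sector_size = 1024, sector_count = 4 and avail_oob_size = 32,
 * TT_MAIN_OOB_AREAS programs TRAN_CFG_0.SEC_CNT = 4 at offset 0,
 * TRAN_CFG_1.SECTOR_SIZE = 1024, and extends the last sector to
 * LAST_SEC_SIZE = 1024 + 32 = 1056 bytes so it carries the user OOB
 * data as well.
 */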

static int
cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
                           int page, void *buf, void *ctrl_dat, u32 buf_size,
                           u32 ctrl_dat_size, enum dma_data_direction dir,
                           bool with_ecc)
{
        dma_addr_t dma_buf, dma_ctrl_dat = 0;
        u8 thread_nr = chip_nr;
        int status;
        u16 ctype;

        if (dir == DMA_FROM_DEVICE)
                ctype = CDMA_CT_RD;
        else
                ctype = CDMA_CT_WR;

        cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);

        dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
        if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
                dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
                return -EIO;
        }

        if (ctrl_dat && ctrl_dat_size) {
                dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
                                              ctrl_dat_size, dir);
                if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
                        dma_unmap_single(cdns_ctrl->dev, dma_buf,
                                         buf_size, dir);
                        dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
                        return -EIO;
                }
        }

        cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
                                       dma_buf, dma_ctrl_dat, ctype);

        status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);

        dma_unmap_single(cdns_ctrl->dev, dma_buf,
                         buf_size, dir);

        if (ctrl_dat && ctrl_dat_size)
                dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
                                 ctrl_dat_size, dir);
        if (status)
                return status;

        return cadence_nand_cdma_finish(cdns_ctrl);
}

static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
                                     struct cadence_nand_timings *t)
{
        writel_relaxed(t->async_toggle_timings,
                       cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
        writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
        writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
        writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);

        if (cdns_ctrl->caps2.is_phy_type_dll)
                writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);

        writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);

        if (cdns_ctrl->caps2.is_phy_type_dll) {
                writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
                writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
                writel_relaxed(t->phy_dqs_timing,
                               cdns_ctrl->reg + PHY_DQS_TIMING);
                writel_relaxed(t->phy_gate_lpbk_ctrl,
                               cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
                writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
                               cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
                writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
        }
}

static int cadence_nand_select_target(struct nand_chip *chip)
{
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);

        if (chip == cdns_ctrl->selected_chip)
                return 0;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);

        cadence_nand_set_ecc_strength(cdns_ctrl,
                                      cdns_chip->corr_str_idx);

        cadence_nand_set_erase_detection(cdns_ctrl, true,
                                         chip->ecc.strength);

        cdns_ctrl->curr_trans_type = -1;
        cdns_ctrl->selected_chip = chip;

        return 0;
}

static int cadence_nand_erase(struct nand_chip *chip, u32 page)
{
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
        int status;
        u8 thread_nr = cdns_chip->cs[chip->cur_cs];

        cadence_nand_cdma_desc_prepare(cdns_ctrl,
                                       cdns_chip->cs[chip->cur_cs],
                                       page, 0, 0,
                                       CDMA_CT_ERASE);
        status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
        if (status) {
                dev_err(cdns_ctrl->dev, "erase operation failed\n");
                return -EIO;
        }

        status = cadence_nand_cdma_finish(cdns_ctrl);
        if (status)
                return status;

        return 0;
}

static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
{
        int status;
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
        struct mtd_info *mtd = nand_to_mtd(chip);

        cadence_nand_prepare_data_size(chip, TT_BBM);

        cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);

        /*
         * Read only the bad block marker, from the offset
         * defined by the memory manufacturer.
         */
1392        status = cadence_nand_cdma_transfer(cdns_ctrl,
1393                                            cdns_chip->cs[chip->cur_cs],
1394                                            page, cdns_ctrl->buf, NULL,
1395                                            mtd->oobsize,
1396                                            0, DMA_FROM_DEVICE, false);
1397        if (status) {
1398                dev_err(cdns_ctrl->dev, "read BBM failed\n");
1399                return -EIO;
1400        }
1401
1402        memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);
1403
1404        return 0;
1405}
1406
1407static int cadence_nand_write_page(struct nand_chip *chip,
1408                                   const u8 *buf, int oob_required,
1409                                   int page)
1410{
1411        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1412        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1413        struct mtd_info *mtd = nand_to_mtd(chip);
1414        int status;
1415        u16 marker_val = 0xFFFF;
1416
1417        status = cadence_nand_select_target(chip);
1418        if (status)
1419                return status;
1420
1421        cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
1422                                         mtd->writesize
1423                                         + cdns_chip->bbm_offs,
1424                                         1);
1425
1426        if (oob_required) {
1427                marker_val = *(u16 *)(chip->oob_poi
1428                                      + cdns_chip->bbm_offs);
1429        } else {
1430                /* Set oob data to 0xFF. */
1431                memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
1432                       cdns_chip->avail_oob_size);
1433        }
1434
1435        cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);
1436
1437        cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
1438
1439        if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
1440            cdns_ctrl->caps2.data_control_supp) {
1441                u8 *oob;
1442
1443                if (oob_required)
1444                        oob = chip->oob_poi;
1445                else
1446                        oob = cdns_ctrl->buf + mtd->writesize;
1447
1448                status = cadence_nand_cdma_transfer(cdns_ctrl,
1449                                                    cdns_chip->cs[chip->cur_cs],
1450                                                    page, (void *)buf, oob,
1451                                                    mtd->writesize,
1452                                                    cdns_chip->avail_oob_size,
1453                                                    DMA_TO_DEVICE, true);
1454                if (status) {
1455                        dev_err(cdns_ctrl->dev, "write page failed\n");
1456                        return -EIO;
1457                }
1458
1459                return 0;
1460        }
1461
1462        if (oob_required) {
1463                /* Transfer the data to the oob area. */
1464                memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
1465                       cdns_chip->avail_oob_size);
1466        }
1467
1468        memcpy(cdns_ctrl->buf, buf, mtd->writesize);
1469
1470        cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
1471
1472        return cadence_nand_cdma_transfer(cdns_ctrl,
1473                                          cdns_chip->cs[chip->cur_cs],
1474                                          page, cdns_ctrl->buf, NULL,
1475                                          mtd->writesize
1476                                          + cdns_chip->avail_oob_size,
1477                                          0, DMA_TO_DEVICE, true);
1478}
1479
1480static int cadence_nand_write_oob(struct nand_chip *chip, int page)
1481{
1482        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1483        struct mtd_info *mtd = nand_to_mtd(chip);
1484
1485        memset(cdns_ctrl->buf, 0xFF, mtd->writesize);
1486
1487        return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
1488}
1489
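/*
 * Write a raw page (ECC bypassed). The payload and OOB bytes are
 * rearranged in the bounce buffer to match the on-flash syndrome
 * layout before the transfer.
 */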
1490static int cadence_nand_write_page_raw(struct nand_chip *chip,
1491                                       const u8 *buf, int oob_required,
1492                                       int page)
1493{
1494        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1495        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1496        struct mtd_info *mtd = nand_to_mtd(chip);
1497        int writesize = mtd->writesize;
1498        int oobsize = mtd->oobsize;
1499        int ecc_steps = chip->ecc.steps;
1500        int ecc_size = chip->ecc.size;
1501        int ecc_bytes = chip->ecc.bytes;
1502        void *tmp_buf = cdns_ctrl->buf;
1503        int oob_skip = cdns_chip->bbm_len;
1504        size_t size = writesize + oobsize;
1505        int i, pos, len;
1506        int status = 0;
1507
1508        status = cadence_nand_select_target(chip);
1509        if (status)
1510                return status;
1511
1512        /*
1513         * Fill the buffer with 0xff first, except for a full page transfer.
1514         * This simplifies the logic.
1515         */
1516        if (!buf || !oob_required)
1517                memset(tmp_buf, 0xff, size);
1518
1519        cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
1520
1521        /* Arrange the buffer for syndrome payload/ecc layout. */
1522        if (buf) {
1523                for (i = 0; i < ecc_steps; i++) {
1524                        pos = i * (ecc_size + ecc_bytes);
1525                        len = ecc_size;
1526
1527                        if (pos >= writesize)
1528                                pos += oob_skip;
1529                        else if (pos + len > writesize)
1530                                len = writesize - pos;
1531
1532                        memcpy(tmp_buf + pos, buf, len);
1533                        buf += len;
1534                        if (len < ecc_size) {
1535                                len = ecc_size - len;
1536                                memcpy(tmp_buf + writesize + oob_skip, buf,
1537                                       len);
1538                                buf += len;
1539                        }
1540                }
1541        }
1542
1543        if (oob_required) {
1544                const u8 *oob = chip->oob_poi;
1545                u32 oob_data_offset = (cdns_chip->sector_count - 1) *
1546                        (cdns_chip->sector_size + chip->ecc.bytes)
1547                        + cdns_chip->sector_size + oob_skip;
1548
1549                /* BBM at the beginning of the OOB area. */
1550                memcpy(tmp_buf + writesize, oob, oob_skip);
1551
1552                /* OOB free. */
1553                memcpy(tmp_buf + oob_data_offset, oob,
1554                       cdns_chip->avail_oob_size);
1555                oob += cdns_chip->avail_oob_size;
1556
1557                /* OOB ECC. */
1558                for (i = 0; i < ecc_steps; i++) {
1559                        pos = ecc_size + i * (ecc_size + ecc_bytes);
1560                        if (i == (ecc_steps - 1))
1561                                pos += cdns_chip->avail_oob_size;
1562
1563                        len = ecc_bytes;
1564
1565                        if (pos >= writesize)
1566                                pos += oob_skip;
1567                        else if (pos + len > writesize)
1568                                len = writesize - pos;
1569
1570                        memcpy(tmp_buf + pos, oob, len);
1571                        oob += len;
1572                        if (len < ecc_bytes) {
1573                                len = ecc_bytes - len;
1574                                memcpy(tmp_buf + writesize + oob_skip, oob,
1575                                       len);
1576                                oob += len;
1577                        }
1578                }
1579        }
1580
1581        cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
1582
1583        return cadence_nand_cdma_transfer(cdns_ctrl,
1584                                          cdns_chip->cs[chip->cur_cs],
1585                                          page, cdns_ctrl->buf, NULL,
1586                                          mtd->writesize +
1587                                          mtd->oobsize,
1588                                          0, DMA_TO_DEVICE, false);
1589}
1590
1591static int cadence_nand_write_oob_raw(struct nand_chip *chip,
1592                                      int page)
1593{
1594        return cadence_nand_write_page_raw(chip, NULL, true, page);
1595}
1596
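/*
 * Read a page with hardware ECC and update the MTD ECC statistics
 * according to the status reported in the CDMA descriptor.
 */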
1597static int cadence_nand_read_page(struct nand_chip *chip,
1598                                  u8 *buf, int oob_required, int page)
1599{
1600        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1601        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1602        struct mtd_info *mtd = nand_to_mtd(chip);
1603        int status = 0;
1604        int ecc_err_count = 0;
1605
1606        status = cadence_nand_select_target(chip);
1607        if (status)
1608                return status;
1609
1610        cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
1611                                         mtd->writesize
1612                                         + cdns_chip->bbm_offs, 1);
1613
1614        /*
1615         * If the data buffer can be accessed by DMA and the data_control
1616         * feature is supported, then transfer the data and OOB directly.
1617         */
1618        if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
1619            cdns_ctrl->caps2.data_control_supp) {
1620                u8 *oob;
1621
1622                if (oob_required)
1623                        oob = chip->oob_poi;
1624                else
1625                        oob = cdns_ctrl->buf + mtd->writesize;
1626
1627                cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
1628                status = cadence_nand_cdma_transfer(cdns_ctrl,
1629                                                    cdns_chip->cs[chip->cur_cs],
1630                                                    page, buf, oob,
1631                                                    mtd->writesize,
1632                                                    cdns_chip->avail_oob_size,
1633                                                    DMA_FROM_DEVICE, true);
1634        /* Otherwise use bounce buffer. */
1635        } else {
1636                cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
1637                status = cadence_nand_cdma_transfer(cdns_ctrl,
1638                                                    cdns_chip->cs[chip->cur_cs],
1639                                                    page, cdns_ctrl->buf,
1640                                                    NULL, mtd->writesize
1641                                                    + cdns_chip->avail_oob_size,
1642                                                    0, DMA_FROM_DEVICE, true);
1643
1644                memcpy(buf, cdns_ctrl->buf, mtd->writesize);
1645                if (oob_required)
1646                        memcpy(chip->oob_poi,
1647                               cdns_ctrl->buf + mtd->writesize,
1648                               mtd->oobsize);
1649        }
1650
1651        switch (status) {
1652        case STAT_ECC_UNCORR:
1653                mtd->ecc_stats.failed++;
1654                ecc_err_count++;
1655                break;
1656        case STAT_ECC_CORR:
1657                ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
1658                                          cdns_ctrl->cdma_desc->status);
1659                mtd->ecc_stats.corrected += ecc_err_count;
1660                break;
1661        case STAT_ERASED:
1662        case STAT_OK:
1663                break;
1664        default:
1665                dev_err(cdns_ctrl->dev, "read page failed\n");
1666                return -EIO;
1667        }
1668
1669        if (oob_required)
1670                if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
1671                        return -EIO;
1672
1673        return ecc_err_count;
1674}
1675
1676/* Reads OOB data from the device. */
1677static int cadence_nand_read_oob(struct nand_chip *chip, int page)
1678{
1679        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1680
1681        return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
1682}
1683
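/*
 * Read a raw page (ECC bypassed) and rearrange the on-flash syndrome
 * layout from the bounce buffer into the data and OOB buffers.
 */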
1684static int cadence_nand_read_page_raw(struct nand_chip *chip,
1685                                      u8 *buf, int oob_required, int page)
1686{
1687        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1688        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1689        struct mtd_info *mtd = nand_to_mtd(chip);
1690        int oob_skip = cdns_chip->bbm_len;
1691        int writesize = mtd->writesize;
1692        int ecc_steps = chip->ecc.steps;
1693        int ecc_size = chip->ecc.size;
1694        int ecc_bytes = chip->ecc.bytes;
1695        void *tmp_buf = cdns_ctrl->buf;
1696        int i, pos, len;
1697        int status = 0;
1698
1699        status = cadence_nand_select_target(chip);
1700        if (status)
1701                return status;
1702
1703        cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
1704
1705        cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
1706        status = cadence_nand_cdma_transfer(cdns_ctrl,
1707                                            cdns_chip->cs[chip->cur_cs],
1708                                            page, cdns_ctrl->buf, NULL,
1709                                            mtd->writesize
1710                                            + mtd->oobsize,
1711                                            0, DMA_FROM_DEVICE, false);
1712
1713        switch (status) {
1714        case STAT_ERASED:
1715        case STAT_OK:
1716                break;
1717        default:
1718                dev_err(cdns_ctrl->dev, "read raw page failed\n");
1719                return -EIO;
1720        }
1721
1722        /* Arrange the buffer for syndrome payload/ecc layout. */
1723        if (buf) {
1724                for (i = 0; i < ecc_steps; i++) {
1725                        pos = i * (ecc_size + ecc_bytes);
1726                        len = ecc_size;
1727
1728                        if (pos >= writesize)
1729                                pos += oob_skip;
1730                        else if (pos + len > writesize)
1731                                len = writesize - pos;
1732
1733                        memcpy(buf, tmp_buf + pos, len);
1734                        buf += len;
1735                        if (len < ecc_size) {
1736                                len = ecc_size - len;
1737                                memcpy(buf, tmp_buf + writesize + oob_skip,
1738                                       len);
1739                                buf += len;
1740                        }
1741                }
1742        }
1743
1744        if (oob_required) {
1745                u8 *oob = chip->oob_poi;
1746                u32 oob_data_offset = (cdns_chip->sector_count - 1) *
1747                        (cdns_chip->sector_size + chip->ecc.bytes)
1748                        + cdns_chip->sector_size + oob_skip;
1749
1750                /* OOB free. */
1751                memcpy(oob, tmp_buf + oob_data_offset,
1752                       cdns_chip->avail_oob_size);
1753
1754                /* BBM at the beginning of the OOB area. */
1755                memcpy(oob, tmp_buf + writesize, oob_skip);
1756
1757                oob += cdns_chip->avail_oob_size;
1758
1759                /* OOB ECC */
1760                for (i = 0; i < ecc_steps; i++) {
1761                        pos = ecc_size + i * (ecc_size + ecc_bytes);
1762                        len = ecc_bytes;
1763
1764                        if (i == (ecc_steps - 1))
1765                                pos += cdns_chip->avail_oob_size;
1766
1767                        if (pos >= writesize)
1768                                pos += oob_skip;
1769                        else if (pos + len > writesize)
1770                                len = writesize - pos;
1771
1772                        memcpy(oob, tmp_buf + pos, len);
1773                        oob += len;
1774                        if (len < ecc_bytes) {
1775                                len = ecc_bytes - len;
1776                                memcpy(oob, tmp_buf + writesize + oob_skip,
1777                                       len);
1778                                oob += len;
1779                        }
1780                }
1781        }
1782
1783        return 0;
1784}
1785
1786static int cadence_nand_read_oob_raw(struct nand_chip *chip,
1787                                     int page)
1788{
1789        return cadence_nand_read_page_raw(chip, NULL, true, page);
1790}
1791
1792static void cadence_nand_slave_dma_transfer_finished(void *data)
1793{
1794        struct completion *finished = data;
1795
1796        complete(finished);
1797}
1798
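/*
 * Move @len bytes between @buf and the slave DMA interface of the
 * controller using an external DMA channel and wait for completion.
 */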
1799static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
1800                                           void *buf,
1801                                           dma_addr_t dev_dma, size_t len,
1802                                           enum dma_data_direction dir)
1803{
1804        DECLARE_COMPLETION_ONSTACK(finished);
1805        struct dma_chan *chan;
1806        struct dma_device *dma_dev;
1807        dma_addr_t src_dma, dst_dma, buf_dma;
1808        struct dma_async_tx_descriptor *tx;
1809        dma_cookie_t cookie;
1810
1811        chan = cdns_ctrl->dmac;
1812        dma_dev = chan->device;
1813
1814        buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
1815        if (dma_mapping_error(dma_dev->dev, buf_dma)) {
1816                dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
1817                goto err;
1818        }
1819
1820        if (dir == DMA_FROM_DEVICE) {
1821                src_dma = cdns_ctrl->io.dma;
1822                dst_dma = buf_dma;
1823        } else {
1824                src_dma = buf_dma;
1825                dst_dma = cdns_ctrl->io.dma;
1826        }
1827
1828        tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
1829                                       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
1830        if (!tx) {
1831                dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
1832                goto err_unmap;
1833        }
1834
1835        tx->callback = cadence_nand_slave_dma_transfer_finished;
1836        tx->callback_param = &finished;
1837
1838        cookie = dmaengine_submit(tx);
1839        if (dma_submit_error(cookie)) {
1840                dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
1841                goto err_unmap;
1842        }
1843
1844        dma_async_issue_pending(cdns_ctrl->dmac);
1845        wait_for_completion(&finished);
1846
1847        dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
1848
1849        return 0;
1850
1851err_unmap:
1852        dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
1853
1854err:
1855        dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
1856
1857        return -EIO;
1858}
1859
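/*
 * Read data made available on the slave DMA interface, either by 32-bit
 * PIO accesses when no external DMA engine is present, or by a slave DMA
 * transfer with a bounce-buffer fallback.
 */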
1860static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
1861                                 u8 *buf, int len)
1862{
1863        u8 thread_nr = 0;
1864        u32 sdma_size;
1865        int status;
1866
1867        /* Wait until the slave DMA interface is ready for the data transfer. */
1868        status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
1869        if (status)
1870                return status;
1871
1872        if (!cdns_ctrl->caps1->has_dma) {
1873                int len_in_words = len >> 2;
1874
1875                /* Read the 32-bit-aligned part of the data. */
1876                ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
1877                if (sdma_size > len) {
1878                        /* Read the remaining data from the slave DMA interface, if any. */
1879                        ioread32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
1880                                     sdma_size / 4 - len_in_words);
1881                        /* Copy the remaining bytes to the caller's buffer. */
1882                        memcpy(buf + (len_in_words << 2), cdns_ctrl->buf,
1883                               len - (len_in_words << 2));
1884                }
1885                return 0;
1886        }
1887
1888        if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
1889                status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
1890                                                         cdns_ctrl->io.dma,
1891                                                         len, DMA_FROM_DEVICE);
1892                if (status == 0)
1893                        return 0;
1894
1895                dev_warn(cdns_ctrl->dev,
1896                         "Slave DMA transfer failed. Retrying with the bounce buffer.\n");
1897        }
1898
1899        /* If DMA transfer is not possible or failed then use bounce buffer. */
1900        status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
1901                                                 cdns_ctrl->io.dma,
1902                                                 sdma_size, DMA_FROM_DEVICE);
1903
1904        if (status) {
1905                dev_err(cdns_ctrl->dev, "Slave DMA transfer failed\n");
1906                return status;
1907        }
1908
1909        memcpy(buf, cdns_ctrl->buf, len);
1910
1911        return 0;
1912}
1913
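/*
 * Write data to the slave DMA interface, either by 32-bit PIO accesses
 * when no external DMA engine is present, or by a slave DMA transfer
 * with a bounce-buffer fallback.
 */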
1914static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
1915                                  const u8 *buf, int len)
1916{
1917        u8 thread_nr = 0;
1918        u32 sdma_size;
1919        int status;
1920
1921        /* Wait until the slave DMA interface is ready for the data transfer. */
1922        status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
1923        if (status)
1924                return status;
1925
1926        if (!cdns_ctrl->caps1->has_dma) {
1927                int len_in_words = len >> 2;
1928
1929                iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
1930                if (sdma_size > len) {
1931                        /* Copy the remaining bytes into the bounce buffer. */
1932                        memcpy(cdns_ctrl->buf, buf + (len_in_words << 2),
1933                               len - (len_in_words << 2));
1934                        /* Write all the data the NAND controller expects. */
1935                        iowrite32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
1936                                      sdma_size / 4 - len_in_words);
1937                }
1938
1939                return 0;
1940        }
1941
1942        if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
1943                status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
1944                                                         cdns_ctrl->io.dma,
1945                                                         len, DMA_TO_DEVICE);
1946                if (status == 0)
1947                        return 0;
1948
1949                dev_warn(cdns_ctrl->dev,
1950                         "Slave DMA transfer failed. Retrying with the bounce buffer.\n");
1951        }
1952
1953        /* If DMA transfer is not possible or failed then use bounce buffer. */
1954        memcpy(cdns_ctrl->buf, buf, len);
1955
1956        status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
1957                                                 cdns_ctrl->io.dma,
1958                                                 sdma_size, DMA_TO_DEVICE);
1959
1960        if (status)
1961                dev_err(cdns_ctrl->dev, "Slave DMA transfer failed\n");
1962
1963        return status;
1964}
1965
1966static int cadence_nand_force_byte_access(struct nand_chip *chip,
1967                                          bool force_8bit)
1968{
1969        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1971
1972        /*
1973         * Callers of this function do not verify if the NAND is using a 16-bit
1974         * or an 8-bit bus for normal operations, so we need to take care of that
1975         * here by leaving the configuration unchanged if the NAND does not have
1976         * the NAND_BUSWIDTH_16 flag set.
1977         */
1978        if (!(chip->options & NAND_BUSWIDTH_16))
1979                return 0;
1980
1981        return cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
1984}
1985
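/* Send a single command opcode using the generic command layout. */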
1986static int cadence_nand_cmd_opcode(struct nand_chip *chip,
1987                                   const struct nand_subop *subop)
1988{
1989        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1990        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1991        const struct nand_op_instr *instr;
1992        unsigned int op_id = 0;
1993        u64 mini_ctrl_cmd = 0;
1994        int ret;
1995
1996        instr = &subop->instrs[op_id];
1997
1998        if (instr->delay_ns > 0)
1999                mini_ctrl_cmd |= GCMD_LAY_TWB;
2000
2001        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2002                                    GCMD_LAY_INSTR_CMD);
2003        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
2004                                    instr->ctx.cmd.opcode);
2005
2006        ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2007                                            cdns_chip->cs[chip->cur_cs],
2008                                            mini_ctrl_cmd);
2009        if (ret)
2010                dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
2011                        instr->ctx.cmd.opcode);
2012
2013        return ret;
2014}
2015
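/*
 * Send the address cycles of a generic command. The address bytes are
 * packed, LSB first, into a single 64-bit value.
 */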
2016static int cadence_nand_cmd_address(struct nand_chip *chip,
2017                                    const struct nand_subop *subop)
2018{
2019        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2020        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2021        const struct nand_op_instr *instr;
2022        unsigned int op_id = 0;
2023        u64 mini_ctrl_cmd = 0;
2024        unsigned int offset, naddrs;
2025        u64 address = 0;
2026        const u8 *addrs;
2027        int ret;
2028        int i;
2029
2030        instr = &subop->instrs[op_id];
2031
2032        if (instr->delay_ns > 0)
2033                mini_ctrl_cmd |= GCMD_LAY_TWB;
2034
2035        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2036                                    GCMD_LAY_INSTR_ADDR);
2037
2038        offset = nand_subop_get_addr_start_off(subop, op_id);
2039        naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
2040        addrs = &instr->ctx.addr.addrs[offset];
2041
2042        for (i = 0; i < naddrs; i++)
2043                address |= (u64)addrs[i] << (8 * i);
2044
2045        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
2046                                    address);
2047        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
2048                                    naddrs - 1);
2049
2050        ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2051                                            cdns_chip->cs[chip->cur_cs],
2052                                            mini_ctrl_cmd);
2053        if (ret)
2054                dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
2055
2056        return ret;
2057}
2058
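/*
 * Handle an erase pattern. A regular ERASE1/address/ERASE2/wait sequence
 * is mapped onto the CDMA erase operation; any other sequence matching
 * the pattern is replayed instruction by instruction through exec_op().
 */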
2059static int cadence_nand_cmd_erase(struct nand_chip *chip,
2060                                  const struct nand_subop *subop)
2061{
2062        unsigned int op_id;
2063
2064        if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
2065                int i;
2066                const struct nand_op_instr *instr = NULL;
2067                unsigned int offset, naddrs;
2068                const u8 *addrs;
2069                u32 page = 0;
2070
2071                instr = &subop->instrs[1];
2072                offset = nand_subop_get_addr_start_off(subop, 1);
2073                naddrs = nand_subop_get_num_addr_cyc(subop, 1);
2074                addrs = &instr->ctx.addr.addrs[offset];
2075
2076                for (i = 0; i < naddrs; i++)
2077                        page |= (u32)addrs[i] << (8 * i);
2078
2079                return cadence_nand_erase(chip, page);
2080        }
2081
2082        /*
2083         * If it is not an erase operation then handle operation
2084         * by calling exec_op function.
2085         */
2086        for (op_id = 0; op_id < subop->ninstrs; op_id++) {
2087                int ret;
2088                const struct nand_operation nand_op = {
2089                        .cs = chip->cur_cs,
2090                        .instrs =  &subop->instrs[op_id],
2091                        .ninstrs = 1};
2092                ret = chip->controller->ops->exec_op(chip, &nand_op, false);
2093                if (ret)
2094                        return ret;
2095        }
2096
2097        return 0;
2098}
2099
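/*
 * Execute a data-in or data-out instruction as a generic command,
 * temporarily forcing 8-bit bus access when requested.
 */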
2100static int cadence_nand_cmd_data(struct nand_chip *chip,
2101                                 const struct nand_subop *subop)
2102{
2103        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2104        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2105        const struct nand_op_instr *instr;
2106        unsigned int offset, op_id = 0;
2107        u64 mini_ctrl_cmd = 0;
2108        int len = 0;
2109        int ret;
2110
2111        instr = &subop->instrs[op_id];
2112
2113        if (instr->delay_ns > 0)
2114                mini_ctrl_cmd |= GCMD_LAY_TWB;
2115
2116        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2117                                    GCMD_LAY_INSTR_DATA);
2118
2119        if (instr->type == NAND_OP_DATA_OUT_INSTR)
2120                mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
2121                                            GCMD_DIR_WRITE);
2122
2123        len = nand_subop_get_data_len(subop, op_id);
2124        offset = nand_subop_get_data_start_off(subop, op_id);
2125        mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
2126        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
2127        if (instr->ctx.data.force_8bit) {
2128                ret = cadence_nand_force_byte_access(chip, true);
2129                if (ret) {
2130                        dev_err(cdns_ctrl->dev,
2131                                "cannot change byte access generic data cmd failed\n");
2132                        return ret;
2133                }
2134        }
2135
2136        ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2137                                            cdns_chip->cs[chip->cur_cs],
2138                                            mini_ctrl_cmd);
2139        if (ret) {
2140                dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
2141                return ret;
2142        }
2143
2144        if (instr->type == NAND_OP_DATA_IN_INSTR) {
2145                void *buf = instr->ctx.data.buf.in + offset;
2146
2147                ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
2148        } else {
2149                const void *buf = instr->ctx.data.buf.out + offset;
2150
2151                ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
2152        }
2153
2154        if (ret) {
2155                dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
2156                return ret;
2157        }
2158
2159        if (instr->ctx.data.force_8bit) {
2160                ret = cadence_nand_force_byte_access(chip, false);
2161                if (ret) {
2162                        dev_err(cdns_ctrl->dev,
2163                                "cannot change byte access generic data cmd failed\n");
2164                }
2165        }
2166
2167        return ret;
2168}
2169
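/* Wait until the selected chip reports ready on its Ready/Busy line. */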
2170static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
2171                                    const struct nand_subop *subop)
2172{
2173        int status;
2174        unsigned int op_id = 0;
2175        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2176        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2177        const struct nand_op_instr *instr = &subop->instrs[op_id];
2178        u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
2179
2180        status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
2181                                             timeout_us,
2182                                             BIT(cdns_chip->cs[chip->cur_cs]),
2183                                             false);
2184        return status;
2185}
2186
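/*
 * Generic command patterns supported by the driver. The erase pattern is
 * listed first so that a standard erase sequence is turned into a CDMA
 * erase operation.
 */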
2187static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
2188        NAND_OP_PARSER_PATTERN(
2189                cadence_nand_cmd_erase,
2190                NAND_OP_PARSER_PAT_CMD_ELEM(false),
2191                NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
2192                NAND_OP_PARSER_PAT_CMD_ELEM(false),
2193                NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
2194        NAND_OP_PARSER_PATTERN(
2195                cadence_nand_cmd_opcode,
2196                NAND_OP_PARSER_PAT_CMD_ELEM(false)),
2197        NAND_OP_PARSER_PATTERN(
2198                cadence_nand_cmd_address,
2199                NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
2200        NAND_OP_PARSER_PATTERN(
2201                cadence_nand_cmd_data,
2202                NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
2203        NAND_OP_PARSER_PATTERN(
2204                cadence_nand_cmd_data,
2205                NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
2206        NAND_OP_PARSER_PATTERN(
2207                cadence_nand_cmd_waitrdy,
2208                NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
2209        );
2210
2211static int cadence_nand_exec_op(struct nand_chip *chip,
2212                                const struct nand_operation *op,
2213                                bool check_only)
2214{
2215        int status = cadence_nand_select_target(chip);
2216
2217        if (status)
2218                return status;
2219
2220        return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
2221                                      check_only);
2222}
2223
2224static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
2225                                       struct mtd_oob_region *oobregion)
2226{
2227        struct nand_chip *chip = mtd_to_nand(mtd);
2228        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2229
2230        if (section)
2231                return -ERANGE;
2232
2233        oobregion->offset = cdns_chip->bbm_len;
2234        oobregion->length = cdns_chip->avail_oob_size
2235                - cdns_chip->bbm_len;
2236
2237        return 0;
2238}
2239
2240static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2241                                      struct mtd_oob_region *oobregion)
2242{
2243        struct nand_chip *chip = mtd_to_nand(mtd);
2244        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2245
2246        if (section)
2247                return -ERANGE;
2248
2249        oobregion->offset = cdns_chip->avail_oob_size;
2250        oobregion->length = chip->ecc.total;
2251
2252        return 0;
2253}
2254
2255static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
2256        .free = cadence_nand_ooblayout_free,
2257        .ecc = cadence_nand_ooblayout_ecc,
2258};
2259
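/*
 * Convert a timing in picoseconds to the cycle count programmed into the
 * timing registers, i.e. the number of clock cycles minus one. For
 * example, a 45000 ps timing with a 20000 ps clock period yields 2,
 * i.e. three cycles.
 */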
2260static int calc_cycl(u32 timing, u32 clock)
2261{
2262        if (timing == 0 || clock == 0)
2263                return 0;
2264
2265        if ((timing % clock) > 0)
2266                return timing / clock;
2267        else
2268                return timing / clock - 1;
2269}
2270
2271/* Calculate max data valid window. */
2272static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
2273                                u32 board_delay_skew_min, u32 ext_mode)
2274{
2275        if (ext_mode == 0)
2276                clk_period /= 2;
2277
2278        return (trp_cnt + 1) * clk_period + trhoh_min +
2279                board_delay_skew_min;
2280}
2281
2282/* Calculate data valid window. */
2283static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
2284                            u32 trea_max, u32 ext_mode)
2285{
2286        if (ext_mode == 0)
2287                clk_period /= 2;
2288
2289        return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
2290}
2291
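/*
 * Compute the timing register values for the requested SDR timing mode
 * and cache them per chip; they are programmed into the controller when
 * the chip is selected.
 */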
2292static int
2293cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr,
2294                                  const struct nand_data_interface *conf)
2295{
2296        const struct nand_sdr_timings *sdr;
2297        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2298        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2299        struct cadence_nand_timings *t = &cdns_chip->timings;
2300        u32 reg;
2301        u32 board_delay = cdns_ctrl->board_delay;
2302        u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
2303                                            cdns_ctrl->nf_clk_rate);
2304        u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
2305        u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
2306        u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
2307        u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
2308        u32 if_skew = cdns_ctrl->caps1->if_skew;
2309        u32 board_delay_skew_min = board_delay - if_skew;
2310        u32 board_delay_skew_max = board_delay + if_skew;
2311        u32 dqs_sampl_res, phony_dqs_mod;
2312        u32 tdvw, tdvw_min, tdvw_max;
2313        u32 ext_rd_mode, ext_wr_mode;
2314        u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
2315        u32 sampling_point;
2316
2317        sdr = nand_get_sdr_timings(conf);
2318        if (IS_ERR(sdr))
2319                return PTR_ERR(sdr);
2320
2321        memset(t, 0, sizeof(*t));
2322        /* Sampling point calculation. */
2323
2324        if (cdns_ctrl->caps2.is_phy_type_dll)
2325                phony_dqs_mod = 2;
2326        else
2327                phony_dqs_mod = 1;
2328
2329        dqs_sampl_res = clk_period / phony_dqs_mod;
2330
2331        tdvw_min = sdr->tREA_max + board_delay_skew_max;
2332        /*
2333         * The idea of these calculations is to get the optimum values
2334         * for the tRP and tRH timings. If it is NOT possible to sample data
2335         * with optimal tRP/tRH settings, the parameters will be extended.
2336         * If clk_period is 50ns (the lowest value) this condition is met
2337         * for asynchronous timing modes 1, 2, 3, 4 and 5.
2338         * If clk_period is 20ns the condition is met only
2339         * for asynchronous timing mode 5.
2340         */
2341        if (sdr->tRC_min <= clk_period &&
2342            sdr->tRP_min <= (clk_period / 2) &&
2343            sdr->tREH_min <= (clk_period / 2)) {
2344                /* Performance mode. */
2345                ext_rd_mode = 0;
2346                tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
2347                                 sdr->tREA_max, ext_rd_mode);
2348                tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
2349                                         board_delay_skew_min,
2350                                         ext_rd_mode);
2351                /*
2352                 * Check if the data valid window and a sampling point can be
2353                 * found and that the sampling point is not on the edge
2354                 * (i.e. we have hold margin). If not, extend the tRP timing.
2355                 */
2356                if (tdvw > 0) {
2357                        if (tdvw_max <= tdvw_min ||
2358                            (tdvw_max % dqs_sampl_res) == 0) {
2359                                /*
2360                                 * No valid sampling point, so the RE pulse
2361                                 * needs to be widened by half a clock cycle.
2362                                 */
2363                                ext_rd_mode = 1;
2364                        }
2365                } else {
2366                        /*
2367                         * There is no valid window in which to sample the
2368                         * data, so tRP needs to be widened.
2369                         * Very conservative calculations are performed here.
2370                         */
2371                        trp_cnt = (sdr->tREA_max + board_delay_skew_max
2372                                   + dqs_sampl_res) / clk_period;
2373                        ext_rd_mode = 1;
2374                }
2375
2376        } else {
2377                /* Extended read mode. */
2378                u32 trh;
2379
2380                ext_rd_mode = 1;
2381                trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
2382                trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
2383                if (sdr->tREH_min >= trh)
2384                        trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
2385                else
2386                        trh_cnt = calc_cycl(trh, clk_period);
2387
2388                tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
2389                                 sdr->tREA_max, ext_rd_mode);
2390                /*
2391                 * Check if the data valid window and a sampling point can be
2392                 * found or, if the window is at the edge, whether the previous
2393                 * sampling point is valid - if not, extend the tRP timing.
2394                 */
2395                if (tdvw > 0) {
2396                        tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
2397                                                 sdr->tRHOH_min,
2398                                                 board_delay_skew_min,
2399                                                 ext_rd_mode);
2400
2401                        if ((((tdvw_max / dqs_sampl_res)
2402                              * dqs_sampl_res) <= tdvw_min) ||
2403                            (((tdvw_max % dqs_sampl_res) == 0) &&
2404                             (((tdvw_max / dqs_sampl_res - 1)
2405                               * dqs_sampl_res) <= tdvw_min))) {
2406                                /*
2407                                 * The data valid window is narrower than the
2408                                 * sampling resolution and does not hit any
2409                                 * sampling point. To make sure a sampling
2410                                 * point is found, the RE low pulse width is
2411                                 * extended by one clock cycle.
2412                                 */
2413                                trp_cnt = trp_cnt + 1;
2414                        }
2415                } else {
2416                        /*
2417                         * There is no valid window in which to sample the data.
2418                         * tRP needs to be widened.
2419                         * Very conservative calculations are performed here.
2420                         */
2421                        trp_cnt = (sdr->tREA_max + board_delay_skew_max
2422                                   + dqs_sampl_res) / clk_period;
2423                }
2424        }
2425
2426        tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
2427                                 sdr->tRHOH_min,
2428                                 board_delay_skew_min, ext_rd_mode);
2429
2430        if (sdr->tWC_min <= clk_period &&
2431            (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
2432            (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
2433                ext_wr_mode = 0;
2434        } else {
2435                u32 twh;
2436
2437                ext_wr_mode = 1;
2438                twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
2439                if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
2440                        twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
2441                                            clk_period);
2442
2443                twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
2444                if (sdr->tWH_min >= twh)
2445                        twh = sdr->tWH_min;
2446
2447                twh_cnt = calc_cycl(twh + if_skew, clk_period);
2448        }
2449
2450        reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
2451        reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
2452        reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
2453        reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
2454        t->async_toggle_timings = reg;
2455        dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
2456
2457        tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
2458        tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
2459        twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
2460        trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
2461        reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
2462
2463        /*
2464         * If the timing exceeds the delay field of the timing register
2465         * then use the maximum value.
2466         */
2467        if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
2468                reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
2469        else
2470                reg |= TIMINGS0_TCCS;
2471
2472        reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
2473        reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
2474        t->timings0 = reg;
2475        dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
2476
2477        /* The following is related to a single signal, so no skew is needed. */
2478        trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
2479        trhz_cnt = trhz_cnt + 1;
2480        twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
2481        /*
2482         * Because of the two-stage syncflop the value must be increased by
2483         * 3 + 5: the first value is related to the synchronization, the
2484         * second to the output interface delay.
2485         */
2486        twb_cnt = twb_cnt + 3 + 5;
2487        /*
2488         * The following is related to the WE edge of the random data input
2489         * sequence.
2490         */
2491        tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
2492        reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
2493        reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
2494        reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
2495        t->timings1 = reg;
2496        dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
2497
2498        tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
2499        if (tfeat_cnt < twb_cnt)
2500                tfeat_cnt = twb_cnt;
2501
2502        tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
2503        tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
2504
2505        reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
2506        reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
2507        reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
2508        t->timings2 = reg;
2509        dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
2510
2511        if (cdns_ctrl->caps2.is_phy_type_dll) {
2512                reg = DLL_PHY_CTRL_DLL_RST_N;
2513                if (ext_wr_mode)
2514                        reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
2515                if (ext_rd_mode)
2516                        reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
2517
2518                reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
2519                reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
2520                t->dll_phy_ctrl = reg;
2521                dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
2522        }
2523
2524        /* Sampling point calculation. */
2525        if ((tdvw_max % dqs_sampl_res) > 0)
2526                sampling_point = tdvw_max / dqs_sampl_res;
2527        else
2528                sampling_point = (tdvw_max / dqs_sampl_res - 1);
2529
2530        if (sampling_point * dqs_sampl_res > tdvw_min) {
2531                dll_phy_dqs_timing =
2532                        FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
2533                dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
2534                phony_dqs_timing = sampling_point / phony_dqs_mod;
2535
2536                if ((sampling_point % 2) > 0) {
2537                        dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
2538                        if ((tdvw_max % dqs_sampl_res) == 0)
2539                                /*
2540                                 * Calculation for sampling point at the edge
2541                                 * of data and being odd number.
2542                                 */
2543                                phony_dqs_timing = (tdvw_max / dqs_sampl_res)
2544                                        / phony_dqs_mod - 1;
2545
2546                        if (!cdns_ctrl->caps2.is_phy_type_dll)
2547                                phony_dqs_timing--;
2548
2549                } else {
2550                        phony_dqs_timing--;
2551                }
2552                rd_del_sel = phony_dqs_timing + 3;
2553        } else {
2554                dev_warn(cdns_ctrl->dev,
2555                         "Cannot find a valid sampling point\n");
2556        }
2557
2558        reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
2559        if (cdns_ctrl->caps2.is_phy_type_dll)
2560                reg  |= PHY_CTRL_SDR_DQS;
2561        t->phy_ctrl = reg;
2562        dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
2563
2564        if (cdns_ctrl->caps2.is_phy_type_dll) {
2565                dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
2566                dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
2567                dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
2568                        dll_phy_dqs_timing);
2569                t->phy_dqs_timing = dll_phy_dqs_timing;
2570
2571                reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
2572                dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
2573                        reg);
2574                t->phy_gate_lpbk_ctrl = reg;
2575
2576                dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
2577                        PHY_DLL_MASTER_CTRL_BYPASS_MODE);
2578                dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
2579        }
2580
2581        return 0;
2582}
2583
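/*
 * Called by the NAND core once a chip has been identified: choose the
 * ECC configuration, set up the OOB layout and install the page and
 * OOB access callbacks.
 */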
2584static int cadence_nand_attach_chip(struct nand_chip *chip)
2585{
2586        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2587        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2588        u32 ecc_size;
2589        struct mtd_info *mtd = nand_to_mtd(chip);
2590        u32 max_oob_data_size;
2591        int ret;
2592
2593        if (chip->options & NAND_BUSWIDTH_16) {
2594                ret = cadence_nand_set_access_width16(cdns_ctrl, true);
2595                if (ret)
2596                        return ret;
2597        }
2598
2599        chip->bbt_options |= NAND_BBT_USE_FLASH;
2600        chip->bbt_options |= NAND_BBT_NO_OOB;
2601        chip->ecc.mode = NAND_ECC_HW;
2602
2603        chip->options |= NAND_NO_SUBPAGE_WRITE;
2604
2605        cdns_chip->bbm_offs = chip->badblockpos;
2606        if (chip->options & NAND_BUSWIDTH_16) {
2607                cdns_chip->bbm_offs &= ~0x01;
2608                cdns_chip->bbm_len = 2;
2609        } else {
2610                cdns_chip->bbm_len = 1;
2611        }
2612
2613        ret = nand_ecc_choose_conf(chip,
2614                                   &cdns_ctrl->ecc_caps,
2615                                   mtd->oobsize - cdns_chip->bbm_len);
2616        if (ret) {
2617                dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
2618                return ret;
2619        }
2620
2621        dev_dbg(cdns_ctrl->dev,
2622                "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
2623                chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
2624
2625        /* Error correction configuration. */
2626        cdns_chip->sector_size = chip->ecc.size;
2627        cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
            ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
2628
2629        cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
2630
2631        max_oob_data_size = MAX_OOB_SIZE_PER_SECTOR;
2632
2633        if (cdns_chip->avail_oob_size > max_oob_data_size)
2634                cdns_chip->avail_oob_size = max_oob_data_size;
2635
2636        if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
2637            > mtd->oobsize)
2638                cdns_chip->avail_oob_size -= 4;
2639
2640        ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
2641        if (ret < 0)
2642                return -EINVAL;
2643
2644        cdns_chip->corr_str_idx = (u8)ret;
2645
2646        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
2647                                        1000000,
2648                                        CTRL_STATUS_CTRL_BUSY, true))
2649                return -ETIMEDOUT;
2650
2651        cadence_nand_set_ecc_strength(cdns_ctrl,
2652                                      cdns_chip->corr_str_idx);
2653
2654        cadence_nand_set_erase_detection(cdns_ctrl, true,
2655                                         chip->ecc.strength);
2656
2657        /* Override the default read operations. */
2658        chip->ecc.read_page = cadence_nand_read_page;
2659        chip->ecc.read_page_raw = cadence_nand_read_page_raw;
2660        chip->ecc.write_page = cadence_nand_write_page;
2661        chip->ecc.write_page_raw = cadence_nand_write_page_raw;
2662        chip->ecc.read_oob = cadence_nand_read_oob;
2663        chip->ecc.write_oob = cadence_nand_write_oob;
2664        chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
2665        chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
2666
2667        if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
2668                cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
2669
2670        /* Is 32-bit DMA supported? */
2671        ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
2672        if (ret) {
2673                dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
2674                return ret;
2675        }
2676
2677        mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
2678
2679        return 0;
2680}
2681
2682static const struct nand_controller_ops cadence_nand_controller_ops = {
2683        .attach_chip = cadence_nand_attach_chip,
2684        .exec_op = cadence_nand_exec_op,
2685        .setup_data_interface = cadence_nand_setup_data_interface,
2686};
2687
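/* Bind one NAND chip, described by a DT child node, to the controller. */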
2688static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
2689                                  struct device_node *np)
2690{
2691        struct cdns_nand_chip *cdns_chip;
2692        struct mtd_info *mtd;
2693        struct nand_chip *chip;
2694        int nsels, ret, i;
2695        u32 cs;
2696
2697        nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
2698        if (nsels <= 0) {
2699                dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
2700                return -EINVAL;
2701        }
2702
2703        /* Allocate the nand chip structure. */
2704        cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
2705                                 (nsels * sizeof(u8)),
2706                                 GFP_KERNEL);
2707        if (!cdns_chip) {
2708                dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
2709                return -ENOMEM;
2710        }
2711
2712        cdns_chip->nsels = nsels;
2713
2714        for (i = 0; i < nsels; i++) {
2715                /* Retrieve CS id. */
2716                ret = of_property_read_u32_index(np, "reg", i, &cs);
2717                if (ret) {
2718                        dev_err(cdns_ctrl->dev,
2719                                "could not retrieve reg property: %d\n",
2720                                ret);
2721                        return ret;
2722                }
2723
2724                if (cs >= cdns_ctrl->caps2.max_banks) {
2725                        dev_err(cdns_ctrl->dev,
2726                                "invalid reg value: %u (max CS = %d)\n",
2727                                cs, cdns_ctrl->caps2.max_banks);
2728                        return -EINVAL;
2729                }
2730
2731                if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
2732                        dev_err(cdns_ctrl->dev,
2733                                "CS %d already assigned\n", cs);
2734                        return -EINVAL;
2735                }
2736
2737                cdns_chip->cs[i] = cs;
2738        }
2739
2740        chip = &cdns_chip->chip;
2741        chip->controller = &cdns_ctrl->controller;
2742        nand_set_flash_node(chip, np);
2743
2744        mtd = nand_to_mtd(chip);
2745        mtd->dev.parent = cdns_ctrl->dev;
2746
2747        /*
2748         * Default to HW ECC engine mode. If the nand-ecc-mode property is given
2749         * in the DT node, this entry will be overwritten in nand_scan_ident().
2750         */
2751        chip->ecc.mode = NAND_ECC_HW;
2752
2753        ret = nand_scan(chip, cdns_chip->nsels);
2754        if (ret) {
2755                dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
2756                return ret;
2757        }
2758
2759        ret = mtd_device_register(mtd, NULL, 0);
2760        if (ret) {
2761                dev_err(cdns_ctrl->dev,
2762                        "failed to register mtd device: %d\n", ret);
2763                nand_cleanup(chip);
2764                return ret;
2765        }
2766
2767        list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);
2768
2769        return 0;
2770}
2771
2772static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
2773{
2774        struct cdns_nand_chip *entry, *temp;
2775
2776        list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
2777                nand_release(&entry->chip);
2778                list_del(&entry->node);
2779        }
2780}
2781
static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
{
        struct device_node *np = cdns_ctrl->dev->of_node;
        struct device_node *nand_np;
        int max_cs = cdns_ctrl->caps2.max_banks;
        int nchips, ret;

        nchips = of_get_child_count(np);

        if (nchips > max_cs) {
                dev_err(cdns_ctrl->dev,
                        "too many NAND chips: %d (max = %d CS)\n",
                        nchips, max_cs);
                return -EINVAL;
        }

        for_each_child_of_node(np, nand_np) {
                ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
                if (ret) {
                        of_node_put(nand_np);
                        cadence_nand_chips_cleanup(cdns_ctrl);
                        return ret;
                }
        }

        return 0;
}

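/*
 * The IRQ line itself is devm-managed; this only masks the controller
 * side. Writing INTR_ENABLE with just the global INTR_EN bit set clears
 * every per-source enable bit, so no interrupt source can fire.
 */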
static void
cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
{
        /* Disable interrupts. */
        writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
}

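/*
 * Bring the controller up once: allocate the coherent CDMA descriptor and
 * a scratch buffer, request the (shared) interrupt, initialize the
 * hardware, optionally grab a memcpy-capable DMA channel, then scan and
 * register the chips. The error labels unwind in reverse order of
 * acquisition and deliberately fall through into one another.
 */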
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
        dma_cap_mask_t mask;
        int ret;

        cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
                                                  sizeof(*cdns_ctrl->cdma_desc),
                                                  &cdns_ctrl->dma_cdma_desc,
                                                  GFP_KERNEL);
        if (!cdns_ctrl->cdma_desc)
                return -ENOMEM;

        cdns_ctrl->buf_size = SZ_16K;
        cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
        if (!cdns_ctrl->buf) {
                ret = -ENOMEM;
                goto free_buf_desc;
        }

        if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
                             IRQF_SHARED, "cadence-nand-controller",
                             cdns_ctrl)) {
                dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
                ret = -ENODEV;
                goto free_buf;
        }

        spin_lock_init(&cdns_ctrl->irq_lock);
        init_completion(&cdns_ctrl->complete);

        ret = cadence_nand_hw_init(cdns_ctrl);
        if (ret)
                goto disable_irq;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        if (cdns_ctrl->caps1->has_dma) {
                cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
                if (!cdns_ctrl->dmac) {
                        dev_err(cdns_ctrl->dev,
                                "Unable to get a DMA channel\n");
                        ret = -EBUSY;
                        goto disable_irq;
                }
        }

        nand_controller_init(&cdns_ctrl->controller);
        INIT_LIST_HEAD(&cdns_ctrl->chips);

        cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
        cdns_ctrl->curr_corr_str_idx = 0xFF;

        ret = cadence_nand_chips_init(cdns_ctrl);
        if (ret) {
                dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
                        ret);
                goto dma_release_chnl;
        }

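        /*
         * The scratch buffer was sized at a conservative SZ_16K just for
         * chip scanning; buf_size may have been raised while chips were
         * attached (to fit the largest page + OOB layout), so reallocate
         * the buffer at its final size.
         */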
        kfree(cdns_ctrl->buf);
        cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
        if (!cdns_ctrl->buf) {
                ret = -ENOMEM;
                goto chips_cleanup;
        }

        return 0;

chips_cleanup:
        cadence_nand_chips_cleanup(cdns_ctrl);

dma_release_chnl:
        if (cdns_ctrl->dmac)
                dma_release_channel(cdns_ctrl->dmac);

disable_irq:
        cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);

free_buf:
        kfree(cdns_ctrl->buf);

free_buf_desc:
        dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
                          cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);

        return ret;
}

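/*
 * Common teardown for the controller: release everything acquired by
 * cadence_nand_init(), chips first, then interrupts, buffers, the
 * coherent CDMA descriptor and, if one was requested, the DMA channel.
 */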
static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
{
        cadence_nand_chips_cleanup(cdns_ctrl);
        cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
        kfree(cdns_ctrl->buf);
        dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
                          cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);

        if (cdns_ctrl->dmac)
                dma_release_channel(cdns_ctrl->dmac);
}

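/*
 * DT glue: wraps the controller state together with the "nf_clk" clock
 * handle obtained at probe time, from which nf_clk_rate is derived.
 */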
struct cadence_nand_dt {
        struct cdns_nand_ctrl cdns_ctrl;
        struct clk *clk;
};

static const struct cadence_nand_dt_devdata cadence_nand_default = {
        .if_skew = 0,
        .has_dma = 1,
};

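/*
 * A minimal, hypothetical DT fragment matching this driver; the unit
 * addresses, sizes and interrupt specifier below are illustrative only
 * and not taken from a binding document:
 *
 *	nand-controller@10b80000 {
 *		compatible = "cdns,hp-nfc";
 *		reg = <0x10b80000 0x10000>, <0x10840000 0x10000>;
 *		interrupts = <...>;
 *		clocks = <&nf_clk>;
 *		clock-names = "nf_clk";
 *		cdns,board-delay-ps = <4830>;
 *	};
 */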
static const struct of_device_id cadence_nand_dt_ids[] = {
        {
                .compatible = "cdns,hp-nfc",
                .data = &cadence_nand_default
        }, {}
};

MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);

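/*
 * Probe: match devdata, map register region 0 and the data (slave-DMA)
 * region 1, look up the "nf_clk" clock to derive nf_clk_rate, read the
 * optional cdns,board-delay-ps property (defaulting to 4830 ps), then
 * hand off to cadence_nand_init().
 */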
static int cadence_nand_dt_probe(struct platform_device *ofdev)
{
        struct resource *res;
        struct cadence_nand_dt *dt;
        struct cdns_nand_ctrl *cdns_ctrl;
        int ret;
        const struct of_device_id *of_id;
        const struct cadence_nand_dt_devdata *devdata;
        u32 val;

        of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
        if (of_id) {
                ofdev->id_entry = of_id->data;
                devdata = of_id->data;
        } else {
                dev_err(&ofdev->dev, "Failed to find the right device id.\n");
                return -ENODEV;
        }

        dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
        if (!dt)
                return -ENOMEM;

        cdns_ctrl = &dt->cdns_ctrl;
        cdns_ctrl->caps1 = devdata;

        cdns_ctrl->dev = &ofdev->dev;
        cdns_ctrl->irq = platform_get_irq(ofdev, 0);
        if (cdns_ctrl->irq < 0)
                return cdns_ctrl->irq;

        dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);

        cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
        if (IS_ERR(cdns_ctrl->reg)) {
                dev_err(&ofdev->dev, "devm_ioremap_resource res 0 failed\n");
                return PTR_ERR(cdns_ctrl->reg);
        }

        res = platform_get_resource(ofdev, IORESOURCE_MEM, 1);
        cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res);
        if (IS_ERR(cdns_ctrl->io.virt)) {
                dev_err(cdns_ctrl->dev, "devm_ioremap_resource res 1 failed\n");
                return PTR_ERR(cdns_ctrl->io.virt);
        }
        cdns_ctrl->io.dma = res->start;

        dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
        if (IS_ERR(dt->clk))
                return PTR_ERR(dt->clk);

        cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);

        ret = of_property_read_u32(ofdev->dev.of_node,
                                   "cdns,board-delay-ps", &val);
        if (ret) {
                val = 4830;
                dev_info(cdns_ctrl->dev,
                         "missing cdns,board-delay-ps property, using default %d\n",
                         val);
        }
        cdns_ctrl->board_delay = val;

        ret = cadence_nand_init(cdns_ctrl);
        if (ret)
                return ret;

        platform_set_drvdata(ofdev, dt);
        return 0;
}

static int cadence_nand_dt_remove(struct platform_device *ofdev)
{
        struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);

        cadence_nand_remove(&dt->cdns_ctrl);

        return 0;
}

static struct platform_driver cadence_nand_dt_driver = {
        .probe          = cadence_nand_dt_probe,
        .remove         = cadence_nand_dt_remove,
        .driver         = {
                .name   = "cadence-nand-controller",
                .of_match_table = cadence_nand_dt_ids,
        },
};

module_platform_driver(cadence_nand_dt_driver);

MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");