linux/drivers/mtd/nand/raw/cadence-nand-controller.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Cadence NAND flash controller driver
 *
 * Copyright (C) 2019 Cadence
 *
 * Author: Piotr Sroka <piotrs@cadence.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_device.h>
#include <linux/iopoll.h>
#include <linux/slab.h>

/*
 * HPNFC can work in 3 modes:
 * -  PIO - can work in master or slave DMA.
 * -  CDMA - needs master DMA for accessing command descriptors.
 * -  Generic mode - can use only slave DMA.
 * CDMA and PIO modes can be used to execute only base commands.
 * Generic mode can be used to execute any command
 * on the NAND flash memory. The driver uses CDMA mode for
 * block erasing, page reading and page programming.
 * Generic mode is used for executing the rest of the commands.
 */

#define MAX_ADDRESS_CYC         6
#define MAX_ERASE_ADDRESS_CYC   3
#define MAX_DATA_SIZE           0xFFFC
#define DMA_DATA_SIZE_ALIGN     8

/* Register definition. */
/*
 * Command register 0.
 * Writing data to this register will initiate a new transaction
 * of the NF controller.
 */
#define CMD_REG0                        0x0000
/* Command type field mask. */
#define         CMD_REG0_CT             GENMASK(31, 30)
/* Command type CDMA. */
#define         CMD_REG0_CT_CDMA        0uL
/* Command type generic. */
#define         CMD_REG0_CT_GEN         3uL
/* Command thread number field mask. */
#define         CMD_REG0_TN             GENMASK(27, 24)

/* Command register 2. */
#define CMD_REG2                        0x0008
/* Command register 3. */
#define CMD_REG3                        0x000C
/* Pointer register to select which thread status will be selected. */
#define CMD_STATUS_PTR                  0x0010
/* Command status register for selected thread. */
#define CMD_STATUS                      0x0014

/* Interrupt status register. */
#define INTR_STATUS                     0x0110
#define         INTR_STATUS_SDMA_ERR    BIT(22)
#define         INTR_STATUS_SDMA_TRIGG  BIT(21)
#define         INTR_STATUS_UNSUPP_CMD  BIT(19)
#define         INTR_STATUS_DDMA_TERR   BIT(18)
#define         INTR_STATUS_CDMA_TERR   BIT(17)
#define         INTR_STATUS_CDMA_IDL    BIT(16)

/* Interrupt enable register. */
#define INTR_ENABLE                             0x0114
#define         INTR_ENABLE_INTR_EN             BIT(31)
#define         INTR_ENABLE_SDMA_ERR_EN         BIT(22)
#define         INTR_ENABLE_SDMA_TRIGG_EN       BIT(21)
#define         INTR_ENABLE_UNSUPP_CMD_EN       BIT(19)
#define         INTR_ENABLE_DDMA_TERR_EN        BIT(18)
#define         INTR_ENABLE_CDMA_TERR_EN        BIT(17)
#define         INTR_ENABLE_CDMA_IDLE_EN        BIT(16)

/* Controller internal state. */
#define CTRL_STATUS                             0x0118
#define         CTRL_STATUS_INIT_COMP           BIT(9)
#define         CTRL_STATUS_CTRL_BUSY           BIT(8)

/* Command Engine threads state. */
#define TRD_STATUS                              0x0120

/* Command Engine interrupt thread error status. */
#define TRD_ERR_INT_STATUS                      0x0128
/* Command Engine interrupt thread error enable. */
#define TRD_ERR_INT_STATUS_EN                   0x0130
/* Command Engine interrupt thread complete status. */
#define TRD_COMP_INT_STATUS                     0x0138

/*
 * Transfer config 0 register.
 * Configures data transfer parameters.
 */
#define TRAN_CFG_0                              0x0400
/* Offset value from the beginning of the page. */
#define         TRAN_CFG_0_OFFSET               GENMASK(31, 16)
/* Number of sectors to transfer within a single NF device's page. */
#define         TRAN_CFG_0_SEC_CNT              GENMASK(7, 0)

/*
 * Transfer config 1 register.
 * Configures data transfer parameters.
 */
#define TRAN_CFG_1                              0x0404
/* Size of last data sector. */
#define         TRAN_CFG_1_LAST_SEC_SIZE        GENMASK(31, 16)
/* Size of not-last data sector. */
#define         TRAN_CFG_1_SECTOR_SIZE          GENMASK(15, 0)

/* ECC engine configuration register 0. */
#define ECC_CONFIG_0                            0x0428
/* Correction strength. */
#define         ECC_CONFIG_0_CORR_STR           GENMASK(10, 8)
/* Enable erased pages detection mechanism. */
#define         ECC_CONFIG_0_ERASE_DET_EN       BIT(1)
/* Enable controller ECC check bits generation and correction. */
#define         ECC_CONFIG_0_ECC_EN             BIT(0)

/* ECC engine configuration register 1. */
#define ECC_CONFIG_1                            0x042C

/* Multiplane settings register. */
#define MULTIPLANE_CFG                          0x0434
/* Cache operation settings. */
#define CACHE_CFG                               0x0438

/* DMA settings register. */
#define DMA_SETINGS                             0x043C
/* Enable SDMA error report on access to an unprepared slave DMA interface. */
#define         DMA_SETINGS_SDMA_ERR_RSP        BIT(17)

/* Transferred data block size for the slave DMA module. */
#define SDMA_SIZE                               0x0440

/*
 * Thread number associated with transferred data block
 * for the slave DMA module.
 */
#define SDMA_TRD_NUM                            0x0444
/* Thread number mask. */
#define         SDMA_TRD_NUM_SDMA_TRD           GENMASK(2, 0)

#define CONTROL_DATA_CTRL                       0x0494
/* Control data size mask. */
#define         CONTROL_DATA_CTRL_SIZE          GENMASK(15, 0)

#define CTRL_VERSION                            0x800
#define         CTRL_VERSION_REV                GENMASK(7, 0)

/* Available hardware features of the controller. */
#define CTRL_FEATURES                           0x804
/* Support for NV-DDR2/3 work mode. */
#define         CTRL_FEATURES_NVDDR_2_3         BIT(28)
/* Support for NV-DDR work mode. */
#define         CTRL_FEATURES_NVDDR             BIT(27)
/* Support for asynchronous work mode. */
#define         CTRL_FEATURES_ASYNC             BIT(26)
/* Number of banks supported by the controller. */
#define         CTRL_FEATURES_N_BANKS           GENMASK(25, 24)
/* Slave and Master DMA data width. */
#define         CTRL_FEATURES_DMA_DWITH64       BIT(21)
/* Availability of Control Data feature. */
#define         CTRL_FEATURES_CONTROL_DATA      BIT(10)

/* BCH Engine identification register 0 - correction strengths. */
#define BCH_CFG_0                               0x838
#define         BCH_CFG_0_CORR_CAP_0            GENMASK(7, 0)
#define         BCH_CFG_0_CORR_CAP_1            GENMASK(15, 8)
#define         BCH_CFG_0_CORR_CAP_2            GENMASK(23, 16)
#define         BCH_CFG_0_CORR_CAP_3            GENMASK(31, 24)

/* BCH Engine identification register 1 - correction strengths. */
#define BCH_CFG_1                               0x83C
#define         BCH_CFG_1_CORR_CAP_4            GENMASK(7, 0)
#define         BCH_CFG_1_CORR_CAP_5            GENMASK(15, 8)
#define         BCH_CFG_1_CORR_CAP_6            GENMASK(23, 16)
#define         BCH_CFG_1_CORR_CAP_7            GENMASK(31, 24)

/* BCH Engine identification register 2 - sector sizes. */
#define BCH_CFG_2                               0x840
#define         BCH_CFG_2_SECT_0                GENMASK(15, 0)
#define         BCH_CFG_2_SECT_1                GENMASK(31, 16)

/* BCH Engine identification register 3. */
#define BCH_CFG_3                               0x844
#define         BCH_CFG_3_METADATA_SIZE         GENMASK(23, 16)

/* Ready/Busy# line status. */
#define RBN_SETINGS                             0x1004

/* Common settings. */
#define COMMON_SET                              0x1008
/* 16 bit device connected to the NAND Flash interface. */
#define         COMMON_SET_DEVICE_16BIT         BIT(8)

/* Skip_bytes registers. */
#define SKIP_BYTES_CONF                         0x100C
#define         SKIP_BYTES_MARKER_VALUE         GENMASK(31, 16)
#define         SKIP_BYTES_NUM_OF_BYTES         GENMASK(7, 0)

#define SKIP_BYTES_OFFSET                       0x1010
#define         SKIP_BYTES_OFFSET_VALUE         GENMASK(23, 0)
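
/*
 * Illustrative note (not in the original source): the skip-bytes feature
 * lets the controller leave the factory bad block marker area out of the
 * ECC-protected payload. For example, cadence_nand_write_page() below
 * configures it roughly as:
 *
 *   cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
 *                                    mtd->writesize + cdns_chip->bbm_offs, 1);
 *   cadence_nand_set_skip_marker_val(cdns_ctrl, 0xFFFF);
 *
 * so that bbm_len bytes at the BBM offset carry the marker value instead
 * of page data.
 */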

/* Timings configuration. */
#define ASYNC_TOGGLE_TIMINGS                    0x101c
#define         ASYNC_TOGGLE_TIMINGS_TRH        GENMASK(28, 24)
#define         ASYNC_TOGGLE_TIMINGS_TRP        GENMASK(20, 16)
#define         ASYNC_TOGGLE_TIMINGS_TWH        GENMASK(12, 8)
#define         ASYNC_TOGGLE_TIMINGS_TWP        GENMASK(4, 0)

#define TIMINGS0                                0x1024
#define         TIMINGS0_TADL                   GENMASK(31, 24)
#define         TIMINGS0_TCCS                   GENMASK(23, 16)
#define         TIMINGS0_TWHR                   GENMASK(15, 8)
#define         TIMINGS0_TRHW                   GENMASK(7, 0)

#define TIMINGS1                                0x1028
#define         TIMINGS1_TRHZ                   GENMASK(31, 24)
#define         TIMINGS1_TWB                    GENMASK(23, 16)
#define         TIMINGS1_TVDLY                  GENMASK(7, 0)

#define TIMINGS2                                0x102c
#define         TIMINGS2_TFEAT                  GENMASK(25, 16)
#define         TIMINGS2_CS_HOLD_TIME           GENMASK(13, 8)
#define         TIMINGS2_CS_SETUP_TIME          GENMASK(5, 0)

/* Configuration of the resynchronization of slave DLL of PHY. */
#define DLL_PHY_CTRL                            0x1034
#define         DLL_PHY_CTRL_DLL_RST_N          BIT(24)
#define         DLL_PHY_CTRL_EXTENDED_WR_MODE   BIT(17)
#define         DLL_PHY_CTRL_EXTENDED_RD_MODE   BIT(16)
#define         DLL_PHY_CTRL_RS_HIGH_WAIT_CNT   GENMASK(11, 8)
#define         DLL_PHY_CTRL_RS_IDLE_CNT        GENMASK(7, 0)

/* Register controlling DQ related timing. */
#define PHY_DQ_TIMING                           0x2000
/* Register controlling DQS related timing. */
#define PHY_DQS_TIMING                          0x2004
#define         PHY_DQS_TIMING_DQS_SEL_OE_END   GENMASK(3, 0)
#define         PHY_DQS_TIMING_PHONY_DQS_SEL    BIT(16)
#define         PHY_DQS_TIMING_USE_PHONY_DQS    BIT(20)

/* Register controlling the gate and loopback control related timing. */
#define PHY_GATE_LPBK_CTRL                      0x2008
#define         PHY_GATE_LPBK_CTRL_RDS          GENMASK(24, 19)

/* Register holds the control for the master DLL logic. */
#define PHY_DLL_MASTER_CTRL                     0x200C
#define         PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)

/* Register holds the control for the slave DLL logic. */
#define PHY_DLL_SLAVE_CTRL                      0x2010

/* This register handles the global control settings for the PHY. */
#define PHY_CTRL                                0x2080
#define         PHY_CTRL_SDR_DQS                BIT(14)
#define         PHY_CTRL_PHONY_DQS              GENMASK(9, 4)

/*
 * This register handles the global control settings
 * for the termination selects for reads.
 */
#define PHY_TSEL                                0x2084

/* Generic command layout. */
#define GCMD_LAY_CS                     GENMASK_ULL(11, 8)
/*
 * This bit informs the minicontroller if it has to wait for tWB
 * after sending the last CMD/ADDR/DATA in the sequence.
 */
#define GCMD_LAY_TWB                    BIT_ULL(6)
/* Type of generic instruction. */
#define GCMD_LAY_INSTR                  GENMASK_ULL(5, 0)

/* Generic CMD sequence type. */
#define         GCMD_LAY_INSTR_CMD      0
/* Generic ADDR sequence type. */
#define         GCMD_LAY_INSTR_ADDR     1
/* Generic data transfer sequence type. */
#define         GCMD_LAY_INSTR_DATA     2

/* Input part of the generic command when the input type is a command. */
#define GCMD_LAY_INPUT_CMD              GENMASK_ULL(23, 16)

/* Generic command address sequence - address fields. */
#define GCMD_LAY_INPUT_ADDR             GENMASK_ULL(63, 16)
/* Generic command address sequence - address size. */
#define GCMD_LAY_INPUT_ADDR_SIZE        GENMASK_ULL(13, 11)

/* Transfer direction field of generic command data sequence. */
#define GCMD_DIR                        BIT_ULL(11)
/* Read transfer direction of generic command data sequence. */
#define         GCMD_DIR_READ           0
/* Write transfer direction of generic command data sequence. */
#define         GCMD_DIR_WRITE          1

/* ECC enabled flag of generic command data sequence - ECC enabled. */
#define GCMD_ECC_EN                     BIT_ULL(12)
/* Generic command data sequence - sector size. */
#define GCMD_SECT_SIZE                  GENMASK_ULL(31, 16)
/* Generic command data sequence - sector count. */
#define GCMD_SECT_CNT                   GENMASK_ULL(39, 32)
/* Generic command data sequence - last sector size. */
#define GCMD_LAST_SIZE                  GENMASK_ULL(55, 40)

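/*
 * Illustrative example (not in the original source): a generic data
 * sequence word for reading one 256-byte chunk with ECC disabled could
 * be composed from the fields above as:
 *
 *   u64 mini_ctrl_cmd = FIELD_PREP(GCMD_LAY_INSTR, GCMD_LAY_INSTR_DATA) |
 *                       FIELD_PREP(GCMD_DIR, GCMD_DIR_READ) |
 *                       FIELD_PREP(GCMD_SECT_CNT, 1) |
 *                       FIELD_PREP(GCMD_LAST_SIZE, 256);
 *
 * and then handed to cadence_nand_generic_cmd_send(), which splits it
 * into the CMD_REG2/CMD_REG3 register pair.
 */
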
/* CDMA descriptor fields. */
/* Erase command type of CDMA descriptor. */
#define CDMA_CT_ERASE           0x1000
/* Program page command type of CDMA descriptor. */
#define CDMA_CT_WR              0x2100
/* Read page command type of CDMA descriptor. */
#define CDMA_CT_RD              0x2200

/* Flash pointer memory shift. */
#define CDMA_CFPTR_MEM_SHIFT    24
/* Flash pointer memory mask. */
#define CDMA_CFPTR_MEM          GENMASK(26, 24)

/*
 * Command DMA descriptor flags. If set, causes an interrupt to be issued
 * after the completion of descriptor processing.
 */
#define CDMA_CF_INT             BIT(8)
/*
 * Command DMA descriptor flags - the next descriptor
 * address field is valid and descriptor processing should continue.
 */
#define CDMA_CF_CONT            BIT(9)
/* DMA master flag of command DMA descriptor. */
#define CDMA_CF_DMA_MASTER      BIT(10)

/* Operation complete status of command descriptor. */
#define CDMA_CS_COMP            BIT(15)
/* Command descriptor status - operation fail. */
#define CDMA_CS_FAIL            BIT(14)
/* Command descriptor status - page erased. */
#define CDMA_CS_ERP             BIT(11)
/* Command descriptor status - timeout occurred. */
#define CDMA_CS_TOUT            BIT(10)
/*
 * Maximum amount of correction applied to one ECC sector.
 * It is part of command descriptor status.
 */
#define CDMA_CS_MAXERR          GENMASK(9, 2)
/* Command descriptor status - uncorrectable ECC error. */
#define CDMA_CS_UNCE            BIT(1)
/* Command descriptor status - descriptor error. */
#define CDMA_CS_ERR             BIT(0)
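
/*
 * Illustrative note (not in the original source): after a descriptor
 * completes, the worst-case number of bitflips corrected in a single ECC
 * sector can be extracted from the status word with
 * FIELD_GET(CDMA_CS_MAXERR, desc->status); see
 * cadence_nand_check_desc_error() further down.
 */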

/* Status of operation - OK. */
#define STAT_OK                 0
/* Status of operation - FAIL. */
#define STAT_FAIL               2
/* Status of operation - uncorrectable ECC error. */
#define STAT_ECC_UNCORR         3
/* Status of operation - page erased. */
#define STAT_ERASED             5
/* Status of operation - correctable ECC error. */
#define STAT_ECC_CORR           6
/* Status of operation - unexpected state. */
#define STAT_UNKNOWN            7
/* Status of operation - operation is not completed yet. */
#define STAT_BUSY               0xFF

#define BCH_MAX_NUM_CORR_CAPS           8
#define BCH_MAX_NUM_SECTOR_SIZES        2

struct cadence_nand_timings {
        u32 async_toggle_timings;
        u32 timings0;
        u32 timings1;
        u32 timings2;
        u32 dll_phy_ctrl;
        u32 phy_ctrl;
        u32 phy_dqs_timing;
        u32 phy_gate_lpbk_ctrl;
};

/* Command DMA descriptor. */
struct cadence_nand_cdma_desc {
        /* Next descriptor address. */
        u64 next_pointer;

        /* Flash address is a 32-bit address comprising BANK and ROW ADDR. */
        u32 flash_pointer;
        /* Field appears in HPNFC version 13. */
        u16 bank;
        u16 rsvd0;

        /* Operation the controller needs to perform. */
        u16 command_type;
        u16 rsvd1;
        /* Flags for operation of this command. */
        u16 command_flags;
        u16 rsvd2;

        /* System/host memory address required for data DMA commands. */
        u64 memory_pointer;

        /* Status of operation. */
        u32 status;
        u32 rsvd3;

        /* Address pointer to sync buffer location. */
        u64 sync_flag_pointer;

        /* Controls the buffer sync mechanism. */
        u32 sync_arguments;
        u32 rsvd4;

        /* Control data pointer. */
        u64 ctrl_data_ptr;
};

/* Interrupt status. */
struct cadence_nand_irq_status {
        /* Thread operation complete status. */
        u32 trd_status;
        /* Thread operation error. */
        u32 trd_error;
        /* Controller status. */
        u32 status;
};

/* Cadence NAND flash controller capabilities obtained from driver data. */
struct cadence_nand_dt_devdata {
        /* Skew value of the output signals of the NAND Flash interface. */
        u32 if_skew;
        /* Tells whether the slave DMA interface is connected to a DMA engine. */
        unsigned int has_dma:1;
};

/* Cadence NAND flash controller capabilities read from registers. */
struct cdns_nand_caps {
        /* Maximum number of banks supported by hardware. */
        u8 max_banks;
        /* Slave and Master DMA data width in bytes (4 or 8). */
        u8 data_dma_width;
        /* Control Data feature supported. */
        bool data_control_supp;
        /* Is PHY type DLL. */
        bool is_phy_type_dll;
};

struct cdns_nand_ctrl {
        struct device *dev;
        struct nand_controller controller;
        struct cadence_nand_cdma_desc *cdma_desc;
        /* IP capability. */
        const struct cadence_nand_dt_devdata *caps1;
        struct cdns_nand_caps caps2;
        u8 ctrl_rev;
        dma_addr_t dma_cdma_desc;
        u8 *buf;
        u32 buf_size;
        u8 curr_corr_str_idx;

        /* Register interface. */
        void __iomem *reg;

        struct {
                void __iomem *virt;
                dma_addr_t dma;
        } io;

        int irq;
        /* Interrupts that have happened. */
        struct cadence_nand_irq_status irq_status;
        /* Interrupts we are waiting for. */
        struct cadence_nand_irq_status irq_mask;
        struct completion complete;
        /* Protect irq_mask and irq_status. */
        spinlock_t irq_lock;

        int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
        struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
        struct nand_ecc_caps ecc_caps;

        int curr_trans_type;

        struct dma_chan *dmac;

        u32 nf_clk_rate;
        /*
         * Estimated board delay. The value includes the total
         * round trip delay for the signals and is used for deciding on values
         * associated with data read capture.
         */
        u32 board_delay;

        struct nand_chip *selected_chip;

        unsigned long assigned_cs;
        struct list_head chips;
        u8 bch_metadata_size;
};

struct cdns_nand_chip {
        struct cadence_nand_timings timings;
        struct nand_chip chip;
        u8 nsels;
        struct list_head node;

        /*
         * Part of the OOB area of the NAND flash memory page.
         * This part is available for the user to read or write.
         */
        u32 avail_oob_size;

        /* Sector size. There are a few sectors per mtd->writesize. */
        u32 sector_size;
        u32 sector_count;

        /* Offset of BBM. */
        u8 bbm_offs;
        /* Number of bytes reserved for BBM. */
        u8 bbm_len;
        /* ECC strength index. */
        u8 corr_str_idx;

        u8 cs[];
};

struct ecc_info {
        int (*calc_ecc_bytes)(int step_size, int strength);
        int max_step_size;
};

static inline struct
cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
{
        return container_of(chip, struct cdns_nand_chip, chip);
}

static inline struct
cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
{
        return container_of(controller, struct cdns_nand_ctrl, controller);
}

static bool
cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
                        u32 buf_len)
{
        u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;

        return buf && virt_addr_valid(buf) &&
                likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
                likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
}

static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
                                       u32 reg_offset, u32 timeout_us,
                                       u32 mask, bool is_clear)
{
        u32 val;
        int ret;

        ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
                                         val, !(val & mask) == is_clear,
                                         10, timeout_us);

        if (ret < 0) {
                dev_err(cdns_ctrl->dev,
                        "Timeout while waiting for reg %x with mask %x is clear %d\n",
                        reg_offset, mask, is_clear);
        }

        return ret;
}
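
/*
 * Illustrative note (not in the original source): helpers in this driver
 * typically use the function above to wait for the controller to become
 * idle, e.g.:
 *
 *   cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, 1000000,
 *                               CTRL_STATUS_CTRL_BUSY, true);
 *
 * i.e. poll for up to 1 s until the CTRL_BUSY bit clears.
 */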

static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
                                       bool enable)
{
        u32 reg;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);

        if (enable)
                reg |= ECC_CONFIG_0_ECC_EN;
        else
                reg &= ~ECC_CONFIG_0_ECC_EN;

        writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);

        return 0;
}

static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
                                          u8 corr_str_idx)
{
        u32 reg;

        if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
                return;

        reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
        reg &= ~ECC_CONFIG_0_CORR_STR;
        reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
        writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);

        cdns_ctrl->curr_corr_str_idx = corr_str_idx;
}

static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
                                             u8 strength)
{
        int i, corr_str_idx = -1;

        for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
                if (cdns_ctrl->ecc_strengths[i] == strength) {
                        corr_str_idx = i;
                        break;
                }
        }

        return corr_str_idx;
}

static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
                                            u16 marker_value)
{
        u32 reg;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
        reg &= ~SKIP_BYTES_MARKER_VALUE;
        reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
                          marker_value);

        writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);

        return 0;
}

static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
                                            u8 num_of_bytes,
                                            u32 offset_value,
                                            int enable)
{
        u32 reg, skip_bytes_offset;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        if (!enable) {
                num_of_bytes = 0;
                offset_value = 0;
        }

        reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
        reg &= ~SKIP_BYTES_NUM_OF_BYTES;
        reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
                          num_of_bytes);
        skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
                                       offset_value);

        writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
        writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);

        return 0;
}

/* Function enables/disables hardware detection of erased data. */
static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
                                             bool enable,
                                             u8 bitflips_threshold)
{
        u32 reg;

        reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);

        if (enable)
                reg |= ECC_CONFIG_0_ERASE_DET_EN;
        else
                reg &= ~ECC_CONFIG_0_ERASE_DET_EN;

        writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
        writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
}

static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
                                           bool bit_bus16)
{
        u32 reg;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);

        if (!bit_bus16)
                reg &= ~COMMON_SET_DEVICE_16BIT;
        else
                reg |= COMMON_SET_DEVICE_16BIT;
        writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);

        return 0;
}

static void
cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
                             struct cadence_nand_irq_status *irq_status)
{
        writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
        writel_relaxed(irq_status->trd_status,
                       cdns_ctrl->reg + TRD_COMP_INT_STATUS);
        writel_relaxed(irq_status->trd_error,
                       cdns_ctrl->reg + TRD_ERR_INT_STATUS);
}

static void
cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
                             struct cadence_nand_irq_status *irq_status)
{
        irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
        irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
                                               + TRD_COMP_INT_STATUS);
        irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
                                              + TRD_ERR_INT_STATUS);
}

static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
                        struct cadence_nand_irq_status *irq_status)
{
        cadence_nand_read_int_status(cdns_ctrl, irq_status);

        return irq_status->status || irq_status->trd_status ||
                irq_status->trd_error;
}

static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
{
        unsigned long flags;

        spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
        memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
        memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
        spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
}
/*
 * This is the interrupt service routine. It handles all interrupts
 * sent to this device.
 */
static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
{
        struct cdns_nand_ctrl *cdns_ctrl = dev_id;
        struct cadence_nand_irq_status irq_status;
        irqreturn_t result = IRQ_NONE;

        spin_lock(&cdns_ctrl->irq_lock);

        if (irq_detected(cdns_ctrl, &irq_status)) {
                /* Handle the interrupt: first acknowledge it. */
                cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
                /* Store the status in the device context for someone to read. */
                cdns_ctrl->irq_status.status |= irq_status.status;
                cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
                cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
                /* Notify anyone who cares that it happened. */
                complete(&cdns_ctrl->complete);
                /* Tell the OS that we've handled this. */
                result = IRQ_HANDLED;
        }
        spin_unlock(&cdns_ctrl->irq_lock);

        return result;
}

static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
                                      struct cadence_nand_irq_status *irq_mask)
{
        writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
                       cdns_ctrl->reg + INTR_ENABLE);

        writel_relaxed(irq_mask->trd_error,
                       cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
}

static void
cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
                          struct cadence_nand_irq_status *irq_mask,
                          struct cadence_nand_irq_status *irq_status)
{
        unsigned long timeout = msecs_to_jiffies(10000);
        unsigned long time_left;

        time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
                                                timeout);

        *irq_status = cdns_ctrl->irq_status;
        if (time_left == 0) {
                /* Timeout error. */
                dev_err(cdns_ctrl->dev, "timeout occurred:\n");
                dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
                        irq_status->status, irq_mask->status);
                dev_err(cdns_ctrl->dev,
                        "\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
                        irq_status->trd_status, irq_mask->trd_status);
                dev_err(cdns_ctrl->dev,
                        "\ttrd_error = 0x%x, trd_error mask = 0x%x\n",
                        irq_status->trd_error, irq_mask->trd_error);
        }
}

/* Execute generic command on NAND controller. */
static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
                                         u8 chip_nr,
                                         u64 mini_ctrl_cmd)
{
        u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;

        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
        mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
        mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        cadence_nand_reset_irq(cdns_ctrl);

        writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
        writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);

        /* Select generic command. */
        reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
        /* Thread number. */
        reg |= FIELD_PREP(CMD_REG0_TN, 0);

        /* Issue command. */
        writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);

        return 0;
}

/* Wait for data on slave DMA interface. */
static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
                                     u8 *out_sdma_trd,
                                     u32 *out_sdma_size)
{
        struct cadence_nand_irq_status irq_mask, irq_status;

        irq_mask.trd_status = 0;
        irq_mask.trd_error = 0;
        irq_mask.status = INTR_STATUS_SDMA_TRIGG
                | INTR_STATUS_SDMA_ERR
                | INTR_STATUS_UNSUPP_CMD;

        cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
        cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
        if (irq_status.status == 0) {
                dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
                return -ETIMEDOUT;
        }

        if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
                *out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
                *out_sdma_trd  = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
                *out_sdma_trd =
                        FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
        } else {
                dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
                        irq_status.status);
                return -EIO;
        }

        return 0;
}

static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
{
        u32 reg;

        reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);

        cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);

        if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
                cdns_ctrl->caps2.data_dma_width = 8;
        else
                cdns_ctrl->caps2.data_dma_width = 4;

        if (reg & CTRL_FEATURES_CONTROL_DATA)
                cdns_ctrl->caps2.data_control_supp = true;

        if (reg & (CTRL_FEATURES_NVDDR_2_3
                   | CTRL_FEATURES_NVDDR))
                cdns_ctrl->caps2.is_phy_type_dll = true;
}

/* Prepare CDMA descriptor. */
static void
cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
                               char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
                               dma_addr_t ctrl_data_ptr, u16 ctype)
{
        struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;

        memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));

        /* Set fields for one descriptor. */
        cdma_desc->flash_pointer = flash_ptr;
        if (cdns_ctrl->ctrl_rev >= 13)
                cdma_desc->bank = nf_mem;
        else
                cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);

        cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
        cdma_desc->command_flags |= CDMA_CF_INT;

        cdma_desc->memory_pointer = mem_ptr;
        cdma_desc->status = 0;
        cdma_desc->sync_flag_pointer = 0;
        cdma_desc->sync_arguments = 0;

        cdma_desc->command_type = ctype;
        cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
}

static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
                                        u32 desc_status)
{
        if (desc_status & CDMA_CS_ERP)
                return STAT_ERASED;

        if (desc_status & CDMA_CS_UNCE)
                return STAT_ECC_UNCORR;

        if (desc_status & CDMA_CS_ERR) {
                dev_err(cdns_ctrl->dev, "CDMA desc error flag detected.\n");
                return STAT_FAIL;
        }

        if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
                return STAT_ECC_CORR;

        return STAT_FAIL;
}

static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
{
        struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
        u8 status = STAT_BUSY;

        if (desc_ptr->status & CDMA_CS_FAIL) {
                status = cadence_nand_check_desc_error(cdns_ctrl,
                                                       desc_ptr->status);
                dev_err(cdns_ctrl->dev, "CDMA error %x\n", desc_ptr->status);
        } else if (desc_ptr->status & CDMA_CS_COMP) {
                /* Descriptor finished with no errors. */
                if (desc_ptr->command_flags & CDMA_CF_CONT) {
                        dev_info(cdns_ctrl->dev, "DMA unsupported flag is set");
                        status = STAT_UNKNOWN;
                } else {
                        /* Last descriptor. */
                        status = STAT_OK;
                }
        }

        return status;
}

static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
                                  u8 thread)
{
        u32 reg;
        int status;

        /* Wait for thread ready. */
        status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
                                             1000000,
                                             BIT(thread), true);
        if (status)
                return status;

        cadence_nand_reset_irq(cdns_ctrl);
        reinit_completion(&cdns_ctrl->complete);

        writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
                       cdns_ctrl->reg + CMD_REG2);
        writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);

        /* Select CDMA mode. */
        reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
        /* Thread number. */
        reg |= FIELD_PREP(CMD_REG0_TN, thread);
        /* Issue command. */
        writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);

        return 0;
}
/* Send CDMA command and wait for finish. */
static u32
cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
                                u8 thread)
{
        struct cadence_nand_irq_status irq_mask, irq_status = {0};
        int status;

        irq_mask.trd_status = BIT(thread);
        irq_mask.trd_error = BIT(thread);
        irq_mask.status = INTR_STATUS_CDMA_TERR;

        cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);

        status = cadence_nand_cdma_send(cdns_ctrl, thread);
        if (status)
                return status;

        cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);

        if (irq_status.status == 0 && irq_status.trd_status == 0 &&
            irq_status.trd_error == 0) {
                dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
                return -ETIMEDOUT;
        }
        if (irq_status.status & irq_mask.status) {
                dev_err(cdns_ctrl->dev, "CDMA command failed\n");
                return -EIO;
        }

        return 0;
}

/*
 * ECC size depends on configured ECC strength and on maximum supported
 * ECC step size.
 */
static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
{
        int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);

        return ALIGN(nbytes, 2);
}

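/*
 * Illustrative worked example (not in the original source): with a maximum
 * step size of 1024 bytes and a strength of 8 bits,
 * fls(8 * 1024) = fls(8192) = 14, so
 * nbytes = DIV_ROUND_UP(14 * 8, 8) = 14, and ALIGN(14, 2) keeps it at
 * 14 ECC bytes per step.
 */
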
#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
        static int \
        cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
                                                    int strength)\
        {\
                return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
        }

CADENCE_NAND_CALC_ECC_BYTES(256)
CADENCE_NAND_CALC_ECC_BYTES(512)
CADENCE_NAND_CALC_ECC_BYTES(1024)
CADENCE_NAND_CALC_ECC_BYTES(2048)
CADENCE_NAND_CALC_ECC_BYTES(4096)

/* Function reads BCH capabilities. */
static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
{
        struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
        int max_step_size = 0, nstrengths, i;
        u32 reg;

        reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
        cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
        if (cdns_ctrl->bch_metadata_size < 4) {
                dev_err(cdns_ctrl->dev,
                        "Driver needs at least 4 bytes of BCH metadata\n");
                return -EIO;
        }

        reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
        cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
        cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
        cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
        cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);

        reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
        cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
        cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
        cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
        cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);

        reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
        cdns_ctrl->ecc_stepinfos[0].stepsize =
                FIELD_GET(BCH_CFG_2_SECT_0, reg);

        cdns_ctrl->ecc_stepinfos[1].stepsize =
                FIELD_GET(BCH_CFG_2_SECT_1, reg);

        nstrengths = 0;
        for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
                if (cdns_ctrl->ecc_strengths[i] != 0)
                        nstrengths++;
        }

        ecc_caps->nstepinfos = 0;
        for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
                /* ECC strengths are common for all step infos. */
                cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
                cdns_ctrl->ecc_stepinfos[i].strengths =
                        cdns_ctrl->ecc_strengths;

                if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
                        ecc_caps->nstepinfos++;

                if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
                        max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
        }
        ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];

        switch (max_step_size) {
        case 256:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
                break;
        case 512:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
                break;
        case 1024:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
                break;
        case 2048:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
                break;
        case 4096:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
                break;
        default:
                dev_err(cdns_ctrl->dev,
                        "Unsupported sector size (ECC step size) %d\n",
                        max_step_size);
                return -EIO;
        }

        return 0;
}

/* Hardware initialization. */
static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
{
        int status;
        u32 reg;

        status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                             1000000,
                                             CTRL_STATUS_INIT_COMP, false);
        if (status)
                return status;

        reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
        cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);

        dev_info(cdns_ctrl->dev,
                 "%s: cadence nand controller version reg %x\n",
                 __func__, reg);

        /* Disable cache and multiplane. */
        writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
        writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);

        /* Clear all interrupts. */
        writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);

        cadence_nand_get_caps(cdns_ctrl);
        if (cadence_nand_read_bch_caps(cdns_ctrl))
                return -EIO;

        /*
         * Set IO width access to 8.
         * During SW device discovery the width access
         * is expected to be 8.
         */
        status = cadence_nand_set_access_width16(cdns_ctrl, false);

        return status;
}

#define TT_MAIN_OOB_AREAS       2
#define TT_RAW_PAGE             3
#define TT_BBM                  4
#define TT_MAIN_OOB_AREA_EXT    5
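
/*
 * Illustrative note (not in the original source): these transfer types map
 * onto the TRAN_CFG_0/TRAN_CFG_1 programming done in
 * cadence_nand_prepare_data_size(). For example, TT_BBM configures a
 * transfer of just 8 bytes at offset mtd->writesize + bbm_offs, which is
 * how cadence_nand_read_bbm() fetches only the bad block marker area.
 */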

/* Prepare size of data to transfer. */
static void
cadence_nand_prepare_data_size(struct nand_chip *chip,
                               int transfer_type)
{
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
        struct mtd_info *mtd = nand_to_mtd(chip);
        u32 sec_size = 0, offset = 0, sec_cnt = 1;
        u32 last_sec_size = cdns_chip->sector_size;
        u32 data_ctrl_size = 0;
        u32 reg = 0;

        if (cdns_ctrl->curr_trans_type == transfer_type)
                return;

        switch (transfer_type) {
        case TT_MAIN_OOB_AREA_EXT:
                sec_cnt = cdns_chip->sector_count;
                sec_size = cdns_chip->sector_size;
                data_ctrl_size = cdns_chip->avail_oob_size;
                break;
        case TT_MAIN_OOB_AREAS:
                sec_cnt = cdns_chip->sector_count;
                last_sec_size = cdns_chip->sector_size
                        + cdns_chip->avail_oob_size;
                sec_size = cdns_chip->sector_size;
                break;
        case TT_RAW_PAGE:
                last_sec_size = mtd->writesize + mtd->oobsize;
                break;
        case TT_BBM:
                offset = mtd->writesize + cdns_chip->bbm_offs;
                last_sec_size = 8;
                break;
        }

        reg = 0;
        reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
        reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
        writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);

        reg = 0;
        reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
        reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
        writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);

        if (cdns_ctrl->caps2.data_control_supp) {
                reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
                reg &= ~CONTROL_DATA_CTRL_SIZE;
                reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
                writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
        }

        cdns_ctrl->curr_trans_type = transfer_type;
}

static int
cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
                           int page, void *buf, void *ctrl_dat, u32 buf_size,
                           u32 ctrl_dat_size, enum dma_data_direction dir,
                           bool with_ecc)
{
        dma_addr_t dma_buf, dma_ctrl_dat = 0;
        u8 thread_nr = chip_nr;
        int status;
        u16 ctype;

        if (dir == DMA_FROM_DEVICE)
                ctype = CDMA_CT_RD;
        else
                ctype = CDMA_CT_WR;

        cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);

        dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
        if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
                dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
                return -EIO;
        }

        if (ctrl_dat && ctrl_dat_size) {
                dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
                                              ctrl_dat_size, dir);
                if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
                        dma_unmap_single(cdns_ctrl->dev, dma_buf,
                                         buf_size, dir);
                        dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
                        return -EIO;
                }
        }

        cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
                                       dma_buf, dma_ctrl_dat, ctype);

        status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);

        dma_unmap_single(cdns_ctrl->dev, dma_buf,
                         buf_size, dir);

        if (ctrl_dat && ctrl_dat_size)
                dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
                                 ctrl_dat_size, dir);
        if (status)
                return status;

        return cadence_nand_cdma_finish(cdns_ctrl);
}

static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
                                     struct cadence_nand_timings *t)
{
        writel_relaxed(t->async_toggle_timings,
                       cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
        writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
        writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
        writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);

        if (cdns_ctrl->caps2.is_phy_type_dll)
                writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);

        writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);

        if (cdns_ctrl->caps2.is_phy_type_dll) {
                writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
                writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
                writel_relaxed(t->phy_dqs_timing,
                               cdns_ctrl->reg + PHY_DQS_TIMING);
                writel_relaxed(t->phy_gate_lpbk_ctrl,
                               cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
                writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
                               cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
                writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
        }
}

static int cadence_nand_select_target(struct nand_chip *chip)
{
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);

        if (chip == cdns_ctrl->selected_chip)
                return 0;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);

        cadence_nand_set_ecc_strength(cdns_ctrl,
                                      cdns_chip->corr_str_idx);

        cadence_nand_set_erase_detection(cdns_ctrl, true,
                                         chip->ecc.strength);

        cdns_ctrl->curr_trans_type = -1;
        cdns_ctrl->selected_chip = chip;

        return 0;
}

static int cadence_nand_erase(struct nand_chip *chip, u32 page)
{
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
        int status;
        u8 thread_nr = cdns_chip->cs[chip->cur_cs];

        cadence_nand_cdma_desc_prepare(cdns_ctrl,
                                       cdns_chip->cs[chip->cur_cs],
                                       page, 0, 0,
                                       CDMA_CT_ERASE);
        status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
        if (status) {
                dev_err(cdns_ctrl->dev, "erase operation failed\n");
                return -EIO;
        }

        status = cadence_nand_cdma_finish(cdns_ctrl);
        if (status)
                return status;

        return 0;
}

static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
{
        int status;
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
        struct mtd_info *mtd = nand_to_mtd(chip);

        cadence_nand_prepare_data_size(chip, TT_BBM);

        cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);

        /*
         * Read only the bad block marker, from the offset
         * defined by the memory manufacturer.
         */
        status = cadence_nand_cdma_transfer(cdns_ctrl,
                                            cdns_chip->cs[chip->cur_cs],
                                            page, cdns_ctrl->buf, NULL,
                                            mtd->oobsize,
                                            0, DMA_FROM_DEVICE, false);
        if (status) {
                dev_err(cdns_ctrl->dev, "read BBM failed\n");
                return -EIO;
        }

        memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);

        return 0;
}
1418
1419static int cadence_nand_write_page(struct nand_chip *chip,
1420                                   const u8 *buf, int oob_required,
1421                                   int page)
1422{
1423        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1424        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1425        struct mtd_info *mtd = nand_to_mtd(chip);
1426        int status;
1427        u16 marker_val = 0xFFFF;
1428
1429        status = cadence_nand_select_target(chip);
1430        if (status)
1431                return status;
1432
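            /*
             * Configure the skip-bytes feature so that the bbm_len bytes at
             * the BBM offset past the main area are not overwritten with
             * payload data; the controller writes marker_val there instead
             * (configured below via cadence_nand_set_skip_marker_val()).
             */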
1433        cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
1434                                         mtd->writesize
1435                                         + cdns_chip->bbm_offs,
1436                                         1);
1437
1438        if (oob_required) {
1439                marker_val = *(u16 *)(chip->oob_poi
1440                                      + cdns_chip->bbm_offs);
1441        } else {
1442                /* Set oob data to 0xFF. */
1443                memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
1444                       cdns_chip->avail_oob_size);
1445        }
1446
1447        cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);
1448
1449        cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
1450
1451        if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
1452            cdns_ctrl->caps2.data_control_supp) {
1453                u8 *oob;
1454
1455                if (oob_required)
1456                        oob = chip->oob_poi;
1457                else
1458                        oob = cdns_ctrl->buf + mtd->writesize;
1459
1460                status = cadence_nand_cdma_transfer(cdns_ctrl,
1461                                                    cdns_chip->cs[chip->cur_cs],
1462                                                    page, (void *)buf, oob,
1463                                                    mtd->writesize,
1464                                                    cdns_chip->avail_oob_size,
1465                                                    DMA_TO_DEVICE, true);
1466                if (status) {
1467                        dev_err(cdns_ctrl->dev, "write page failed\n");
1468                        return -EIO;
1469                }
1470
1471                return 0;
1472        }
1473
1474        if (oob_required) {
1475                /* Transfer the data to the oob area. */
1476                memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
1477                       cdns_chip->avail_oob_size);
1478        }
1479
1480        memcpy(cdns_ctrl->buf, buf, mtd->writesize);
1481
1482        cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
1483
1484        return cadence_nand_cdma_transfer(cdns_ctrl,
1485                                          cdns_chip->cs[chip->cur_cs],
1486                                          page, cdns_ctrl->buf, NULL,
1487                                          mtd->writesize
1488                                          + cdns_chip->avail_oob_size,
1489                                          0, DMA_TO_DEVICE, true);
1490}
1491
1492static int cadence_nand_write_oob(struct nand_chip *chip, int page)
1493{
1494        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1495        struct mtd_info *mtd = nand_to_mtd(chip);
1496
1497        memset(cdns_ctrl->buf, 0xFF, mtd->writesize);
1498
1499        return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
1500}
1501
1502static int cadence_nand_write_page_raw(struct nand_chip *chip,
1503                                       const u8 *buf, int oob_required,
1504                                       int page)
1505{
1506        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1507        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1508        struct mtd_info *mtd = nand_to_mtd(chip);
1509        int writesize = mtd->writesize;
1510        int oobsize = mtd->oobsize;
1511        int ecc_steps = chip->ecc.steps;
1512        int ecc_size = chip->ecc.size;
1513        int ecc_bytes = chip->ecc.bytes;
1514        void *tmp_buf = cdns_ctrl->buf;
1515        int oob_skip = cdns_chip->bbm_len;
1516        size_t size = writesize + oobsize;
1517        int i, pos, len;
1518        int status = 0;
1519
1520        status = cadence_nand_select_target(chip);
1521        if (status)
1522                return status;
1523
1524        /*
1525         * Fill the buffer with 0xff first, except for a full-page
1526         * transfer. This simplifies the logic.
1527         */
1528        if (!buf || !oob_required)
1529                memset(tmp_buf, 0xff, size);
1530
1531        cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
1532
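            /*
             * Expected raw on-flash layout, as implied by the copy loops
             * below (Si = sector data, Ei = per-sector ECC bytes):
             *
             *   |S0|E0|S1|E1| ... |Sn-1| free OOB |En-1|
             *
             * with the bad block marker bytes (oob_skip) spliced in at the
             * mtd->writesize boundary, shifting everything behind them.
             */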
1533        /* Arrange the buffer for syndrome payload/ecc layout. */
1534        if (buf) {
1535                for (i = 0; i < ecc_steps; i++) {
1536                        pos = i * (ecc_size + ecc_bytes);
1537                        len = ecc_size;
1538
1539                        if (pos >= writesize)
1540                                pos += oob_skip;
1541                        else if (pos + len > writesize)
1542                                len = writesize - pos;
1543
1544                        memcpy(tmp_buf + pos, buf, len);
1545                        buf += len;
1546                        if (len < ecc_size) {
1547                                len = ecc_size - len;
1548                                memcpy(tmp_buf + writesize + oob_skip, buf,
1549                                       len);
1550                                buf += len;
1551                        }
1552                }
1553        }
1554
1555        if (oob_required) {
1556                const u8 *oob = chip->oob_poi;
1557                u32 oob_data_offset = (cdns_chip->sector_count - 1) *
1558                        (cdns_chip->sector_size + chip->ecc.bytes)
1559                        + cdns_chip->sector_size + oob_skip;
1560
1561                /* BBM at the beginning of the OOB area. */
1562                memcpy(tmp_buf + writesize, oob, oob_skip);
1563
1564                /* OOB free. */
1565                memcpy(tmp_buf + oob_data_offset, oob,
1566                       cdns_chip->avail_oob_size);
1567                oob += cdns_chip->avail_oob_size;
1568
1569                /* OOB ECC. */
1570                for (i = 0; i < ecc_steps; i++) {
1571                        pos = ecc_size + i * (ecc_size + ecc_bytes);
1572                        if (i == (ecc_steps - 1))
1573                                pos += cdns_chip->avail_oob_size;
1574
1575                        len = ecc_bytes;
1576
1577                        if (pos >= writesize)
1578                                pos += oob_skip;
1579                        else if (pos + len > writesize)
1580                                len = writesize - pos;
1581
1582                        memcpy(tmp_buf + pos, oob, len);
1583                        oob += len;
1584                        if (len < ecc_bytes) {
1585                                len = ecc_bytes - len;
1586                                memcpy(tmp_buf + writesize + oob_skip, oob,
1587                                       len);
1588                                oob += len;
1589                        }
1590                }
1591        }
1592
1593        cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
1594
1595        return cadence_nand_cdma_transfer(cdns_ctrl,
1596                                          cdns_chip->cs[chip->cur_cs],
1597                                          page, cdns_ctrl->buf, NULL,
1598                                          mtd->writesize +
1599                                          mtd->oobsize,
1600                                          0, DMA_TO_DEVICE, false);
1601}
1602
1603static int cadence_nand_write_oob_raw(struct nand_chip *chip,
1604                                      int page)
1605{
1606        return cadence_nand_write_page_raw(chip, NULL, true, page);
1607}
1608
1609static int cadence_nand_read_page(struct nand_chip *chip,
1610                                  u8 *buf, int oob_required, int page)
1611{
1612        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1613        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1614        struct mtd_info *mtd = nand_to_mtd(chip);
1615        int status = 0;
1616        int ecc_err_count = 0;
1617
1618        status = cadence_nand_select_target(chip);
1619        if (status)
1620                return status;
1621
1622        cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
1623                                         mtd->writesize
1624                                         + cdns_chip->bbm_offs, 1);
1625
1626        /*
1627         * If the data buffer can be accessed by DMA and the data_control
1628         * feature is supported, then transfer the data and OOB directly.
1629         */
1630        if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
1631            cdns_ctrl->caps2.data_control_supp) {
1632                u8 *oob;
1633
1634                if (oob_required)
1635                        oob = chip->oob_poi;
1636                else
1637                        oob = cdns_ctrl->buf + mtd->writesize;
1638
1639                cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
1640                status = cadence_nand_cdma_transfer(cdns_ctrl,
1641                                                    cdns_chip->cs[chip->cur_cs],
1642                                                    page, buf, oob,
1643                                                    mtd->writesize,
1644                                                    cdns_chip->avail_oob_size,
1645                                                    DMA_FROM_DEVICE, true);
1646        /* Otherwise use the bounce buffer. */
1647        } else {
1648                cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
1649                status = cadence_nand_cdma_transfer(cdns_ctrl,
1650                                                    cdns_chip->cs[chip->cur_cs],
1651                                                    page, cdns_ctrl->buf,
1652                                                    NULL, mtd->writesize
1653                                                    + cdns_chip->avail_oob_size,
1654                                                    0, DMA_FROM_DEVICE, true);
1655
1656                memcpy(buf, cdns_ctrl->buf, mtd->writesize);
1657                if (oob_required)
1658                        memcpy(chip->oob_poi,
1659                               cdns_ctrl->buf + mtd->writesize,
1660                               mtd->oobsize);
1661        }
1662
1663        switch (status) {
1664        case STAT_ECC_UNCORR:
1665                mtd->ecc_stats.failed++;
1666                ecc_err_count++;
1667                break;
1668        case STAT_ECC_CORR:
1669                ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
1670                                          cdns_ctrl->cdma_desc->status);
1671                mtd->ecc_stats.corrected += ecc_err_count;
1672                break;
1673        case STAT_ERASED:
1674        case STAT_OK:
1675                break;
1676        default:
1677                dev_err(cdns_ctrl->dev, "read page failed\n");
1678                return -EIO;
1679        }
1680
1681        if (oob_required)
1682                if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
1683                        return -EIO;
1684
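            /*
             * Follow the rawnand convention of returning the maximum number
             * of bitflips detected in the page, so the upper MTD layers can
             * decide whether the block needs scrubbing.
             */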
1685        return ecc_err_count;
1686}
1687
1688/* Reads OOB data from the device. */
1689static int cadence_nand_read_oob(struct nand_chip *chip, int page)
1690{
1691        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1692
1693        return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
1694}
1695
1696static int cadence_nand_read_page_raw(struct nand_chip *chip,
1697                                      u8 *buf, int oob_required, int page)
1698{
1699        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1700        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1701        struct mtd_info *mtd = nand_to_mtd(chip);
1702        int oob_skip = cdns_chip->bbm_len;
1703        int writesize = mtd->writesize;
1704        int ecc_steps = chip->ecc.steps;
1705        int ecc_size = chip->ecc.size;
1706        int ecc_bytes = chip->ecc.bytes;
1707        void *tmp_buf = cdns_ctrl->buf;
1708        int i, pos, len;
1709        int status = 0;
1710
1711        status = cadence_nand_select_target(chip);
1712        if (status)
1713                return status;
1714
1715        cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
1716
1717        cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
1718        status = cadence_nand_cdma_transfer(cdns_ctrl,
1719                                            cdns_chip->cs[chip->cur_cs],
1720                                            page, cdns_ctrl->buf, NULL,
1721                                            mtd->writesize
1722                                            + mtd->oobsize,
1723                                            0, DMA_FROM_DEVICE, false);
1724
1725        switch (status) {
1726        case STAT_ERASED:
1727        case STAT_OK:
1728                break;
1729        default:
1730                dev_err(cdns_ctrl->dev, "read raw page failed\n");
1731                return -EIO;
1732        }
1733
1734        /* Arrange the buffer for syndrome payload/ecc layout. */
1735        if (buf) {
1736                for (i = 0; i < ecc_steps; i++) {
1737                        pos = i * (ecc_size + ecc_bytes);
1738                        len = ecc_size;
1739
1740                        if (pos >= writesize)
1741                                pos += oob_skip;
1742                        else if (pos + len > writesize)
1743                                len = writesize - pos;
1744
1745                        memcpy(buf, tmp_buf + pos, len);
1746                        buf += len;
1747                        if (len < ecc_size) {
1748                                len = ecc_size - len;
1749                                memcpy(buf, tmp_buf + writesize + oob_skip,
1750                                       len);
1751                                buf += len;
1752                        }
1753                }
1754        }
1755
1756        if (oob_required) {
1757                u8 *oob = chip->oob_poi;
1758                u32 oob_data_offset = (cdns_chip->sector_count - 1) *
1759                        (cdns_chip->sector_size + chip->ecc.bytes)
1760                        + cdns_chip->sector_size + oob_skip;
1761
1762                /* OOB free. */
1763                memcpy(oob, tmp_buf + oob_data_offset,
1764                       cdns_chip->avail_oob_size);
1765
1766                /* BBM at the beginning of the OOB area. */
1767                memcpy(oob, tmp_buf + writesize, oob_skip);
1768
1769                oob += cdns_chip->avail_oob_size;
1770
1771                /* OOB ECC */
1772                for (i = 0; i < ecc_steps; i++) {
1773                        pos = ecc_size + i * (ecc_size + ecc_bytes);
1774                        len = ecc_bytes;
1775
1776                        if (i == (ecc_steps - 1))
1777                                pos += cdns_chip->avail_oob_size;
1778
1779                        if (pos >= writesize)
1780                                pos += oob_skip;
1781                        else if (pos + len > writesize)
1782                                len = writesize - pos;
1783
1784                        memcpy(oob, tmp_buf + pos, len);
1785                        oob += len;
1786                        if (len < ecc_bytes) {
1787                                len = ecc_bytes - len;
1788                                memcpy(oob, tmp_buf + writesize + oob_skip,
1789                                       len);
1790                                oob += len;
1791                        }
1792                }
1793        }
1794
1795        return 0;
1796}
1797
1798static int cadence_nand_read_oob_raw(struct nand_chip *chip,
1799                                     int page)
1800{
1801        return cadence_nand_read_page_raw(chip, NULL, true, page);
1802}
1803
1804static void cadence_nand_slave_dma_transfer_finished(void *data)
1805{
1806        struct completion *finished = data;
1807
1808        complete(finished);
1809}
1810
1811static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
1812                                           void *buf,
1813                                           dma_addr_t dev_dma, size_t len,
1814                                           enum dma_data_direction dir)
1815{
1816        DECLARE_COMPLETION_ONSTACK(finished);
1817        struct dma_chan *chan;
1818        struct dma_device *dma_dev;
1819        dma_addr_t src_dma, dst_dma, buf_dma;
1820        struct dma_async_tx_descriptor *tx;
1821        dma_cookie_t cookie;
1822
1823        chan = cdns_ctrl->dmac;
1824        dma_dev = chan->device;
1825
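            /*
             * The slave DMA interface exposes the transferred data at a
             * fixed I/O address (cdns_ctrl->io.dma), so a plain
             * memcpy-capable channel can move the whole payload to or from
             * that address.
             */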
1826        buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
1827        if (dma_mapping_error(dma_dev->dev, buf_dma)) {
1828                dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
1829                goto err;
1830        }
1831
1832        if (dir == DMA_FROM_DEVICE) {
1833                src_dma = cdns_ctrl->io.dma;
1834                dst_dma = buf_dma;
1835        } else {
1836                src_dma = buf_dma;
1837                dst_dma = cdns_ctrl->io.dma;
1838        }
1839
1840        tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
1841                                       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
1842        if (!tx) {
1843                dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
1844                goto err_unmap;
1845        }
1846
1847        tx->callback = cadence_nand_slave_dma_transfer_finished;
1848        tx->callback_param = &finished;
1849
1850        cookie = dmaengine_submit(tx);
1851        if (dma_submit_error(cookie)) {
1852                dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
1853                goto err_unmap;
1854        }
1855
1856        dma_async_issue_pending(cdns_ctrl->dmac);
1857        wait_for_completion(&finished);
1858
1859        dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
1860
1861        return 0;
1862
1863err_unmap:
1864        dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
1865
1866err:
1867        dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
1868
1869        return -EIO;
1870}
1871
1872static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
1873                                 u8 *buf, int len)
1874{
1875        u8 thread_nr = 0;
1876        u32 sdma_size;
1877        int status;
1878
1879        /* Wait until the slave DMA interface is ready for data transfer. */
1880        status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
1881        if (status)
1882                return status;
1883
1884        if (!cdns_ctrl->caps1->has_dma) {
1885                int len_in_words = len >> 2;
1886
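                    /*
                     * PIO path: words first, tail bytes after. As a
                     * hypothetical example, len = 10 gives len_in_words = 2,
                     * so 8 bytes are read directly into buf; the remaining
                     * words of the SDMA burst land in the bounce buffer and
                     * the 2 tail bytes are copied out of it below.
                     */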
1887                /* Read the word-aligned part of the data directly into buf. */
1888                ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
1889                if (sdma_size > len) {
1890                        /* Read the remaining words from the slave DMA interface, if any. */
1891                        ioread32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
1892                                     sdma_size / 4 - len_in_words);
1893                        /* Copy the unaligned tail of the data. */
1894                        memcpy(buf + (len_in_words << 2), cdns_ctrl->buf,
1895                               len - (len_in_words << 2));
1896                }
1897                return 0;
1898        }
1899
1900        if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
1901                status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
1902                                                         cdns_ctrl->io.dma,
1903                                                         len, DMA_FROM_DEVICE);
1904                if (status == 0)
1905                        return 0;
1906
1907                dev_warn(cdns_ctrl->dev,
1908                         "Slave DMA transfer failed, retrying with the bounce buffer\n");
1909        }
1910
1911        /* If the DMA transfer is not possible or has failed, use the bounce buffer. */
1912        status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
1913                                                 cdns_ctrl->io.dma,
1914                                                 sdma_size, DMA_FROM_DEVICE);
1915
1916        if (status) {
1917                dev_err(cdns_ctrl->dev, "Slave DMA transfer failed\n");
1918                return status;
1919        }
1920
1921        memcpy(buf, cdns_ctrl->buf, len);
1922
1923        return 0;
1924}
1925
1926static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
1927                                  const u8 *buf, int len)
1928{
1929        u8 thread_nr = 0;
1930        u32 sdma_size;
1931        int status;
1932
1933        /* Wait until the slave DMA interface is ready for data transfer. */
1934        status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
1935        if (status)
1936                return status;
1937
1938        if (!cdns_ctrl->caps1->has_dma) {
1939                int len_in_words = len >> 2;
1940
1941                iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
1942                if (sdma_size > len) {
1943                        /* Copy the unaligned tail of the data into the bounce buffer. */
1944                        memcpy(cdns_ctrl->buf, buf + (len_in_words << 2),
1945                               len - (len_in_words << 2));
1946                        /* Write all remaining words expected by the NAND controller. */
1947                        iowrite32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
1948                                      sdma_size / 4 - len_in_words);
1949                }
1950
1951                return 0;
1952        }
1953
1954        if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
1955                status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
1956                                                         cdns_ctrl->io.dma,
1957                                                         len, DMA_TO_DEVICE);
1958                if (status == 0)
1959                        return 0;
1960
1961                dev_warn(cdns_ctrl->dev,
1962                         "Slave DMA transfer failed, retrying with the bounce buffer\n");
1963        }
1964
1965        /* If the DMA transfer is not possible or has failed, use the bounce buffer. */
1966        memcpy(cdns_ctrl->buf, buf, len);
1967
1968        status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
1969                                                 cdns_ctrl->io.dma,
1970                                                 sdma_size, DMA_TO_DEVICE);
1971
1972        if (status)
1973                dev_err(cdns_ctrl->dev, "Slave DMA transfer failed\n");
1974
1975        return status;
1976}
1977
1978static int cadence_nand_force_byte_access(struct nand_chip *chip,
1979                                          bool force_8bit)
1980{
1981        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1982        int status;
1983
1984        /*
1985         * Callers of this function do not verify if the NAND is using a 16-bit
1986         * or an 8-bit bus for normal operations, so we need to take care of
1987         * that here by leaving the configuration unchanged if the NAND does
1988         * not have the NAND_BUSWIDTH_16 flag set.
1989         */
1990        if (!(chip->options & NAND_BUSWIDTH_16))
1991                return 0;
1992
1993        status = cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
1994
1995        return status;
1996}
1997
1998static int cadence_nand_cmd_opcode(struct nand_chip *chip,
1999                                   const struct nand_subop *subop)
2000{
2001        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2002        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2003        const struct nand_op_instr *instr;
2004        unsigned int op_id = 0;
2005        u64 mini_ctrl_cmd = 0;
2006        int ret;
2007
2008        instr = &subop->instrs[op_id];
2009
2010        if (instr->delay_ns > 0)
2011                mini_ctrl_cmd |= GCMD_LAY_TWB;
2012
2013        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2014                                    GCMD_LAY_INSTR_CMD);
2015        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
2016                                    instr->ctx.cmd.opcode);
2017
2018        ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2019                                            cdns_chip->cs[chip->cur_cs],
2020                                            mini_ctrl_cmd);
2021        if (ret)
2022                dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
2023                        instr->ctx.cmd.opcode);
2024
2025        return ret;
2026}
2027
2028static int cadence_nand_cmd_address(struct nand_chip *chip,
2029                                    const struct nand_subop *subop)
2030{
2031        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2032        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2033        const struct nand_op_instr *instr;
2034        unsigned int op_id = 0;
2035        u64 mini_ctrl_cmd = 0;
2036        unsigned int offset, naddrs;
2037        u64 address = 0;
2038        const u8 *addrs;
2039        int ret;
2040        int i;
2041
2042        instr = &subop->instrs[op_id];
2043
2044        if (instr->delay_ns > 0)
2045                mini_ctrl_cmd |= GCMD_LAY_TWB;
2046
2047        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2048                                    GCMD_LAY_INSTR_ADDR);
2049
2050        offset = nand_subop_get_addr_start_off(subop, op_id);
2051        naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
2052        addrs = &instr->ctx.addr.addrs[offset];
2053
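            /*
             * Pack the address cycles LSB-first; e.g. hypothetical cycles
             * {0x04, 0x00, 0x02} yield address = 0x020004.
             */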
2054        for (i = 0; i < naddrs; i++)
2055                address |= (u64)addrs[i] << (8 * i);
2056
2057        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
2058                                    address);
2059        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
2060                                    naddrs - 1);
2061
2062        ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2063                                            cdns_chip->cs[chip->cur_cs],
2064                                            mini_ctrl_cmd);
2065        if (ret)
2066                dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
2067
2068        return ret;
2069}
2070
2071static int cadence_nand_cmd_erase(struct nand_chip *chip,
2072                                  const struct nand_subop *subop)
2073{
2074        unsigned int op_id;
2075
2076        if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
2077                int i;
2078                const struct nand_op_instr *instr = NULL;
2079                unsigned int offset, naddrs;
2080                const u8 *addrs;
2081                u32 page = 0;
2082
2083                instr = &subop->instrs[1];
2084                offset = nand_subop_get_addr_start_off(subop, 1);
2085                naddrs = nand_subop_get_num_addr_cyc(subop, 1);
2086                addrs = &instr->ctx.addr.addrs[offset];
2087
2088                for (i = 0; i < naddrs; i++)
2089                        page |= (u32)addrs[i] << (8 * i);
2090
2091                return cadence_nand_erase(chip, page);
2092        }
2093
2094        /*
2095         * If it is not an erase operation, handle the operation
2096         * by calling the exec_op function.
2097         */
2098        for (op_id = 0; op_id < subop->ninstrs; op_id++) {
2099                int ret;
2100                const struct nand_operation nand_op = {
2101                        .cs = chip->cur_cs,
2102                        .instrs = &subop->instrs[op_id],
2103                        .ninstrs = 1};
2104                ret = chip->controller->ops->exec_op(chip, &nand_op, false);
2105                if (ret)
2106                        return ret;
2107        }
2108
2109        return 0;
2110}
2111
2112static int cadence_nand_cmd_data(struct nand_chip *chip,
2113                                 const struct nand_subop *subop)
2114{
2115        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2116        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2117        const struct nand_op_instr *instr;
2118        unsigned int offset, op_id = 0;
2119        u64 mini_ctrl_cmd = 0;
2120        int len = 0;
2121        int ret;
2122
2123        instr = &subop->instrs[op_id];
2124
2125        if (instr->delay_ns > 0)
2126                mini_ctrl_cmd |= GCMD_LAY_TWB;
2127
2128        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2129                                    GCMD_LAY_INSTR_DATA);
2130
2131        if (instr->type == NAND_OP_DATA_OUT_INSTR)
2132                mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
2133                                            GCMD_DIR_WRITE);
2134
2135        len = nand_subop_get_data_len(subop, op_id);
2136        offset = nand_subop_get_data_start_off(subop, op_id);
2137        mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
2138        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
2139        if (instr->ctx.data.force_8bit) {
2140                ret = cadence_nand_force_byte_access(chip, true);
2141                if (ret) {
2142                        dev_err(cdns_ctrl->dev,
2143                                "cannot change byte access, generic data cmd failed\n");
2144                        return ret;
2145                }
2146        }
2147
2148        ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2149                                            cdns_chip->cs[chip->cur_cs],
2150                                            mini_ctrl_cmd);
2151        if (ret) {
2152                dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
2153                return ret;
2154        }
2155
2156        if (instr->type == NAND_OP_DATA_IN_INSTR) {
2157                void *buf = instr->ctx.data.buf.in + offset;
2158
2159                ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
2160        } else {
2161                const void *buf = instr->ctx.data.buf.out + offset;
2162
2163                ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
2164        }
2165
2166        if (ret) {
2167                dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
2168                return ret;
2169        }
2170
2171        if (instr->ctx.data.force_8bit) {
2172                ret = cadence_nand_force_byte_access(chip, false);
2173                if (ret) {
2174                        dev_err(cdns_ctrl->dev,
2175                                "cannot change byte access, generic data cmd failed\n");
2176                }
2177        }
2178
2179        return ret;
2180}
2181
2182static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
2183                                    const struct nand_subop *subop)
2184{
2185        int status;
2186        unsigned int op_id = 0;
2187        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2188        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2189        const struct nand_op_instr *instr = &subop->instrs[op_id];
2190        u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
2191
2192        status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
2193                                             timeout_us,
2194                                             BIT(cdns_chip->cs[chip->cur_cs]),
2195                                             false);
2196        return status;
2197}
2198
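    /*
     * Pattern table for the generic command path. The erase pattern is
     * listed first so that ERASE1/ADDR/ERASE2/WAITRDY sequences are routed
     * to the CDMA-based cadence_nand_cmd_erase(); any other operation is
     * split into single opcode, address, data and wait-ready
     * sub-operations.
     */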
2199static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
2200        NAND_OP_PARSER_PATTERN(
2201                cadence_nand_cmd_erase,
2202                NAND_OP_PARSER_PAT_CMD_ELEM(false),
2203                NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
2204                NAND_OP_PARSER_PAT_CMD_ELEM(false),
2205                NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
2206        NAND_OP_PARSER_PATTERN(
2207                cadence_nand_cmd_opcode,
2208                NAND_OP_PARSER_PAT_CMD_ELEM(false)),
2209        NAND_OP_PARSER_PATTERN(
2210                cadence_nand_cmd_address,
2211                NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
2212        NAND_OP_PARSER_PATTERN(
2213                cadence_nand_cmd_data,
2214                NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
2215        NAND_OP_PARSER_PATTERN(
2216                cadence_nand_cmd_data,
2217                NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
2218        NAND_OP_PARSER_PATTERN(
2219                cadence_nand_cmd_waitrdy,
2220                NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
2221        );
2222
2223static int cadence_nand_exec_op(struct nand_chip *chip,
2224                                const struct nand_operation *op,
2225                                bool check_only)
2226{
2227        if (!check_only) {
2228                int status = cadence_nand_select_target(chip);
2229
2230                if (status)
2231                        return status;
2232        }
2233
2234        return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
2235                                      check_only);
2236}
2237
2238static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
2239                                       struct mtd_oob_region *oobregion)
2240{
2241        struct nand_chip *chip = mtd_to_nand(mtd);
2242        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2243
2244        if (section)
2245                return -ERANGE;
2246
2247        oobregion->offset = cdns_chip->bbm_len;
2248        oobregion->length = cdns_chip->avail_oob_size
2249                - cdns_chip->bbm_len;
2250
2251        return 0;
2252}
2253
2254static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2255                                      struct mtd_oob_region *oobregion)
2256{
2257        struct nand_chip *chip = mtd_to_nand(mtd);
2258        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2259
2260        if (section)
2261                return -ERANGE;
2262
2263        oobregion->offset = cdns_chip->avail_oob_size;
2264        oobregion->length = chip->ecc.total;
2265
2266        return 0;
2267}
2268
2269static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
2270        .free = cadence_nand_ooblayout_free,
2271        .ecc = cadence_nand_ooblayout_ecc,
2272};
2273
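    /*
     * Convert a timing constraint (in ps) into the cycle count encoded in
     * the timing registers; the code consistently programs count minus one
     * and uses (cnt + 1) * clk_period when checking. As a hypothetical
     * example, clk_period = 10000 ps and timing = 25000 ps return 2,
     * i.e. three full clock cycles.
     */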
2274static int calc_cycl(u32 timing, u32 clock)
2275{
2276        if (timing == 0 || clock == 0)
2277                return 0;
2278
2279        if ((timing % clock) > 0)
2280                return timing / clock;
2281        else
2282                return timing / clock - 1;
2283}
2284
2285/* Calculate max data valid window. */
2286static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
2287                                u32 board_delay_skew_min, u32 ext_mode)
2288{
2289        if (ext_mode == 0)
2290                clk_period /= 2;
2291
2292        return (trp_cnt + 1) * clk_period + trhoh_min +
2293                board_delay_skew_min;
2294}
2295
2296/* Calculate data valid window. */
2297static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
2298                            u32 trea_max, u32 ext_mode)
2299{
2300        if (ext_mode == 0)
2301                clk_period /= 2;
2302
2303        return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
2304}
2305
2306static int
2307cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
2308                             const struct nand_interface_config *conf)
2309{
2310        const struct nand_sdr_timings *sdr;
2311        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2312        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2313        struct cadence_nand_timings *t = &cdns_chip->timings;
2314        u32 reg;
2315        u32 board_delay = cdns_ctrl->board_delay;
2316        u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
2317                                            cdns_ctrl->nf_clk_rate);
2318        u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
2319        u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
2320        u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
2321        u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
2322        u32 if_skew = cdns_ctrl->caps1->if_skew;
2323        u32 board_delay_skew_min = board_delay - if_skew;
2324        u32 board_delay_skew_max = board_delay + if_skew;
2325        u32 dqs_sampl_res, phony_dqs_mod;
2326        u32 tdvw, tdvw_min, tdvw_max;
2327        u32 ext_rd_mode, ext_wr_mode;
2328        u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
2329        u32 sampling_point;
2330
2331        sdr = nand_get_sdr_timings(conf);
2332        if (IS_ERR(sdr))
2333                return PTR_ERR(sdr);
2334
2335        memset(t, 0, sizeof(*t));
2336        /* Sampling point calculation. */
2337
2338        if (cdns_ctrl->caps2.is_phy_type_dll)
2339                phony_dqs_mod = 2;
2340        else
2341                phony_dqs_mod = 1;
2342
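            /*
             * The sampling resolution is the granularity (in ps) at which
             * read data can be latched: with a DLL PHY the phony DQS can
             * toggle every half clock period, otherwise every full period.
             */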
2343        dqs_sampl_res = clk_period / phony_dqs_mod;
2344
2345        tdvw_min = sdr->tREA_max + board_delay_skew_max;
2346        /*
2347         * The idea of these calculations is to get the optimum values
2348         * for the tRP and tRH timings. If it is NOT possible to sample data
2349         * with the optimal tRP/tRH settings, the parameters are extended.
2350         * If clk_period is 50ns (the lowest value), this condition is met
2351         * for SDR timing modes 1, 2, 3, 4 and 5.
2352         * If clk_period is 20ns, the condition is met only for SDR timing
2353         * mode 5.
2354         */
2355        if (sdr->tRC_min <= clk_period &&
2356            sdr->tRP_min <= (clk_period / 2) &&
2357            sdr->tREH_min <= (clk_period / 2)) {
2358                /* Performance mode. */
2359                ext_rd_mode = 0;
2360                tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
2361                                 sdr->tREA_max, ext_rd_mode);
2362                tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
2363                                         board_delay_skew_min,
2364                                         ext_rd_mode);
2365                /*
2366                 * Check if the data valid window and a sampling point can
2367                 * be found, and that the point is not on the edge (i.e. we
2368                 * have hold margin). If not, extend the tRP timing.
2369                 */
2370                if (tdvw > 0) {
2371                        if (tdvw_max <= tdvw_min ||
2372                            (tdvw_max % dqs_sampl_res) == 0) {
2373                                /*
2374                                 * No valid sampling point, so the RE pulse
2375                                 * needs to be widened by half a clock cycle.
2376                                 */
2377                                ext_rd_mode = 1;
2378                        }
2379                } else {
2380                        /*
2381                         * There is no valid window in which to sample the
2382                         * data, so tRP needs to be widened. Very conservative
2383                         * calculations are performed here.
2384                         */
2385                        trp_cnt = (sdr->tREA_max + board_delay_skew_max
2386                                   + dqs_sampl_res) / clk_period;
2387                        ext_rd_mode = 1;
2388                }
2389
2390        } else {
2391                /* Extended read mode. */
2392                u32 trh;
2393
2394                ext_rd_mode = 1;
2395                trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
2396                trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
2397                if (sdr->tREH_min >= trh)
2398                        trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
2399                else
2400                        trh_cnt = calc_cycl(trh, clk_period);
2401
2402                tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
2403                                 sdr->tREA_max, ext_rd_mode);
2404                /*
2405                 * Check if the data valid window and a sampling point can
2406                 * be found; if the point is at the edge, check whether the
2407                 * previous one is valid. If not, extend the tRP timing.
2408                 */
2409                if (tdvw > 0) {
2410                        tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
2411                                                 sdr->tRHOH_min,
2412                                                 board_delay_skew_min,
2413                                                 ext_rd_mode);
2414
2415                        if ((((tdvw_max / dqs_sampl_res)
2416                              * dqs_sampl_res) <= tdvw_min) ||
2417                            (((tdvw_max % dqs_sampl_res) == 0) &&
2418                             (((tdvw_max / dqs_sampl_res - 1)
2419                               * dqs_sampl_res) <= tdvw_min))) {
2420                                /*
2421                                 * The data valid window is narrower than the
2422                                 * sampling resolution and does not hit any
2423                                 * sampling point. To make sure a sampling
2424                                 * point will be found, the RE low pulse
2425                                 * width is extended by one clock cycle.
2426                                 */
2427                                trp_cnt = trp_cnt + 1;
2428                        }
2429                } else {
2430                        /*
2431                         * There is no valid window in which to sample the
2432                         * data, so tRP needs to be widened. Very conservative
2433                         * calculations are performed here.
2434                         */
2435                        trp_cnt = (sdr->tREA_max + board_delay_skew_max
2436                                   + dqs_sampl_res) / clk_period;
2437                }
2438        }
2439
2440        tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
2441                                 sdr->tRHOH_min,
2442                                 board_delay_skew_min, ext_rd_mode);
2443
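            /*
             * Write-side timings: if tWC, tWP and tWH (plus the interface
             * skew) all fit within a clock period, WE can toggle at the
             * half-clock rate (ext_wr_mode = 0); otherwise the pulse widths
             * are stretched to whole clock cycles below.
             */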
2444        if (sdr->tWC_min <= clk_period &&
2445            (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
2446            (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
2447                ext_wr_mode = 0;
2448        } else {
2449                u32 twh;
2450
2451                ext_wr_mode = 1;
2452                twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
2453                if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
2454                        twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
2455                                            clk_period);
2456
2457                twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
2458                if (sdr->tWH_min >= twh)
2459                        twh = sdr->tWH_min;
2460
2461                twh_cnt = calc_cycl(twh + if_skew, clk_period);
2462        }
2463
2464        reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
2465        reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
2466        reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
2467        reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
2468        t->async_toggle_timings = reg;
2469        dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
2470
2471        tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
2472        tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
2473        twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
2474        trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
2475        reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
2476
2477        /*
2478         * If the timing exceeds the delay field in the timing register,
2479         * use the maximum value.
2480         */
2481        if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
2482                reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
2483        else
2484                reg |= TIMINGS0_TCCS;
2485
2486        reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
2487        reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
2488        t->timings0 = reg;
2489        dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
2490
2491        /* The following is related to a single signal, so skew is not needed. */
2492        trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
2493        trhz_cnt = trhz_cnt + 1;
2494        twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
2495        /*
2496         * Because of the two-stage syncflop the value must be increased:
2497         * the first term (3) is related to the synchronization, the
2498         * second (5) to the output interface delay.
2499         */
2500        twb_cnt = twb_cnt + 3 + 5;
2501        /*
2502         * The following is related to the WE edge of the random data input
2503         * sequence, so skew is not needed.
2504         */
2505        tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
2506        reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
2507        reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
2508        reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
2509        t->timings1 = reg;
2510        dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
2511
2512        tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
2513        if (tfeat_cnt < twb_cnt)
2514                tfeat_cnt = twb_cnt;
2515
2516        tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
2517        tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
2518
2519        reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
2520        reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
2521        reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
2522        t->timings2 = reg;
2523        dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
2524
2525        if (cdns_ctrl->caps2.is_phy_type_dll) {
2526                reg = DLL_PHY_CTRL_DLL_RST_N;
2527                if (ext_wr_mode)
2528                        reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
2529                if (ext_rd_mode)
2530                        reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
2531
2532                reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
2533                reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
2534                t->dll_phy_ctrl = reg;
2535                dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
2536        }
2537
2538        /* Sampling point calculation. */
2539        if ((tdvw_max % dqs_sampl_res) > 0)
2540                sampling_point = tdvw_max / dqs_sampl_res;
2541        else
2542                sampling_point = (tdvw_max / dqs_sampl_res - 1);
2543
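            /*
             * As a hypothetical example, tdvw_max = 65000 ps with
             * dqs_sampl_res = 25000 ps gives sampling_point = 2, which is
             * then checked against tdvw_min for hold margin.
             */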
2544        if (sampling_point * dqs_sampl_res > tdvw_min) {
2545                dll_phy_dqs_timing =
2546                        FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
2547                dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
2548                phony_dqs_timing = sampling_point / phony_dqs_mod;
2549
2550                if ((sampling_point % 2) > 0) {
2551                        dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
2552                        if ((tdvw_max % dqs_sampl_res) == 0)
2553                                /*
2554                                 * Calculation for sampling point at the edge
2555                                 * of data and being odd number.
2556                                 */
2557                                phony_dqs_timing = (tdvw_max / dqs_sampl_res)
2558                                        / phony_dqs_mod - 1;
2559
2560                        if (!cdns_ctrl->caps2.is_phy_type_dll)
2561                                phony_dqs_timing--;
2562
2563                } else {
2564                        phony_dqs_timing--;
2565                }
2566                rd_del_sel = phony_dqs_timing + 3;
2567        } else {
2568                dev_warn(cdns_ctrl->dev,
2569                         "ERROR: cannot find a valid sampling point\n");
2570        }
2571
2572        reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
2573        if (cdns_ctrl->caps2.is_phy_type_dll)
2574                reg  |= PHY_CTRL_SDR_DQS;
2575        t->phy_ctrl = reg;
2576        dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
2577
2578        if (cdns_ctrl->caps2.is_phy_type_dll) {
2579                dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
2580                dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
2581                dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
2582                        dll_phy_dqs_timing);
2583                t->phy_dqs_timing = dll_phy_dqs_timing;
2584
2585                reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
2586                dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
2587                        reg);
2588                t->phy_gate_lpbk_ctrl = reg;
2589
2590                dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
2591                        PHY_DLL_MASTER_CTRL_BYPASS_MODE);
2592                dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
2593        }
2594
2595        return 0;
2596}
2597
2598static int cadence_nand_attach_chip(struct nand_chip *chip)
2599{
2600        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2601        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2602        u32 ecc_size;
2603        struct mtd_info *mtd = nand_to_mtd(chip);
2604        int ret;
2605
2606        if (chip->options & NAND_BUSWIDTH_16) {
2607                ret = cadence_nand_set_access_width16(cdns_ctrl, true);
2608                if (ret)
2609                        return ret;
2610        }
2611
2612        chip->bbt_options |= NAND_BBT_USE_FLASH;
2613        chip->bbt_options |= NAND_BBT_NO_OOB;
2614        chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2615
2616        chip->options |= NAND_NO_SUBPAGE_WRITE;
2617
2618        cdns_chip->bbm_offs = chip->badblockpos;
2619        cdns_chip->bbm_offs &= ~0x01;
2620        /* This value should be an even number. */
2621        cdns_chip->bbm_len = 2;
2622
2623        ret = nand_ecc_choose_conf(chip,
2624                                   &cdns_ctrl->ecc_caps,
2625                                   mtd->oobsize - cdns_chip->bbm_len);
2626        if (ret) {
2627                dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
2628                return ret;
2629        }
2630
2631        dev_dbg(cdns_ctrl->dev,
2632                "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
2633                chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
2634
2635        /* Error correction configuration. */
2636        cdns_chip->sector_size = chip->ecc.size;
2637        cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
2638        ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
2639
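            /*
             * OOB budget: whatever the ECC bytes do not consume is available
             * as free OOB, capped by the controller's BCH metadata size. As
             * a hypothetical example, a 224-byte OOB with 8 sectors of
             * 26 ECC bytes leaves 224 - 208 = 16 bytes before the cap.
             */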
2640        cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
2641
2642        if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
2643                cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
2644
2645        if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
2646            > mtd->oobsize)
2647                cdns_chip->avail_oob_size -= 4;
2648
2649        ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
2650        if (ret < 0)
2651                return -EINVAL;
2652
2653        cdns_chip->corr_str_idx = (u8)ret;
2654
2655        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
2656                                        1000000,
2657                                        CTRL_STATUS_CTRL_BUSY, true))
2658                return -ETIMEDOUT;
2659
2660        cadence_nand_set_ecc_strength(cdns_ctrl,
2661                                      cdns_chip->corr_str_idx);
2662
2663        cadence_nand_set_erase_detection(cdns_ctrl, true,
2664                                         chip->ecc.strength);
2665
2666        /* Override the default read operations. */
2667        chip->ecc.read_page = cadence_nand_read_page;
2668        chip->ecc.read_page_raw = cadence_nand_read_page_raw;
2669        chip->ecc.write_page = cadence_nand_write_page;
2670        chip->ecc.write_page_raw = cadence_nand_write_page_raw;
2671        chip->ecc.read_oob = cadence_nand_read_oob;
2672        chip->ecc.write_oob = cadence_nand_write_oob;
2673        chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
2674        chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
2675
2676        if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
2677                cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
2678
2679        /* Is 32-bit DMA supported? */
2680        ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
2681        if (ret) {
2682                dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
2683                return ret;
2684        }
2685
2686        mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
2687
2688        return 0;
2689}
2690
2691static const struct nand_controller_ops cadence_nand_controller_ops = {
2692        .attach_chip = cadence_nand_attach_chip,
2693        .exec_op = cadence_nand_exec_op,
2694        .setup_interface = cadence_nand_setup_interface,
2695};
2696
static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
                                  struct device_node *np)
{
        struct cdns_nand_chip *cdns_chip;
        struct mtd_info *mtd;
        struct nand_chip *chip;
        int nsels, ret, i;
        u32 cs;

        nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
        if (nsels <= 0) {
                dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
                return -EINVAL;
        }

        /* Allocate the nand chip structure. */
        cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
                                 (nsels * sizeof(u8)),
                                 GFP_KERNEL);
        if (!cdns_chip) {
                dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
                return -ENOMEM;
        }

        cdns_chip->nsels = nsels;

        for (i = 0; i < nsels; i++) {
                /* Retrieve CS id. */
                ret = of_property_read_u32_index(np, "reg", i, &cs);
                if (ret) {
                        dev_err(cdns_ctrl->dev,
                                "could not retrieve reg property: %d\n",
                                ret);
                        return ret;
                }

                if (cs >= cdns_ctrl->caps2.max_banks) {
                        dev_err(cdns_ctrl->dev,
                                "invalid reg value: %u (max CS = %d)\n",
                                cs, cdns_ctrl->caps2.max_banks);
                        return -EINVAL;
                }

                if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
                        dev_err(cdns_ctrl->dev,
                                "CS %u already assigned\n", cs);
                        return -EINVAL;
                }

                cdns_chip->cs[i] = cs;
        }

        chip = &cdns_chip->chip;
        chip->controller = &cdns_ctrl->controller;
        nand_set_flash_node(chip, np);

        mtd = nand_to_mtd(chip);
        mtd->dev.parent = cdns_ctrl->dev;

        /*
         * Default to HW ECC engine mode. If the nand-ecc-mode property is given
         * in the DT node, this entry will be overwritten in nand_scan_ident().
         */
        chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

        ret = nand_scan(chip, cdns_chip->nsels);
        if (ret) {
                dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
                return ret;
        }

        ret = mtd_device_register(mtd, NULL, 0);
        if (ret) {
                dev_err(cdns_ctrl->dev,
                        "failed to register mtd device: %d\n", ret);
                nand_cleanup(chip);
                return ret;
        }

        list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);

        return 0;
}

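/* Unregister and free every chip that was attached to this controller. */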
static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
{
        struct cdns_nand_chip *entry, *temp;
        struct nand_chip *chip;
        int ret;

        list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
                chip = &entry->chip;
                ret = mtd_device_unregister(nand_to_mtd(chip));
                WARN_ON(ret);
                nand_cleanup(chip);
                list_del(&entry->node);
        }
}

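/*
 * Walk the controller's DT node and initialize one nand_chip per child.
 * On failure the child-node reference taken by for_each_child_of_node()
 * is dropped and the chips registered so far are torn down again.
 */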
static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
{
        struct device_node *np = cdns_ctrl->dev->of_node;
        struct device_node *nand_np;
        int max_cs = cdns_ctrl->caps2.max_banks;
        int nchips, ret;

        nchips = of_get_child_count(np);

        if (nchips > max_cs) {
                dev_err(cdns_ctrl->dev,
                        "too many NAND chips: %d (max = %d CS)\n",
                        nchips, max_cs);
                return -EINVAL;
        }

        for_each_child_of_node(np, nand_np) {
                ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
                if (ret) {
                        of_node_put(nand_np);
                        cadence_nand_chips_cleanup(cdns_ctrl);
                        return ret;
                }
        }

        return 0;
}

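/*
 * Quiesce the interrupt line: only the global enable bit is written
 * back to INTR_ENABLE, which clears every per-event enable bit.
 */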
static void
cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
{
        /* Disable interrupts. */
        writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
}

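/*
 * One-time controller bring-up: allocate the CDMA descriptor and a
 * bounce buffer, request the IRQ, initialize the hardware, optionally
 * grab a memcpy-capable DMA channel, then scan and register the chips.
 */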
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
        dma_cap_mask_t mask;
        int ret;

        cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
                                                  sizeof(*cdns_ctrl->cdma_desc),
                                                  &cdns_ctrl->dma_cdma_desc,
                                                  GFP_KERNEL);
        if (!cdns_ctrl->cdma_desc)
                return -ENOMEM;

        cdns_ctrl->buf_size = SZ_16K;
        cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
        if (!cdns_ctrl->buf) {
                ret = -ENOMEM;
                goto free_buf_desc;
        }

        /*
         * The IRQ is shared, so the handler may run as soon as it is
         * requested; make sure the lock and completion it uses are
         * initialized first.
         */
        spin_lock_init(&cdns_ctrl->irq_lock);
        init_completion(&cdns_ctrl->complete);

        ret = devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
                               IRQF_SHARED, "cadence-nand-controller",
                               cdns_ctrl);
        if (ret) {
                dev_err(cdns_ctrl->dev, "unable to request IRQ %d: %d\n",
                        cdns_ctrl->irq, ret);
                goto free_buf;
        }

        ret = cadence_nand_hw_init(cdns_ctrl);
        if (ret)
                goto disable_irq;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        if (cdns_ctrl->caps1->has_dma) {
                cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
                if (!cdns_ctrl->dmac) {
                        dev_err(cdns_ctrl->dev,
                                "Unable to get a DMA channel\n");
                        ret = -EBUSY;
                        goto disable_irq;
                }
        }

        nand_controller_init(&cdns_ctrl->controller);
        INIT_LIST_HEAD(&cdns_ctrl->chips);

        cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
        cdns_ctrl->curr_corr_str_idx = 0xFF;

        ret = cadence_nand_chips_init(cdns_ctrl);
        if (ret) {
                dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
                        ret);
                goto dma_release_chnl;
        }

        /*
         * buf_size may have been updated while attaching the chips;
         * re-allocate the bounce buffer, zeroed, at its final size.
         */
        kfree(cdns_ctrl->buf);
        cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
        if (!cdns_ctrl->buf) {
                ret = -ENOMEM;
                goto dma_release_chnl;
        }

        return 0;

dma_release_chnl:
        if (cdns_ctrl->dmac)
                dma_release_channel(cdns_ctrl->dmac);

disable_irq:
        cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);

free_buf:
        kfree(cdns_ctrl->buf);

free_buf_desc:
        dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
                          cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);

        return ret;
}

/* Common controller teardown, called from the platform remove path. */
static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
{
        cadence_nand_chips_cleanup(cdns_ctrl);
        cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
        kfree(cdns_ctrl->buf);
        dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
                          cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);

        if (cdns_ctrl->dmac)
                dma_release_channel(cdns_ctrl->dmac);
}

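/* Per-platform-device wrapper: controller state plus its interface clock. */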
struct cadence_nand_dt {
        struct cdns_nand_ctrl cdns_ctrl;
        struct clk *clk;
};

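/* Default capabilities: no extra interface skew, master DMA present. */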
static const struct cadence_nand_dt_devdata cadence_nand_default = {
        .if_skew = 0,
        .has_dma = 1,
};

static const struct of_device_id cadence_nand_dt_ids[] = {
        {
                .compatible = "cdns,hp-nfc",
                .data = &cadence_nand_default
        }, {}
};

MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);

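/*
 * Platform probe: map the register block (MEM resource 0) and the
 * slave-DMA window (MEM resource 1), read the "nf_clk" clock rate and
 * the optional "cdns,board-delay-ps" property, then run the common
 * controller init. As a sketch only (addresses and interrupt number
 * are hypothetical), a matching DT node could look like:
 *
 *      nand-controller@10b80000 {
 *              compatible = "cdns,hp-nfc";
 *              reg = <0x10b80000 0x10000>, <0x10840000 0x1000>;
 *              interrupts = <97>;
 *              clocks = <&nf_clk>;
 *              clock-names = "nf_clk";
 *              cdns,board-delay-ps = <4830>;
 *      };
 */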
static int cadence_nand_dt_probe(struct platform_device *ofdev)
{
        struct resource *res;
        struct cadence_nand_dt *dt;
        struct cdns_nand_ctrl *cdns_ctrl;
        int ret;
        const struct of_device_id *of_id;
        const struct cadence_nand_dt_devdata *devdata;
        u32 val;

        of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
        if (of_id) {
                ofdev->id_entry = of_id->data;
                devdata = of_id->data;
        } else {
                pr_err("Failed to find the right device id.\n");
                return -ENODEV;
        }

        dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
        if (!dt)
                return -ENOMEM;

        cdns_ctrl = &dt->cdns_ctrl;
        cdns_ctrl->caps1 = devdata;

        cdns_ctrl->dev = &ofdev->dev;
        cdns_ctrl->irq = platform_get_irq(ofdev, 0);
        if (cdns_ctrl->irq < 0)
                return cdns_ctrl->irq;

        dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);

        cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
        if (IS_ERR(cdns_ctrl->reg))
                return PTR_ERR(cdns_ctrl->reg);

        /*
         * Map the slave-DMA window before touching res: this way a
         * missing second MEM resource is reported as an error instead
         * of dereferencing a NULL pointer.
         */
        res = platform_get_resource(ofdev, IORESOURCE_MEM, 1);
        cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res);
        if (IS_ERR(cdns_ctrl->io.virt))
                return PTR_ERR(cdns_ctrl->io.virt);

        cdns_ctrl->io.dma = res->start;

        dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
        if (IS_ERR(dt->clk))
                return PTR_ERR(dt->clk);

        cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);

        ret = of_property_read_u32(ofdev->dev.of_node,
                                   "cdns,board-delay-ps", &val);
        if (ret) {
                val = 4830;
                dev_info(cdns_ctrl->dev,
                         "missing cdns,board-delay-ps property, using default %u ps\n",
                         val);
        }
        cdns_ctrl->board_delay = val;

        ret = cadence_nand_init(cdns_ctrl);
        if (ret)
                return ret;

        platform_set_drvdata(ofdev, dt);
        return 0;
}

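/* Platform removal: undo everything cadence_nand_dt_probe() set up. */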
static int cadence_nand_dt_remove(struct platform_device *ofdev)
{
        struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);

        cadence_nand_remove(&dt->cdns_ctrl);

        return 0;
}

static struct platform_driver cadence_nand_dt_driver = {
        .probe          = cadence_nand_dt_probe,
        .remove         = cadence_nand_dt_remove,
        .driver         = {
                .name   = "cadence-nand-controller",
                .of_match_table = cadence_nand_dt_ids,
        },
};

module_platform_driver(cadence_nand_dt_driver);

MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");