linux/drivers/mtd/nand/raw/qcom_nandc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>

/* NANDc reg offsets */
#define NAND_FLASH_CMD                  0x00
#define NAND_ADDR0                      0x04
#define NAND_ADDR1                      0x08
#define NAND_FLASH_CHIP_SELECT          0x0c
#define NAND_EXEC_CMD                   0x10
#define NAND_FLASH_STATUS               0x14
#define NAND_BUFFER_STATUS              0x18
#define NAND_DEV0_CFG0                  0x20
#define NAND_DEV0_CFG1                  0x24
#define NAND_DEV0_ECC_CFG               0x28
#define NAND_DEV1_ECC_CFG               0x2c
#define NAND_DEV1_CFG0                  0x30
#define NAND_DEV1_CFG1                  0x34
#define NAND_READ_ID                    0x40
#define NAND_READ_STATUS                0x44
#define NAND_DEV_CMD0                   0xa0
#define NAND_DEV_CMD1                   0xa4
#define NAND_DEV_CMD2                   0xa8
#define NAND_DEV_CMD_VLD                0xac
#define SFLASHC_BURST_CFG               0xe0
#define NAND_ERASED_CW_DETECT_CFG       0xe8
#define NAND_ERASED_CW_DETECT_STATUS    0xec
#define NAND_EBI2_ECC_BUF_CFG           0xf0
#define FLASH_BUF_ACC                   0x100

#define NAND_CTRL                       0xf00
#define NAND_VERSION                    0xf08
#define NAND_READ_LOCATION_0            0xf20
#define NAND_READ_LOCATION_1            0xf24
#define NAND_READ_LOCATION_2            0xf28
#define NAND_READ_LOCATION_3            0xf2c

/* dummy register offsets, used by write_reg_dma */
#define NAND_DEV_CMD1_RESTORE           0xdead
#define NAND_DEV_CMD_VLD_RESTORE        0xbeef

/* NAND_FLASH_CMD bits */
#define PAGE_ACC                        BIT(4)
#define LAST_PAGE                       BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define NAND_DEV_SEL                    0
#define DM_EN                           BIT(2)

/* NAND_FLASH_STATUS bits */
#define FS_OP_ERR                       BIT(4)
#define FS_READY_BSY_N                  BIT(5)
#define FS_MPU_ERR                      BIT(8)
#define FS_DEVICE_STS_ERR               BIT(16)
#define FS_DEVICE_WP                    BIT(23)

/* NAND_BUFFER_STATUS bits */
#define BS_UNCORRECTABLE_BIT            BIT(8)
#define BS_CORRECTABLE_ERR_MSK          0x1f

/* NAND_DEVn_CFG0 bits */
#define DISABLE_STATUS_AFTER_WRITE      4
#define CW_PER_PAGE                     6
#define UD_SIZE_BYTES                   9
#define ECC_PARITY_SIZE_BYTES_RS        19
#define SPARE_SIZE_BYTES                23
#define NUM_ADDR_CYCLES                 27
#define STATUS_BFR_READ                 30
#define SET_RD_MODE_AFTER_STATUS        31

/* NAND_DEVn_CFG1 bits */
#define DEV0_CFG1_ECC_DISABLE           0
#define WIDE_FLASH                      1
#define NAND_RECOVERY_CYCLES            2
#define CS_ACTIVE_BSY                   5
#define BAD_BLOCK_BYTE_NUM              6
#define BAD_BLOCK_IN_SPARE_AREA         16
#define WR_RD_BSY_GAP                   17
#define ENABLE_BCH_ECC                  27

/* NAND_DEV0_ECC_CFG bits */
#define ECC_CFG_ECC_DISABLE             0
#define ECC_SW_RESET                    1
#define ECC_MODE                        4
#define ECC_PARITY_SIZE_BYTES_BCH       8
#define ECC_NUM_DATA_BYTES              16
#define ECC_FORCE_CLK_OPEN              30

/* NAND_DEV_CMD1 bits */
#define READ_ADDR                       0

/* NAND_DEV_CMD_VLD bits */
#define READ_START_VLD                  BIT(0)
#define READ_STOP_VLD                   BIT(1)
#define WRITE_START_VLD                 BIT(2)
#define ERASE_START_VLD                 BIT(3)
#define SEQ_READ_START_VLD              BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define NUM_STEPS                       0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define ERASED_CW_ECC_MASK              1
#define AUTO_DETECT_RES                 0
#define MASK_ECC                        (1 << ERASED_CW_ECC_MASK)
#define RESET_ERASED_DET                (1 << AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define PAGE_ALL_ERASED                 BIT(7)
#define CODEWORD_ALL_ERASED             BIT(6)
#define PAGE_ERASED                     BIT(5)
#define CODEWORD_ERASED                 BIT(4)
#define ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
#define ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)

/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET            0
#define READ_LOCATION_SIZE              16
#define READ_LOCATION_LAST              31

/* Version Mask */
#define NAND_VERSION_MAJOR_MASK         0xf0000000
#define NAND_VERSION_MAJOR_SHIFT        28
#define NAND_VERSION_MINOR_MASK         0x0fff0000
#define NAND_VERSION_MINOR_SHIFT        16

/* NAND OP_CMDs */
#define OP_PAGE_READ                    0x2
#define OP_PAGE_READ_WITH_ECC           0x3
#define OP_PAGE_READ_WITH_ECC_SPARE     0x4
#define OP_PROGRAM_PAGE                 0x6
#define OP_PAGE_PROGRAM_WITH_ECC        0x7
#define OP_PROGRAM_PAGE_SPARE           0x9
#define OP_BLOCK_ERASE                  0xa
#define OP_FETCH_ID                     0xb
#define OP_RESET_DEVICE                 0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL            (READ_START_VLD | WRITE_START_VLD | \
                                         ERASE_START_VLD | SEQ_READ_START_VLD)

/* NAND_CTRL bits */
#define BAM_MODE_EN                     BIT(0)

/*
 * the NAND controller performs reads/writes with ECC in 512 byte chunks.
 * the driver calls the chunks 'step' or 'codeword' interchangeably
 */
#define NANDC_STEP_SIZE                 512

/*
 * the largest page size we support is 8K; this will have 16 steps/codewords
 * of 512 bytes each
 */
#define MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define MAX_REG_RD                      (3 * MAX_NUM_STEPS)

/* ECC modes supported by the controller */
#define ECC_NONE        BIT(0)
#define ECC_RS_4BIT     BIT(1)
#define ECC_BCH_4BIT    BIT(2)
#define ECC_BCH_8BIT    BIT(3)

#define nandc_set_read_loc(nandc, reg, offset, size, is_last)   \
nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,                  \
              ((offset) << READ_LOCATION_OFFSET) |              \
              ((size) << READ_LOCATION_SIZE) |                  \
              ((is_last) << READ_LOCATION_LAST))
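
/*
 * Illustrative usage (not part of the original source): an ECC page read
 * programs codeword 0 as
 *
 *      nandc_set_read_loc(nandc, 0, 0, 516, 1);
 *
 * i.e. NAND_READ_LOCATION_0 = offset 0, size 516 bytes (a typical
 * host->cw_data value) with the LAST bit set; see update_rw_regs() below.
 */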

/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
        ((chip)->reg_read_dma + \
        ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))

#define QPIC_PER_CW_CMD_ELEMENTS        32
#define QPIC_PER_CW_CMD_SGL             32
#define QPIC_PER_CW_DATA_SGL            8

#define QPIC_NAND_COMPLETION_TIMEOUT    msecs_to_jiffies(2000)

/*
 * Flags used in DMA descriptor preparation helper functions
 * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
 */
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT                 BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD                    BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL               BIT(2)
/*
 * The erased codeword status register is programmed twice in a single
 * transfer, so this flag selects which value (set or clear) is written
 */
#define NAND_ERASED_CW_SET              BIT(4)

/*
 * This data type corresponds to the BAM transaction which will be used for all
 * NAND transfers.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for next sgl
 * @bam_ce_start - the index in bam_ce which marks the start position ce
 *                 for current sgl. It will be used for size calculation
 *                 for current sgl
 * @cmd_sgl_pos - current index in command sgl.
 * @cmd_sgl_start - start index in command sgl.
 * @tx_sgl_pos - current index in data sgl for tx.
 * @tx_sgl_start - start index in data sgl for tx.
 * @rx_sgl_pos - current index in data sgl for rx.
 * @rx_sgl_start - start index in data sgl for rx.
 * @wait_second_completion - wait for second DMA desc completion before making
 *                           the NAND transfer completion.
 * @txn_done - completion for NAND transfer.
 * @last_data_desc - last DMA desc in data channel (tx/rx).
 * @last_cmd_desc - last DMA desc in command channel.
 */
struct bam_transaction {
        struct bam_cmd_element *bam_ce;
        struct scatterlist *cmd_sgl;
        struct scatterlist *data_sgl;
        u32 bam_ce_pos;
        u32 bam_ce_start;
        u32 cmd_sgl_pos;
        u32 cmd_sgl_start;
        u32 tx_sgl_pos;
        u32 tx_sgl_start;
        u32 rx_sgl_pos;
        u32 rx_sgl_start;
        bool wait_second_completion;
        struct completion txn_done;
        struct dma_async_tx_descriptor *last_data_desc;
        struct dma_async_tx_descriptor *last_cmd_desc;
};

/*
 * This data type corresponds to the nand dma descriptor
 * @list - list for desc_info
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
 *            ADM
 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
 * @dma_desc - low level DMA engine descriptor
 */
struct desc_info {
        struct list_head node;

        enum dma_data_direction dir;
        union {
                struct scatterlist adm_sgl;
                struct {
                        struct scatterlist *bam_sgl;
                        int sgl_cnt;
                };
        };
        struct dma_async_tx_descriptor *dma_desc;
};

/*
 * holds the current register values that we want to write. acts as a contiguous
 * chunk of memory which we use to write the controller registers through DMA.
 */
struct nandc_regs {
        __le32 cmd;
        __le32 addr0;
        __le32 addr1;
        __le32 chip_sel;
        __le32 exec;

        __le32 cfg0;
        __le32 cfg1;
        __le32 ecc_bch_cfg;

        __le32 clrflashstatus;
        __le32 clrreadstatus;

        __le32 cmd1;
        __le32 vld;

        __le32 orig_cmd1;
        __le32 orig_vld;

        __le32 ecc_buf_cfg;
        __le32 read_location0;
        __le32 read_location1;
        __le32 read_location2;
        __le32 read_location3;

        __le32 erased_cw_detect_cfg_clr;
        __le32 erased_cw_detect_cfg_set;
};

/*
 * NAND controller data struct
 *
 * @controller:                 base controller structure
 * @host_list:                  list containing all the chips attached to the
 *                              controller
 * @dev:                        parent device
 * @base:                       MMIO base
 * @base_phys:                  physical base address of controller registers
 * @base_dma:                   dma base address of controller registers
 * @core_clk:                   controller clock
 * @aon_clk:                    another controller clock
 *
 * @chan:                       dma channel
 * @cmd_crci:                   ADM DMA CRCI for command flow control
 * @data_crci:                  ADM DMA CRCI for data flow control
 * @desc_list:                  DMA descriptor list (list of desc_infos)
 *
 * @data_buffer:                our local DMA buffer for page read/writes,
 *                              used when we can't use the buffer provided
 *                              by upper layers directly
 * @buf_size/count/start:       markers for chip->legacy.read_buf/write_buf
 *                              functions
 * @reg_read_buf:               local buffer for reading back registers via DMA
 * @reg_read_dma:               contains dma address for register read buffer
 * @reg_read_pos:               marker for data read in reg_read_buf
 *
 * @regs:                       a contiguous chunk of memory for DMA register
 *                              writes. contains the register values to be
 *                              written to controller
 * @cmd1/vld:                   some fixed controller register values
 * @props:                      properties of current NAND controller,
 *                              initialized via DT match data
 * @max_cwperpage:              maximum QPIC codewords required. calculated
 *                              from all connected NAND devices pagesize
 */
struct qcom_nand_controller {
        struct nand_controller controller;
        struct list_head host_list;

        struct device *dev;

        void __iomem *base;
        phys_addr_t base_phys;
        dma_addr_t base_dma;

        struct clk *core_clk;
        struct clk *aon_clk;

        union {
                /* will be used only by QPIC for BAM DMA */
                struct {
                        struct dma_chan *tx_chan;
                        struct dma_chan *rx_chan;
                        struct dma_chan *cmd_chan;
                };

                /* will be used only by EBI2 for ADM DMA */
                struct {
                        struct dma_chan *chan;
                        unsigned int cmd_crci;
                        unsigned int data_crci;
                };
        };

        struct list_head desc_list;
        struct bam_transaction *bam_txn;

        u8              *data_buffer;
        int             buf_size;
        int             buf_count;
        int             buf_start;
        unsigned int    max_cwperpage;

        __le32 *reg_read_buf;
        dma_addr_t reg_read_dma;
        int reg_read_pos;

        struct nandc_regs *regs;

        u32 cmd1, vld;
        const struct qcom_nandc_props *props;
};

/*
 * NAND chip structure
 *
 * @chip:                       base NAND chip structure
 * @node:                       list node to add itself to host_list in
 *                              qcom_nand_controller
 *
 * @cs:                         chip select value for this chip
 * @cw_size:                    the number of bytes in a single step/codeword
 *                              of a page, consisting of all data, ecc, spare
 *                              and reserved bytes
 * @cw_data:                    the number of bytes within a codeword protected
 *                              by ECC
 * @use_ecc:                    request the controller to use ECC for the
 *                              upcoming read/write
 * @bch_enabled:                flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:               ECC bytes used by controller hardware for this
 *                              chip
 * @status:                     value to be returned if NAND_CMD_STATUS command
 *                              is executed
 * @last_command:               keeps track of last command on this chip. used
 *                              for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..:     NANDc register configurations needed for
 *                              ecc/non-ecc mode for the current nand flash
 *                              device
 */
struct qcom_nand_host {
        struct nand_chip chip;
        struct list_head node;

        int cs;
        int cw_size;
        int cw_data;
        bool use_ecc;
        bool bch_enabled;
        int ecc_bytes_hw;
        int spare_bytes;
        int bbm_size;
        u8 status;
        int last_command;

        u32 cfg0, cfg1;
        u32 cfg0_raw, cfg1_raw;
        u32 ecc_buf_cfg;
        u32 ecc_bch_cfg;
        u32 clrflashstatus;
        u32 clrreadstatus;
};

/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @is_bam - whether NAND controller is using BAM
 * @is_qpic - whether NAND CTRL is part of qpic IP
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 */
struct qcom_nandc_props {
        u32 ecc_modes;
        bool is_bam;
        bool is_qpic;
        u32 dev_cmd_reg_start;
};

/* Frees the BAM transaction memory */
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
        struct bam_transaction *bam_txn = nandc->bam_txn;

        devm_kfree(nandc->dev, bam_txn);
}

/* Allocates and Initializes the BAM transaction */
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
        struct bam_transaction *bam_txn;
        size_t bam_txn_size;
        unsigned int num_cw = nandc->max_cwperpage;
        void *bam_txn_buf;

        bam_txn_size =
                sizeof(*bam_txn) + num_cw *
                ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
                (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
                (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

        bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
        if (!bam_txn_buf)
                return NULL;

        bam_txn = bam_txn_buf;
        bam_txn_buf += sizeof(*bam_txn);

        bam_txn->bam_ce = bam_txn_buf;
        bam_txn_buf +=
                sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

        bam_txn->cmd_sgl = bam_txn_buf;
        bam_txn_buf +=
                sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

        bam_txn->data_sgl = bam_txn_buf;

        init_completion(&bam_txn->txn_done);

        return bam_txn;
}
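
/*
 * Sketch of the resulting allocation (illustrative, assuming
 * max_cwperpage == 8):
 *
 *      [ struct bam_transaction                  ]
 *      [ bam_ce:   8 * 32 struct bam_cmd_element ]
 *      [ cmd_sgl:  8 * 32 struct scatterlist     ]
 *      [ data_sgl: 8 *  8 struct scatterlist     ]
 *
 * Everything lives in one devm allocation carved up by advancing
 * bam_txn_buf, so the single devm_kfree() in free_bam_transaction()
 * releases it all.
 */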

/* Clears the BAM transaction indexes */
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
        struct bam_transaction *bam_txn = nandc->bam_txn;

        if (!nandc->props->is_bam)
                return;

        bam_txn->bam_ce_pos = 0;
        bam_txn->bam_ce_start = 0;
        bam_txn->cmd_sgl_pos = 0;
        bam_txn->cmd_sgl_start = 0;
        bam_txn->tx_sgl_pos = 0;
        bam_txn->tx_sgl_start = 0;
        bam_txn->rx_sgl_pos = 0;
        bam_txn->rx_sgl_start = 0;
        bam_txn->last_data_desc = NULL;
        bam_txn->wait_second_completion = false;

        sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
                      QPIC_PER_CW_CMD_SGL);
        sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
                      QPIC_PER_CW_DATA_SGL);

        reinit_completion(&bam_txn->txn_done);
}

/* Callback for DMA descriptor completion */
static void qpic_bam_dma_done(void *data)
{
        struct bam_transaction *bam_txn = data;

        /*
         * In case of data transfer with NAND, 2 callbacks will be generated.
         * One for command channel and another one for data channel.
         * If current transaction has data descriptors
         * (i.e. wait_second_completion is true), then set this to false
         * and wait for second DMA descriptor completion.
         */
        if (bam_txn->wait_second_completion)
                bam_txn->wait_second_completion = false;
        else
                complete(&bam_txn->txn_done);
}

static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
        return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
        return container_of(chip->controller, struct qcom_nand_controller,
                            controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
        return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
                               u32 val)
{
        iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
                                          bool is_cpu)
{
        if (!nandc->props->is_bam)
                return;

        if (is_cpu)
                dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
                                        MAX_REG_RD *
                                        sizeof(*nandc->reg_read_buf),
                                        DMA_FROM_DEVICE);
        else
                dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
                                           MAX_REG_RD *
                                           sizeof(*nandc->reg_read_buf),
                                           DMA_FROM_DEVICE);
}

static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
        switch (offset) {
        case NAND_FLASH_CMD:
                return &regs->cmd;
        case NAND_ADDR0:
                return &regs->addr0;
        case NAND_ADDR1:
                return &regs->addr1;
        case NAND_FLASH_CHIP_SELECT:
                return &regs->chip_sel;
        case NAND_EXEC_CMD:
                return &regs->exec;
        case NAND_FLASH_STATUS:
                return &regs->clrflashstatus;
        case NAND_DEV0_CFG0:
                return &regs->cfg0;
        case NAND_DEV0_CFG1:
                return &regs->cfg1;
        case NAND_DEV0_ECC_CFG:
                return &regs->ecc_bch_cfg;
        case NAND_READ_STATUS:
                return &regs->clrreadstatus;
        case NAND_DEV_CMD1:
                return &regs->cmd1;
        case NAND_DEV_CMD1_RESTORE:
                return &regs->orig_cmd1;
        case NAND_DEV_CMD_VLD:
                return &regs->vld;
        case NAND_DEV_CMD_VLD_RESTORE:
                return &regs->orig_vld;
        case NAND_EBI2_ECC_BUF_CFG:
                return &regs->ecc_buf_cfg;
        case NAND_READ_LOCATION_0:
                return &regs->read_location0;
        case NAND_READ_LOCATION_1:
                return &regs->read_location1;
        case NAND_READ_LOCATION_2:
                return &regs->read_location2;
        case NAND_READ_LOCATION_3:
                return &regs->read_location3;
        default:
                return NULL;
        }
}

static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
                          u32 val)
{
        struct nandc_regs *regs = nandc->regs;
        __le32 *reg;

        reg = offset_to_nandc_reg(regs, offset);

        if (reg)
                *reg = cpu_to_le32(val);
}

/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        if (chip->options & NAND_BUSWIDTH_16)
                column >>= 1;

        nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
        nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}
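
/*
 * Worked example (illustrative): for page 0x12345 and column 0 on an
 * 8-bit wide chip, the shadow registers become
 *
 *      NAND_ADDR0 = page << 16 | column   ->  0x23450000 (32-bit truncation)
 *      NAND_ADDR1 = page >> 16 & 0xff     ->  0x00000001
 */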

/*
 * update_rw_regs:      set up read/write register values, these will be
 *                      written to the NAND controller registers via DMA
 *
 * @num_cw:             number of steps for the read/write operation
 * @read:               read or write operation
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
        u32 cmd, cfg0, cfg1, ecc_bch_cfg;

        if (read) {
                if (host->use_ecc)
                        cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
                else
                        cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
        } else {
                cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
        }

        if (host->use_ecc) {
                cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
                                (num_cw - 1) << CW_PER_PAGE;

                cfg1 = host->cfg1;
                ecc_bch_cfg = host->ecc_bch_cfg;
        } else {
                cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
                                (num_cw - 1) << CW_PER_PAGE;

                cfg1 = host->cfg1_raw;
                ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
        }

        nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
        nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
        nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
        nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
        nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
        nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
        nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

        if (read)
                nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
                                   host->cw_data : host->cw_size, 1);
}
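
/*
 * Example (illustrative): an ECC read of a 4K page (8 codewords of 512
 * data bytes) leaves the shadow registers roughly as
 *
 *      cmd  = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE
 *      cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) | (8 - 1) << CW_PER_PAGE
 *      NAND_READ_LOCATION_0 = { offset 0, size host->cw_data, last = 1 }
 *
 * Nothing reaches the hardware yet; write_reg_dma() later turns this
 * shadow into DMA descriptors.
 */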

/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to DMA engine.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
                                  struct dma_chan *chan,
                                  unsigned long flags)
{
        struct desc_info *desc;
        struct scatterlist *sgl;
        unsigned int sgl_cnt;
        int ret;
        struct bam_transaction *bam_txn = nandc->bam_txn;
        enum dma_transfer_direction dir_eng;
        struct dma_async_tx_descriptor *dma_desc;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        if (chan == nandc->cmd_chan) {
                sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
                sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
                bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
                dir_eng = DMA_MEM_TO_DEV;
                desc->dir = DMA_TO_DEVICE;
        } else if (chan == nandc->tx_chan) {
                sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
                sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
                bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
                dir_eng = DMA_MEM_TO_DEV;
                desc->dir = DMA_TO_DEVICE;
        } else {
                sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
                sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
                bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
                dir_eng = DMA_DEV_TO_MEM;
                desc->dir = DMA_FROM_DEVICE;
        }

        sg_mark_end(sgl + sgl_cnt - 1);
        ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
        if (ret == 0) {
                dev_err(nandc->dev, "failure in mapping desc\n");
                kfree(desc);
                return -ENOMEM;
        }

        desc->sgl_cnt = sgl_cnt;
        desc->bam_sgl = sgl;

        dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
                                           flags);

        if (!dma_desc) {
                dev_err(nandc->dev, "failure in prep desc\n");
                dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
                kfree(desc);
                return -EINVAL;
        }

        desc->dma_desc = dma_desc;

        /* update last data/command descriptor */
        if (chan == nandc->cmd_chan)
                bam_txn->last_cmd_desc = dma_desc;
        else
                bam_txn->last_data_desc = dma_desc;

        list_add_tail(&desc->node, &nandc->desc_list);

        return 0;
}

/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed as command elements, so this function uses the command
 * elements from the bam transaction's ce array and fills them with the
 * required data. A single SGL can contain multiple command elements, so
 * NAND_BAM_NEXT_SGL is used to start a separate SGL
 * after the current command element.
 */
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
                                 int reg_off, const void *vaddr,
                                 int size, unsigned int flags)
{
        int bam_ce_size;
        int i, ret;
        struct bam_cmd_element *bam_ce_buffer;
        struct bam_transaction *bam_txn = nandc->bam_txn;

        bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

        /* fill the command desc */
        for (i = 0; i < size; i++) {
                if (read)
                        bam_prep_ce(&bam_ce_buffer[i],
                                    nandc_reg_phys(nandc, reg_off + 4 * i),
                                    BAM_READ_COMMAND,
                                    reg_buf_dma_addr(nandc,
                                                     (__le32 *)vaddr + i));
                else
                        bam_prep_ce_le32(&bam_ce_buffer[i],
                                         nandc_reg_phys(nandc, reg_off + 4 * i),
                                         BAM_WRITE_COMMAND,
                                         *((__le32 *)vaddr + i));
        }

        bam_txn->bam_ce_pos += size;

        /* use the separate sgl after this command */
        if (flags & NAND_BAM_NEXT_SGL) {
                bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
                bam_ce_size = (bam_txn->bam_ce_pos -
                                bam_txn->bam_ce_start) *
                                sizeof(struct bam_cmd_element);
                sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
                           bam_ce_buffer, bam_ce_size);
                bam_txn->cmd_sgl_pos++;
                bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

                if (flags & NAND_BAM_NWD) {
                        ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
                                                     DMA_PREP_FENCE |
                                                     DMA_PREP_CMD);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
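
/*
 * Example flow (illustrative): several register writes can share one
 * command SGL entry. Calls without NAND_BAM_NEXT_SGL only append command
 * elements to bam_ce[]; a call with the flag set packs all accumulated
 * elements into the next cmd_sgl slot:
 *
 *      write_reg_dma(nandc, NAND_ADDR0, 2, 0);
 *      write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
 *      write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, NAND_BAM_NEXT_SGL);
 *
 * This mirrors what config_nand_page_write() below does.
 */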

/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
                                  const void *vaddr,
                                  int size, unsigned int flags)
{
        int ret;
        struct bam_transaction *bam_txn = nandc->bam_txn;

        if (read) {
                sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
                           vaddr, size);
                bam_txn->rx_sgl_pos++;
        } else {
                sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
                           vaddr, size);
                bam_txn->tx_sgl_pos++;

                /*
                 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
                 * is not set, form the DMA descriptor
                 */
                if (!(flags & NAND_BAM_NO_EOT)) {
                        ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
                                                     DMA_PREP_INTERRUPT);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
                             int reg_off, const void *vaddr, int size,
                             bool flow_control)
{
        struct desc_info *desc;
        struct dma_async_tx_descriptor *dma_desc;
        struct scatterlist *sgl;
        struct dma_slave_config slave_conf;
        enum dma_transfer_direction dir_eng;
        int ret;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        sgl = &desc->adm_sgl;

        sg_init_one(sgl, vaddr, size);

        if (read) {
                dir_eng = DMA_DEV_TO_MEM;
                desc->dir = DMA_FROM_DEVICE;
        } else {
                dir_eng = DMA_MEM_TO_DEV;
                desc->dir = DMA_TO_DEVICE;
        }

        ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
        if (ret == 0) {
                ret = -ENOMEM;
                goto err;
        }

        memset(&slave_conf, 0x00, sizeof(slave_conf));

        slave_conf.device_fc = flow_control;
        if (read) {
                slave_conf.src_maxburst = 16;
                slave_conf.src_addr = nandc->base_dma + reg_off;
                slave_conf.slave_id = nandc->data_crci;
        } else {
                slave_conf.dst_maxburst = 16;
                slave_conf.dst_addr = nandc->base_dma + reg_off;
                slave_conf.slave_id = nandc->cmd_crci;
        }

        ret = dmaengine_slave_config(nandc->chan, &slave_conf);
        if (ret) {
                dev_err(nandc->dev, "failed to configure dma channel\n");
                goto err;
        }

        dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
        if (!dma_desc) {
                dev_err(nandc->dev, "failed to prepare desc\n");
                ret = -EINVAL;
                goto err;
        }

        desc->dma_desc = dma_desc;

        list_add_tail(&desc->node, &nandc->desc_list);

        return 0;
err:
        kfree(desc);

        return ret;
}

/*
 * read_reg_dma:        prepares a descriptor to read a given number of
 *                      contiguous registers to the reg_read_buf pointer
 *
 * @first:              offset of the first register in the contiguous block
 * @num_regs:           number of registers to read
 * @flags:              flags to control DMA descriptor preparation
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
                        int num_regs, unsigned int flags)
{
        bool flow_control = false;
        void *vaddr;

        vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
        nandc->reg_read_pos += num_regs;

        if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
                first = dev_cmd_reg_addr(nandc, first);

        if (nandc->props->is_bam)
                return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
                                             num_regs, flags);

        if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
                flow_control = true;

        return prep_adm_dma_desc(nandc, true, first, vaddr,
                                 num_regs * sizeof(u32), flow_control);
}
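
/*
 * Example (illustrative): NAND_FLASH_STATUS (0x14) and NAND_BUFFER_STATUS
 * (0x18) are adjacent, so one call can fetch both into reg_read_buf, as
 * config_nand_cw_read() below does for ECC reads:
 *
 *      read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
 */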

/*
 * write_reg_dma:       prepares a descriptor to write a given number of
 *                      contiguous registers
 *
 * @first:              offset of the first register in the contiguous block
 * @num_regs:           number of registers to write
 * @flags:              flags to control DMA descriptor preparation
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
                         int num_regs, unsigned int flags)
{
        bool flow_control = false;
        struct nandc_regs *regs = nandc->regs;
        void *vaddr;

        vaddr = offset_to_nandc_reg(regs, first);

        if (first == NAND_ERASED_CW_DETECT_CFG) {
                if (flags & NAND_ERASED_CW_SET)
                        vaddr = &regs->erased_cw_detect_cfg_set;
                else
                        vaddr = &regs->erased_cw_detect_cfg_clr;
        }

        if (first == NAND_EXEC_CMD)
                flags |= NAND_BAM_NWD;

        if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
                first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

        if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
                first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

        if (nandc->props->is_bam)
                return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
                                             num_regs, flags);

        if (first == NAND_FLASH_CMD)
                flow_control = true;

        return prep_adm_dma_desc(nandc, false, first, vaddr,
                                 num_regs * sizeof(u32), flow_control);
}
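
/*
 * Example (illustrative): writes are sourced from the nandc_regs shadow,
 * whose relevant fields follow register order, so a multi-register write
 * such as
 *
 *      write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
 *
 * pushes regs->cfg0, regs->cfg1 and regs->ecc_bch_cfg out to
 * NAND_DEV0_CFG0, NAND_DEV0_CFG1 and NAND_DEV0_ECC_CFG respectively.
 */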

/*
 * read_data_dma:       prepares a DMA descriptor to transfer data from the
 *                      controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:            offset within the controller's data buffer
 * @vaddr:              virtual address of the buffer we want to write to
 * @size:               DMA transaction size in bytes
 * @flags:              flags to control DMA descriptor preparation
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
                         const u8 *vaddr, int size, unsigned int flags)
{
        if (nandc->props->is_bam)
                return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

        return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

/*
 * write_data_dma:      prepares a DMA descriptor to transfer data from
 *                      'vaddr' to the controller's internal buffer
 *
 * @reg_off:            offset within the controller's data buffer
 * @vaddr:              virtual address of the buffer we want to read from
 * @size:               DMA transaction size in bytes
 * @flags:              flags to control DMA descriptor preparation
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
                          const u8 *vaddr, int size, unsigned int flags)
{
        if (nandc->props->is_bam)
                return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

        return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
        write_reg_dma(nandc, NAND_ADDR0, 2, 0);
        write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
        write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
        write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
        write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
                      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
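
/*
 * Note (illustrative): the two NAND_ERASED_CW_DETECT_CFG writes above are
 * not redundant; write_reg_dma() sources them from different shadow
 * fields depending on the NAND_ERASED_CW_SET flag:
 *
 *      flags == 0                  ->  regs->erased_cw_detect_cfg_clr
 *      flags &  NAND_ERASED_CW_SET ->  regs->erased_cw_detect_cfg_set
 *
 * so the erased-page detector is first reset and then re-armed before the
 * page is read.
 */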

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in NAND page.
 */
static void
config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
{
        if (nandc->props->is_bam)
                write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
                              NAND_BAM_NEXT_SGL);

        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        if (use_ecc) {
                read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
                read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
                             NAND_BAM_NEXT_SGL);
        } else {
                read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
        }
}

/*
 * Helper to prepare DMA descriptors to configure registers needed for reading
 * a single codeword in a page
 */
static void
config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
                                bool use_ecc)
{
        config_nand_page_read(nandc);
        config_nand_cw_read(nandc, use_ecc);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing a NAND page.
 */
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
        write_reg_dma(nandc, NAND_ADDR0, 2, 0);
        write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
        write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
                      NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in NAND page.
 */
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

        write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
        write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}

/*
 * the following functions are used within chip->legacy.cmdfunc() to
 * perform different NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        /*
         * NAND_CMD_PARAM is called before we know much about the FLASH chip
         * in use. we configure the controller to perform a raw read of 512
         * bytes to read onfi params
         */
        nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
        nandc_set_reg(nandc, NAND_ADDR0, 0);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
                                        | 512 << UD_SIZE_BYTES
                                        | 5 << NUM_ADDR_CYCLES
                                        | 0 << SPARE_SIZE_BYTES);
        nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
                                        | 0 << CS_ACTIVE_BSY
                                        | 17 << BAD_BLOCK_BYTE_NUM
                                        | 1 << BAD_BLOCK_IN_SPARE_AREA
                                        | 2 << WR_RD_BSY_GAP
                                        | 0 << WIDE_FLASH
                                        | 1 << DEV0_CFG1_ECC_DISABLE);
        nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

        /* configure CMD1 and VLD for ONFI param probing */
        nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
                      (nandc->vld & ~READ_START_VLD));
        nandc_set_reg(nandc, NAND_DEV_CMD1,
                      (nandc->cmd1 & ~(0xFF << READ_ADDR))
                      | NAND_CMD_PARAM << READ_ADDR);

        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

        nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
        nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
        nandc_set_read_loc(nandc, 0, 0, 512, 1);

        write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
        write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

        nandc->buf_count = 512;
        memset(nandc->data_buffer, 0xff, nandc->buf_count);

        config_nand_single_cw_page_read(nandc, false);

        read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
                      nandc->buf_count, 0);

        /* restore CMD1 and VLD regs */
        write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
        write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

        return 0;
}
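
/*
 * Note (illustrative): this is where the dummy NAND_DEV_CMD1_RESTORE and
 * NAND_DEV_CMD_VLD_RESTORE offsets come into play. The READ_ADDR field of
 * CMD1 is temporarily pointed at NAND_CMD_PARAM (0xec) and READ_START_VLD
 * is cleared; the original cmd1/vld values are staged in
 * regs->orig_cmd1/orig_vld and written back through the *_RESTORE pseudo
 * offsets once the 512-byte parameter page has landed in data_buffer.
 */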

/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        nandc_set_reg(nandc, NAND_FLASH_CMD,
                      OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
        nandc_set_reg(nandc, NAND_ADDR0, page_addr);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_DEV0_CFG0,
                      host->cfg0_raw & ~(7 << CW_PER_PAGE));
        nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
        nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
        nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

        write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

        write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
        write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

        return 0;
}

/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        if (column == -1)
                return 0;

        nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
        nandc_set_reg(nandc, NAND_ADDR0, column);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
                      nandc->props->is_bam ? 0 : DM_EN);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

        write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

        return 0;
}

/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

        return 0;
}

/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
        struct desc_info *desc;
        dma_cookie_t cookie = 0;
        struct bam_transaction *bam_txn = nandc->bam_txn;
        int r;

        if (nandc->props->is_bam) {
                if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
                        r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
                        if (r)
                                return r;
                }

                if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
                        r = prepare_bam_async_desc(nandc, nandc->tx_chan,
                                                   DMA_PREP_INTERRUPT);
                        if (r)
                                return r;
                }

                if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
                        r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
                                                   DMA_PREP_CMD);
                        if (r)
                                return r;
                }
        }

        list_for_each_entry(desc, &nandc->desc_list, node)
                cookie = dmaengine_submit(desc->dma_desc);

        if (nandc->props->is_bam) {
                bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
                bam_txn->last_cmd_desc->callback_param = bam_txn;
                if (bam_txn->last_data_desc) {
                        bam_txn->last_data_desc->callback = qpic_bam_dma_done;
                        bam_txn->last_data_desc->callback_param = bam_txn;
                        bam_txn->wait_second_completion = true;
                }

                dma_async_issue_pending(nandc->tx_chan);
                dma_async_issue_pending(nandc->rx_chan);
                dma_async_issue_pending(nandc->cmd_chan);

                if (!wait_for_completion_timeout(&bam_txn->txn_done,
                                                 QPIC_NAND_COMPLETION_TIMEOUT))
                        return -ETIMEDOUT;
        } else {
                if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
                        return -ETIMEDOUT;
        }

        return 0;
}
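
/*
 * Typical call sequence (illustrative), as used by qcom_nandc_command()
 * below for e.g. NAND_CMD_RESET:
 *
 *      clear_read_regs(nandc);
 *      clear_bam_transaction(nandc);   (BAM only, from pre_command())
 *      reset(host);                    (queue register/desc setup)
 *      submit_descs(nandc);            (issue and wait for completion)
 *      free_descs(nandc);              (unmap and free the desc list)
 *
 * submit_descs() blocks: BAM waits on txn_done with a 2 second timeout,
 * ADM polls with dma_sync_wait().
 */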

static void free_descs(struct qcom_nand_controller *nandc)
{
        struct desc_info *desc, *n;

        list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
                list_del(&desc->node);

                if (nandc->props->is_bam)
                        dma_unmap_sg(nandc->dev, desc->bam_sgl,
                                     desc->sgl_cnt, desc->dir);
                else
                        dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
                                     desc->dir);

                kfree(desc);
        }
}

/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
        nandc->reg_read_pos = 0;
        nandc_read_buffer_sync(nandc, false);
}

static void pre_command(struct qcom_nand_host *host, int command)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        nandc->buf_count = 0;
        nandc->buf_start = 0;
        host->use_ecc = false;
        host->last_command = command;

        clear_read_regs(nandc);

        if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
            command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
                clear_bam_transaction(nandc);
}

/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte; this status byte can be read after
1386 * NAND_CMD_STATUS is called
1387 */
1388static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
1389{
1390        struct nand_chip *chip = &host->chip;
1391        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1392        struct nand_ecc_ctrl *ecc = &chip->ecc;
1393        int num_cw;
1394        int i;
1395
1396        num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
1397        nandc_read_buffer_sync(nandc, true);
1398
1399        for (i = 0; i < num_cw; i++) {
1400                u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
1401
1402                if (flash_status & FS_MPU_ERR)
1403                        host->status &= ~NAND_STATUS_WP;
1404
1405                if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
1406                                                 (flash_status &
1407                                                  FS_DEVICE_STS_ERR)))
1408                        host->status |= NAND_STATUS_FAIL;
1409        }
1410}
1411
1412static void post_command(struct qcom_nand_host *host, int command)
1413{
1414        struct nand_chip *chip = &host->chip;
1415        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1416
1417        switch (command) {
1418        case NAND_CMD_READID:
1419                nandc_read_buffer_sync(nandc, true);
1420                memcpy(nandc->data_buffer, nandc->reg_read_buf,
1421                       nandc->buf_count);
1422                break;
1423        case NAND_CMD_PAGEPROG:
1424        case NAND_CMD_ERASE1:
1425                parse_erase_write_errors(host, command);
1426                break;
1427        default:
1428                break;
1429        }
1430}
1431
1432/*
1433 * Implements chip->legacy.cmdfunc. It's only used for a limited set of
1434 * commands. The rest of the commands aren't called by the upper layers.
1435 * For example, NAND_CMD_READOOB would never be called because we have our own
1436 * versions of the read_oob ops for nand_ecc_ctrl.
1437 */
1438static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
1439                               int column, int page_addr)
1440{
1441        struct qcom_nand_host *host = to_qcom_nand_host(chip);
1442        struct nand_ecc_ctrl *ecc = &chip->ecc;
1443        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1444        bool wait = false;
1445        int ret = 0;
1446
1447        pre_command(host, command);
1448
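            /*
             * READ0 and SEQIN only stage the address here; the data
             * transfer itself is driven later by the ecc->read_page() and
             * ecc->write_page() ops
             */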
1449        switch (command) {
1450        case NAND_CMD_RESET:
1451                ret = reset(host);
1452                wait = true;
1453                break;
1454
1455        case NAND_CMD_READID:
1456                nandc->buf_count = 4;
1457                ret = read_id(host, column);
1458                wait = true;
1459                break;
1460
1461        case NAND_CMD_PARAM:
1462                ret = nandc_param(host);
1463                wait = true;
1464                break;
1465
1466        case NAND_CMD_ERASE1:
1467                ret = erase_block(host, page_addr);
1468                wait = true;
1469                break;
1470
1471        case NAND_CMD_READ0:
1472                /* we read the entire page for now */
1473                WARN_ON(column != 0);
1474
1475                host->use_ecc = true;
1476                set_address(host, 0, page_addr);
1477                update_rw_regs(host, ecc->steps, true);
1478                break;
1479
1480        case NAND_CMD_SEQIN:
1481                WARN_ON(column != 0);
1482                set_address(host, 0, page_addr);
1483                break;
1484
1485        case NAND_CMD_PAGEPROG:
1486        case NAND_CMD_STATUS:
1487        case NAND_CMD_NONE:
1488        default:
1489                break;
1490        }
1491
1492        if (ret) {
1493                dev_err(nandc->dev, "failure executing command %d\n",
1494                        command);
1495                free_descs(nandc);
1496                return;
1497        }
1498
1499        if (wait) {
1500                ret = submit_descs(nandc);
1501                if (ret)
1502                        dev_err(nandc->dev,
1503                                "failure submitting descs for command %d\n",
1504                                command);
1505        }
1506
1507        free_descs(nandc);
1508
1509        post_command(host, command);
1510}
1511
1512/*
1513 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it reads
1514 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
1515 *
1516 * when using RS ECC, the HW reports the same errors when reading an erased CW,
1517 * but it notifies that it is an erased CW by placing special characters at
1518 * certain offsets in the buffer.
1519 *
1520 * verify if the page is erased or not, and fix up the page for RS ECC by
1521 * replacing the special characters with 0xff.
1522 */
1523static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
1524{
1525        u8 empty1, empty2;
1526
1527        /*
1528         * an erased page flags an error in NAND_FLASH_STATUS; check if the
1529         * page is erased by looking for 0x54s at offsets 3 and 175 from the
1530         * beginning of each codeword
1531         */
1532
1533        empty1 = data_buf[3];
1534        empty2 = data_buf[175];
1535
1536        /*
1537         * if the erased codeword markers exist, override them
1538         * with 0xffs
1539         */
1540        if ((empty1 == 0x54 && empty2 == 0xff) ||
1541            (empty1 == 0xff && empty2 == 0x54)) {
1542                data_buf[3] = 0xff;
1543                data_buf[175] = 0xff;
1544        }
1545
1546        /*
1547         * check if the entire chunk contains 0xffs or not. if it doesn't, then
1548         * restore the original values at the special offsets
1549         */
1550        if (memchr_inv(data_buf, 0xff, data_len)) {
1551                data_buf[3] = empty1;
1552                data_buf[175] = empty2;
1553
1554                return false;
1555        }
1556
1557        return true;
1558}
1559
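    /*
     * per-codeword status triplet captured into reg_read_buf during a page
     * read: NAND_FLASH_STATUS, NAND_BUFFER_STATUS and
     * NAND_ERASED_CW_DETECT_STATUS, in that order
     */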
1560struct read_stats {
1561        __le32 flash;
1562        __le32 buffer;
1563        __le32 erased_cw;
1564};
1565
1566/* reads back FLASH_STATUS register set by the controller */
1567static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
1568{
1569        struct nand_chip *chip = &host->chip;
1570        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1571        int i;
1572
1573        for (i = 0; i < cw_cnt; i++) {
1574                u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
1575
1576                if (flash & (FS_OP_ERR | FS_MPU_ERR))
1577                        return -EIO;
1578        }
1579
1580        return 0;
1581}
1582
1583/* performs raw read for one codeword */
1584static int
1585qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
1586                       u8 *data_buf, u8 *oob_buf, int page, int cw)
1587{
1588        struct qcom_nand_host *host = to_qcom_nand_host(chip);
1589        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1590        struct nand_ecc_ctrl *ecc = &chip->ecc;
1591        int data_size1, data_size2, oob_size1, oob_size2;
1592        int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
1593
1594        nand_read_page_op(chip, page, 0, NULL, 0);
1595        host->use_ecc = false;
1596
1597        clear_bam_transaction(nandc);
1598        set_address(host, host->cw_size * cw, page);
1599        update_rw_regs(host, 1, true);
1600        config_nand_page_read(nandc);
1601
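            /*
             * a raw codeword is read out as data1 | BBM | data2 | spare/ECC
             * (see the layout diagram further down); splitting the transfer
             * like this keeps the dummy/real BBM bytes at a fixed offset
             */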
1602        data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1603        oob_size1 = host->bbm_size;
1604
1605        if (cw == (ecc->steps - 1)) {
1606                data_size2 = ecc->size - data_size1 -
1607                             ((ecc->steps - 1) * 4);
1608                oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
1609                            host->spare_bytes;
1610        } else {
1611                data_size2 = host->cw_data - data_size1;
1612                oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1613        }
1614
1615        if (nandc->props->is_bam) {
1616                nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
1617                read_loc += data_size1;
1618
1619                nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
1620                read_loc += oob_size1;
1621
1622                nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
1623                read_loc += data_size2;
1624
1625                nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
1626        }
1627
1628        config_nand_cw_read(nandc, false);
1629
1630        read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
1631        reg_off += data_size1;
1632
1633        read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
1634        reg_off += oob_size1;
1635
1636        read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
1637        reg_off += data_size2;
1638
1639        read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
1640
1641        ret = submit_descs(nandc);
1642        free_descs(nandc);
1643        if (ret) {
1644                dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
1645                return ret;
1646        }
1647
1648        return check_flash_errors(host, 1);
1649}
1650
1651/*
1652 * Bitflips can happen in erased codewords too, so this function counts the
1653 * number of 0 bits in each CW for which the ECC engine returns an
1654 * uncorrectable error. The page is assumed to be erased if this count is
1655 * less than or equal to ecc->strength for every such CW.
1656 *
1657 * 1. Both DATA and OOB need to be checked for the number of 0 bits. The
1658 *    top-level API can be called with only a data buf or only an OOB buf, so
1659 *    use chip->data_buf if the data buf is NULL and chip->oob_poi if the oob
1660 *    buf is NULL for copying the raw bytes.
1661 * 2. Perform a raw read for every CW which has uncorrectable errors.
1662 * 3. For each CW, check the number of 0 bits in cw_data and the usable OOB
1663 *    bytes. Bitflips in the BBM and spare bytes don't affect the ECC, so
1664 *    don't check the number of bitflips in those areas.
1665 */
1666static int
1667check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
1668                      u8 *oob_buf, unsigned long uncorrectable_cws,
1669                      int page, unsigned int max_bitflips)
1670{
1671        struct nand_chip *chip = &host->chip;
1672        struct mtd_info *mtd = nand_to_mtd(chip);
1673        struct nand_ecc_ctrl *ecc = &chip->ecc;
1674        u8 *cw_data_buf, *cw_oob_buf;
1675        int cw, data_size, oob_size, ret = 0;
1676
1677        if (!data_buf)
1678                data_buf = nand_get_data_buf(chip);
1679
1680        if (!oob_buf) {
1681                nand_get_data_buf(chip);
1682                oob_buf = chip->oob_poi;
1683        }
1684
1685        for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
1686                if (cw == (ecc->steps - 1)) {
1687                        data_size = ecc->size - ((ecc->steps - 1) * 4);
1688                        oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
1689                } else {
1690                        data_size = host->cw_data;
1691                        oob_size = host->ecc_bytes_hw;
1692                }
1693
1694                /* determine starting buffer address for current CW */
1695                cw_data_buf = data_buf + (cw * host->cw_data);
1696                cw_oob_buf = oob_buf + (cw * ecc->bytes);
1697
1698                ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
1699                                             cw_oob_buf, page, cw);
1700                if (ret)
1701                        return ret;
1702
1703                /*
1704                 * make sure it isn't an erased page reported
1705                 * as not-erased by HW because of a few bitflips
1706                 */
1707                ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
1708                                                  cw_oob_buf + host->bbm_size,
1709                                                  oob_size, NULL,
1710                                                  0, ecc->strength);
1711                if (ret < 0) {
1712                        mtd->ecc_stats.failed++;
1713                } else {
1714                        mtd->ecc_stats.corrected += ret;
1715                        max_bitflips = max_t(unsigned int, max_bitflips, ret);
1716                }
1717        }
1718
1719        return max_bitflips;
1720}
1721
1722/*
1723 * reads back status registers set by the controller to notify page read
1724 * errors. this is equivalent to what 'ecc->correct()' would do.
1725 */
1726static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
1727                             u8 *oob_buf, int page)
1728{
1729        struct nand_chip *chip = &host->chip;
1730        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1731        struct mtd_info *mtd = nand_to_mtd(chip);
1732        struct nand_ecc_ctrl *ecc = &chip->ecc;
1733        unsigned int max_bitflips = 0, uncorrectable_cws = 0;
1734        struct read_stats *buf;
1735        bool flash_op_err = false, erased;
1736        int i;
1737        u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
1738
1739        buf = (struct read_stats *)nandc->reg_read_buf;
1740        nandc_read_buffer_sync(nandc, true);
1741
1742        for (i = 0; i < ecc->steps; i++, buf++) {
1743                u32 flash, buffer, erased_cw;
1744                int data_len, oob_len;
1745
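                    /*
                     * the last CW carries less data: the page's free OOB
                     * bytes (4 per step) are read back along with it
                     */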
1746                if (i == (ecc->steps - 1)) {
1747                        data_len = ecc->size - ((ecc->steps - 1) << 2);
1748                        oob_len = ecc->steps << 2;
1749                } else {
1750                        data_len = host->cw_data;
1751                        oob_len = 0;
1752                }
1753
1754                flash = le32_to_cpu(buf->flash);
1755                buffer = le32_to_cpu(buf->buffer);
1756                erased_cw = le32_to_cpu(buf->erased_cw);
1757
1758                /*
1759                 * Check for ECC failure for each codeword. ECC failure can
1760                 * happen in either of the following conditions:
1761                 * 1. If the number of bitflips is greater than the ECC
1762                 *    engine's capability.
1763                 * 2. If this codeword contains all 0xff, for which the
1764                 *    erased codeword detection check will be done.
1765                 */
1766                if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
1767                        /*
1768                         * For BCH ECC, ignore erased codeword errors, if
1769                         * ERASED_CW bits are set.
1770                         */
1771                        if (host->bch_enabled) {
1772                                erased = (erased_cw & ERASED_CW) == ERASED_CW ?
1773                                         true : false;
1774                        /*
1775                         * For RS ECC, HW reports the erased CW by placing
1776                         * special characters at certain offsets in the buffer.
1777                         * These special characters will be valid only if
1778                         * complete page is read i.e. data_buf is not NULL.
1779                         */
1780                        } else if (data_buf) {
1781                                erased = erased_chunk_check_and_fixup(data_buf,
1782                                                                      data_len);
1783                        } else {
1784                                erased = false;
1785                        }
1786
1787                        if (!erased)
1788                                uncorrectable_cws |= BIT(i);
1789                /*
1790                 * Check if MPU or any other operational error (timeout,
1791                 * device failure, etc.) happened for this codeword and
1792                 * make flash_op_err true. If flash_op_err is set, then
1793                 * EIO will be returned for page read.
1794                 */
1795                } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
1796                        flash_op_err = true;
1797                /*
1798                 * No ECC or operational errors happened. Check the number of
1799                 * bits corrected and update the ecc_stats.corrected.
1800                 */
1801                } else {
1802                        unsigned int stat;
1803
1804                        stat = buffer & BS_CORRECTABLE_ERR_MSK;
1805                        mtd->ecc_stats.corrected += stat;
1806                        max_bitflips = max(max_bitflips, stat);
1807                }
1808
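                    /*
                     * each CW consumes ecc->bytes of OOB; the last CW also
                     * carries the free OOB bytes counted in oob_len
                     */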
1809                if (data_buf)
1810                        data_buf += data_len;
1811                if (oob_buf)
1812                        oob_buf += oob_len + ecc->bytes;
1813        }
1814
1815        if (flash_op_err)
1816                return -EIO;
1817
1818        if (!uncorrectable_cws)
1819                return max_bitflips;
1820
1821        return check_for_erased_page(host, data_buf_start, oob_buf_start,
1822                                     uncorrectable_cws, page,
1823                                     max_bitflips);
1824}
1825
1826/*
1827 * helper to perform the actual page read operation, used by ecc->read_page(),
1828 * ecc->read_oob()
1829 */
1830static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
1831                         u8 *oob_buf, int page)
1832{
1833        struct nand_chip *chip = &host->chip;
1834        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1835        struct nand_ecc_ctrl *ecc = &chip->ecc;
1836        u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
1837        int i, ret;
1838
1839        config_nand_page_read(nandc);
1840
1841        /* queue cmd descs for each codeword */
1842        for (i = 0; i < ecc->steps; i++) {
1843                int data_size, oob_size;
1844
1845                if (i == (ecc->steps - 1)) {
1846                        data_size = ecc->size - ((ecc->steps - 1) << 2);
1847                        oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1848                                   host->spare_bytes;
1849                } else {
1850                        data_size = host->cw_data;
1851                        oob_size = host->ecc_bytes_hw + host->spare_bytes;
1852                }
1853
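                    /*
                     * the read locations select which spans of the
                     * controller's codeword buffer get DMA'd out: data only,
                     * oob only, or both
                     */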
1854                if (nandc->props->is_bam) {
1855                        if (data_buf && oob_buf) {
1856                                nandc_set_read_loc(nandc, 0, 0, data_size, 0);
1857                                nandc_set_read_loc(nandc, 1, data_size,
1858                                                   oob_size, 1);
1859                        } else if (data_buf) {
1860                                nandc_set_read_loc(nandc, 0, 0, data_size, 1);
1861                        } else {
1862                                nandc_set_read_loc(nandc, 0, data_size,
1863                                                   oob_size, 1);
1864                        }
1865                }
1866
1867                config_nand_cw_read(nandc, true);
1868
1869                if (data_buf)
1870                        read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
1871                                      data_size, 0);
1872
1873                /*
1874                 * when ecc is enabled, the controller doesn't read the real
1875                 * or dummy bad block markers in each chunk. To maintain a
1876                 * consistent layout across RAW and ECC reads, we just
1877                 * leave the real/dummy BBM offsets empty (i.e., filled with
1878                 * 0xffs)
1879                 */
1880                if (oob_buf) {
1881                        int j;
1882
1883                        for (j = 0; j < host->bbm_size; j++)
1884                                *oob_buf++ = 0xff;
1885
1886                        read_data_dma(nandc, FLASH_BUF_ACC + data_size,
1887                                      oob_buf, oob_size, 0);
1888                }
1889
1890                if (data_buf)
1891                        data_buf += data_size;
1892                if (oob_buf)
1893                        oob_buf += oob_size;
1894        }
1895
1896        ret = submit_descs(nandc);
1897        free_descs(nandc);
1898
1899        if (ret) {
1900                dev_err(nandc->dev, "failure to read page/oob\n");
1901                return ret;
1902        }
1903
1904        return parse_read_errors(host, data_buf_start, oob_buf_start, page);
1905}
1906
1907/*
1908 * a helper that copies the last step/codeword of a page (containing free oob)
1909 * into our local buffer
1910 */
1911static int copy_last_cw(struct qcom_nand_host *host, int page)
1912{
1913        struct nand_chip *chip = &host->chip;
1914        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1915        struct nand_ecc_ctrl *ecc = &chip->ecc;
1916        int size;
1917        int ret;
1918
1919        clear_read_regs(nandc);
1920
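            /*
             * with ECC enabled we only get the 516 data bytes of the
             * codeword; a raw read returns the full 528/532 bytes
             */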
1921        size = host->use_ecc ? host->cw_data : host->cw_size;
1922
1923        /* prepare a clean read buffer */
1924        memset(nandc->data_buffer, 0xff, size);
1925
1926        set_address(host, host->cw_size * (ecc->steps - 1), page);
1927        update_rw_regs(host, 1, true);
1928
1929        config_nand_single_cw_page_read(nandc, host->use_ecc);
1930
1931        read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
1932
1933        ret = submit_descs(nandc);
1934        if (ret)
1935                dev_err(nandc->dev, "failed to copy last codeword\n");
1936
1937        free_descs(nandc);
1938
1939        return ret;
1940}
1941
1942/* implements ecc->read_page() */
1943static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
1944                                int oob_required, int page)
1945{
1946        struct qcom_nand_host *host = to_qcom_nand_host(chip);
1947        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1948        u8 *data_buf, *oob_buf = NULL;
1949
1950        nand_read_page_op(chip, page, 0, NULL, 0);
1951        data_buf = buf;
1952        oob_buf = oob_required ? chip->oob_poi : NULL;
1953
1954        clear_bam_transaction(nandc);
1955
1956        return read_page_ecc(host, data_buf, oob_buf, page);
1957}
1958
1959/* implements ecc->read_page_raw() */
1960static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1961                                    int oob_required, int page)
1962{
1963        struct mtd_info *mtd = nand_to_mtd(chip);
1964        struct qcom_nand_host *host = to_qcom_nand_host(chip);
1965        struct nand_ecc_ctrl *ecc = &chip->ecc;
1966        int cw, ret;
1967        u8 *data_buf = buf, *oob_buf = chip->oob_poi;
1968
1969        for (cw = 0; cw < ecc->steps; cw++) {
1970                ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
1971                                             page, cw);
1972                if (ret)
1973                        return ret;
1974
1975                data_buf += host->cw_data;
1976                oob_buf += ecc->bytes;
1977        }
1978
1979        return 0;
1980}
1981
1982/* implements ecc->read_oob() */
1983static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
1984{
1985        struct qcom_nand_host *host = to_qcom_nand_host(chip);
1986        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1987        struct nand_ecc_ctrl *ecc = &chip->ecc;
1988
1989        clear_read_regs(nandc);
1990        clear_bam_transaction(nandc);
1991
1992        host->use_ecc = true;
1993        set_address(host, 0, page);
1994        update_rw_regs(host, ecc->steps, true);
1995
1996        return read_page_ecc(host, NULL, chip->oob_poi, page);
1997}
1998
1999/* implements ecc->write_page() */
2000static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
2001                                 int oob_required, int page)
2002{
2003        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2004        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2005        struct nand_ecc_ctrl *ecc = &chip->ecc;
2006        u8 *data_buf, *oob_buf;
2007        int i, ret;
2008
2009        nand_prog_page_begin_op(chip, page, 0, NULL, 0);
2010
2011        clear_read_regs(nandc);
2012        clear_bam_transaction(nandc);
2013
2014        data_buf = (u8 *)buf;
2015        oob_buf = chip->oob_poi;
2016
2017        host->use_ecc = true;
2018        update_rw_regs(host, ecc->steps, false);
2019        config_nand_page_write(nandc);
2020
2021        for (i = 0; i < ecc->steps; i++) {
2022                int data_size, oob_size;
2023
2024                if (i == (ecc->steps - 1)) {
2025                        data_size = ecc->size - ((ecc->steps - 1) << 2);
2026                        oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
2027                                   host->spare_bytes;
2028                } else {
2029                        data_size = host->cw_data;
2030                        oob_size = ecc->bytes;
2031                }
2032
2034                write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
2035                               i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
2036
2037                /*
2038                 * when ECC is enabled, we don't really need to write anything
2039                 * to oob for the first n - 1 codewords since these oob regions
2040                 * just contain ECC bytes that's written by the controller
2041                 * itself. For the last codeword, we skip the bbm positions and
2042                 * write to the free oob area.
2043                 */
2044                if (i == (ecc->steps - 1)) {
2045                        oob_buf += host->bbm_size;
2046
2047                        write_data_dma(nandc, FLASH_BUF_ACC + data_size,
2048                                       oob_buf, oob_size, 0);
2049                }
2050
2051                config_nand_cw_write(nandc);
2052
2053                data_buf += data_size;
2054                oob_buf += oob_size;
2055        }
2056
2057        ret = submit_descs(nandc);
2058        if (ret)
2059                dev_err(nandc->dev, "failure to write page\n");
2060
2061        free_descs(nandc);
2062
2063        if (!ret)
2064                ret = nand_prog_page_end_op(chip);
2065
2066        return ret;
2067}
2068
2069/* implements ecc->write_page_raw() */
2070static int qcom_nandc_write_page_raw(struct nand_chip *chip,
2071                                     const uint8_t *buf, int oob_required,
2072                                     int page)
2073{
2074        struct mtd_info *mtd = nand_to_mtd(chip);
2075        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2076        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2077        struct nand_ecc_ctrl *ecc = &chip->ecc;
2078        u8 *data_buf, *oob_buf;
2079        int i, ret;
2080
2081        nand_prog_page_begin_op(chip, page, 0, NULL, 0);
2082        clear_read_regs(nandc);
2083        clear_bam_transaction(nandc);
2084
2085        data_buf = (u8 *)buf;
2086        oob_buf = chip->oob_poi;
2087
2088        host->use_ecc = false;
2089        update_rw_regs(host, ecc->steps, false);
2090        config_nand_page_write(nandc);
2091
2092        for (i = 0; i < ecc->steps; i++) {
2093                int data_size1, data_size2, oob_size1, oob_size2;
2094                int reg_off = FLASH_BUF_ACC;
2095
2096                data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
2097                oob_size1 = host->bbm_size;
2098
2099                if (i == (ecc->steps - 1)) {
2100                        data_size2 = ecc->size - data_size1 -
2101                                     ((ecc->steps - 1) << 2);
2102                        oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
2103                                    host->spare_bytes;
2104                } else {
2105                        data_size2 = host->cw_data - data_size1;
2106                        oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
2107                }
2108
2109                write_data_dma(nandc, reg_off, data_buf, data_size1,
2110                               NAND_BAM_NO_EOT);
2111                reg_off += data_size1;
2112                data_buf += data_size1;
2113
2114                write_data_dma(nandc, reg_off, oob_buf, oob_size1,
2115                               NAND_BAM_NO_EOT);
2116                reg_off += oob_size1;
2117                oob_buf += oob_size1;
2118
2119                write_data_dma(nandc, reg_off, data_buf, data_size2,
2120                               NAND_BAM_NO_EOT);
2121                reg_off += data_size2;
2122                data_buf += data_size2;
2123
2124                write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
2125                oob_buf += oob_size2;
2126
2127                config_nand_cw_write(nandc);
2128        }
2129
2130        ret = submit_descs(nandc);
2131        if (ret)
2132                dev_err(nandc->dev, "failure to write raw page\n");
2133
2134        free_descs(nandc);
2135
2136        if (!ret)
2137                ret = nand_prog_page_end_op(chip);
2138
2139        return ret;
2140}
2141
2142/*
2143 * implements ecc->write_oob()
2144 *
2145 * the NAND controller cannot write only data or only OOB within a codeword
2146 * since ECC is calculated for the combined codeword. So update the OOB from
2147 * chip->oob_poi, and pad the data area with 0xFF before writing.
2148 */
2149static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
2150{
2151        struct mtd_info *mtd = nand_to_mtd(chip);
2152        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2153        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2154        struct nand_ecc_ctrl *ecc = &chip->ecc;
2155        u8 *oob = chip->oob_poi;
2156        int data_size, oob_size;
2157        int ret;
2158
2159        host->use_ecc = true;
2160        clear_bam_transaction(nandc);
2161
2162        /* calculate the data and oob size for the last codeword/step */
2163        data_size = ecc->size - ((ecc->steps - 1) << 2);
2164        oob_size = mtd->oobavail;
2165
2166        memset(nandc->data_buffer, 0xff, host->cw_data);
2167        /* write the new oob content into the last codeword */
2168        mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
2169                                    0, mtd->oobavail);
2170
2171        set_address(host, host->cw_size * (ecc->steps - 1), page);
2172        update_rw_regs(host, 1, false);
2173
2174        config_nand_page_write(nandc);
2175        write_data_dma(nandc, FLASH_BUF_ACC,
2176                       nandc->data_buffer, data_size + oob_size, 0);
2177        config_nand_cw_write(nandc);
2178
2179        ret = submit_descs(nandc);
2180
2181        free_descs(nandc);
2182
2183        if (ret) {
2184                dev_err(nandc->dev, "failure to write oob\n");
2185                return -EIO;
2186        }
2187
2188        return nand_prog_page_end_op(chip);
2189}
2190
2191static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
2192{
2193        struct mtd_info *mtd = nand_to_mtd(chip);
2194        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2195        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2196        struct nand_ecc_ctrl *ecc = &chip->ecc;
2197        int page, ret, bbpos, bad = 0;
2198
2199        page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2200
2201        /*
2202         * configure registers for a raw sub page read. the address is set to
2203         * the beginning of the last codeword; we don't care about reading the
2204         * ecc portion of the oob, we just want the first few bytes of this
2205         * codeword, which contain the BBM
2206         */
2207        host->use_ecc = false;
2208
2209        clear_bam_transaction(nandc);
2210        ret = copy_last_cw(host, page);
2211        if (ret)
2212                goto err;
2213
2214        if (check_flash_errors(host, 1)) {
2215                dev_warn(nandc->dev, "error when trying to read BBM\n");
2216                goto err;
2217        }
2218
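            /*
             * the BBM is the first byte past the page data within the last
             * raw codeword
             */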
2219        bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
2220
2221        bad = nandc->data_buffer[bbpos] != 0xff;
2222
2223        if (chip->options & NAND_BUSWIDTH_16)
2224                bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
2225err:
2226        return bad;
2227}
2228
2229static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
2230{
2231        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2232        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2233        struct nand_ecc_ctrl *ecc = &chip->ecc;
2234        int page, ret;
2235
2236        clear_read_regs(nandc);
2237        clear_bam_transaction(nandc);
2238
2239        /*
2240         * to mark the block as bad, we flash the entire last codeword with 0s.
2241         * we don't care about the rest of the content in the codeword since
2242         * we aren't going to use this block again
2243         */
2244        memset(nandc->data_buffer, 0x00, host->cw_size);
2245
2246        page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2247
2248        /* prepare write */
2249        host->use_ecc = false;
2250        set_address(host, host->cw_size * (ecc->steps - 1), page);
2251        update_rw_regs(host, 1, false);
2252
2253        config_nand_page_write(nandc);
2254        write_data_dma(nandc, FLASH_BUF_ACC,
2255                       nandc->data_buffer, host->cw_size, 0);
2256        config_nand_cw_write(nandc);
2257
2258        ret = submit_descs(nandc);
2259
2260        free_descs(nandc);
2261
2262        if (ret) {
2263                dev_err(nandc->dev, "failure to update BBM\n");
2264                return -EIO;
2265        }
2266
2267        return nand_prog_page_end_op(chip);
2268}
2269
2270/*
2271 * the three functions below implement chip->legacy.read_byte(),
2272 * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. these
2273 * aren't used for reading/writing page data, they are used for smaller data
2274 * like reading id, status etc
2275 */
2276static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
2277{
2278        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2279        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2280        u8 *buf = nandc->data_buffer;
2281        u8 ret = 0x0;
2282
2283        if (host->last_command == NAND_CMD_STATUS) {
2284                ret = host->status;
2285
2286                host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2287
2288                return ret;
2289        }
2290
2291        if (nandc->buf_start < nandc->buf_count)
2292                ret = buf[nandc->buf_start++];
2293
2294        return ret;
2295}
2296
2297static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
2298{
2299        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2300        int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2301
2302        memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
2303        nandc->buf_start += real_len;
2304}
2305
2306static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
2307                                 int len)
2308{
2309        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2310        int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2311
2312        memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
2313
2314        nandc->buf_start += real_len;
2315}
2316
2317/* we support only one external chip for now */
2318static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
2319{
2320        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2321
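            /* chipnr -1 (deselect) and 0 (our only CS) are expected here */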
2322        if (chipnr <= 0)
2323                return;
2324
2325        dev_warn(nandc->dev, "invalid chip select\n");
2326}
2327
2328/*
2329 * NAND controller page layout info
2330 *
2331 * Layout with ECC enabled:
2332 *
2333 * |----------------------|  |---------------------------------|
2334 * |           xx.......yy|  |             *********xx.......yy|
2335 * |    DATA   xx..ECC..yy|  |    DATA     **SPARE**xx..ECC..yy|
2336 * |   (516)   xx.......yy|  |  (516-n*4)  **(n*4)**xx.......yy|
2337 * |           xx.......yy|  |             *********xx.......yy|
2338 * |----------------------|  |---------------------------------|
2339 *     codeword 1,2..n-1                  codeword n
2340 *  <---(528/532 Bytes)-->    <-------(528/532 Bytes)--------->
2341 *
2342 * n = Number of codewords in the page
2343 * . = ECC bytes
2344 * * = Spare/free bytes
2345 * x = Unused byte(s)
2346 * y = Reserved byte(s)
2347 *
2348 * 2K page: n = 4, spare = 16 bytes
2349 * 4K page: n = 8, spare = 32 bytes
2350 * 8K page: n = 16, spare = 64 bytes
2351 *
2352 * the qcom nand controller operates at a sub page/codeword level. each
2353 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
2354 * the number of ECC bytes varies based on the ECC strength and the bus width.
2355 *
2356 * the first n - 1 codewords contain 516 bytes of user data; the remaining
2357 * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
2358 * both user data and spare (oobavail) bytes that sum up to 516 bytes.
2359 *
2360 * When we access a page with ECC enabled, the reserved byte(s) are not
2361 * accessible at all. When reading, we fill up these unreadable positions
2362 * with 0xffs. When writing, the controller skips writing the inaccessible
2363 * bytes.
2364 *
2365 * Layout with ECC disabled:
2366 *
2367 * |------------------------------|  |---------------------------------------|
2368 * |         yy          xx.......|  |         bb          *********xx.......|
2369 * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
2370 * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
2371 * |         yy          xx.......|  |         bb          *********xx.......|
2372 * |------------------------------|  |---------------------------------------|
2373 *         codeword 1,2..n-1                        codeword n
2374 *  <-------(528/532 Bytes)------>    <-----------(528/532 Bytes)----------->
2375 *
2376 * n = Number of codewords in the page
2377 * . = ECC bytes
2378 * * = Spare/free bytes
2379 * x = Unused byte(s)
2380 * y = Dummy Bad Block byte(s)
2381 * b = Real Bad Block byte(s)
2382 * size1/size2 = function of codeword size and 'n'
2383 *
2384 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
2385 * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
2386 * Block Markers. In the last codeword, this position contains the real BBM
2387 *
2388 * In order to have a consistent layout between RAW and ECC modes, we assume
2389 * the following OOB layout arrangement:
2390 *
2391 * |-----------|  |--------------------|
2392 * |yyxx.......|  |bb*********xx.......|
2393 * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
2394 * |yyxx.......|  |bb*********xx.......|
2395 * |yyxx.......|  |bb*********xx.......|
2396 * |-----------|  |--------------------|
2397 *  first n - 1       nth OOB region
2398 *  OOB regions
2399 *
2400 * n = Number of codewords in the page
2401 * . = ECC bytes
2402 * * = FREE OOB bytes
2403 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
2404 * x = Unused byte(s)
2405 * b = Real bad block byte(s) (inaccessible when ECC enabled)
2406 *
2407 * This layout is read as is when ECC is disabled. When ECC is enabled, the
2408 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
2409 * and assumed to be 0xffs when we read a page/oob. The ECC, unused and
2410 * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
2411 * the sum of the three).
2412 */
2413static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2414                                   struct mtd_oob_region *oobregion)
2415{
2416        struct nand_chip *chip = mtd_to_nand(mtd);
2417        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2418        struct nand_ecc_ctrl *ecc = &chip->ecc;
2419
2420        if (section > 1)
2421                return -ERANGE;
2422
2423        if (!section) {
2424                oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2425                                    host->bbm_size;
2426                oobregion->offset = 0;
2427        } else {
2428                oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2429                oobregion->offset = mtd->oobsize - oobregion->length;
2430        }
2431
2432        return 0;
2433}
2434
2435static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2436                                     struct mtd_oob_region *oobregion)
2437{
2438        struct nand_chip *chip = mtd_to_nand(mtd);
2439        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2440        struct nand_ecc_ctrl *ecc = &chip->ecc;
2441
2442        if (section)
2443                return -ERANGE;
2444
2445        oobregion->length = ecc->steps * 4;
2446        oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2447
2448        return 0;
2449}
2450
2451static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
2452        .ecc = qcom_nand_ooblayout_ecc,
2453        .free = qcom_nand_ooblayout_free,
2454};
2455
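    /*
     * total OOB bytes consumed per 512-byte step: 12 for 4 bit ECC, 16 for
     * 8 bit ECC (parity + spare + BBM, see the ecc->bytes comment below)
     */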
2456static int
2457qcom_nandc_calc_ecc_bytes(int step_size, int strength)
2458{
2459        return strength == 4 ? 12 : 16;
2460}
2461NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
2462                     NANDC_STEP_SIZE, 4, 8);
2463
2464static int qcom_nand_attach_chip(struct nand_chip *chip)
2465{
2466        struct mtd_info *mtd = nand_to_mtd(chip);
2467        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2468        struct nand_ecc_ctrl *ecc = &chip->ecc;
2469        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2470        int cwperpage, bad_block_byte, ret;
2471        bool wide_bus;
2472        int ecc_mode = 1;
2473
2474        /* controller only supports 512 bytes data steps */
2475        ecc->size = NANDC_STEP_SIZE;
2476        wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
2477        cwperpage = mtd->writesize / NANDC_STEP_SIZE;
2478
2479        /*
2480         * Each CW has 4 available OOB bytes which will be protected with ECC,
2481         * so the remaining bytes can be used for ECC.
2482         */
2483        ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
2484                                   mtd->oobsize - (cwperpage * 4));
2485        if (ret) {
2486                dev_err(nandc->dev, "No valid ECC settings possible\n");
2487                return ret;
2488        }
2489
2490        if (ecc->strength >= 8) {
2491                /* 8 bit ECC defaults to BCH ECC on all platforms */
2492                host->bch_enabled = true;
2493                ecc_mode = 1;
2494
2495                if (wide_bus) {
2496                        host->ecc_bytes_hw = 14;
2497                        host->spare_bytes = 0;
2498                        host->bbm_size = 2;
2499                } else {
2500                        host->ecc_bytes_hw = 13;
2501                        host->spare_bytes = 2;
2502                        host->bbm_size = 1;
2503                }
2504        } else {
2505                /*
2506                 * if the controller supports BCH for 4 bit ECC, it uses
2507                 * fewer bytes for ECC. If RS is used, the ECC is always
2508                 * 10 bytes
2509                 */
2510                if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
2511                        /* BCH */
2512                        host->bch_enabled = true;
2513                        ecc_mode = 0;
2514
2515                        if (wide_bus) {
2516                                host->ecc_bytes_hw = 8;
2517                                host->spare_bytes = 2;
2518                                host->bbm_size = 2;
2519                        } else {
2520                                host->ecc_bytes_hw = 7;
2521                                host->spare_bytes = 4;
2522                                host->bbm_size = 1;
2523                        }
2524                } else {
2525                        /* RS */
2526                        host->ecc_bytes_hw = 10;
2527
2528                        if (wide_bus) {
2529                                host->spare_bytes = 0;
2530                                host->bbm_size = 2;
2531                        } else {
2532                                host->spare_bytes = 1;
2533                                host->bbm_size = 1;
2534                        }
2535                }
2536        }
2537
2538        /*
2539         * we consider ecc->bytes as the sum of all the non-data content in a
2540         * step. It gives us a clean representation of the oob area (even if
2541         * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
2542         * ECC and 12 bytes for 4 bit ECC
2543         */
2544        ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
2545
2546        ecc->read_page          = qcom_nandc_read_page;
2547        ecc->read_page_raw      = qcom_nandc_read_page_raw;
2548        ecc->read_oob           = qcom_nandc_read_oob;
2549        ecc->write_page         = qcom_nandc_write_page;
2550        ecc->write_page_raw     = qcom_nandc_write_page_raw;
2551        ecc->write_oob          = qcom_nandc_write_oob;
2552
2553        ecc->mode = NAND_ECC_HW;
2554
2555        mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
2556
2557        nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
2558                                     cwperpage);
2559
2560        /*
2561         * DATA_UD_BYTES varies based on whether the read/write command protects
2562         * spare data with ECC too. We protect spare data by default, so we set
2563         * it to main + spare data, which are 512 and 4 bytes respectively.
2564         */
2565        host->cw_data = 516;
2566
2567        /*
2568         * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
2569         * for 8 bit ECC
2570         */
2571        host->cw_size = host->cw_data + ecc->bytes;
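            /*
             * the BBM sits at the first byte after the page data within the
             * last codeword; the BAD_BLOCK_BYTE_NUM field appears to be
             * 1-based, hence the +1
             */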
2572        bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
2573
2574        host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
2575                                | host->cw_data << UD_SIZE_BYTES
2576                                | 0 << DISABLE_STATUS_AFTER_WRITE
2577                                | 5 << NUM_ADDR_CYCLES
2578                                | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
2579                                | 0 << STATUS_BFR_READ
2580                                | 1 << SET_RD_MODE_AFTER_STATUS
2581                                | host->spare_bytes << SPARE_SIZE_BYTES;
2582
2583        host->cfg1 = 7 << NAND_RECOVERY_CYCLES
2584                                | 0 <<  CS_ACTIVE_BSY
2585                                | bad_block_byte << BAD_BLOCK_BYTE_NUM
2586                                | 0 << BAD_BLOCK_IN_SPARE_AREA
2587                                | 2 << WR_RD_BSY_GAP
2588                                | wide_bus << WIDE_FLASH
2589                                | host->bch_enabled << ENABLE_BCH_ECC;
2590
2591        host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
2592                                | host->cw_size << UD_SIZE_BYTES
2593                                | 5 << NUM_ADDR_CYCLES
2594                                | 0 << SPARE_SIZE_BYTES;
2595
2596        host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
2597                                | 0 << CS_ACTIVE_BSY
2598                                | 17 << BAD_BLOCK_BYTE_NUM
2599                                | 1 << BAD_BLOCK_IN_SPARE_AREA
2600                                | 2 << WR_RD_BSY_GAP
2601                                | wide_bus << WIDE_FLASH
2602                                | 1 << DEV0_CFG1_ECC_DISABLE;
2603
2604        host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
2605                                | 0 << ECC_SW_RESET
2606                                | host->cw_data << ECC_NUM_DATA_BYTES
2607                                | 1 << ECC_FORCE_CLK_OPEN
2608                                | ecc_mode << ECC_MODE
2609                                | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
2610
2611        host->ecc_buf_cfg = 0x203 << NUM_STEPS;
2612
2613        host->clrflashstatus = FS_READY_BSY_N;
2614        host->clrreadstatus = 0xc0;
2615        nandc->regs->erased_cw_detect_cfg_clr =
2616                cpu_to_le32(CLR_ERASED_PAGE_DET);
2617        nandc->regs->erased_cw_detect_cfg_set =
2618                cpu_to_le32(SET_ERASED_PAGE_DET);
2619
2620        dev_dbg(nandc->dev,
2621                "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
2622                host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
2623                host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
2624                cwperpage);
2625
2626        return 0;
2627}
2628
2629static const struct nand_controller_ops qcom_nandc_ops = {
2630        .attach_chip = qcom_nand_attach_chip,
2631};
2632
2633static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
2634{
2635        if (nandc->props->is_bam) {
2636                if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
2637                        dma_unmap_single(nandc->dev, nandc->reg_read_dma,
2638                                         MAX_REG_RD *
2639                                         sizeof(*nandc->reg_read_buf),
2640                                         DMA_FROM_DEVICE);
2641
2642                if (nandc->tx_chan)
2643                        dma_release_channel(nandc->tx_chan);
2644
2645                if (nandc->rx_chan)
2646                        dma_release_channel(nandc->rx_chan);
2647
2648                if (nandc->cmd_chan)
2649                        dma_release_channel(nandc->cmd_chan);
2650        } else {
2651                if (nandc->chan)
2652                        dma_release_channel(nandc->chan);
2653        }
2654}
2655
2656static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
2657{
2658        int ret;
2659
2660        ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
2661        if (ret) {
2662                dev_err(nandc->dev, "failed to set DMA mask\n");
2663                return ret;
2664        }
2665
2666        /*
2667         * we use the internal buffer for reading ONFI params, reading small
2668         * data like ID and status, and performing read-copy-write operations
2669         * when writing to a codeword partially. 532 is the maximum possible
2670         * size of a codeword for our nand controller
2671         */
2672        nandc->buf_size = 532;
2673
2674        nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
2675                                        GFP_KERNEL);
2676        if (!nandc->data_buffer)
2677                return -ENOMEM;
2678
2679        nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
2680                                        GFP_KERNEL);
2681        if (!nandc->regs)
2682                return -ENOMEM;
2683
2684        nandc->reg_read_buf = devm_kcalloc(nandc->dev,
2685                                MAX_REG_RD, sizeof(*nandc->reg_read_buf),
2686                                GFP_KERNEL);
2687        if (!nandc->reg_read_buf)
2688                return -ENOMEM;
2689
2690        if (nandc->props->is_bam) {
2691                nandc->reg_read_dma =
2692                        dma_map_single(nandc->dev, nandc->reg_read_buf,
2693                                       MAX_REG_RD *
2694                                       sizeof(*nandc->reg_read_buf),
2695                                       DMA_FROM_DEVICE);
2696                if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
2697                        dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
2698                        return -EIO;
2699                }
2700
2701                nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
2702                if (IS_ERR(nandc->tx_chan)) {
2703                        ret = PTR_ERR(nandc->tx_chan);
2704                        nandc->tx_chan = NULL;
2705                        if (ret != -EPROBE_DEFER)
2706                                dev_err(nandc->dev,
2707                                        "tx DMA channel request failed: %d\n",
2708                                        ret);
2709                        goto unalloc;
2710                }
2711
2712                nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
2713                if (IS_ERR(nandc->rx_chan)) {
2714                        ret = PTR_ERR(nandc->rx_chan);
2715                        nandc->rx_chan = NULL;
2716                        if (ret != -EPROBE_DEFER)
2717                                dev_err(nandc->dev,
2718                                        "rx DMA channel request failed: %d\n",
2719                                        ret);
2720                        goto unalloc;
2721                }
2722
2723                nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
2724                if (IS_ERR(nandc->cmd_chan)) {
2725                        ret = PTR_ERR(nandc->cmd_chan);
2726                        nandc->cmd_chan = NULL;
2727                        if (ret != -EPROBE_DEFER)
2728                                dev_err(nandc->dev,
2729                                        "cmd DMA channel request failed: %d\n",
2730                                        ret);
2731                        goto unalloc;
2732                }
2733
2734                /*
2735                 * Initially allocate BAM transaction to read ONFI param page.
2736                 * After detecting all the devices, this BAM transaction will
2737                 * be freed and the next BAM transaction will be allocated with
2738                 * maximum codeword size
2739                 */
2740                nandc->max_cwperpage = 1;
2741                nandc->bam_txn = alloc_bam_transaction(nandc);
2742                if (!nandc->bam_txn) {
2743                        dev_err(nandc->dev,
2744                                "failed to allocate bam transaction\n");
2745                        ret = -ENOMEM;
2746                        goto unalloc;
2747                }
2748        } else {
2749                nandc->chan = dma_request_chan(nandc->dev, "rxtx");
2750                if (IS_ERR(nandc->chan)) {
2751                        ret = PTR_ERR(nandc->chan);
2752                        nandc->chan = NULL;
2753                        if (ret != -EPROBE_DEFER)
2754                                dev_err(nandc->dev,
2755                                        "rxtx DMA channel request failed: %d\n",
2756                                        ret);
2757                        return ret;
2758                }
2759        }
2760
2761        INIT_LIST_HEAD(&nandc->desc_list);
2762        INIT_LIST_HEAD(&nandc->host_list);
2763
2764        nand_controller_init(&nandc->controller);
2765        nandc->controller.ops = &qcom_nandc_ops;
2766
2767        return 0;
2768unalloc:
2769        qcom_nandc_unalloc(nandc);
2770        return ret;
2771}
2772
2773/* one time setup of a few nand controller registers */
2774static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
2775{
2776        u32 nand_ctrl;
2777
2778        /* kill onenand */
2779        if (!nandc->props->is_qpic)
2780                nandc_write(nandc, SFLASHC_BURST_CFG, 0);
2781        nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
2782                    NAND_DEV_CMD_VLD_VAL);
2783
2784        /* enable ADM or BAM DMA */
2785        if (nandc->props->is_bam) {
2786                nand_ctrl = nandc_read(nandc, NAND_CTRL);
2787
2788                /*
2789                 * NAND_CTRL is an operational register, and CPU access
2790                 * to operational registers is read-only in BAM mode. So
2791                 * update the NAND_CTRL register only if the controller
2792                 * is not already in BAM mode. In most cases BAM mode
2793                 * will have been enabled by the bootloader
2794                 */
2795                if (!(nand_ctrl & BAM_MODE_EN))
2796                        nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
2797        } else {
2798                nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
2799        }
2800
2801        /* save the original values of these registers */
2802        nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
2803        nandc->vld = NAND_DEV_CMD_VLD_VAL;
2804
2805        return 0;
2806}
2807
2808static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
2809                                            struct qcom_nand_host *host,
2810                                            struct device_node *dn)
2811{
2812        struct nand_chip *chip = &host->chip;
2813        struct mtd_info *mtd = nand_to_mtd(chip);
2814        struct device *dev = nandc->dev;
2815        int ret;
2816
2817        ret = of_property_read_u32(dn, "reg", &host->cs);
2818        if (ret) {
2819                dev_err(dev, "can't get chip-select\n");
2820                return -ENXIO;
2821        }
2822
2823        nand_set_flash_node(chip, dn);
2824        mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2825        if (!mtd->name)
2826                return -ENOMEM;
2827
2828        mtd->owner = THIS_MODULE;
2829        mtd->dev.parent = dev;
2830
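            /* this driver still implements the legacy (pre-exec_op) chip hooks */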
2831        chip->legacy.cmdfunc    = qcom_nandc_command;
2832        chip->legacy.select_chip        = qcom_nandc_select_chip;
2833        chip->legacy.read_byte  = qcom_nandc_read_byte;
2834        chip->legacy.read_buf   = qcom_nandc_read_buf;
2835        chip->legacy.write_buf  = qcom_nandc_write_buf;
2836        chip->legacy.set_features       = nand_get_set_features_notsupp;
2837        chip->legacy.get_features       = nand_get_set_features_notsupp;
2838
2839        /*
2840         * the bad block marker is readable only when we read the last codeword
2841         * of a page with ECC disabled. currently, the nand_base and nand_bbt
2842         * helpers don't allow us to read the bad block marker from a chip with
2843         * ECC disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
2844         * and block_markbad helpers until we permanently switch to using
2845         * MTD_OPS_RAW for all drivers (with the help of badblockbits)
2846         */
2847        chip->legacy.block_bad          = qcom_nandc_block_bad;
2848        chip->legacy.block_markbad      = qcom_nandc_block_markbad;
2849
2850        chip->controller = &nandc->controller;
2851        chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
2852                         NAND_SKIP_BBTSCAN;
2853
2854        /* initial status: ready and not write-protected (WP bit set) */
2855        host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2856
2857        ret = nand_scan(chip, 1);
2858        if (ret)
2859                return ret;
2860
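            /*
             * now that nand_scan() knows the real page geometry, replace the
             * minimal single-codeword BAM transaction allocated at probe time
             * with one sized for this chip's codewords per page
             */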
2861        if (nandc->props->is_bam) {
2862                free_bam_transaction(nandc);
2863                nandc->bam_txn = alloc_bam_transaction(nandc);
2864                if (!nandc->bam_txn) {
2865                        dev_err(nandc->dev,
2866                                "failed to allocate bam transaction\n");
2867                        return -ENOMEM;
2868                }
2869        }
2870
2871        ret = mtd_device_register(mtd, NULL, 0);
2872        if (ret)
2873                nand_cleanup(chip);
2874
2875        return ret;
2876}
2877
2878static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2879{
2880        struct device *dev = nandc->dev;
2881        struct device_node *dn = dev->of_node, *child;
2882        struct qcom_nand_host *host;
2883        int ret;
2884
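            /* each available child node describes one chip (its "reg" is the chip-select) */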
2885        for_each_available_child_of_node(dn, child) {
2886                host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2887                if (!host) {
2888                        of_node_put(child);
2889                        return -ENOMEM;
2890                }
2891
2892                ret = qcom_nand_host_init_and_register(nandc, host, child);
2893                if (ret) {
2894                        devm_kfree(dev, host);
2895                        continue;
2896                }
2897
2898                list_add_tail(&host->node, &nandc->host_list);
2899        }
2900
2901        if (list_empty(&nandc->host_list))
2902                return -ENODEV;
2903
2904        return 0;
2905}
2906
2907/* parse custom DT properties here */
2908static int qcom_nandc_parse_dt(struct platform_device *pdev)
2909{
2910        struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2911        struct device_node *np = nandc->dev->of_node;
2912        int ret;
2913
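            /*
             * only the ADM DMA engine uses CRCI flow-control IDs; BAM-based
             * controllers don't need them
             */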
2914        if (!nandc->props->is_bam) {
2915                ret = of_property_read_u32(np, "qcom,cmd-crci",
2916                                           &nandc->cmd_crci);
2917                if (ret) {
2918                        dev_err(nandc->dev, "command CRCI unspecified\n");
2919                        return ret;
2920                }
2921
2922                ret = of_property_read_u32(np, "qcom,data-crci",
2923                                           &nandc->data_crci);
2924                if (ret) {
2925                        dev_err(nandc->dev, "data CRCI unspecified\n");
2926                        return ret;
2927                }
2928        }
2929
2930        return 0;
2931}
2932
2933static int qcom_nandc_probe(struct platform_device *pdev)
2934{
2935        struct qcom_nand_controller *nandc;
2936        const void *dev_data;
2937        struct device *dev = &pdev->dev;
2938        struct resource *res;
2939        int ret;
2940
2941        nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
2942        if (!nandc)
2943                return -ENOMEM;
2944
2945        platform_set_drvdata(pdev, nandc);
2946        nandc->dev = dev;
2947
2948        dev_data = of_device_get_match_data(dev);
2949        if (!dev_data) {
2950                dev_err(&pdev->dev, "failed to get device data\n");
2951                return -ENODEV;
2952        }
2953
2954        nandc->props = dev_data;
2955
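            /* the controller needs its core clock and the always-on (aon) clock */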
2956        nandc->core_clk = devm_clk_get(dev, "core");
2957        if (IS_ERR(nandc->core_clk))
2958                return PTR_ERR(nandc->core_clk);
2959
2960        nandc->aon_clk = devm_clk_get(dev, "aon");
2961        if (IS_ERR(nandc->aon_clk))
2962                return PTR_ERR(nandc->aon_clk);
2963
2964        ret = qcom_nandc_parse_dt(pdev);
2965        if (ret)
2966                return ret;
2967
2968        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2969        nandc->base = devm_ioremap_resource(dev, res);
2970        if (IS_ERR(nandc->base))
2971                return PTR_ERR(nandc->base);
2972
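            /*
             * keep both the physical and DMA addresses of the register block;
             * in BAM mode, register accesses are themselves performed via DMA
             * descriptors that target these addresses
             */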
2973        nandc->base_phys = res->start;
2974        nandc->base_dma = dma_map_resource(dev, res->start,
2975                                           resource_size(res),
2976                                           DMA_BIDIRECTIONAL, 0);
2977        if (dma_mapping_error(dev, nandc->base_dma))
2978                return -ENXIO;
2979
2980        ret = qcom_nandc_alloc(nandc);
2981        if (ret)
2982                goto err_nandc_alloc;
2983
2984        ret = clk_prepare_enable(nandc->core_clk);
2985        if (ret)
2986                goto err_core_clk;
2987
2988        ret = clk_prepare_enable(nandc->aon_clk);
2989        if (ret)
2990                goto err_aon_clk;
2991
2992        ret = qcom_nandc_setup(nandc);
2993        if (ret)
2994                goto err_setup;
2995
2996        ret = qcom_probe_nand_devices(nandc);
2997        if (ret)
2998                goto err_setup;
2999
3000        return 0;
3001
3002err_setup:
3003        clk_disable_unprepare(nandc->aon_clk);
3004err_aon_clk:
3005        clk_disable_unprepare(nandc->core_clk);
3006err_core_clk:
3007        qcom_nandc_unalloc(nandc);
3008err_nandc_alloc:
3009        dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
3010                           DMA_BIDIRECTIONAL, 0);
3011
3012        return ret;
3013}
3014
3015static int qcom_nandc_remove(struct platform_device *pdev)
3016{
3017        struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
3018        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3019        struct qcom_nand_host *host;
3020        struct nand_chip *chip;
3021        int ret;
3022
3023        list_for_each_entry(host, &nandc->host_list, node) {
3024                chip = &host->chip;
3025                ret = mtd_device_unregister(nand_to_mtd(chip));
3026                WARN_ON(ret);
3027                nand_cleanup(chip);
3028        }
3029
3030        qcom_nandc_unalloc(nandc);
3031
3032        clk_disable_unprepare(nandc->aon_clk);
3033        clk_disable_unprepare(nandc->core_clk);
3034
3035        dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
3036                           DMA_BIDIRECTIONAL, 0);
3037
3038        return 0;
3039}
3040
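    /* EBI2 NANDc on IPQ806x: ADM DMA, Reed-Solomon or BCH ECC */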
3041static const struct qcom_nandc_props ipq806x_nandc_props = {
3042        .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
3043        .is_bam = false,
3044        .dev_cmd_reg_start = 0x0,
3045};
3046
3047static const struct qcom_nandc_props ipq4019_nandc_props = {
3048        .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3049        .is_bam = true,
3050        .is_qpic = true,
3051        .dev_cmd_reg_start = 0x0,
3052};
3053
3054static const struct qcom_nandc_props ipq8074_nandc_props = {
3055        .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3056        .is_bam = true,
3057        .is_qpic = true,
3058        .dev_cmd_reg_start = 0x7000,
3059};
3060
3061/*
3062 * each compatible entry points at a qcom_nandc_props describing how that
3063 * controller variant differs (DMA engine, ECC modes, register layout)
3064 */
3065static const struct of_device_id qcom_nandc_of_match[] = {
3066        {
3067                .compatible = "qcom,ipq806x-nand",
3068                .data = &ipq806x_nandc_props,
3069        },
3070        {
3071                .compatible = "qcom,ipq4019-nand",
3072                .data = &ipq4019_nandc_props,
3073        },
3074        {
3075                .compatible = "qcom,ipq8074-nand",
3076                .data = &ipq8074_nandc_props,
3077        },
3078        {}
3079};
3080MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
3081
3082static struct platform_driver qcom_nandc_driver = {
3083        .driver = {
3084                .name = "qcom-nandc",
3085                .of_match_table = qcom_nandc_of_match,
3086        },
3087        .probe   = qcom_nandc_probe,
3088        .remove  = qcom_nandc_remove,
3089};
3090module_platform_driver(qcom_nandc_driver);
3091
3092MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
3093MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
3094MODULE_LICENSE("GPL v2");
3095