linux/drivers/mtd/nand/raw/qcom_nandc.c
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */

/* NANDc reg offsets */
#define NAND_FLASH_CMD                  0x00
#define NAND_ADDR0                      0x04
#define NAND_ADDR1                      0x08
#define NAND_FLASH_CHIP_SELECT          0x0c
#define NAND_EXEC_CMD                   0x10
#define NAND_FLASH_STATUS               0x14
#define NAND_BUFFER_STATUS              0x18
#define NAND_DEV0_CFG0                  0x20
#define NAND_DEV0_CFG1                  0x24
#define NAND_DEV0_ECC_CFG               0x28
#define NAND_DEV1_ECC_CFG               0x2c
#define NAND_DEV1_CFG0                  0x30
#define NAND_DEV1_CFG1                  0x34
#define NAND_READ_ID                    0x40
#define NAND_READ_STATUS                0x44
#define NAND_DEV_CMD0                   0xa0
#define NAND_DEV_CMD1                   0xa4
#define NAND_DEV_CMD2                   0xa8
#define NAND_DEV_CMD_VLD                0xac
#define SFLASHC_BURST_CFG               0xe0
#define NAND_ERASED_CW_DETECT_CFG       0xe8
#define NAND_ERASED_CW_DETECT_STATUS    0xec
#define NAND_EBI2_ECC_BUF_CFG           0xf0
#define FLASH_BUF_ACC                   0x100

#define NAND_CTRL                       0xf00
#define NAND_VERSION                    0xf08
#define NAND_READ_LOCATION_0            0xf20
#define NAND_READ_LOCATION_1            0xf24
#define NAND_READ_LOCATION_2            0xf28
#define NAND_READ_LOCATION_3            0xf2c

/* dummy register offsets, used by write_reg_dma */
#define NAND_DEV_CMD1_RESTORE           0xdead
#define NAND_DEV_CMD_VLD_RESTORE        0xbeef

/* NAND_FLASH_CMD bits */
#define PAGE_ACC                        BIT(4)
#define LAST_PAGE                       BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define NAND_DEV_SEL                    0
#define DM_EN                           BIT(2)

/* NAND_FLASH_STATUS bits */
#define FS_OP_ERR                       BIT(4)
#define FS_READY_BSY_N                  BIT(5)
#define FS_MPU_ERR                      BIT(8)
#define FS_DEVICE_STS_ERR               BIT(16)
#define FS_DEVICE_WP                    BIT(23)

/* NAND_BUFFER_STATUS bits */
#define BS_UNCORRECTABLE_BIT            BIT(8)
#define BS_CORRECTABLE_ERR_MSK          0x1f

/* NAND_DEVn_CFG0 bits */
#define DISABLE_STATUS_AFTER_WRITE      4
#define CW_PER_PAGE                     6
#define UD_SIZE_BYTES                   9
#define ECC_PARITY_SIZE_BYTES_RS        19
#define SPARE_SIZE_BYTES                23
#define NUM_ADDR_CYCLES                 27
#define STATUS_BFR_READ                 30
#define SET_RD_MODE_AFTER_STATUS        31
/* NAND_DEVn_CFG1 bits */
#define DEV0_CFG1_ECC_DISABLE           0
#define WIDE_FLASH                      1
#define NAND_RECOVERY_CYCLES            2
#define CS_ACTIVE_BSY                   5
#define BAD_BLOCK_BYTE_NUM              6
#define BAD_BLOCK_IN_SPARE_AREA         16
#define WR_RD_BSY_GAP                   17
#define ENABLE_BCH_ECC                  27

/* NAND_DEV0_ECC_CFG bits */
#define ECC_CFG_ECC_DISABLE             0
#define ECC_SW_RESET                    1
#define ECC_MODE                        4
#define ECC_PARITY_SIZE_BYTES_BCH       8
#define ECC_NUM_DATA_BYTES              16
#define ECC_FORCE_CLK_OPEN              30

/* NAND_DEV_CMD1 bits */
#define READ_ADDR                       0

/* NAND_DEV_CMD_VLD bits */
#define READ_START_VLD                  BIT(0)
#define READ_STOP_VLD                   BIT(1)
#define WRITE_START_VLD                 BIT(2)
#define ERASE_START_VLD                 BIT(3)
#define SEQ_READ_START_VLD              BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define NUM_STEPS                       0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define ERASED_CW_ECC_MASK              1
#define AUTO_DETECT_RES                 0
#define MASK_ECC                        (1 << ERASED_CW_ECC_MASK)
#define RESET_ERASED_DET                (1 << AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define PAGE_ALL_ERASED                 BIT(7)
#define CODEWORD_ALL_ERASED             BIT(6)
#define PAGE_ERASED                     BIT(5)
#define CODEWORD_ERASED                 BIT(4)
#define ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
#define ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)

/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET            0
#define READ_LOCATION_SIZE              16
#define READ_LOCATION_LAST              31

/* Version Mask */
#define NAND_VERSION_MAJOR_MASK         0xf0000000
#define NAND_VERSION_MAJOR_SHIFT        28
#define NAND_VERSION_MINOR_MASK         0x0fff0000
#define NAND_VERSION_MINOR_SHIFT        16

/* NAND OP_CMDs */
#define PAGE_READ                       0x2
#define PAGE_READ_WITH_ECC              0x3
#define PAGE_READ_WITH_ECC_SPARE        0x4
#define PROGRAM_PAGE                    0x6
#define PAGE_PROGRAM_WITH_ECC           0x7
#define PROGRAM_PAGE_SPARE              0x9
#define BLOCK_ERASE                     0xa
#define FETCH_ID                        0xb
#define RESET_DEVICE                    0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL            (READ_START_VLD | WRITE_START_VLD | \
                                         ERASE_START_VLD | SEQ_READ_START_VLD)

/* NAND_CTRL bits */
#define BAM_MODE_EN                     BIT(0)
/*
 * the NAND controller performs reads/writes with ECC in 516-byte chunks.
 * the driver calls the chunks 'step' or 'codeword' interchangeably
 */
#define NANDC_STEP_SIZE                 512

/*
 * the largest page size we support is 8K, which will have 16
 * steps/codewords of 512 bytes each
 */
#define MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define MAX_REG_RD                      (3 * MAX_NUM_STEPS)

/* ECC modes supported by the controller */
#define ECC_NONE        BIT(0)
#define ECC_RS_4BIT     BIT(1)
#define ECC_BCH_4BIT    BIT(2)
#define ECC_BCH_8BIT    BIT(3)

#define nandc_set_read_loc(nandc, reg, offset, size, is_last)   \
nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,                  \
              ((offset) << READ_LOCATION_OFFSET) |              \
              ((size) << READ_LOCATION_SIZE) |                  \
              ((is_last) << READ_LOCATION_LAST))
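
/*
 * For example, nandc_set_read_loc(nandc, 0, 0, 516, 1) sets up
 * NAND_READ_LOCATION_0 as offset 0, size 516, with the "last" bit set:
 * (0 << READ_LOCATION_OFFSET) | (516 << READ_LOCATION_SIZE) |
 * (1 << READ_LOCATION_LAST) == 0x82040000.
 */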

/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
        ((chip)->reg_read_dma + \
        ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
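
/*
 * i.e. the DMA address of a register-read slot is its byte offset within
 * reg_read_buf added to the mapped base reg_read_dma; the second slot,
 * for example, maps to reg_read_dma + sizeof(__le32).
 */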

#define QPIC_PER_CW_CMD_ELEMENTS        32
#define QPIC_PER_CW_CMD_SGL             32
#define QPIC_PER_CW_DATA_SGL            8

#define QPIC_NAND_COMPLETION_TIMEOUT    msecs_to_jiffies(2000)

/*
 * Flags used in DMA descriptor preparation helper functions
 * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
 */
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT                 BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD                    BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL               BIT(2)
/*
 * Erased codeword status is used twice in a single transfer, so this
 * flag determines the current value of the erased codeword status register
 */
#define NAND_ERASED_CW_SET              BIT(4)

/*
 * This data type corresponds to the BAM transaction which will be used for all
 * NAND transfers.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for the next sgl
 * @bam_ce_start - the index in bam_ce which marks the first command element
 *                 of the current sgl. It is used for the size calculation
 *                 of the current sgl
 * @cmd_sgl_pos - current index in command sgl.
 * @cmd_sgl_start - start index in command sgl.
 * @tx_sgl_pos - current index in data sgl for tx.
 * @tx_sgl_start - start index in data sgl for tx.
 * @rx_sgl_pos - current index in data sgl for rx.
 * @rx_sgl_start - start index in data sgl for rx.
 * @wait_second_completion - wait for second DMA desc completion before making
 *                           the NAND transfer completion.
 * @txn_done - completion for NAND transfer.
 * @last_data_desc - last DMA desc in data channel (tx/rx).
 * @last_cmd_desc - last DMA desc in command channel.
 */
struct bam_transaction {
        struct bam_cmd_element *bam_ce;
        struct scatterlist *cmd_sgl;
        struct scatterlist *data_sgl;
        u32 bam_ce_pos;
        u32 bam_ce_start;
        u32 cmd_sgl_pos;
        u32 cmd_sgl_start;
        u32 tx_sgl_pos;
        u32 tx_sgl_start;
        u32 rx_sgl_pos;
        u32 rx_sgl_start;
        bool wait_second_completion;
        struct completion txn_done;
        struct dma_async_tx_descriptor *last_data_desc;
        struct dma_async_tx_descriptor *last_cmd_desc;
};

/*
 * This data type corresponds to the NAND DMA descriptor
 * @node - list node used to add the desc to the controller's desc_list
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
 *            ADM
 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
 * @dma_desc - low level DMA engine descriptor
 */
struct desc_info {
        struct list_head node;

        enum dma_data_direction dir;
        union {
                struct scatterlist adm_sgl;
                struct {
                        struct scatterlist *bam_sgl;
                        int sgl_cnt;
                };
        };
        struct dma_async_tx_descriptor *dma_desc;
};

/*
 * holds the current register values that we want to write. acts as a contiguous
 * chunk of memory which we use to write the controller registers through DMA.
 */
struct nandc_regs {
        __le32 cmd;
        __le32 addr0;
        __le32 addr1;
        __le32 chip_sel;
        __le32 exec;

        __le32 cfg0;
        __le32 cfg1;
        __le32 ecc_bch_cfg;

        __le32 clrflashstatus;
        __le32 clrreadstatus;

        __le32 cmd1;
        __le32 vld;

        __le32 orig_cmd1;
        __le32 orig_vld;

        __le32 ecc_buf_cfg;
        __le32 read_location0;
        __le32 read_location1;
        __le32 read_location2;
        __le32 read_location3;

        __le32 erased_cw_detect_cfg_clr;
        __le32 erased_cw_detect_cfg_set;
};

/*
 * NAND controller data struct
 *
 * @controller:                 base controller structure
 * @host_list:                  list containing all the chips attached to the
 *                              controller
 * @dev:                        parent device
 * @base:                       MMIO base
 * @base_phys:                  physical base address of controller registers
 * @base_dma:                   dma base address of controller registers
 * @core_clk:                   controller clock
 * @aon_clk:                    another controller clock
 *
 * @chan:                       dma channel
 * @cmd_crci:                   ADM DMA CRCI for command flow control
 * @data_crci:                  ADM DMA CRCI for data flow control
 * @desc_list:                  DMA descriptor list (list of desc_infos)
 *
 * @data_buffer:                our local DMA buffer for page read/writes,
 *                              used when we can't use the buffer provided
 *                              by upper layers directly
 * @buf_size/count/start:       markers for chip->read_buf/write_buf functions
 * @reg_read_buf:               local buffer for reading back registers via DMA
 * @reg_read_dma:               contains dma address for register read buffer
 * @reg_read_pos:               marker for data read in reg_read_buf
 *
 * @regs:                       a contiguous chunk of memory for DMA register
 *                              writes. contains the register values to be
 *                              written to controller
 * @cmd1/vld:                   some fixed controller register values
 * @props:                      properties of current NAND controller,
 *                              initialized via DT match data
 * @max_cwperpage:              maximum QPIC codewords required. calculated
 *                              from the page sizes of all connected NAND
 *                              devices
 */
struct qcom_nand_controller {
        struct nand_controller controller;
        struct list_head host_list;

        struct device *dev;

        void __iomem *base;
        phys_addr_t base_phys;
        dma_addr_t base_dma;

        struct clk *core_clk;
        struct clk *aon_clk;

        union {
                /* will be used only by QPIC for BAM DMA */
                struct {
                        struct dma_chan *tx_chan;
                        struct dma_chan *rx_chan;
                        struct dma_chan *cmd_chan;
                };

                /* will be used only by EBI2 for ADM DMA */
                struct {
                        struct dma_chan *chan;
                        unsigned int cmd_crci;
                        unsigned int data_crci;
                };
        };

        struct list_head desc_list;
        struct bam_transaction *bam_txn;

        u8              *data_buffer;
        int             buf_size;
        int             buf_count;
        int             buf_start;
        unsigned int    max_cwperpage;

        __le32 *reg_read_buf;
        dma_addr_t reg_read_dma;
        int reg_read_pos;

        struct nandc_regs *regs;

        u32 cmd1, vld;
        const struct qcom_nandc_props *props;
};

/*
 * NAND chip structure
 *
 * @chip:                       base NAND chip structure
 * @node:                       list node to add itself to host_list in
 *                              qcom_nand_controller
 *
 * @cs:                         chip select value for this chip
 * @cw_size:                    the number of bytes in a single step/codeword
 *                              of a page, consisting of all data, ecc, spare
 *                              and reserved bytes
 * @cw_data:                    the number of bytes within a codeword protected
 *                              by ECC
 * @use_ecc:                    request the controller to use ECC for the
 *                              upcoming read/write
 * @bch_enabled:                flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:               ECC bytes used by controller hardware for this
 *                              chip
 * @status:                     value to be returned if NAND_CMD_STATUS command
 *                              is executed
 * @last_command:               keeps track of last command on this chip. used
 *                              for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..:     NANDc register configurations needed for
 *                              ecc/non-ecc mode for the current nand flash
 *                              device
 */
struct qcom_nand_host {
        struct nand_chip chip;
        struct list_head node;

        int cs;
        int cw_size;
        int cw_data;
        bool use_ecc;
        bool bch_enabled;
        int ecc_bytes_hw;
        int spare_bytes;
        int bbm_size;
        u8 status;
        int last_command;

        u32 cfg0, cfg1;
        u32 cfg0_raw, cfg1_raw;
        u32 ecc_buf_cfg;
        u32 ecc_bch_cfg;
        u32 clrflashstatus;
        u32 clrreadstatus;
};

/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @is_bam - whether NAND controller is using BAM
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 */
struct qcom_nandc_props {
        u32 ecc_modes;
        bool is_bam;
        u32 dev_cmd_reg_start;
};

/* Frees the BAM transaction memory */
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
        struct bam_transaction *bam_txn = nandc->bam_txn;

        devm_kfree(nandc->dev, bam_txn);
}

/* Allocates and Initializes the BAM transaction */
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
        struct bam_transaction *bam_txn;
        size_t bam_txn_size;
        unsigned int num_cw = nandc->max_cwperpage;
        void *bam_txn_buf;

        bam_txn_size =
                sizeof(*bam_txn) + num_cw *
                ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
                (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
                (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

        bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
        if (!bam_txn_buf)
                return NULL;

        bam_txn = bam_txn_buf;
        bam_txn_buf += sizeof(*bam_txn);

        bam_txn->bam_ce = bam_txn_buf;
        bam_txn_buf +=
                sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

        bam_txn->cmd_sgl = bam_txn_buf;
        bam_txn_buf +=
                sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

        bam_txn->data_sgl = bam_txn_buf;

        init_completion(&bam_txn->txn_done);

        return bam_txn;
}
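
/*
 * The entire transaction lives in the single allocation made above,
 * laid out as:
 *
 *   [struct bam_transaction]
 *   [bam_ce:   num_cw * QPIC_PER_CW_CMD_ELEMENTS command elements]
 *   [cmd_sgl:  num_cw * QPIC_PER_CW_CMD_SGL scatterlists]
 *   [data_sgl: num_cw * QPIC_PER_CW_DATA_SGL scatterlists]
 *
 * which is why free_bam_transaction() needs only a single devm_kfree().
 */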

/* Clears the BAM transaction indexes */
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
        struct bam_transaction *bam_txn = nandc->bam_txn;

        if (!nandc->props->is_bam)
                return;

        bam_txn->bam_ce_pos = 0;
        bam_txn->bam_ce_start = 0;
        bam_txn->cmd_sgl_pos = 0;
        bam_txn->cmd_sgl_start = 0;
        bam_txn->tx_sgl_pos = 0;
        bam_txn->tx_sgl_start = 0;
        bam_txn->rx_sgl_pos = 0;
        bam_txn->rx_sgl_start = 0;
        bam_txn->last_data_desc = NULL;
        bam_txn->wait_second_completion = false;

        sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
                      QPIC_PER_CW_CMD_SGL);
        sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
                      QPIC_PER_CW_DATA_SGL);

        reinit_completion(&bam_txn->txn_done);
}

/* Callback for DMA descriptor completion */
static void qpic_bam_dma_done(void *data)
{
        struct bam_transaction *bam_txn = data;

        /*
         * In case of data transfer with NAND, 2 callbacks will be generated:
         * one for the command channel and another for the data channel.
         * If the current transaction has data descriptors
         * (i.e. wait_second_completion is true), then set it to false
         * and wait for the second DMA descriptor completion.
         */
        if (bam_txn->wait_second_completion)
                bam_txn->wait_second_completion = false;
        else
                complete(&bam_txn->txn_done);
}

static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
        return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
        return container_of(chip->controller, struct qcom_nand_controller,
                            controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
        return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
                               u32 val)
{
        iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
                                          bool is_cpu)
{
        if (!nandc->props->is_bam)
                return;

        if (is_cpu)
                dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
                                        MAX_REG_RD *
                                        sizeof(*nandc->reg_read_buf),
                                        DMA_FROM_DEVICE);
        else
                dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
                                           MAX_REG_RD *
                                           sizeof(*nandc->reg_read_buf),
                                           DMA_FROM_DEVICE);
}

static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
        switch (offset) {
        case NAND_FLASH_CMD:
                return &regs->cmd;
        case NAND_ADDR0:
                return &regs->addr0;
        case NAND_ADDR1:
                return &regs->addr1;
        case NAND_FLASH_CHIP_SELECT:
                return &regs->chip_sel;
        case NAND_EXEC_CMD:
                return &regs->exec;
        case NAND_FLASH_STATUS:
                return &regs->clrflashstatus;
        case NAND_DEV0_CFG0:
                return &regs->cfg0;
        case NAND_DEV0_CFG1:
                return &regs->cfg1;
        case NAND_DEV0_ECC_CFG:
                return &regs->ecc_bch_cfg;
        case NAND_READ_STATUS:
                return &regs->clrreadstatus;
        case NAND_DEV_CMD1:
                return &regs->cmd1;
        case NAND_DEV_CMD1_RESTORE:
                return &regs->orig_cmd1;
        case NAND_DEV_CMD_VLD:
                return &regs->vld;
        case NAND_DEV_CMD_VLD_RESTORE:
                return &regs->orig_vld;
        case NAND_EBI2_ECC_BUF_CFG:
                return &regs->ecc_buf_cfg;
        case NAND_READ_LOCATION_0:
                return &regs->read_location0;
        case NAND_READ_LOCATION_1:
                return &regs->read_location1;
        case NAND_READ_LOCATION_2:
                return &regs->read_location2;
        case NAND_READ_LOCATION_3:
                return &regs->read_location3;
        default:
                return NULL;
        }
}

static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
                          u32 val)
{
        struct nandc_regs *regs = nandc->regs;
        __le32 *reg;

        reg = offset_to_nandc_reg(regs, offset);

        if (reg)
                *reg = cpu_to_le32(val);
}

/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        if (chip->options & NAND_BUSWIDTH_16)
                column >>= 1;

        nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
        nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}
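
/*
 * Per the two writes above, ADDR0[15:0] carries the column (halved first
 * on a 16-bit wide bus), ADDR0[31:16] the low 16 bits of the page, and
 * ADDR1[7:0] the remaining page bits; e.g. page 0x12345 with column 0
 * yields ADDR0 = 0x23450000 and ADDR1 = 0x01.
 */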

/*
 * update_rw_regs:      set up read/write register values; these will be
 *                      written to the NAND controller registers via DMA
 *
 * @num_cw:             number of steps for the read/write operation
 * @read:               read or write operation
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
        u32 cmd, cfg0, cfg1, ecc_bch_cfg;

        if (read) {
                if (host->use_ecc)
                        cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
                else
                        cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
        } else {
                cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
        }

        if (host->use_ecc) {
                cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
                                (num_cw - 1) << CW_PER_PAGE;

                cfg1 = host->cfg1;
                ecc_bch_cfg = host->ecc_bch_cfg;
        } else {
                cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
                                (num_cw - 1) << CW_PER_PAGE;

                cfg1 = host->cfg1_raw;
                ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
        }

        nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
        nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
        nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
        nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
        nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
        nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
        nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

        if (read)
                nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
                                   host->cw_data : host->cw_size, 1);
}
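
/*
 * Note that CW_PER_PAGE is a count-minus-one field: e.g. a 2K page read
 * as four 512-byte codewords programs (num_cw - 1) = 3 into the 3-bit
 * field at bit position CW_PER_PAGE of CFG0.
 */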

/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to DMA engine.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
                                  struct dma_chan *chan,
                                  unsigned long flags)
{
        struct desc_info *desc;
        struct scatterlist *sgl;
        unsigned int sgl_cnt;
        int ret;
        struct bam_transaction *bam_txn = nandc->bam_txn;
        enum dma_transfer_direction dir_eng;
        struct dma_async_tx_descriptor *dma_desc;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        if (chan == nandc->cmd_chan) {
                sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
                sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
                bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
                dir_eng = DMA_MEM_TO_DEV;
                desc->dir = DMA_TO_DEVICE;
        } else if (chan == nandc->tx_chan) {
                sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
                sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
                bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
                dir_eng = DMA_MEM_TO_DEV;
                desc->dir = DMA_TO_DEVICE;
        } else {
                sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
                sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
                bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
                dir_eng = DMA_DEV_TO_MEM;
                desc->dir = DMA_FROM_DEVICE;
        }

        sg_mark_end(sgl + sgl_cnt - 1);
        ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
        if (ret == 0) {
                dev_err(nandc->dev, "failure in mapping desc\n");
                kfree(desc);
                return -ENOMEM;
        }

        desc->sgl_cnt = sgl_cnt;
        desc->bam_sgl = sgl;

        dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
                                           flags);

        if (!dma_desc) {
                dev_err(nandc->dev, "failure in prep desc\n");
                dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
                kfree(desc);
                return -EINVAL;
        }

        desc->dma_desc = dma_desc;

        /* update last data/command descriptor */
        if (chan == nandc->cmd_chan)
                bam_txn->last_cmd_desc = dma_desc;
        else
                bam_txn->last_data_desc = dma_desc;

        list_add_tail(&desc->node, &nandc->desc_list);

        return 0;
}
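
/*
 * Each call above consumes the sgl window [*_sgl_start, *_sgl_pos) that
 * the prep_bam_dma_desc_* helpers filled since the previous call, then
 * advances *_sgl_start so the next descriptor begins on a fresh window.
 */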

/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed in command element type, so this function uses the command
 * elements from the bam transaction ce array and fills them with the
 * required data. A single SGL can contain multiple command elements, so
 * NAND_BAM_NEXT_SGL is used to start a separate SGL after the current
 * command element.
 */
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
                                 int reg_off, const void *vaddr,
                                 int size, unsigned int flags)
{
        int bam_ce_size;
        int i, ret;
        struct bam_cmd_element *bam_ce_buffer;
        struct bam_transaction *bam_txn = nandc->bam_txn;

        bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

        /* fill the command desc */
        for (i = 0; i < size; i++) {
                if (read)
                        bam_prep_ce(&bam_ce_buffer[i],
                                    nandc_reg_phys(nandc, reg_off + 4 * i),
                                    BAM_READ_COMMAND,
                                    reg_buf_dma_addr(nandc,
                                                     (__le32 *)vaddr + i));
                else
                        bam_prep_ce_le32(&bam_ce_buffer[i],
                                         nandc_reg_phys(nandc, reg_off + 4 * i),
                                         BAM_WRITE_COMMAND,
                                         *((__le32 *)vaddr + i));
        }

        bam_txn->bam_ce_pos += size;

        /* use the separate sgl after this command */
        if (flags & NAND_BAM_NEXT_SGL) {
                bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
                bam_ce_size = (bam_txn->bam_ce_pos -
                                bam_txn->bam_ce_start) *
                                sizeof(struct bam_cmd_element);
                sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
                           bam_ce_buffer, bam_ce_size);
                bam_txn->cmd_sgl_pos++;
                bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

                if (flags & NAND_BAM_NWD) {
                        ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
                                                     DMA_PREP_FENCE |
                                                     DMA_PREP_CMD);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
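
/*
 * For instance, write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL)
 * lands here with read == false, appends a single write command element
 * for NAND_FLASH_CMD and, due to NAND_BAM_NEXT_SGL, closes off the
 * current command sgl so the next register group starts a new one.
 */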

/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
                                  const void *vaddr,
                                  int size, unsigned int flags)
{
        int ret;
        struct bam_transaction *bam_txn = nandc->bam_txn;

        if (read) {
                sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
                           vaddr, size);
                bam_txn->rx_sgl_pos++;
        } else {
                sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
                           vaddr, size);
                bam_txn->tx_sgl_pos++;

                /*
                 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
                 * is not set, form the DMA descriptor
                 */
                if (!(flags & NAND_BAM_NO_EOT)) {
                        ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
                                                     DMA_PREP_INTERRUPT);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
                             int reg_off, const void *vaddr, int size,
                             bool flow_control)
{
        struct desc_info *desc;
        struct dma_async_tx_descriptor *dma_desc;
        struct scatterlist *sgl;
        struct dma_slave_config slave_conf;
        enum dma_transfer_direction dir_eng;
        int ret;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        sgl = &desc->adm_sgl;

        sg_init_one(sgl, vaddr, size);

        if (read) {
                dir_eng = DMA_DEV_TO_MEM;
                desc->dir = DMA_FROM_DEVICE;
        } else {
                dir_eng = DMA_MEM_TO_DEV;
                desc->dir = DMA_TO_DEVICE;
        }

        ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
        if (ret == 0) {
                ret = -ENOMEM;
                goto err;
        }

        memset(&slave_conf, 0x00, sizeof(slave_conf));

        slave_conf.device_fc = flow_control;
        if (read) {
                slave_conf.src_maxburst = 16;
                slave_conf.src_addr = nandc->base_dma + reg_off;
                slave_conf.slave_id = nandc->data_crci;
        } else {
                slave_conf.dst_maxburst = 16;
                slave_conf.dst_addr = nandc->base_dma + reg_off;
                slave_conf.slave_id = nandc->cmd_crci;
        }

        ret = dmaengine_slave_config(nandc->chan, &slave_conf);
        if (ret) {
                dev_err(nandc->dev, "failed to configure dma channel\n");
                goto err;
        }

        dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
        if (!dma_desc) {
                dev_err(nandc->dev, "failed to prepare desc\n");
                ret = -EINVAL;
                goto err;
        }

        desc->dma_desc = dma_desc;

        list_add_tail(&desc->node, &nandc->desc_list);

        return 0;
err:
        kfree(desc);

        return ret;
}

/*
 * read_reg_dma:        prepares a descriptor to read a given number of
 *                      contiguous registers to the reg_read_buf pointer
 *
 * @first:              offset of the first register in the contiguous block
 * @num_regs:           number of registers to read
 * @flags:              flags to control DMA descriptor preparation
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
                        int num_regs, unsigned int flags)
{
        bool flow_control = false;
        void *vaddr;

        vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
        nandc->reg_read_pos += num_regs;

        if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
                first = dev_cmd_reg_addr(nandc, first);

        if (nandc->props->is_bam)
                return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
                                             num_regs, flags);

        if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
                flow_control = true;

        return prep_adm_dma_desc(nandc, true, first, vaddr,
                                 num_regs * sizeof(u32), flow_control);
}

/*
 * write_reg_dma:       prepares a descriptor to write a given number of
 *                      contiguous registers
 *
 * @first:              offset of the first register in the contiguous block
 * @num_regs:           number of registers to write
 * @flags:              flags to control DMA descriptor preparation
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
                         int num_regs, unsigned int flags)
{
        bool flow_control = false;
        struct nandc_regs *regs = nandc->regs;
        void *vaddr;

        vaddr = offset_to_nandc_reg(regs, first);

        if (first == NAND_ERASED_CW_DETECT_CFG) {
                if (flags & NAND_ERASED_CW_SET)
                        vaddr = &regs->erased_cw_detect_cfg_set;
                else
                        vaddr = &regs->erased_cw_detect_cfg_clr;
        }

        if (first == NAND_EXEC_CMD)
                flags |= NAND_BAM_NWD;

        if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
                first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

        if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
                first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

        if (nandc->props->is_bam)
                return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
                                             num_regs, flags);

        if (first == NAND_FLASH_CMD)
                flow_control = true;

        return prep_adm_dma_desc(nandc, false, first, vaddr,
                                 num_regs * sizeof(u32), flow_control);
}

/*
 * read_data_dma:       prepares a DMA descriptor to transfer data from the
 *                      controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:            offset within the controller's data buffer
 * @vaddr:              virtual address of the buffer we want to write to
 * @size:               DMA transaction size in bytes
 * @flags:              flags to control DMA descriptor preparation
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
                         const u8 *vaddr, int size, unsigned int flags)
{
        if (nandc->props->is_bam)
                return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

        return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

/*
 * write_data_dma:      prepares a DMA descriptor to transfer data from
 *                      'vaddr' to the controller's internal buffer
 *
 * @reg_off:            offset within the controller's data buffer
 * @vaddr:              virtual address of the buffer we want to read from
 * @size:               DMA transaction size in bytes
 * @flags:              flags to control DMA descriptor preparation
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
                          const u8 *vaddr, int size, unsigned int flags)
{
        if (nandc->props->is_bam)
                return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

        return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
        write_reg_dma(nandc, NAND_ADDR0, 2, 0);
        write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
        write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
        write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
        write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
                      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
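
/*
 * Note that the 3-register write at NAND_DEV0_CFG0 above covers CFG0,
 * CFG1 and ECC_CFG in one descriptor: they sit at consecutive offsets
 * (0x20/0x24/0x28) and are laid out back to back in struct nandc_regs.
 */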

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in a NAND page.
 */
static void
config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
{
        if (nandc->props->is_bam)
                write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
                              NAND_BAM_NEXT_SGL);

        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        if (use_ecc) {
                read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
                read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
                             NAND_BAM_NEXT_SGL);
        } else {
                read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
        }
}
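
/*
 * In the ECC case, the 2-register read at NAND_FLASH_STATUS fetches both
 * NAND_FLASH_STATUS and the adjacent NAND_BUFFER_STATUS (0x14/0x18),
 * i.e. the per-codeword error flags together with the correctable
 * bit-error count in BS_CORRECTABLE_ERR_MSK.
 */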

/*
 * Helper to prepare DMA descriptors to configure registers needed for
 * reading a single codeword in a page.
 */
static void
config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
                                bool use_ecc)
{
        config_nand_page_read(nandc);
        config_nand_cw_read(nandc, use_ecc);
}

/*
 * Helper to prepare DMA descriptors used to configure registers needed
 * before writing a NAND page.
 */
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
        write_reg_dma(nandc, NAND_ADDR0, 2, 0);
        write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
        write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
                      NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in a NAND page.
 */
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

        write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
        write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
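
/*
 * After each programmed codeword, NAND_FLASH_STATUS is read back for
 * error checking; the two trailing writes then store host->clrflashstatus
 * and host->clrreadstatus (routed to those registers via
 * offset_to_nandc_reg()), presumably to clear the status registers for
 * the next operation.
 */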

/*
 * the following functions are used within chip->cmdfunc() to perform different
 * NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        /*
         * NAND_CMD_PARAM is called before we know much about the FLASH chip
         * in use. we configure the controller to perform a raw read of 512
         * bytes to read onfi params
         */
        nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
        nandc_set_reg(nandc, NAND_ADDR0, 0);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
                                        | 512 << UD_SIZE_BYTES
                                        | 5 << NUM_ADDR_CYCLES
                                        | 0 << SPARE_SIZE_BYTES);
        nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
                                        | 0 << CS_ACTIVE_BSY
                                        | 17 << BAD_BLOCK_BYTE_NUM
                                        | 1 << BAD_BLOCK_IN_SPARE_AREA
                                        | 2 << WR_RD_BSY_GAP
                                        | 0 << WIDE_FLASH
                                        | 1 << DEV0_CFG1_ECC_DISABLE);
        nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

        /* configure CMD1 and VLD for ONFI param probing */
        nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
                      (nandc->vld & ~READ_START_VLD));
        nandc_set_reg(nandc, NAND_DEV_CMD1,
                      (nandc->cmd1 & ~(0xFF << READ_ADDR))
                      | NAND_CMD_PARAM << READ_ADDR);

        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

        nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
        nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
        nandc_set_read_loc(nandc, 0, 0, 512, 1);

        write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
        write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

        nandc->buf_count = 512;
        memset(nandc->data_buffer, 0xff, nandc->buf_count);

        config_nand_single_cw_page_read(nandc, false);

        read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
                      nandc->buf_count, 0);

        /* restore CMD1 and VLD regs */
        write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
        write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

        return 0;
}
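
/*
 * A note on the CMD1/VLD dance above: the READ_ADDR byte of NAND_DEV_CMD1
 * appears to hold the opcode the controller issues for reads, so
 * substituting NAND_CMD_PARAM turns the PAGE_READ into an ONFI "read
 * parameter page" command; the original values are restored afterwards
 * through the *_RESTORE pseudo-registers.
 */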

/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        nandc_set_reg(nandc, NAND_FLASH_CMD,
                      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
        nandc_set_reg(nandc, NAND_ADDR0, page_addr);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_DEV0_CFG0,
                      host->cfg0_raw & ~(7 << CW_PER_PAGE));
        nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
        nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
        nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

        write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

        write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
        write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

        return 0;
}

/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        if (column == -1)
                return 0;

        nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
        nandc_set_reg(nandc, NAND_ADDR0, column);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
                      nandc->props->is_bam ? 0 : DM_EN);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

        write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

        return 0;
}

/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
        write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

        read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

        return 0;
}

/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
        struct desc_info *desc;
        dma_cookie_t cookie = 0;
        struct bam_transaction *bam_txn = nandc->bam_txn;
        int r;

        if (nandc->props->is_bam) {
                if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
                        r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
                        if (r)
                                return r;
                }

                if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
                        r = prepare_bam_async_desc(nandc, nandc->tx_chan,
                                                   DMA_PREP_INTERRUPT);
                        if (r)
                                return r;
                }

                if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
                        r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
                                                   DMA_PREP_CMD);
                        if (r)
                                return r;
                }
        }

        list_for_each_entry(desc, &nandc->desc_list, node)
                cookie = dmaengine_submit(desc->dma_desc);

        if (nandc->props->is_bam) {
                bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
                bam_txn->last_cmd_desc->callback_param = bam_txn;
                if (bam_txn->last_data_desc) {
                        bam_txn->last_data_desc->callback = qpic_bam_dma_done;
                        bam_txn->last_data_desc->callback_param = bam_txn;
                        bam_txn->wait_second_completion = true;
                }

                dma_async_issue_pending(nandc->tx_chan);
                dma_async_issue_pending(nandc->rx_chan);
                dma_async_issue_pending(nandc->cmd_chan);

                if (!wait_for_completion_timeout(&bam_txn->txn_done,
                                                 QPIC_NAND_COMPLETION_TIMEOUT))
                        return -ETIMEDOUT;
        } else {
                if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
                        return -ETIMEDOUT;
        }

        return 0;
}

static void free_descs(struct qcom_nand_controller *nandc)
{
        struct desc_info *desc, *n;

        list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
                list_del(&desc->node);

                if (nandc->props->is_bam)
                        dma_unmap_sg(nandc->dev, desc->bam_sgl,
                                     desc->sgl_cnt, desc->dir);
                else
                        dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
                                     desc->dir);

                kfree(desc);
        }
}

/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
        nandc->reg_read_pos = 0;
        nandc_read_buffer_sync(nandc, false);
}

static void pre_command(struct qcom_nand_host *host, int command)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        nandc->buf_count = 0;
        nandc->buf_start = 0;
        host->use_ecc = false;
        host->last_command = command;

        clear_read_regs(nandc);

        if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
            command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
                clear_bam_transaction(nandc);
}

/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte; this status byte can be read back
 * after NAND_CMD_STATUS is called
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
        struct nand_ecc_ctrl *ecc = &chip->ecc;
        int num_cw;
        int i;

        num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
        nandc_read_buffer_sync(nandc, true);

        for (i = 0; i < num_cw; i++) {
                u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

                if (flash_status & FS_MPU_ERR)
                        host->status &= ~NAND_STATUS_WP;

                if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
                                                 (flash_status &
                                                  FS_DEVICE_STS_ERR)))
                        host->status |= NAND_STATUS_FAIL;
        }
}

static void post_command(struct qcom_nand_host *host, int command)
{
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

        switch (command) {
        case NAND_CMD_READID:
                nandc_read_buffer_sync(nandc, true);
                memcpy(nandc->data_buffer, nandc->reg_read_buf,
                       nandc->buf_count);
                break;
        case NAND_CMD_PAGEPROG:
        case NAND_CMD_ERASE1:
                parse_erase_write_errors(host, command);
                break;
        default:
                break;
        }
}
1437
1438/*
1439 * Implements chip->cmdfunc. It's only used for a limited set of commands.
1440 * The rest of the commands wouldn't be called by upper layers. For example,
1441 * NAND_CMD_READOOB would never be called because we have our own versions
1442 * of read_oob ops for nand_ecc_ctrl.
1443 */
1444static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
1445                               int column, int page_addr)
1446{
1447        struct nand_chip *chip = mtd_to_nand(mtd);
1448        struct qcom_nand_host *host = to_qcom_nand_host(chip);
1449        struct nand_ecc_ctrl *ecc = &chip->ecc;
1450        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1451        bool wait = false;
1452        int ret = 0;
1453
1454        pre_command(host, command);
1455
1456        switch (command) {
1457        case NAND_CMD_RESET:
1458                ret = reset(host);
1459                wait = true;
1460                break;
1461
1462        case NAND_CMD_READID:
1463                nandc->buf_count = 4;
1464                ret = read_id(host, column);
1465                wait = true;
1466                break;
1467
1468        case NAND_CMD_PARAM:
1469                ret = nandc_param(host);
1470                wait = true;
1471                break;
1472
1473        case NAND_CMD_ERASE1:
1474                ret = erase_block(host, page_addr);
1475                wait = true;
1476                break;
1477
1478        case NAND_CMD_READ0:
1479                /* we read the entire page for now */
1480                WARN_ON(column != 0);
1481
1482                host->use_ecc = true;
1483                set_address(host, 0, page_addr);
1484                update_rw_regs(host, ecc->steps, true);
1485                break;
1486
1487        case NAND_CMD_SEQIN:
1488                WARN_ON(column != 0);
1489                set_address(host, 0, page_addr);
1490                break;
1491
1492        case NAND_CMD_PAGEPROG:
1493        case NAND_CMD_STATUS:
1494        case NAND_CMD_NONE:
1495        default:
1496                break;
1497        }
1498
1499        if (ret) {
1500                dev_err(nandc->dev, "failure executing command %d\n",
1501                        command);
1502                free_descs(nandc);
1503                return;
1504        }
1505
1506        if (wait) {
1507                ret = submit_descs(nandc);
1508                if (ret)
1509                        dev_err(nandc->dev,
1510                                "failure submitting descs for command %d\n",
1511                                command);
1512        }
1513
1514        free_descs(nandc);
1515
1516        post_command(host, command);
1517}
1518
1519/*
1520 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
1521 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
1522 *
1523 * when using RS ECC, the HW reports the same errors when reading an erased CW,
1524 * but it notifies that it is an erased CW by placing special characters at
1525 * certain offsets in the buffer.
1526 *
1527 * verify if the page is erased or not, and fix up the page for RS ECC by
1528 * replacing the special characters with 0xff.
1529 */
1530static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
1531{
1532        u8 empty1, empty2;
1533
1534        /*
1535         * an erased page flags an error in NAND_FLASH_STATUS, check if the page
1536         * is erased by looking for 0x54s at offsets 3 and 175 from the
1537         * beginning of each codeword
1538         */
1539
1540        empty1 = data_buf[3];
1541        empty2 = data_buf[175];
1542
1543        /*
1544         * if the erased codeword markers exist, override them with
1545         * 0xffs
1546         */
1547        if ((empty1 == 0x54 && empty2 == 0xff) ||
1548            (empty1 == 0xff && empty2 == 0x54)) {
1549                data_buf[3] = 0xff;
1550                data_buf[175] = 0xff;
1551        }
1552
1553        /*
1554         * check if the entire chunk contains 0xffs or not. if it doesn't, then
1555         * restore the original values at the special offsets
1556         */
1557        if (memchr_inv(data_buf, 0xff, data_len)) {
1558                data_buf[3] = empty1;
1559                data_buf[175] = empty2;
1560
1561                return false;
1562        }
1563
1564        return true;
1565}
1566
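    /*
     * per-codeword status values (FLASH_STATUS, BUFFER_STATUS and
     * ERASED_CW_DETECT_STATUS) read back after a page read
     */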
1567struct read_stats {
1568        __le32 flash;
1569        __le32 buffer;
1570        __le32 erased_cw;
1571};
1572
1573/* reads back FLASH_STATUS register set by the controller */
1574static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
1575{
1576        struct nand_chip *chip = &host->chip;
1577        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1578        int i;
1579
1580        for (i = 0; i < cw_cnt; i++) {
1581                u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
1582
1583                if (flash & (FS_OP_ERR | FS_MPU_ERR))
1584                        return -EIO;
1585        }
1586
1587        return 0;
1588}
1589
1590/* performs raw read for one codeword */
1591static int
1592qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
1593                       u8 *data_buf, u8 *oob_buf, int page, int cw)
1594{
1595        struct qcom_nand_host *host = to_qcom_nand_host(chip);
1596        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1597        struct nand_ecc_ctrl *ecc = &chip->ecc;
1598        int data_size1, data_size2, oob_size1, oob_size2;
1599        int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
1600
1601        nand_read_page_op(chip, page, 0, NULL, 0);
1602        host->use_ecc = false;
1603
1604        clear_bam_transaction(nandc);
1605        set_address(host, host->cw_size * cw, page);
1606        update_rw_regs(host, 1, true);
1607        config_nand_page_read(nandc);
1608
1609        data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1610        oob_size1 = host->bbm_size;
1611
1612        if (cw == (ecc->steps - 1)) {
1613                data_size2 = ecc->size - data_size1 -
1614                             ((ecc->steps - 1) * 4);
1615                oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
1616                            host->spare_bytes;
1617        } else {
1618                data_size2 = host->cw_data - data_size1;
1619                oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1620        }
1621
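            /*
             * with BAM, program the four read locations: the first data
             * chunk, the BBM bytes, the second data chunk and the
             * remaining OOB bytes
             */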
1622        if (nandc->props->is_bam) {
1623                nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
1624                read_loc += data_size1;
1625
1626                nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
1627                read_loc += oob_size1;
1628
1629                nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
1630                read_loc += data_size2;
1631
1632                nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
1633        }
1634
1635        config_nand_cw_read(nandc, false);
1636
1637        read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
1638        reg_off += data_size1;
1639
1640        read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
1641        reg_off += oob_size1;
1642
1643        read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
1644        reg_off += data_size2;
1645
1646        read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
1647
1648        ret = submit_descs(nandc);
1649        free_descs(nandc);
1650        if (ret) {
1651                dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
1652                return ret;
1653        }
1654
1655        return check_flash_errors(host, 1);
1656}
1657
1658/*
1659 * Bitflips can happen in erased codewords too, so this function counts the
1660 * number of 0 bits in each CW for which the ECC engine returned an
1661 * uncorrectable error. The page is assumed to be erased if this count is
1662 * less than or equal to ecc->strength for each CW.
1663 *
1664 * 1. Both DATA and OOB need to be checked for the number of 0 bits. The
1665 *    top-level API can be called with only the data buf or only the OOB
1666 *    buf, so use chip->data_buf if the data buf is NULL and chip->oob_poi
1667 *    if the oob buf is NULL for copying the raw bytes.
1668 * 2. Perform a raw read of every CW that has uncorrectable errors.
1669 * 3. For each CW, check the number of 0 bits in cw_data and the usable
1670 *    OOB bytes. Bitflips in the BBM and spare bytes don't affect the ECC,
1671 *    so don't count the bitflips in those areas.
1672 */
1673static int
1674check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
1675                      u8 *oob_buf, unsigned long uncorrectable_cws,
1676                      int page, unsigned int max_bitflips)
1677{
1678        struct nand_chip *chip = &host->chip;
1679        struct mtd_info *mtd = nand_to_mtd(chip);
1680        struct nand_ecc_ctrl *ecc = &chip->ecc;
1681        u8 *cw_data_buf, *cw_oob_buf;
1682        int cw, data_size, oob_size, ret = 0;
1683
1684        if (!data_buf) {
1685                data_buf = chip->data_buf;
1686                chip->pagebuf = -1;
1687        }
1688
1689        if (!oob_buf) {
1690                oob_buf = chip->oob_poi;
1691                chip->pagebuf = -1;
1692        }
1693
1694        for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
1695                if (cw == (ecc->steps - 1)) {
1696                        data_size = ecc->size - ((ecc->steps - 1) * 4);
1697                        oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
1698                } else {
1699                        data_size = host->cw_data;
1700                        oob_size = host->ecc_bytes_hw;
1701                }
1702
1703                /* determine starting buffer address for current CW */
1704                cw_data_buf = data_buf + (cw * host->cw_data);
1705                cw_oob_buf = oob_buf + (cw * ecc->bytes);
1706
1707                ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
1708                                             cw_oob_buf, page, cw);
1709                if (ret)
1710                        return ret;
1711
1712                /*
1713                 * make sure it isn't an erased page reported
1714                 * as not-erased by HW because of a few bitflips
1715                 */
1716                ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
1717                                                  cw_oob_buf + host->bbm_size,
1718                                                  oob_size, NULL,
1719                                                  0, ecc->strength);
1720                if (ret < 0) {
1721                        mtd->ecc_stats.failed++;
1722                } else {
1723                        mtd->ecc_stats.corrected += ret;
1724                        max_bitflips = max_t(unsigned int, max_bitflips, ret);
1725                }
1726        }
1727
1728        return max_bitflips;
1729}
1730
1731/*
1732 * reads back status registers set by the controller to notify page read
1733 * errors. this is equivalent to what 'ecc->correct()' would do.
1734 */
1735static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
1736                             u8 *oob_buf, int page)
1737{
1738        struct nand_chip *chip = &host->chip;
1739        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1740        struct mtd_info *mtd = nand_to_mtd(chip);
1741        struct nand_ecc_ctrl *ecc = &chip->ecc;
1742        unsigned int max_bitflips = 0, uncorrectable_cws = 0;
1743        struct read_stats *buf;
1744        bool flash_op_err = false, erased;
1745        int i;
1746        u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
1747
1748        buf = (struct read_stats *)nandc->reg_read_buf;
1749        nandc_read_buffer_sync(nandc, true);
1750
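            /* inspect the status tuple of each codeword in the page */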
1751        for (i = 0; i < ecc->steps; i++, buf++) {
1752                u32 flash, buffer, erased_cw;
1753                int data_len, oob_len;
1754
1755                if (i == (ecc->steps - 1)) {
1756                        data_len = ecc->size - ((ecc->steps - 1) << 2);
1757                        oob_len = ecc->steps << 2;
1758                } else {
1759                        data_len = host->cw_data;
1760                        oob_len = 0;
1761                }
1762
1763                flash = le32_to_cpu(buf->flash);
1764                buffer = le32_to_cpu(buf->buffer);
1765                erased_cw = le32_to_cpu(buf->erased_cw);
1766
1767                /*
1768                 * Check ECC failure for each codeword. ECC failure can
1769                 * happen in either of the following conditions
1770                 * 1. If the number of bitflips is greater than the ECC
1771                 *    engine's capability.
1772                 * 2. If this codeword contains all 0xff for which erased
1773                 *    codeword detection check will be done.
1774                 */
1775                if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
1776                        /*
1777                         * For BCH ECC, ignore erased codeword errors, if
1778                         * ERASED_CW bits are set.
1779                         */
1780                        if (host->bch_enabled) {
1781                                erased = (erased_cw & ERASED_CW) == ERASED_CW;
1783                        /*
1784                         * For RS ECC, HW reports the erased CW by placing
1785                         * special characters at certain offsets in the buffer.
1786                         * These special characters will be valid only if
1787                         * complete page is read i.e. data_buf is not NULL.
1788                         */
1789                        } else if (data_buf) {
1790                                erased = erased_chunk_check_and_fixup(data_buf,
1791                                                                      data_len);
1792                        } else {
1793                                erased = false;
1794                        }
1795
1796                        if (!erased)
1797                                uncorrectable_cws |= BIT(i);
1798                /*
1799                 * Check if MPU or any other operational error (timeout,
1800                 * device failure, etc.) happened for this codeword and
1801                 * make flash_op_err true. If flash_op_err is set, then
1802                 * EIO will be returned for page read.
1803                 */
1804                } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
1805                        flash_op_err = true;
1806                /*
1807                 * No ECC or operational errors happened. Check the number of
1808                 * bits corrected and update the ecc_stats.corrected.
1809                 */
1810                } else {
1811                        unsigned int stat;
1812
1813                        stat = buffer & BS_CORRECTABLE_ERR_MSK;
1814                        mtd->ecc_stats.corrected += stat;
1815                        max_bitflips = max(max_bitflips, stat);
1816                }
1817
1818                if (data_buf)
1819                        data_buf += data_len;
1820                if (oob_buf)
1821                        oob_buf += oob_len + ecc->bytes;
1822        }
1823
1824        if (flash_op_err)
1825                return -EIO;
1826
1827        if (!uncorrectable_cws)
1828                return max_bitflips;
1829
1830        return check_for_erased_page(host, data_buf_start, oob_buf_start,
1831                                     uncorrectable_cws, page,
1832                                     max_bitflips);
1833}
1834
1835/*
1836 * helper to perform the actual page read operation, used by ecc->read_page(),
1837 * ecc->read_oob()
1838 */
1839static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
1840                         u8 *oob_buf, int page)
1841{
1842        struct nand_chip *chip = &host->chip;
1843        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1844        struct nand_ecc_ctrl *ecc = &chip->ecc;
1845        u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
1846        int i, ret;
1847
1848        config_nand_page_read(nandc);
1849
1850        /* queue cmd descs for each codeword */
1851        for (i = 0; i < ecc->steps; i++) {
1852                int data_size, oob_size;
1853
1854                if (i == (ecc->steps - 1)) {
1855                        data_size = ecc->size - ((ecc->steps - 1) << 2);
1856                        oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1857                                   host->spare_bytes;
1858                } else {
1859                        data_size = host->cw_data;
1860                        oob_size = host->ecc_bytes_hw + host->spare_bytes;
1861                }
1862
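                    /*
                     * with BAM, the read locations depend on which of the
                     * data/oob buffers the caller actually provided
                     */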
1863                if (nandc->props->is_bam) {
1864                        if (data_buf && oob_buf) {
1865                                nandc_set_read_loc(nandc, 0, 0, data_size, 0);
1866                                nandc_set_read_loc(nandc, 1, data_size,
1867                                                   oob_size, 1);
1868                        } else if (data_buf) {
1869                                nandc_set_read_loc(nandc, 0, 0, data_size, 1);
1870                        } else {
1871                                nandc_set_read_loc(nandc, 0, data_size,
1872                                                   oob_size, 1);
1873                        }
1874                }
1875
1876                config_nand_cw_read(nandc, true);
1877
1878                if (data_buf)
1879                        read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
1880                                      data_size, 0);
1881
1882                /*
1883                 * when ecc is enabled, the controller doesn't read the real
1884                 * or dummy bad block markers in each chunk. To maintain a
1885                 * consistent layout across RAW and ECC reads, we just
1886                 * leave the real/dummy BBM offsets empty (i.e., filled with
1887                 * 0xffs)
1888                 */
1889                if (oob_buf) {
1890                        int j;
1891
1892                        for (j = 0; j < host->bbm_size; j++)
1893                                *oob_buf++ = 0xff;
1894
1895                        read_data_dma(nandc, FLASH_BUF_ACC + data_size,
1896                                      oob_buf, oob_size, 0);
1897                }
1898
1899                if (data_buf)
1900                        data_buf += data_size;
1901                if (oob_buf)
1902                        oob_buf += oob_size;
1903        }
1904
1905        ret = submit_descs(nandc);
1906        free_descs(nandc);
1907
1908        if (ret) {
1909                dev_err(nandc->dev, "failure to read page/oob\n");
1910                return ret;
1911        }
1912
1913        return parse_read_errors(host, data_buf_start, oob_buf_start, page);
1914}
1915
1916/*
1917 * a helper that copies the last step/codeword of a page (containing free oob)
1918 * into our local buffer
1919 */
1920static int copy_last_cw(struct qcom_nand_host *host, int page)
1921{
1922        struct nand_chip *chip = &host->chip;
1923        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1924        struct nand_ecc_ctrl *ecc = &chip->ecc;
1925        int size;
1926        int ret;
1927
1928        clear_read_regs(nandc);
1929
1930        size = host->use_ecc ? host->cw_data : host->cw_size;
1931
1932        /* prepare a clean read buffer */
1933        memset(nandc->data_buffer, 0xff, size);
1934
1935        set_address(host, host->cw_size * (ecc->steps - 1), page);
1936        update_rw_regs(host, 1, true);
1937
1938        config_nand_single_cw_page_read(nandc, host->use_ecc);
1939
1940        read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
1941
1942        ret = submit_descs(nandc);
1943        if (ret)
1944                dev_err(nandc->dev, "failed to copy last codeword\n");
1945
1946        free_descs(nandc);
1947
1948        return ret;
1949}
1950
1951/* implements ecc->read_page() */
1952static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1953                                uint8_t *buf, int oob_required, int page)
1954{
1955        struct qcom_nand_host *host = to_qcom_nand_host(chip);
1956        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1957        u8 *data_buf, *oob_buf = NULL;
1958
1959        nand_read_page_op(chip, page, 0, NULL, 0);
1960        data_buf = buf;
1961        oob_buf = oob_required ? chip->oob_poi : NULL;
1962
1963        clear_bam_transaction(nandc);
1964
1965        return read_page_ecc(host, data_buf, oob_buf, page);
1966}
1967
1968/* implements ecc->read_page_raw() */
1969static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
1970                                    struct nand_chip *chip, uint8_t *buf,
1971                                    int oob_required, int page)
1972{
1973        struct qcom_nand_host *host = to_qcom_nand_host(chip);
1974        struct nand_ecc_ctrl *ecc = &chip->ecc;
1975        int cw, ret;
1976        u8 *data_buf = buf, *oob_buf = chip->oob_poi;
1977
1978        for (cw = 0; cw < ecc->steps; cw++) {
1979                ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
1980                                             page, cw);
1981                if (ret)
1982                        return ret;
1983
1984                data_buf += host->cw_data;
1985                oob_buf += ecc->bytes;
1986        }
1987
1988        return 0;
1989}
1990
1991/* implements ecc->read_oob() */
1992static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1993                               int page)
1994{
1995        struct qcom_nand_host *host = to_qcom_nand_host(chip);
1996        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1997        struct nand_ecc_ctrl *ecc = &chip->ecc;
1998
1999        clear_read_regs(nandc);
2000        clear_bam_transaction(nandc);
2001
2002        host->use_ecc = true;
2003        set_address(host, 0, page);
2004        update_rw_regs(host, ecc->steps, true);
2005
2006        return read_page_ecc(host, NULL, chip->oob_poi, page);
2007}
2008
2009/* implements ecc->write_page() */
2010static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2011                                 const uint8_t *buf, int oob_required, int page)
2012{
2013        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2014        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2015        struct nand_ecc_ctrl *ecc = &chip->ecc;
2016        u8 *data_buf, *oob_buf;
2017        int i, ret;
2018
2019        nand_prog_page_begin_op(chip, page, 0, NULL, 0);
2020
2021        clear_read_regs(nandc);
2022        clear_bam_transaction(nandc);
2023
2024        data_buf = (u8 *)buf;
2025        oob_buf = chip->oob_poi;
2026
2027        host->use_ecc = true;
2028        update_rw_regs(host, ecc->steps, false);
2029        config_nand_page_write(nandc);
2030
2031        for (i = 0; i < ecc->steps; i++) {
2032                int data_size, oob_size;
2033
2034                if (i == (ecc->steps - 1)) {
2035                        data_size = ecc->size - ((ecc->steps - 1) << 2);
2036                        oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
2037                                   host->spare_bytes;
2038                } else {
2039                        data_size = host->cw_data;
2040                        oob_size = ecc->bytes;
2041                }
2042
2043
2044                write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
2045                               i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
2046
2047                /*
2048                 * when ECC is enabled, we don't really need to write anything
2049                 * to oob for the first n - 1 codewords since these oob regions
2050                 * just contain ECC bytes that are written by the controller
2051                 * itself. For the last codeword, we skip the bbm positions and
2052                 * write to the free oob area.
2053                 */
2054                if (i == (ecc->steps - 1)) {
2055                        oob_buf += host->bbm_size;
2056
2057                        write_data_dma(nandc, FLASH_BUF_ACC + data_size,
2058                                       oob_buf, oob_size, 0);
2059                }
2060
2061                config_nand_cw_write(nandc);
2062
2063                data_buf += data_size;
2064                oob_buf += oob_size;
2065        }
2066
2067        ret = submit_descs(nandc);
2068        if (ret)
2069                dev_err(nandc->dev, "failure to write page\n");
2070
2071        free_descs(nandc);
2072
2073        if (!ret)
2074                ret = nand_prog_page_end_op(chip);
2075
2076        return ret;
2077}
2078
2079/* implements ecc->write_page_raw() */
2080static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
2081                                     struct nand_chip *chip, const uint8_t *buf,
2082                                     int oob_required, int page)
2083{
2084        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2085        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2086        struct nand_ecc_ctrl *ecc = &chip->ecc;
2087        u8 *data_buf, *oob_buf;
2088        int i, ret;
2089
2090        nand_prog_page_begin_op(chip, page, 0, NULL, 0);
2091        clear_read_regs(nandc);
2092        clear_bam_transaction(nandc);
2093
2094        data_buf = (u8 *)buf;
2095        oob_buf = chip->oob_poi;
2096
2097        host->use_ecc = false;
2098        update_rw_regs(host, ecc->steps, false);
2099        config_nand_page_write(nandc);
2100
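            /*
             * write each codeword as two data chunks split around the
             * (dummy or real) BBM byte(s), followed by the remaining OOB
             */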
2101        for (i = 0; i < ecc->steps; i++) {
2102                int data_size1, data_size2, oob_size1, oob_size2;
2103                int reg_off = FLASH_BUF_ACC;
2104
2105                data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
2106                oob_size1 = host->bbm_size;
2107
2108                if (i == (ecc->steps - 1)) {
2109                        data_size2 = ecc->size - data_size1 -
2110                                     ((ecc->steps - 1) << 2);
2111                        oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
2112                                    host->spare_bytes;
2113                } else {
2114                        data_size2 = host->cw_data - data_size1;
2115                        oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
2116                }
2117
2118                write_data_dma(nandc, reg_off, data_buf, data_size1,
2119                               NAND_BAM_NO_EOT);
2120                reg_off += data_size1;
2121                data_buf += data_size1;
2122
2123                write_data_dma(nandc, reg_off, oob_buf, oob_size1,
2124                               NAND_BAM_NO_EOT);
2125                reg_off += oob_size1;
2126                oob_buf += oob_size1;
2127
2128                write_data_dma(nandc, reg_off, data_buf, data_size2,
2129                               NAND_BAM_NO_EOT);
2130                reg_off += data_size2;
2131                data_buf += data_size2;
2132
2133                write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
2134                oob_buf += oob_size2;
2135
2136                config_nand_cw_write(nandc);
2137        }
2138
2139        ret = submit_descs(nandc);
2140        if (ret)
2141                dev_err(nandc->dev, "failure to write raw page\n");
2142
2143        free_descs(nandc);
2144
2145        if (!ret)
2146                ret = nand_prog_page_end_op(chip);
2147
2148        return ret;
2149}
2150
2151/*
2152 * implements ecc->write_oob()
2153 *
2154 * the NAND controller cannot write only data or only OOB within a codeword
2155 * since ECC is calculated for the combined codeword. So update the OOB from
2156 * chip->oob_poi, and pad the data area with 0xFF before writing.
2157 */
2158static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
2159                                int page)
2160{
2161        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2162        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2163        struct nand_ecc_ctrl *ecc = &chip->ecc;
2164        u8 *oob = chip->oob_poi;
2165        int data_size, oob_size;
2166        int ret;
2167
2168        host->use_ecc = true;
2169        clear_bam_transaction(nandc);
2170
2171        /* calculate the data and oob size for the last codeword/step */
2172        data_size = ecc->size - ((ecc->steps - 1) << 2);
2173        oob_size = mtd->oobavail;
2174
2175        memset(nandc->data_buffer, 0xff, host->cw_data);
2176        /* copy the new oob content into the last codeword */
2177        mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
2178                                    0, mtd->oobavail);
2179
2180        set_address(host, host->cw_size * (ecc->steps - 1), page);
2181        update_rw_regs(host, 1, false);
2182
2183        config_nand_page_write(nandc);
2184        write_data_dma(nandc, FLASH_BUF_ACC,
2185                       nandc->data_buffer, data_size + oob_size, 0);
2186        config_nand_cw_write(nandc);
2187
2188        ret = submit_descs(nandc);
2189
2190        free_descs(nandc);
2191
2192        if (ret) {
2193                dev_err(nandc->dev, "failure to write oob\n");
2194                return -EIO;
2195        }
2196
2197        return nand_prog_page_end_op(chip);
2198}
2199
2200static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
2201{
2202        struct nand_chip *chip = mtd_to_nand(mtd);
2203        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2204        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2205        struct nand_ecc_ctrl *ecc = &chip->ecc;
2206        int page, ret, bbpos, bad = 0;
2207
2208        page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2209
2210        /*
2211         * configure registers for a raw sub page read. the address is set to
2212         * the beginning of the last codeword; we don't care about reading the
2213         * ecc portion of oob, we just want the first few bytes from this
2214         * codeword that contain the BBM
2215         */
2216        host->use_ecc = false;
2217
2218        clear_bam_transaction(nandc);
2219        ret = copy_last_cw(host, page);
2220        if (ret)
2221                goto err;
2222
2223        if (check_flash_errors(host, 1)) {
2224                dev_warn(nandc->dev, "error when trying to read BBM\n");
2225                goto err;
2226        }
2227
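            /*
             * in the raw last codeword, the BBM byte(s) immediately
             * follow the user data bytes
             */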
2228        bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
2229
2230        bad = nandc->data_buffer[bbpos] != 0xff;
2231
2232        if (chip->options & NAND_BUSWIDTH_16)
2233                bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
2234err:
2235        return bad;
2236}
2237
2238static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
2239{
2240        struct nand_chip *chip = mtd_to_nand(mtd);
2241        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2242        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2243        struct nand_ecc_ctrl *ecc = &chip->ecc;
2244        int page, ret;
2245
2246        clear_read_regs(nandc);
2247        clear_bam_transaction(nandc);
2248
2249        /*
2250         * to mark the block as bad, we flash the entire last codeword with 0s.
2251         * we don't care about the rest of the content in the codeword since
2252         * we aren't going to use this block again
2253         */
2254        memset(nandc->data_buffer, 0x00, host->cw_size);
2255
2256        page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2257
2258        /* prepare write */
2259        host->use_ecc = false;
2260        set_address(host, host->cw_size * (ecc->steps - 1), page);
2261        update_rw_regs(host, 1, false);
2262
2263        config_nand_page_write(nandc);
2264        write_data_dma(nandc, FLASH_BUF_ACC,
2265                       nandc->data_buffer, host->cw_size, 0);
2266        config_nand_cw_write(nandc);
2267
2268        ret = submit_descs(nandc);
2269
2270        free_descs(nandc);
2271
2272        if (ret) {
2273                dev_err(nandc->dev, "failure to update BBM\n");
2274                return -EIO;
2275        }
2276
2277        return nand_prog_page_end_op(chip);
2278}
2279
2280/*
2281 * the three functions below implement chip->read_byte(), chip->read_buf()
2282 * and chip->write_buf() respectively. these aren't used for
2283 * reading/writing page data; they are used for smaller data like reading
2284 * the id, status, etc.
2285 */
2286static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
2287{
2288        struct nand_chip *chip = mtd_to_nand(mtd);
2289        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2290        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2291        u8 *buf = nandc->data_buffer;
2292        u8 ret = 0x0;
2293
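            /*
             * NAND_CMD_STATUS is serviced from the privately maintained
             * status byte, which is reset to its default once read
             */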
2294        if (host->last_command == NAND_CMD_STATUS) {
2295                ret = host->status;
2296
2297                host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2298
2299                return ret;
2300        }
2301
2302        if (nandc->buf_start < nandc->buf_count)
2303                ret = buf[nandc->buf_start++];
2304
2305        return ret;
2306}
2307
2308static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
2309{
2310        struct nand_chip *chip = mtd_to_nand(mtd);
2311        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2312        int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2313
2314        memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
2315        nandc->buf_start += real_len;
2316}
2317
2318static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
2319                                 int len)
2320{
2321        struct nand_chip *chip = mtd_to_nand(mtd);
2322        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2323        int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2324
2325        memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
2326
2327        nandc->buf_start += real_len;
2328}
2329
2330/* we support only one external chip for now */
2331static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
2332{
2333        struct nand_chip *chip = mtd_to_nand(mtd);
2334        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2335
2336        if (chipnr <= 0)
2337                return;
2338
2339        dev_warn(nandc->dev, "invalid chip select\n");
2340}
2341
2342/*
2343 * NAND controller page layout info
2344 *
2345 * Layout with ECC enabled:
2346 *
2347 * |----------------------|  |---------------------------------|
2348 * |           xx.......yy|  |             *********xx.......yy|
2349 * |    DATA   xx..ECC..yy|  |    DATA     **SPARE**xx..ECC..yy|
2350 * |   (516)   xx.......yy|  |  (516-n*4)  **(n*4)**xx.......yy|
2351 * |           xx.......yy|  |             *********xx.......yy|
2352 * |----------------------|  |---------------------------------|
2353 *     codeword 1,2..n-1                  codeword n
2354 *  <---(528/532 Bytes)-->    <-------(528/532 Bytes)--------->
2355 *
2356 * n = Number of codewords in the page
2357 * . = ECC bytes
2358 * * = Spare/free bytes
2359 * x = Unused byte(s)
2360 * y = Reserved byte(s)
2361 *
2362 * 2K page: n = 4, spare = 16 bytes
2363 * 4K page: n = 8, spare = 32 bytes
2364 * 8K page: n = 16, spare = 64 bytes
2365 *
2366 * the qcom nand controller operates at a sub page/codeword level. each
2367 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
2368 * the number of ECC bytes vary based on the ECC strength and the bus width.
2369 *
2370 * the first n - 1 codewords contain 516 bytes of user data, the remaining
2371 * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
2372 * both user data and spare (oobavail) bytes that sum up to 516 bytes.
2373 *
2374 * When we access a page with ECC enabled, the reserved byte(s) are not
2375 * accessible at all. When reading, we fill up these unreadable positions
2376 * with 0xffs. When writing, the controller skips writing the inaccessible
2377 * bytes.
2378 *
2379 * Layout with ECC disabled:
2380 *
2381 * |------------------------------|  |---------------------------------------|
2382 * |         yy          xx.......|  |         bb          *********xx.......|
2383 * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
2384 * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
2385 * |         yy          xx.......|  |         bb          *********xx.......|
2386 * |------------------------------|  |---------------------------------------|
2387 *         codeword 1,2..n-1                        codeword n
2388 *  <-------(528/532 Bytes)------>    <-----------(528/532 Bytes)----------->
2389 *
2390 * n = Number of codewords in the page
2391 * . = ECC bytes
2392 * * = Spare/free bytes
2393 * x = Unused byte(s)
2394 * y = Dummy Bad Block byte(s)
2395 * b = Real Bad Block byte(s)
2396 * size1/size2 = function of codeword size and 'n'
2397 *
2398 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
2399 * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
2400 * Block Markers. In the last codeword, this position contains the real BBM
2401 *
2402 * In order to have a consistent layout between RAW and ECC modes, we assume
2403 * the following OOB layout arrangement:
2404 *
2405 * |-----------|  |--------------------|
2406 * |yyxx.......|  |bb*********xx.......|
2407 * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
2408 * |yyxx.......|  |bb*********xx.......|
2409 * |yyxx.......|  |bb*********xx.......|
2410 * |-----------|  |--------------------|
2411 *  first n - 1       nth OOB region
2412 *  OOB regions
2413 *
2414 * n = Number of codewords in the page
2415 * . = ECC bytes
2416 * * = FREE OOB bytes
2417 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
2418 * x = Unused byte(s)
2419 * b = Real bad block byte(s) (inaccessible when ECC enabled)
2420 *
2421 * This layout is read as is when ECC is disabled. When ECC is enabled, the
2422 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
2423 * and assumed as 0xffs when we read a page/oob. The ECC, unused and
2424 * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
2425 * the sum of the three).
2426 */
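    /*
     * worked example of the resulting OOB layout, assuming a 2K page with
     * 64 OOB bytes, x8 bus and RS ECC: n = 4, ecc->bytes = 12 (10 ECC +
     * 1 spare + 1 BBM), so the first ECC region covers OOB bytes 0..36
     * (3 * 12 + 1), the free OOB region bytes 37..52 (4 * 4), and the
     * last ECC region bytes 53..63 (10 ECC + 1 spare)
     */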
2427static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2428                                   struct mtd_oob_region *oobregion)
2429{
2430        struct nand_chip *chip = mtd_to_nand(mtd);
2431        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2432        struct nand_ecc_ctrl *ecc = &chip->ecc;
2433
2434        if (section > 1)
2435                return -ERANGE;
2436
2437        if (!section) {
2438                oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2439                                    host->bbm_size;
2440                oobregion->offset = 0;
2441        } else {
2442                oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2443                oobregion->offset = mtd->oobsize - oobregion->length;
2444        }
2445
2446        return 0;
2447}
2448
2449static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2450                                     struct mtd_oob_region *oobregion)
2451{
2452        struct nand_chip *chip = mtd_to_nand(mtd);
2453        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2454        struct nand_ecc_ctrl *ecc = &chip->ecc;
2455
2456        if (section)
2457                return -ERANGE;
2458
2459        oobregion->length = ecc->steps * 4;
2460        oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2461
2462        return 0;
2463}
2464
2465static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
2466        .ecc = qcom_nand_ooblayout_ecc,
2467        .free = qcom_nand_ooblayout_free,
2468};
2469
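    /*
     * non-data bytes per 512 byte step (ECC + spare + BBM): 12 for 4 bit
     * ECC, 16 for 8 bit ECC
     */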
2470static int
2471qcom_nandc_calc_ecc_bytes(int step_size, int strength)
2472{
2473        return strength == 4 ? 12 : 16;
2474}
2475NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
2476                     NANDC_STEP_SIZE, 4, 8);
2477
2478static int qcom_nand_attach_chip(struct nand_chip *chip)
2479{
2480        struct mtd_info *mtd = nand_to_mtd(chip);
2481        struct qcom_nand_host *host = to_qcom_nand_host(chip);
2482        struct nand_ecc_ctrl *ecc = &chip->ecc;
2483        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2484        int cwperpage, bad_block_byte, ret;
2485        bool wide_bus;
2486        int ecc_mode = 1;
2487
2488        /* controller only supports 512 bytes data steps */
2489        ecc->size = NANDC_STEP_SIZE;
2490        wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
2491        cwperpage = mtd->writesize / NANDC_STEP_SIZE;
2492
2493        /*
2494         * Each CW has 4 available OOB bytes which will be protected with
2495         * ECC, so the remaining OOB bytes can be used for ECC parity.
2496         */
2497        ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
2498                                   mtd->oobsize - (cwperpage * 4));
2499        if (ret) {
2500                dev_err(nandc->dev, "No valid ECC settings possible\n");
2501                return ret;
2502        }
2503
2504        if (ecc->strength >= 8) {
2505                /* 8 bit ECC defaults to BCH ECC on all platforms */
2506                host->bch_enabled = true;
2507                ecc_mode = 1;
2508
2509                if (wide_bus) {
2510                        host->ecc_bytes_hw = 14;
2511                        host->spare_bytes = 0;
2512                        host->bbm_size = 2;
2513                } else {
2514                        host->ecc_bytes_hw = 13;
2515                        host->spare_bytes = 2;
2516                        host->bbm_size = 1;
2517                }
2518        } else {
2519                /*
2520                 * if the controller supports BCH for 4 bit ECC, it uses
2521                 * fewer bytes for ECC. If RS is used, the ECC data is
2522                 * always 10 bytes
2523                 */
2524                if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
2525                        /* BCH */
2526                        host->bch_enabled = true;
2527                        ecc_mode = 0;
2528
2529                        if (wide_bus) {
2530                                host->ecc_bytes_hw = 8;
2531                                host->spare_bytes = 2;
2532                                host->bbm_size = 2;
2533                        } else {
2534                                host->ecc_bytes_hw = 7;
2535                                host->spare_bytes = 4;
2536                                host->bbm_size = 1;
2537                        }
2538                } else {
2539                        /* RS */
2540                        host->ecc_bytes_hw = 10;
2541
2542                        if (wide_bus) {
2543                                host->spare_bytes = 0;
2544                                host->bbm_size = 2;
2545                        } else {
2546                                host->spare_bytes = 1;
2547                                host->bbm_size = 1;
2548                        }
2549                }
2550        }
2551
2552        /*
2553         * we consider ecc->bytes as the sum of all the non-data content in a
2554         * step. It gives us a clean representation of the oob area (even if
2555         * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
2556         * ECC and 12 bytes for 4 bit ECC
2557         */
2558        ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
2559
2560        ecc->read_page          = qcom_nandc_read_page;
2561        ecc->read_page_raw      = qcom_nandc_read_page_raw;
2562        ecc->read_oob           = qcom_nandc_read_oob;
2563        ecc->write_page         = qcom_nandc_write_page;
2564        ecc->write_page_raw     = qcom_nandc_write_page_raw;
2565        ecc->write_oob          = qcom_nandc_write_oob;
2566
2567        ecc->mode = NAND_ECC_HW;
2568
2569        mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
2570
2571        nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
2572                                     cwperpage);
2573
2574        /*
2575         * DATA_UD_BYTES varies based on whether the read/write command protects
2576         * spare data with ECC too. We protect spare data by default, so we set
2577         * it to main + spare data, which are 512 and 4 bytes respectively.
2578         */
2579        host->cw_data = 516;
2580
2581        /*
2582         * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
2583         * for 8 bit ECC
2584         */
2585        host->cw_size = host->cw_data + ecc->bytes;
2586        bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
2587
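            /*
             * assemble the CFG0/CFG1 register values for both ECC and raw
             * accesses, along with the ECC engine configuration
             */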
2588        host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
2589                                | host->cw_data << UD_SIZE_BYTES
2590                                | 0 << DISABLE_STATUS_AFTER_WRITE
2591                                | 5 << NUM_ADDR_CYCLES
2592                                | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
2593                                | 0 << STATUS_BFR_READ
2594                                | 1 << SET_RD_MODE_AFTER_STATUS
2595                                | host->spare_bytes << SPARE_SIZE_BYTES;
2596
2597        host->cfg1 = 7 << NAND_RECOVERY_CYCLES
2598                                | 0 << CS_ACTIVE_BSY
2599                                | bad_block_byte << BAD_BLOCK_BYTE_NUM
2600                                | 0 << BAD_BLOCK_IN_SPARE_AREA
2601                                | 2 << WR_RD_BSY_GAP
2602                                | wide_bus << WIDE_FLASH
2603                                | host->bch_enabled << ENABLE_BCH_ECC;
2604
2605        host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
2606                                | host->cw_size << UD_SIZE_BYTES
2607                                | 5 << NUM_ADDR_CYCLES
2608                                | 0 << SPARE_SIZE_BYTES;
2609
2610        host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
2611                                | 0 << CS_ACTIVE_BSY
2612                                | 17 << BAD_BLOCK_BYTE_NUM
2613                                | 1 << BAD_BLOCK_IN_SPARE_AREA
2614                                | 2 << WR_RD_BSY_GAP
2615                                | wide_bus << WIDE_FLASH
2616                                | 1 << DEV0_CFG1_ECC_DISABLE;
2617
2618        host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
2619                                | 0 << ECC_SW_RESET
2620                                | host->cw_data << ECC_NUM_DATA_BYTES
2621                                | 1 << ECC_FORCE_CLK_OPEN
2622                                | ecc_mode << ECC_MODE
2623                                | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
2624
2625        host->ecc_buf_cfg = 0x203 << NUM_STEPS;
2626
2627        host->clrflashstatus = FS_READY_BSY_N;
2628        host->clrreadstatus = 0xc0;
2629        nandc->regs->erased_cw_detect_cfg_clr =
2630                cpu_to_le32(CLR_ERASED_PAGE_DET);
2631        nandc->regs->erased_cw_detect_cfg_set =
2632                cpu_to_le32(SET_ERASED_PAGE_DET);
2633
2634        dev_dbg(nandc->dev,
2635                "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
2636                host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
2637                host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
2638                cwperpage);
2639
2640        return 0;
2641}
2642
2643static const struct nand_controller_ops qcom_nandc_ops = {
2644        .attach_chip = qcom_nand_attach_chip,
2645};
2646
2647static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
2648{
2649        int ret;
2650
2651        ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
2652        if (ret) {
2653                dev_err(nandc->dev, "failed to set DMA mask\n");
2654                return ret;
2655        }
2656
2657        /*
2658         * we use the internal buffer for reading ONFI params, reading small
2659         * data like ID and status, and performing read-copy-write operations
2660         * when writing to a codeword partially. 532 is the maximum possible
2661         * size of a codeword for our nand controller
2662         */
2663        nandc->buf_size = 532;
2664
2665        nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
2666                                        GFP_KERNEL);
2667        if (!nandc->data_buffer)
2668                return -ENOMEM;
2669
2670        nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
2671                                        GFP_KERNEL);
2672        if (!nandc->regs)
2673                return -ENOMEM;
2674
2675        nandc->reg_read_buf = devm_kcalloc(nandc->dev,
2676                                MAX_REG_RD, sizeof(*nandc->reg_read_buf),
2677                                GFP_KERNEL);
2678        if (!nandc->reg_read_buf)
2679                return -ENOMEM;
2680
2681        if (nandc->props->is_bam) {
2682                nandc->reg_read_dma =
2683                        dma_map_single(nandc->dev, nandc->reg_read_buf,
2684                                       MAX_REG_RD *
2685                                       sizeof(*nandc->reg_read_buf),
2686                                       DMA_FROM_DEVICE);
2687                if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
2688                        dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
2689                        return -EIO;
2690                }
2691
2692                nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
2693                if (!nandc->tx_chan) {
2694                        dev_err(nandc->dev, "failed to request tx channel\n");
2695                        return -ENODEV;
2696                }
2697
2698                nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
2699                if (!nandc->rx_chan) {
2700                        dev_err(nandc->dev, "failed to request rx channel\n");
2701                        return -ENODEV;
2702                }
2703
2704                nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
2705                if (!nandc->cmd_chan) {
2706                        dev_err(nandc->dev, "failed to request cmd channel\n");
2707                        return -ENODEV;
2708                }
2709
2710                /*
2711                 * Initially allocate BAM transaction to read ONFI param page.
2712                 * After detecting all the devices, this BAM transaction will
2713         * be freed and the next BAM transaction will be allocated with
2714                 * maximum codeword size
2715                 */
2716                nandc->max_cwperpage = 1;
2717                nandc->bam_txn = alloc_bam_transaction(nandc);
2718                if (!nandc->bam_txn) {
2719                        dev_err(nandc->dev,
2720                                "failed to allocate bam transaction\n");
2721                        return -ENOMEM;
2722                }
2723        } else {
2724                nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
2725                if (!nandc->chan) {
2726                        dev_err(nandc->dev,
2727                                "failed to request slave channel\n");
2728                        return -ENODEV;
2729                }
2730        }
2731
2732        INIT_LIST_HEAD(&nandc->desc_list);
2733        INIT_LIST_HEAD(&nandc->host_list);
2734
2735        nand_controller_init(&nandc->controller);
2736        nandc->controller.ops = &qcom_nandc_ops;
2737
2738        return 0;
2739}
2740
2741static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
2742{
2743        if (nandc->props->is_bam) {
2744                if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
2745                        dma_unmap_single(nandc->dev, nandc->reg_read_dma,
2746                                         MAX_REG_RD *
2747                                         sizeof(*nandc->reg_read_buf),
2748                                         DMA_FROM_DEVICE);
2749
2750                if (nandc->tx_chan)
2751                        dma_release_channel(nandc->tx_chan);
2752
2753                if (nandc->rx_chan)
2754                        dma_release_channel(nandc->rx_chan);
2755
2756                if (nandc->cmd_chan)
2757                        dma_release_channel(nandc->cmd_chan);
2758        } else {
2759                if (nandc->chan)
2760                        dma_release_channel(nandc->chan);
2761        }
2762}
2763
2764/* one-time setup of a few nand controller registers */
2765static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
2766{
2767        u32 nand_ctrl;
2768
2769        /* kill onenand */
2770        nandc_write(nandc, SFLASHC_BURST_CFG, 0);
2771        nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
2772                    NAND_DEV_CMD_VLD_VAL);
2773
2774        /* enable ADM or BAM DMA */
2775        if (nandc->props->is_bam) {
2776                nand_ctrl = nandc_read(nandc, NAND_CTRL);
2777                nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
2778        } else {
2779                nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
2780        }
2781
2782        /* save the original values of these registers */
2783        nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
2784        nandc->vld = NAND_DEV_CMD_VLD_VAL;
2785
2786        return 0;
2787}
2788
2789static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
2790                                            struct qcom_nand_host *host,
2791                                            struct device_node *dn)
2792{
2793        struct nand_chip *chip = &host->chip;
2794        struct mtd_info *mtd = nand_to_mtd(chip);
2795        struct device *dev = nandc->dev;
2796        int ret;
2797
2798        ret = of_property_read_u32(dn, "reg", &host->cs);
2799        if (ret) {
2800                dev_err(dev, "can't get chip-select\n");
2801                return -ENXIO;
2802        }
2803
2804        nand_set_flash_node(chip, dn);
2805        mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2806        if (!mtd->name)
2807                return -ENOMEM;
2808
2809        mtd->owner = THIS_MODULE;
2810        mtd->dev.parent = dev;
2811
2812        chip->cmdfunc           = qcom_nandc_command;
2813        chip->select_chip       = qcom_nandc_select_chip;
2814        chip->read_byte         = qcom_nandc_read_byte;
2815        chip->read_buf          = qcom_nandc_read_buf;
2816        chip->write_buf         = qcom_nandc_write_buf;
2817        chip->set_features      = nand_get_set_features_notsupp;
2818        chip->get_features      = nand_get_set_features_notsupp;
2819
2820        /*
2821         * The bad block marker is readable only when we read the last codeword
2822         * of a page with ECC disabled. Currently, the nand_base and nand_bbt
2823         * helpers don't allow us to read the BBM from a nand chip with ECC
2824         * disabled (MTD_OPS_PLACE_OOB is set by default). Use the block_bad
2825         * and block_markbad helpers until we permanently switch to using
2826         * MTD_OPS_RAW for all drivers (with the help of badblockbits).
2827         */
2828        chip->block_bad         = qcom_nandc_block_bad;
2829        chip->block_markbad     = qcom_nandc_block_markbad;
2830
2831        chip->controller = &nandc->controller;
2832        chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
2833                         NAND_SKIP_BBTSCAN;
2834
2835        /* set up initial status value */
2836        host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2837
2838        ret = nand_scan(mtd, 1);
2839        if (ret)
2840                return ret;
2841
2842        ret = mtd_device_register(mtd, NULL, 0);
2843        if (ret)
2844                nand_cleanup(chip);
2845
2846        return ret;
2847}
2848
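    /*
     * Register a host for every available child node; a host that fails to
     * initialize is skipped instead of failing the whole controller.
     */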
2849static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2850{
2851        struct device *dev = nandc->dev;
2852        struct device_node *dn = dev->of_node, *child;
2853        struct qcom_nand_host *host;
2854        int ret;
2855
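            /*
             * drop any existing BAM transaction and allocate a fresh one
             * (alloc_bam_transaction() sizes it for the controller's
             * current codeword geometry)
             */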
2856        if (nandc->props->is_bam) {
2857                free_bam_transaction(nandc);
2858                nandc->bam_txn = alloc_bam_transaction(nandc);
2859                if (!nandc->bam_txn) {
2860                        dev_err(nandc->dev,
2861                                "failed to allocate bam transaction\n");
2862                        return -ENOMEM;
2863                }
2864        }
2865
2866        for_each_available_child_of_node(dn, child) {
2867                host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2868                if (!host) {
2869                        of_node_put(child);
2870                        return -ENOMEM;
2871                }
2872
2873                ret = qcom_nand_host_init_and_register(nandc, host, child);
2874                if (ret) {
2875                        devm_kfree(dev, host);
2876                        continue;
2877                }
2878
2879                list_add_tail(&host->node, &nandc->host_list);
2880        }
2881
2882        if (list_empty(&nandc->host_list))
2883                return -ENODEV;
2884
2885        return 0;
2886}
2887
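    /*
     * Illustrative DT fragment for the bindings consumed below and in
     * probe(); property names match the code, values are placeholders:
     *
     *    nand-controller@1ac00000 {
     *        compatible = "qcom,ipq806x-nand";
     *        reg = <0x1ac00000 0x800>;
     *        clocks = <&gcc EBI2_CLK>, <&gcc EBI2_AON_CLK>;
     *        clock-names = "core", "aon";
     *        qcom,cmd-crci = <15>;
     *        qcom,data-crci = <3>;
     *
     *        nand@0 {
     *            reg = <0>;
     *        };
     *    };
     *
     * the child's "reg" is the chip-select; the CRCI properties are needed
     * only by the ADM (!is_bam) variants
     */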
2888/* parse custom DT properties here */
2889static int qcom_nandc_parse_dt(struct platform_device *pdev)
2890{
2891        struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2892        struct device_node *np = nandc->dev->of_node;
2893        int ret;
2894
2895        if (!nandc->props->is_bam) {
2896                ret = of_property_read_u32(np, "qcom,cmd-crci",
2897                                           &nandc->cmd_crci);
2898                if (ret) {
2899                        dev_err(nandc->dev, "command CRCI unspecified\n");
2900                        return ret;
2901                }
2902
2903                ret = of_property_read_u32(np, "qcom,data-crci",
2904                                           &nandc->data_crci);
2905                if (ret) {
2906                        dev_err(nandc->dev, "data CRCI unspecified\n");
2907                        return ret;
2908                }
2909        }
2910
2911        return 0;
2912}
2913
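    /*
     * Probe flow: fetch per-variant match data and clocks, parse the custom
     * DT properties, map the register space (for both CPU and DMA access),
     * allocate DMA resources, do the one-time controller setup and finally
     * register a host per chip-select.
     */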
2914static int qcom_nandc_probe(struct platform_device *pdev)
2915{
2916        struct qcom_nand_controller *nandc;
2917        const void *dev_data;
2918        struct device *dev = &pdev->dev;
2919        struct resource *res;
2920        int ret;
2921
2922        nandc = devm_kzalloc(dev, sizeof(*nandc), GFP_KERNEL);
2923        if (!nandc)
2924                return -ENOMEM;
2925
2926        platform_set_drvdata(pdev, nandc);
2927        nandc->dev = dev;
2928
2929        dev_data = of_device_get_match_data(dev);
2930        if (!dev_data) {
2931                dev_err(dev, "failed to get device data\n");
2932                return -ENODEV;
2933        }
2934
2935        nandc->props = dev_data;
2936
2937        nandc->core_clk = devm_clk_get(dev, "core");
2938        if (IS_ERR(nandc->core_clk))
2939                return PTR_ERR(nandc->core_clk);
2940
2941        nandc->aon_clk = devm_clk_get(dev, "aon");
2942        if (IS_ERR(nandc->aon_clk))
2943                return PTR_ERR(nandc->aon_clk);
2944
2945        ret = qcom_nandc_parse_dt(pdev);
2946        if (ret)
2947                return ret;
2948
2949        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2950        nandc->base = devm_ioremap_resource(dev, res);
2951        if (IS_ERR(nandc->base))
2952                return PTR_ERR(nandc->base);
2953
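            /*
             * keep both the CPU and DMA views of the register space: in BAM
             * mode, register reads/writes are issued as command descriptors
             * that address the registers via base_dma
             */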
2954        nandc->base_phys = res->start;
2955        nandc->base_dma = dma_map_resource(dev, res->start,
2956                                           resource_size(res),
2957                                           DMA_BIDIRECTIONAL, 0);
2958        if (dma_mapping_error(dev, nandc->base_dma))
2959                return -ENXIO;
2960
2961        ret = qcom_nandc_alloc(nandc);
2962        if (ret)
2963                goto err_nandc_alloc;
2964
2965        ret = clk_prepare_enable(nandc->core_clk);
2966        if (ret)
2967                goto err_core_clk;
2968
2969        ret = clk_prepare_enable(nandc->aon_clk);
2970        if (ret)
2971                goto err_aon_clk;
2972
2973        ret = qcom_nandc_setup(nandc);
2974        if (ret)
2975                goto err_setup;
2976
2977        ret = qcom_probe_nand_devices(nandc);
2978        if (ret)
2979                goto err_setup;
2980
2981        return 0;
2982
2983err_setup:
2984        clk_disable_unprepare(nandc->aon_clk);
2985err_aon_clk:
2986        clk_disable_unprepare(nandc->core_clk);
2987err_core_clk:
2988        qcom_nandc_unalloc(nandc);
2989err_nandc_alloc:
2990        dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
2991                           DMA_BIDIRECTIONAL, 0);
2992
2993        return ret;
2994}
2995
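    /*
     * Tear down in reverse order of probe: release the MTD devices, free
     * the DMA resources, gate the clocks and unmap the register space.
     */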
2996static int qcom_nandc_remove(struct platform_device *pdev)
2997{
2998        struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2999        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3000        struct qcom_nand_host *host;
3001
3002        list_for_each_entry(host, &nandc->host_list, node)
3003                nand_release(nand_to_mtd(&host->chip));
3004
3006        qcom_nandc_unalloc(nandc);
3007
3008        clk_disable_unprepare(nandc->aon_clk);
3009        clk_disable_unprepare(nandc->core_clk);
3010
3011        dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
3012                           DMA_BIDIRECTIONAL, 0);
3013
3014        return 0;
3015}
3016
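    /*
     * Per-variant properties: the ECC modes the controller supports,
     * whether it sits behind a BAM (vs ADM) DMA engine, and the offset at
     * which the DEV_CMD registers start.
     */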
3017static const struct qcom_nandc_props ipq806x_nandc_props = {
3018        .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
3019        .is_bam = false,
3020        .dev_cmd_reg_start = 0x0,
3021};
3022
3023static const struct qcom_nandc_props ipq4019_nandc_props = {
3024        .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3025        .is_bam = true,
3026        .dev_cmd_reg_start = 0x0,
3027};
3028
3029static const struct qcom_nandc_props ipq8074_nandc_props = {
3030        .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3031        .is_bam = true,
3032        .dev_cmd_reg_start = 0x7000,
3033};
3034
3035/*
3036 * .data points to a qcom_nandc_props struct describing the per-variant
3037 * differences (ECC modes, DMA type, DEV_CMD register offset)
3038 */
3039static const struct of_device_id qcom_nandc_of_match[] = {
3040        {
3041                .compatible = "qcom,ipq806x-nand",
3042                .data = &ipq806x_nandc_props,
3043        },
3044        {
3045                .compatible = "qcom,ipq4019-nand",
3046                .data = &ipq4019_nandc_props,
3047        },
3048        {
3049                .compatible = "qcom,ipq8074-nand",
3050                .data = &ipq8074_nandc_props,
3051        },
3052        {}
3053};
3054MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
3055
3056static struct platform_driver qcom_nandc_driver = {
3057        .driver = {
3058                .name = "qcom-nandc",
3059                .of_match_table = qcom_nandc_of_match,
3060        },
3061        .probe   = qcom_nandc_probe,
3062        .remove  = qcom_nandc_remove,
3063};
3064module_platform_driver(qcom_nandc_driver);
3065
3066MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
3067MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
3068MODULE_LICENSE("GPL v2");
3069