linux/drivers/mtd/nand/core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *      Boris Brezillon <boris.brezillon@free-electrons.com>
 *      Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)     "nand: " fmt

#include <linux/module.h>
#include <linux/mtd/nand.h>
/**
 * nanddev_isbad() - Check if a block is bad
 * @nand: NAND device
 * @pos: position pointing to the block we want to check
 *
 * Return: true if the block is bad, false otherwise.
 */
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
        if (nanddev_bbt_is_initialized(nand)) {
                unsigned int entry;
                int status;

                entry = nanddev_bbt_pos_to_entry(nand, pos);
                status = nanddev_bbt_get_block_status(nand, entry);
                /* Lazy block status retrieval */
                if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
                        if (nand->ops->isbad(nand, pos))
                                status = NAND_BBT_BLOCK_FACTORY_BAD;
                        else
                                status = NAND_BBT_BLOCK_GOOD;

                        nanddev_bbt_set_block_status(nand, entry, status);
                }

                if (status == NAND_BBT_BLOCK_WORN ||
                    status == NAND_BBT_BLOCK_FACTORY_BAD)
                        return true;

                return false;
        }

        return nand->ops->isbad(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_isbad);
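
/*
 * Usage sketch (illustrative, not from this file): checking a block given an
 * absolute offset by converting it to a NAND position first. The nand and
 * offs variables are assumed to come from the surrounding driver code.
 *
 *      struct nand_pos pos;
 *
 *      nanddev_offs_to_pos(nand, offs, &pos);
 *      if (nanddev_isbad(nand, &pos))
 *              pr_warn("block @%llx is bad\n",
 *                      nanddev_pos_to_offs(nand, &pos));
 */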

/**
 * nanddev_markbad() - Mark a block as bad
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Marks a block bad. This function updates the BBT, if available, and
 * calls the low-level markbad hook (nand->ops->markbad()).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
        struct mtd_info *mtd = nanddev_to_mtd(nand);
        unsigned int entry;
        int ret = 0;

        if (nanddev_isbad(nand, pos))
                return 0;

        ret = nand->ops->markbad(nand, pos);
        if (ret)
                pr_warn("failed to write BBM to block @%llx (err = %d)\n",
                        nanddev_pos_to_offs(nand, pos), ret);

        if (!nanddev_bbt_is_initialized(nand))
                goto out;

        entry = nanddev_bbt_pos_to_entry(nand, pos);
        ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
        if (ret)
                goto out;

        ret = nanddev_bbt_update(nand);

out:
        if (!ret)
                mtd->ecc_stats.badblocks++;

        return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);
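
/*
 * Usage sketch (illustrative, not from this file): a driver's
 * mtd->_block_markbad() hook built on top of nanddev_markbad(). The driver
 * structure, mtd_to_mydrv() helper and lock are hypothetical; the point is
 * that callers convert the offset to a position and add their own locking.
 *
 *      static int mydrv_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
 *      {
 *              struct mydrv *drv = mtd_to_mydrv(mtd);
 *              struct nand_device *nand = mtd_to_nanddev(mtd);
 *              struct nand_pos pos;
 *              int ret;
 *
 *              nanddev_offs_to_pos(nand, offs, &pos);
 *              mutex_lock(&drv->lock);
 *              ret = nanddev_markbad(nand, &pos);
 *              mutex_unlock(&drv->lock);
 *
 *              return ret;
 *      }
 */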

/**
 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
 * @nand: NAND device
 * @pos: NAND position to test
 *
 * Checks whether the eraseblock pointed to by @pos is reserved or not.
 *
 * Return: true if the eraseblock is reserved, false otherwise.
 */
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
{
        unsigned int entry;
        int status;

        if (!nanddev_bbt_is_initialized(nand))
                return false;

        /* Return info from the table */
        entry = nanddev_bbt_pos_to_entry(nand, pos);
        status = nanddev_bbt_get_block_status(nand, entry);
        return status == NAND_BBT_BLOCK_RESERVED;
}
EXPORT_SYMBOL_GPL(nanddev_isreserved);
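
/*
 * Usage sketch (illustrative): iterating over every eraseblock and skipping
 * the ones reported bad or reserved, using the same position helpers as
 * nanddev_mtd_erase() below. process_block() is a hypothetical callback.
 *
 *      struct nand_pos pos, last;
 *
 *      nanddev_offs_to_pos(nand, 0, &pos);
 *      nanddev_offs_to_pos(nand, nanddev_size(nand) - 1, &last);
 *      while (nanddev_pos_cmp(&pos, &last) <= 0) {
 *              if (!nanddev_isbad(nand, &pos) &&
 *                  !nanddev_isreserved(nand, &pos))
 *                      process_block(nand, &pos);
 *
 *              nanddev_pos_next_eraseblock(nand, &pos);
 *      }
 */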

/**
 * nanddev_erase() - Erase a NAND portion
 * @nand: NAND device
 * @pos: position of the block to erase
 *
 * Erases the block if it's not bad.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
{
        if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
                pr_warn("attempt to erase a bad/reserved block @%llx\n",
                        nanddev_pos_to_offs(nand, pos));
                return -EIO;
        }

        return nand->ops->erase(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_erase);

/**
 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
 * @mtd: MTD device
 * @einfo: erase request
 *
 * This is a simple mtd->_erase() implementation iterating over all blocks
 * concerned by @einfo and calling nand->ops->erase() on each of them.
 *
 * Note that mtd->_erase should not be directly assigned to this helper,
 * because there's no locking here. NAND specialized layers should instead
 * implement their own wrapper around nanddev_mtd_erase(), taking the
 * appropriate lock before calling it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct nand_pos pos, last;
        int ret;

        nanddev_offs_to_pos(nand, einfo->addr, &pos);
        nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
        while (nanddev_pos_cmp(&pos, &last) <= 0) {
                ret = nanddev_erase(nand, &pos);
                if (ret) {
                        einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);

                        return ret;
                }

                nanddev_pos_next_eraseblock(nand, &pos);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
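
/*
 * Usage sketch (illustrative, not from this file): the locked wrapper the
 * comment above asks specialized layers to provide. The driver structure,
 * mtd_to_mydrv() helper and lock are hypothetical; this mirrors how a layer
 * such as SPI-NAND wraps nanddev_mtd_erase() under its own mutex.
 *
 *      static int mydrv_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
 *      {
 *              struct mydrv *drv = mtd_to_mydrv(mtd);
 *              int ret;
 *
 *              mutex_lock(&drv->lock);
 *              ret = nanddev_mtd_erase(mtd, einfo);
 *              mutex_unlock(&drv->lock);
 *
 *              return ret;
 *      }
 *
 *      ...
 *      mtd->_erase = mydrv_mtd_erase;
 */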

/**
 * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblocks on
 *                                a specific region of the NAND device
 * @mtd: MTD device
 * @offs: offset of the NAND region
 * @len: length of the NAND region
 *
 * Default implementation for mtd->_max_bad_blocks(). Only works if
 * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
 *
 * Return: a positive number encoding the maximum number of bad eraseblocks in
 * a portion of memory, a negative error code otherwise.
 */
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct nand_pos pos, end;
        unsigned int max_bb = 0;

        if (!nand->memorg.max_bad_eraseblocks_per_lun)
                return -ENOTSUPP;

        nanddev_offs_to_pos(nand, offs + len, &end);

        for (nanddev_offs_to_pos(nand, offs, &pos);
             nanddev_pos_cmp(&pos, &end) < 0;
             nanddev_pos_next_lun(nand, &pos))
                max_bb += nand->memorg.max_bad_eraseblocks_per_lun;

        return max_bb;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);
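
/*
 * Worked example (hypothetical geometry): with
 * nand->memorg.max_bad_eraseblocks_per_lun = 40, a region that straddles a
 * LUN boundary makes the loop above take two LUN-sized steps, so the worst
 * case reported is 2 * 40 = 80 bad blocks, independent of how much of each
 * LUN the region actually covers.
 *
 *      u64 lun_size = (u64)nanddev_eraseblock_size(nand) *
 *                     nand->memorg.eraseblocks_per_lun;
 *      int max_bb = nanddev_mtd_max_bad_blocks(mtd, lun_size - SZ_1M,
 *                                              2 * SZ_1M);
 *
 *      if (max_bb >= 0)
 *              pr_info("worst case: %d bad blocks\n", max_bb);
 */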

/**
 * nanddev_get_ecc_engine() - Find and get a suitable ECC engine
 * @nand: NAND device
 */
static int nanddev_get_ecc_engine(struct nand_device *nand)
{
        int engine_type;

        /* Read the user's desired ECC engine/configuration */
        of_get_nand_ecc_user_config(nand);

        engine_type = nand->ecc.user_conf.engine_type;
        if (engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
                engine_type = nand->ecc.defaults.engine_type;

        switch (engine_type) {
        case NAND_ECC_ENGINE_TYPE_NONE:
                return 0;
        case NAND_ECC_ENGINE_TYPE_SOFT:
                nand->ecc.engine = nand_ecc_get_sw_engine(nand);
                break;
        case NAND_ECC_ENGINE_TYPE_ON_DIE:
                nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand);
                break;
        case NAND_ECC_ENGINE_TYPE_ON_HOST:
                pr_err("On-host hardware ECC engines not supported yet\n");
                break;
        default:
                pr_err("Missing ECC engine type\n");
        }

        if (!nand->ecc.engine)
                return -EINVAL;

        return 0;
}

/**
 * nanddev_put_ecc_engine() - Detach and put the in-use ECC engine
 * @nand: NAND device
 */
static int nanddev_put_ecc_engine(struct nand_device *nand)
{
        switch (nand->ecc.ctx.conf.engine_type) {
        case NAND_ECC_ENGINE_TYPE_ON_HOST:
                pr_err("On-host hardware ECC engines not supported yet\n");
                break;
        case NAND_ECC_ENGINE_TYPE_NONE:
        case NAND_ECC_ENGINE_TYPE_SOFT:
        case NAND_ECC_ENGINE_TYPE_ON_DIE:
        default:
                break;
        }

        return 0;
}

/**
 * nanddev_find_ecc_configuration() - Find a suitable ECC configuration
 * @nand: NAND device
 */
static int nanddev_find_ecc_configuration(struct nand_device *nand)
{
        int ret;

        if (!nand->ecc.engine)
                return -ENOTSUPP;

        ret = nand_ecc_init_ctx(nand);
        if (ret)
                return ret;

        if (!nand_ecc_is_strong_enough(nand))
                pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
                        nand->mtd.name);

        return 0;
}

/**
 * nanddev_ecc_engine_init() - Initialize an ECC engine for the chip
 * @nand: NAND device
 */
int nanddev_ecc_engine_init(struct nand_device *nand)
{
        int ret;

        /* Look for the ECC engine to use */
        ret = nanddev_get_ecc_engine(nand);
        if (ret) {
                pr_err("No ECC engine found\n");
                return ret;
        }

        /* No ECC engine requested */
        if (!nand->ecc.engine)
                return 0;

        /* Configure the engine: balance user input and chip requirements */
        ret = nanddev_find_ecc_configuration(nand);
        if (ret) {
                pr_err("No suitable ECC configuration\n");
                nanddev_put_ecc_engine(nand);

                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init);
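
/*
 * Usage sketch (illustrative): one possible probe-path ordering, not
 * mandated by this file. The ops structure and error label are hypothetical;
 * the point is that nanddev_ecc_engine_init() runs after nanddev_init() and
 * is undone with nanddev_ecc_engine_cleanup() on the error/remove path.
 *
 *      ret = nanddev_init(nand, &mydrv_nand_ops, THIS_MODULE);
 *      if (ret)
 *              return ret;
 *
 *      ret = nanddev_ecc_engine_init(nand);
 *      if (ret)
 *              goto err_cleanup_nanddev;
 *
 *      return 0;
 *
 * err_cleanup_nanddev:
 *      nanddev_cleanup(nand);
 *      return ret;
 */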

/**
 * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations
 * @nand: NAND device
 */
void nanddev_ecc_engine_cleanup(struct nand_device *nand)
{
        if (nand->ecc.engine)
                nand_ecc_cleanup_ctx(nand);

        nanddev_put_ecc_engine(nand);
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup);

/**
 * nanddev_init() - Initialize a NAND device
 * @nand: NAND device
 * @ops: NAND device operations
 * @owner: NAND device owner
 *
 * Initializes a NAND device object. Consistency checks are done on @ops and
 * @nand->memorg. Also takes care of initializing the BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
                 struct module *owner)
{
        struct mtd_info *mtd = nanddev_to_mtd(nand);
        struct nand_memory_organization *memorg = nanddev_get_memorg(nand);

        if (!nand || !ops)
                return -EINVAL;

        if (!ops->erase || !ops->markbad || !ops->isbad)
                return -EINVAL;

        if (!memorg->bits_per_cell || !memorg->pagesize ||
            !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
            !memorg->planes_per_lun || !memorg->luns_per_target ||
            !memorg->ntargets)
                return -EINVAL;

        nand->rowconv.eraseblock_addr_shift =
                                        fls(memorg->pages_per_eraseblock - 1);
        nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
                                       nand->rowconv.eraseblock_addr_shift;

        nand->ops = ops;

        mtd->type = memorg->bits_per_cell == 1 ?
                    MTD_NANDFLASH : MTD_MLCNANDFLASH;
        mtd->flags = MTD_CAP_NANDFLASH;
        mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
        mtd->writesize = memorg->pagesize;
        mtd->writebufsize = memorg->pagesize;
        mtd->oobsize = memorg->oobsize;
        mtd->size = nanddev_size(nand);
        mtd->owner = owner;

        return nanddev_bbt_init(nand);
}
EXPORT_SYMBOL_GPL(nanddev_init);
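
/*
 * Usage sketch (illustrative, not from this file): the minimum a caller
 * provides before nanddev_init() can succeed, i.e. the three mandatory ops
 * and a fully described memory organization. All names and the geometry
 * (2 KiB pages, 64 pages per block, ...) are hypothetical; NAND_MEMORG()
 * comes from <linux/mtd/nand.h>.
 *
 *      static const struct nand_ops mydrv_nand_ops = {
 *              .erase = mydrv_erase,
 *              .markbad = mydrv_markbad,
 *              .isbad = mydrv_isbad,
 *      };
 *
 *      nand->memorg = (struct nand_memory_organization)
 *                     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1);
 *      ret = nanddev_init(nand, &mydrv_nand_ops, THIS_MODULE);
 */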

/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init().
 */
void nanddev_cleanup(struct nand_device *nand)
{
        if (nanddev_bbt_is_initialized(nand))
                nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);

MODULE_DESCRIPTION("Generic NAND framework");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_LICENSE("GPL v2");