   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 *  Copyright 2017 - Free Electrons
   4 *
   5 *  Authors:
   6 *      Boris Brezillon <boris.brezillon@free-electrons.com>
   7 *      Peter Pan <peterpandong@micron.com>
   8 */
   9
  10#ifndef __LINUX_MTD_NAND_H
  11#define __LINUX_MTD_NAND_H
  12
  13#include <linux/mtd/mtd.h>
  14
  15struct nand_device;
  16
/**
 * struct nand_memory_organization - Memory organization structure
 * @bits_per_cell: number of bits per NAND cell
 * @pagesize: page size
 * @oobsize: OOB area size
 * @pages_per_eraseblock: number of pages per eraseblock
 * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
 * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
 * @planes_per_lun: number of planes per LUN
 * @luns_per_target: number of LUN per target (target is a synonym for die)
 * @ntargets: total number of targets exposed by the NAND device
 */
struct nand_memory_organization {
        unsigned int bits_per_cell;
        unsigned int pagesize;
        unsigned int oobsize;
        unsigned int pages_per_eraseblock;
        unsigned int eraseblocks_per_lun;
        unsigned int max_bad_eraseblocks_per_lun;
        unsigned int planes_per_lun;
        unsigned int luns_per_target;
        unsigned int ntargets;
};
  40
/*
 * NAND_MEMORG() - Shorthand initializer for a struct nand_memory_organization.
 * Arguments map to the fields in declaration order: bits per cell, page size,
 * OOB size, pages per eraseblock, eraseblocks per LUN, max bad eraseblocks
 * per LUN, planes per LUN, LUNs per target and number of targets.
 */
#define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt)   \
        {                                                       \
                .bits_per_cell = (bpc),                         \
                .pagesize = (ps),                               \
                .oobsize = (os),                                \
                .pages_per_eraseblock = (ppe),                  \
                .eraseblocks_per_lun = (epl),                   \
                .max_bad_eraseblocks_per_lun = (mbb),           \
                .planes_per_lun = (ppl),                        \
                .luns_per_target = (lpt),                       \
                .ntargets = (nt),                               \
        }
  53
/**
 * struct nand_row_converter - Information needed to convert an absolute offset
 *                             into a row address
 * @lun_addr_shift: position of the LUN identifier in the row address
 * @eraseblock_addr_shift: position of the eraseblock identifier in the row
 *                         address
 *
 * Both shifts are expressed in bits and are consumed by nanddev_pos_to_row()
 * when building a row address out of a nand_pos.
 */
struct nand_row_converter {
        unsigned int lun_addr_shift;
        unsigned int eraseblock_addr_shift;
};
  65
/**
 * struct nand_pos - NAND position object
 * @target: the NAND target/die
 * @lun: the LUN identifier
 * @plane: the plane within the LUN
 * @eraseblock: the eraseblock within the LUN
 * @page: the page within the LUN
 *
 * This information is usually used by specific sub-layers to select the
 * appropriate target/die and generate a row address to pass to the device.
 */
struct nand_pos {
        unsigned int target;
        unsigned int lun;
        unsigned int plane;
        unsigned int eraseblock;
        unsigned int page;
};
  84
/**
 * struct nand_page_io_req - NAND I/O request object
 * @pos: the position this I/O request is targeting
 * @dataoffs: the offset within the page
 * @datalen: number of data bytes to read from/write to this page
 * @databuf: buffer to store data in or get data from
 * @ooboffs: the OOB offset within the page
 * @ooblen: the number of OOB bytes to read from/write to this page
 * @oobbuf: buffer to store OOB data in or get OOB data from
 * @mode: one of the %MTD_OPS_XXX mode
 *
 * This object is used to pass per-page I/O requests to NAND sub-layers. This
 * way all useful information is already formatted in a useful way and
 * specific NAND layers can focus on translating this information into
 * specific commands/operations.
 */
struct nand_page_io_req {
        struct nand_pos pos;
        unsigned int dataoffs;
        unsigned int datalen;
        union {
                const void *out;        /* source buffer for a write */
                void *in;               /* destination buffer for a read */
        } databuf;
        unsigned int ooboffs;
        unsigned int ooblen;
        union {
                const void *out;        /* source buffer for an OOB write */
                void *in;               /* destination buffer for an OOB read */
        } oobbuf;
        int mode;
};
 117
/**
 * struct nand_ecc_props - NAND ECC properties
 * @strength: ECC strength
 * @step_size: Number of bytes per step
 */
struct nand_ecc_props {
        unsigned int strength;
        unsigned int step_size;
};

/* Shorthand initializer for a struct nand_ecc_props. */
#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
 129
/**
 * struct nand_bbt - bad block table object
 * @cache: in memory BBT cache, storing one status entry per eraseblock
 *         (entries are indexed with nanddev_bbt_pos_to_entry())
 */
struct nand_bbt {
        unsigned long *cache;
};
 137
/**
 * struct nand_ops - NAND operations
 * @erase: erase a specific block. No need to check if the block is bad before
 *         erasing, this has been taken care of by the generic NAND layer
 * @markbad: mark a specific block bad. No need to check if the block is
 *           already marked bad, this has been taken care of by the generic
 *           NAND layer. This method should just write the BBM (Bad Block
 *           Marker) so that future calls to struct_nand_ops->isbad() return
 *           true
 * @isbad: check whether a block is bad or not. This method should just read
 *         the BBM and return whether the block is bad or not based on what it
 *         reads
 *
 * These are all low level operations that should be implemented by specialized
 * NAND layers (SPI NAND, raw NAND, ...).
 */
struct nand_ops {
        int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
        int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
        bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
};
 159
/**
 * struct nand_device - NAND device
 * @mtd: MTD instance attached to the NAND device
 * @memorg: memory layout
 * @eccreq: ECC requirements
 * @rowconv: position to row address converter
 * @bbt: bad block table info
 * @ops: NAND operations attached to the NAND device (passed in through
 *       nanddev_init())
 *
 * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
 * should declare their own NAND object embedding a nand_device struct (that's
 * how inheritance is done).
 * struct_nand_device->memorg and struct_nand_device->eccreq should be filled
 * at device detection time to reflect the NAND device
 * capabilities/requirements. Once this is done nanddev_init() can be called.
 * It will take care of converting NAND information into MTD ones, which means
 * the specialized NAND layers should never manually tweak
 * struct_nand_device->mtd except for the ->_read/write() hooks.
 */
struct nand_device {
        struct mtd_info mtd;
        struct nand_memory_organization memorg;
        struct nand_ecc_props eccreq;
        struct nand_row_converter rowconv;
        struct nand_bbt bbt;
        const struct nand_ops *ops;
};
 187
/**
 * struct nand_io_iter - NAND I/O iterator
 * @req: current I/O request
 * @oobbytes_per_page: maximum number of OOB bytes per page
 * @dataleft: remaining number of data bytes to read/write
 * @oobleft: remaining number of OOB bytes to read/write
 *
 * Can be used by specialized NAND layers to iterate over all pages covered
 * by an MTD I/O request, which should greatly simplify the boiler-plate
 * code needed to read/write data from/to a NAND device.
 */
struct nand_io_iter {
        struct nand_page_io_req req;
        unsigned int oobbytes_per_page;
        unsigned int dataleft;
        unsigned int oobleft;
};
 205
 206/**
 207 * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
 208 * @mtd: MTD instance
 209 *
 210 * Return: the NAND device embedding @mtd.
 211 */
 212static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
 213{
 214        return container_of(mtd, struct nand_device, mtd);
 215}
 216
 217/**
 218 * nanddev_to_mtd() - Get the MTD device attached to a NAND device
 219 * @nand: NAND device
 220 *
 221 * Return: the MTD device embedded in @nand.
 222 */
 223static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
 224{
 225        return &nand->mtd;
 226}
 227
/**
 * nanddev_bits_per_cell() - Get the number of bits per cell
 * @nand: NAND device
 *
 * Return: the number of bits per cell.
 */
static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
{
        return nand->memorg.bits_per_cell;
}
 238
 239/**
 240 * nanddev_page_size() - Get NAND page size
 241 * @nand: NAND device
 242 *
 243 * Return: the page size.
 244 */
 245static inline size_t nanddev_page_size(const struct nand_device *nand)
 246{
 247        return nand->memorg.pagesize;
 248}
 249
 250/**
 251 * nanddev_per_page_oobsize() - Get NAND OOB size
 252 * @nand: NAND device
 253 *
 254 * Return: the OOB size.
 255 */
 256static inline unsigned int
 257nanddev_per_page_oobsize(const struct nand_device *nand)
 258{
 259        return nand->memorg.oobsize;
 260}
 261
 262/**
 263 * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
 264 * @nand: NAND device
 265 *
 266 * Return: the number of pages per eraseblock.
 267 */
 268static inline unsigned int
 269nanddev_pages_per_eraseblock(const struct nand_device *nand)
 270{
 271        return nand->memorg.pages_per_eraseblock;
 272}
 273
 274/**
 275 * nanddev_pages_per_target() - Get the number of pages per target
 276 * @nand: NAND device
 277 *
 278 * Return: the number of pages per target.
 279 */
 280static inline unsigned int
 281nanddev_pages_per_target(const struct nand_device *nand)
 282{
 283        return nand->memorg.pages_per_eraseblock *
 284               nand->memorg.eraseblocks_per_lun *
 285               nand->memorg.luns_per_target;
 286}
 287
/**
 * nanddev_eraseblock_size() - Get NAND erase block size
 * @nand: NAND device
 *
 * Return: the eraseblock size.
 */
static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
{
        return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
}
 298
 299/**
 300 * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
 301 * @nand: NAND device
 302 *
 303 * Return: the number of eraseblocks per LUN.
 304 */
 305static inline unsigned int
 306nanddev_eraseblocks_per_lun(const struct nand_device *nand)
 307{
 308        return nand->memorg.eraseblocks_per_lun;
 309}
 310
 311/**
 312 * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
 313 * @nand: NAND device
 314 *
 315 * Return: the number of eraseblocks per target.
 316 */
 317static inline unsigned int
 318nanddev_eraseblocks_per_target(const struct nand_device *nand)
 319{
 320        return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
 321}
 322
 323/**
 324 * nanddev_target_size() - Get the total size provided by a single target/die
 325 * @nand: NAND device
 326 *
 327 * Return: the total size exposed by a single target/die in bytes.
 328 */
 329static inline u64 nanddev_target_size(const struct nand_device *nand)
 330{
 331        return (u64)nand->memorg.luns_per_target *
 332               nand->memorg.eraseblocks_per_lun *
 333               nand->memorg.pages_per_eraseblock *
 334               nand->memorg.pagesize;
 335}
 336
/**
 * nanddev_ntargets() - Get the total number of targets
 * @nand: NAND device
 *
 * Return: the number of targets/dies exposed by @nand.
 */
static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
{
        return nand->memorg.ntargets;
}
 347
 348/**
 349 * nanddev_neraseblocks() - Get the total number of eraseblocks
 350 * @nand: NAND device
 351 *
 352 * Return: the total number of eraseblocks exposed by @nand.
 353 */
 354static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
 355{
 356        return nand->memorg.ntargets * nand->memorg.luns_per_target *
 357               nand->memorg.eraseblocks_per_lun;
 358}
 359
 360/**
 361 * nanddev_size() - Get NAND size
 362 * @nand: NAND device
 363 *
 364 * Return: the total size (in bytes) exposed by @nand.
 365 */
 366static inline u64 nanddev_size(const struct nand_device *nand)
 367{
 368        return nanddev_target_size(nand) * nanddev_ntargets(nand);
 369}
 370
 371/**
 372 * nanddev_get_memorg() - Extract memory organization info from a NAND device
 373 * @nand: NAND device
 374 *
 375 * This can be used by the upper layer to fill the memorg info before calling
 376 * nanddev_init().
 377 *
 378 * Return: the memorg object embedded in the NAND device.
 379 */
 380static inline struct nand_memory_organization *
 381nanddev_get_memorg(struct nand_device *nand)
 382{
 383        return &nand->memorg;
 384}
 385
 386int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
 387                 struct module *owner);
 388void nanddev_cleanup(struct nand_device *nand);
 389
 390/**
 391 * nanddev_register() - Register a NAND device
 392 * @nand: NAND device
 393 *
 394 * Register a NAND device.
 395 * This function is just a wrapper around mtd_device_register()
 396 * registering the MTD device embedded in @nand.
 397 *
 398 * Return: 0 in case of success, a negative error code otherwise.
 399 */
 400static inline int nanddev_register(struct nand_device *nand)
 401{
 402        return mtd_device_register(&nand->mtd, NULL, 0);
 403}
 404
 405/**
 406 * nanddev_unregister() - Unregister a NAND device
 407 * @nand: NAND device
 408 *
 409 * Unregister a NAND device.
 410 * This function is just a wrapper around mtd_device_unregister()
 411 * unregistering the MTD device embedded in @nand.
 412 *
 413 * Return: 0 in case of success, a negative error code otherwise.
 414 */
 415static inline int nanddev_unregister(struct nand_device *nand)
 416{
 417        return mtd_device_unregister(&nand->mtd);
 418}
 419
 420/**
 421 * nanddev_set_of_node() - Attach a DT node to a NAND device
 422 * @nand: NAND device
 423 * @np: DT node
 424 *
 425 * Attach a DT node to a NAND device.
 426 */
 427static inline void nanddev_set_of_node(struct nand_device *nand,
 428                                       struct device_node *np)
 429{
 430        mtd_set_of_node(&nand->mtd, np);
 431}
 432
 433/**
 434 * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
 435 * @nand: NAND device
 436 *
 437 * Return: the DT node attached to @nand.
 438 */
 439static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
 440{
 441        return mtd_get_of_node(&nand->mtd);
 442}
 443
/**
 * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
 * @nand: NAND device
 * @offs: absolute NAND offset (usually passed by the MTD layer)
 * @pos: a NAND position object to fill in
 *
 * Converts @offs into a nand_pos representation.
 *
 * Return: the offset within the NAND page pointed by @pos.
 */
static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
                                               loff_t offs,
                                               struct nand_pos *pos)
{
        unsigned int pageoffs;
        u64 tmp = offs;

        /*
         * do_div() divides its first argument in place and evaluates to the
         * remainder, so each step below peels one level off the
         * page -> eraseblock -> LUN -> target hierarchy.
         */
        pageoffs = do_div(tmp, nand->memorg.pagesize);
        pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
        pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
        /* The plane is fully determined by the eraseblock index. */
        pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
        pos->lun = do_div(tmp, nand->memorg.luns_per_target);
        pos->target = tmp;

        return pageoffs;
}
 470
 471/**
 472 * nanddev_pos_cmp() - Compare two NAND positions
 473 * @a: First NAND position
 474 * @b: Second NAND position
 475 *
 476 * Compares two NAND positions.
 477 *
 478 * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
 479 */
 480static inline int nanddev_pos_cmp(const struct nand_pos *a,
 481                                  const struct nand_pos *b)
 482{
 483        if (a->target != b->target)
 484                return a->target < b->target ? -1 : 1;
 485
 486        if (a->lun != b->lun)
 487                return a->lun < b->lun ? -1 : 1;
 488
 489        if (a->eraseblock != b->eraseblock)
 490                return a->eraseblock < b->eraseblock ? -1 : 1;
 491
 492        if (a->page != b->page)
 493                return a->page < b->page ? -1 : 1;
 494
 495        return 0;
 496}
 497
 498/**
 499 * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
 500 * @nand: NAND device
 501 * @pos: the NAND position to convert
 502 *
 503 * Converts @pos NAND position into an absolute offset.
 504 *
 505 * Return: the absolute offset. Note that @pos points to the beginning of a
 506 *         page, if one wants to point to a specific offset within this page
 507 *         the returned offset has to be adjusted manually.
 508 */
 509static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
 510                                         const struct nand_pos *pos)
 511{
 512        unsigned int npages;
 513
 514        npages = pos->page +
 515                 ((pos->eraseblock +
 516                   (pos->lun +
 517                    (pos->target * nand->memorg.luns_per_target)) *
 518                   nand->memorg.eraseblocks_per_lun) *
 519                  nand->memorg.pages_per_eraseblock);
 520
 521        return (loff_t)npages * nand->memorg.pagesize;
 522}
 523
 524/**
 525 * nanddev_pos_to_row() - Extract a row address from a NAND position
 526 * @nand: NAND device
 527 * @pos: the position to convert
 528 *
 529 * Converts a NAND position into a row address that can then be passed to the
 530 * device.
 531 *
 532 * Return: the row address extracted from @pos.
 533 */
 534static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
 535                                              const struct nand_pos *pos)
 536{
 537        return (pos->lun << nand->rowconv.lun_addr_shift) |
 538               (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
 539               pos->page;
 540}
 541
 542/**
 543 * nanddev_pos_next_target() - Move a position to the next target/die
 544 * @nand: NAND device
 545 * @pos: the position to update
 546 *
 547 * Updates @pos to point to the start of the next target/die. Useful when you
 548 * want to iterate over all targets/dies of a NAND device.
 549 */
 550static inline void nanddev_pos_next_target(struct nand_device *nand,
 551                                           struct nand_pos *pos)
 552{
 553        pos->page = 0;
 554        pos->plane = 0;
 555        pos->eraseblock = 0;
 556        pos->lun = 0;
 557        pos->target++;
 558}
 559
 560/**
 561 * nanddev_pos_next_lun() - Move a position to the next LUN
 562 * @nand: NAND device
 563 * @pos: the position to update
 564 *
 565 * Updates @pos to point to the start of the next LUN. Useful when you want to
 566 * iterate over all LUNs of a NAND device.
 567 */
 568static inline void nanddev_pos_next_lun(struct nand_device *nand,
 569                                        struct nand_pos *pos)
 570{
 571        if (pos->lun >= nand->memorg.luns_per_target - 1)
 572                return nanddev_pos_next_target(nand, pos);
 573
 574        pos->lun++;
 575        pos->page = 0;
 576        pos->plane = 0;
 577        pos->eraseblock = 0;
 578}
 579
 580/**
 581 * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
 582 * @nand: NAND device
 583 * @pos: the position to update
 584 *
 585 * Updates @pos to point to the start of the next eraseblock. Useful when you
 586 * want to iterate over all eraseblocks of a NAND device.
 587 */
 588static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
 589                                               struct nand_pos *pos)
 590{
 591        if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
 592                return nanddev_pos_next_lun(nand, pos);
 593
 594        pos->eraseblock++;
 595        pos->page = 0;
 596        pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
 597}
 598
 599/**
 600 * nanddev_pos_next_page() - Move a position to the next page
 601 * @nand: NAND device
 602 * @pos: the position to update
 603 *
 604 * Updates @pos to point to the start of the next page. Useful when you want to
 605 * iterate over all pages of a NAND device.
 606 */
 607static inline void nanddev_pos_next_page(struct nand_device *nand,
 608                                         struct nand_pos *pos)
 609{
 610        if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
 611                return nanddev_pos_next_eraseblock(nand, pos);
 612
 613        pos->page++;
 614}
 615
/**
 * nanddev_io_iter_init - Initialize a NAND I/O iterator
 * @nand: NAND device
 * @offs: absolute offset
 * @req: MTD request
 * @iter: NAND I/O iterator
 *
 * Initializes a NAND iterator based on the information passed by the MTD
 * layer.
 */
static inline void nanddev_io_iter_init(struct nand_device *nand,
                                        loff_t offs, struct mtd_oob_ops *req,
                                        struct nand_io_iter *iter)
{
        struct mtd_info *mtd = nanddev_to_mtd(nand);

        iter->req.mode = req->mode;
        /* Point the request at the first page and keep the in-page offset. */
        iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
        iter->req.ooboffs = req->ooboffs;
        iter->oobbytes_per_page = mtd_oobavail(mtd, req);
        iter->dataleft = req->len;
        iter->oobleft = req->ooblen;
        iter->req.databuf.in = req->datbuf;
        /* The first chunk may be shorter than a page if @offs is unaligned. */
        iter->req.datalen = min_t(unsigned int,
                                  nand->memorg.pagesize - iter->req.dataoffs,
                                  iter->dataleft);
        iter->req.oobbuf.in = req->oobbuf;
        iter->req.ooblen = min_t(unsigned int,
                                 iter->oobbytes_per_page - iter->req.ooboffs,
                                 iter->oobleft);
}
 647
/**
 * nanddev_io_iter_next_page - Move to the next page
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Updates the @iter to point to the next page.
 */
static inline void nanddev_io_iter_next_page(struct nand_device *nand,
                                             struct nand_io_iter *iter)
{
        nanddev_pos_next_page(nand, &iter->req.pos);
        /* Account for the bytes consumed by the previous iteration. */
        iter->dataleft -= iter->req.datalen;
        iter->req.databuf.in += iter->req.datalen;
        iter->oobleft -= iter->req.ooblen;
        iter->req.oobbuf.in += iter->req.ooblen;
        /* Pages after the first one are always accessed from offset 0. */
        iter->req.dataoffs = 0;
        iter->req.ooboffs = 0;
        iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
                                  iter->dataleft);
        iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
                                 iter->oobleft);
}
 670
 671/**
 672 * nand_io_iter_end - Should end iteration or not
 673 * @nand: NAND device
 674 * @iter: NAND I/O iterator
 675 *
 676 * Check whether @iter has reached the end of the NAND portion it was asked to
 677 * iterate on or not.
 678 *
 679 * Return: true if @iter has reached the end of the iteration request, false
 680 *         otherwise.
 681 */
 682static inline bool nanddev_io_iter_end(struct nand_device *nand,
 683                                       const struct nand_io_iter *iter)
 684{
 685        if (iter->dataleft || iter->oobleft)
 686                return false;
 687
 688        return true;
 689}
 690
/**
 * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
 *                            I/O request
 * @nand: NAND device
 * @start: start address to read/write from
 * @req: MTD I/O request
 * @iter: NAND I/O iterator
 *
 * Should be used to iterate over all the pages that are contained in an MTD
 * request.
 */
#define nanddev_io_for_each_page(nand, start, req, iter)                \
        for (nanddev_io_iter_init(nand, start, req, iter);              \
             !nanddev_io_iter_end(nand, iter);                          \
             nanddev_io_iter_next_page(nand, iter))
 705
 706bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
 707bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
 708int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
 709int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
 710
 711/* BBT related functions */
/**
 * enum nand_bbt_block_status - NAND eraseblock status, as tracked by the BBT
 * @NAND_BBT_BLOCK_STATUS_UNKNOWN: block status has not been queried yet
 * @NAND_BBT_BLOCK_GOOD: block is good
 * @NAND_BBT_BLOCK_WORN: block is worn out (marked bad at runtime)
 * @NAND_BBT_BLOCK_RESERVED: block is reserved
 * @NAND_BBT_BLOCK_FACTORY_BAD: block was marked bad by the manufacturer
 * @NAND_BBT_BLOCK_NUM_STATUS: number of status values, not a real status
 */
enum nand_bbt_block_status {
        NAND_BBT_BLOCK_STATUS_UNKNOWN,
        NAND_BBT_BLOCK_GOOD,
        NAND_BBT_BLOCK_WORN,
        NAND_BBT_BLOCK_RESERVED,
        NAND_BBT_BLOCK_FACTORY_BAD,
        NAND_BBT_BLOCK_NUM_STATUS,
};
 720
 721int nanddev_bbt_init(struct nand_device *nand);
 722void nanddev_bbt_cleanup(struct nand_device *nand);
 723int nanddev_bbt_update(struct nand_device *nand);
 724int nanddev_bbt_get_block_status(const struct nand_device *nand,
 725                                 unsigned int entry);
 726int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
 727                                 enum nand_bbt_block_status status);
 728int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
 729
 730/**
 731 * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
 732 * @nand: NAND device
 733 * @pos: the NAND position we want to get BBT entry for
 734 *
 735 * Return the BBT entry used to store information about the eraseblock pointed
 736 * by @pos.
 737 *
 738 * Return: the BBT entry storing information about eraseblock pointed by @pos.
 739 */
 740static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
 741                                                    const struct nand_pos *pos)
 742{
 743        return pos->eraseblock +
 744               ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
 745                nand->memorg.eraseblocks_per_lun);
 746}
 747
 748/**
 749 * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
 750 * @nand: NAND device
 751 *
 752 * Return: true if the BBT has been initialized, false otherwise.
 753 */
 754static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
 755{
 756        return !!nand->bbt.cache;
 757}
 758
 759/* MTD -> NAND helper functions. */
 760int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
 761int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);
 762
 763#endif /* __LINUX_MTD_NAND_H */
 764