uboot/drivers/mtd/ubi/wl.c
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling unit.
 *
 * This unit is responsible for wear-leveling. It works in terms of physical
 * eraseblocks and erase counters and knows nothing about logical eraseblocks,
 * volumes, etc. From this unit's perspective all physical eraseblocks are of
 * two types - used and free. Used physical eraseblocks are those that were
 * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are
 * those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only 0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL unit by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL unit.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
 * an "optimal" physical eraseblock. For example, when it is known that the
 * physical eraseblock will be "put" soon because it contains short-term data,
 * the WL unit may pick a free physical eraseblock with low erase counter, and
 * so forth.
 *
 * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
 *
 * This unit is also responsible for scrubbing. If a bit-flip is detected in a
 * physical eraseblock, it has to be moved. Technically this is the same as
 * moving it for wear-leveling reasons.
 *
 * As was said, for the WL unit all physical eraseblocks are either "free" or
 * "used". Free eraseblocks are kept in the @wl->free RB-tree, while used
 * eraseblocks are kept in a set of different RB-trees: @wl->used,
 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In the future, one may
 * re-work this unit and make it more scalable.
 *
 * At the moment this unit does not utilize the sequence number, which was
 * introduced relatively recently. But it would be wise to do this because the
 * sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-work of the WL unit.
 *
 * FIXME: looks too complex, should be simplified (later).
 */

#ifdef UBI_LINUX
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#endif

#include <ubi_uboot.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * For how many erase cycles short term, unknown, and long term physical
 * eraseblocks are protected.
 */
#define ST_PROTECTION 16
#define U_PROTECTION  10
#define LT_PROTECTION 4
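
/*
 * Illustrative note (not from the original sources): protection is counted
 * in erase cycles via the absolute erase counter @ubi->abs_ec. For example,
 * if a PEB is handed out for short-term data while abs_ec is 100, it stays
 * out of the @wl->used tree until abs_ec reaches 100 + ST_PROTECTION = 116,
 * i.e. until 16 more erase operations have completed anywhere on the device.
 */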

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL unit starts moving data from used physical eraseblocks
 * with low erase counter to free physical eraseblocks with high erase
 * counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL unit has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could
 * lead to unlimited wear of one or a few physical eraseblocks. Indeed,
 * imagine a situation when the picked physical eraseblock is constantly
 * erased after the data is written to it. So, we have a constant which limits
 * the highest erase counter of the free physical eraseblock to pick. Namely,
 * the WL unit does not pick eraseblocks with erase counter greater than the
 * lowest erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
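
/*
 * Worked example (illustrative, assuming the common default value of 4096
 * for CONFIG_MTD_UBI_WL_THRESHOLD): WL_FREE_MAX_DIFF is then 8192, so if the
 * least worn free PEB has EC 1000, 'find_wl_entry()' never returns a free
 * PEB whose EC is 9192 or higher, no matter how the tree is populated.
 */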

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

/**
 * struct ubi_wl_prot_entry - PEB protection entry.
 * @rb_pnum: link in the @wl->prot.pnum RB-tree
 * @rb_aec: link in the @wl->prot.aec RB-tree
 * @abs_ec: the absolute erase counter value when the protection ends
 * @e: the wear-leveling entry of the physical eraseblock under protection
 *
 * When the WL unit returns a physical eraseblock, the physical eraseblock is
 * protected from being moved for some "time". For this reason, the physical
 * eraseblock is not directly moved from the @wl->free tree to the @wl->used
 * tree. There is one more tree in between where this physical eraseblock is
 * temporarily stored (@wl->prot).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait; this is
 *    especially important in case of "short term" physical eraseblocks.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with help of
 * the absolute erase counter (@wl->abs_ec). When it reaches a certain value,
 * the physical eraseblocks are moved from the protection trees (@wl->prot.*)
 * to the @wl->used tree.
 *
 * Protected physical eraseblocks are searched by physical eraseblock number
 * (when they are put) and by the absolute erase counter (to check if it is
 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
 * storing the protected physical eraseblocks: @wl->prot.pnum and
 * @wl->prot.aec. They are referred to as the "protection" trees. The
 * first one is indexed by the physical eraseblock number. The second one is
 * indexed by the absolute erase counter. Both trees store
 * &struct ubi_wl_prot_entry objects.
 *
 * Each physical eraseblock has 2 main states: free and used. The former state
 * corresponds to the @wl->free tree. The latter state is split into several
 * sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
 *   @wl->prot.aec trees);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those trees.
 */
struct ubi_wl_prot_entry {
        struct rb_node rb_pnum;
        struct rb_node rb_aec;
        unsigned long long abs_ec;
        struct ubi_wl_entry *e;
};

/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 *
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
        struct list_head list;
        int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
        /* The below fields are only relevant to erasure works */
        struct ubi_wl_entry *e;
        int torture;
};

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
                                     struct rb_root *root);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
        struct rb_node **p, *parent = NULL;

        p = &root->rb_node;
        while (*p) {
                struct ubi_wl_entry *e1;

                parent = *p;
                e1 = rb_entry(parent, struct ubi_wl_entry, rb);

                if (e->ec < e1->ec)
                        p = &(*p)->rb_left;
                else if (e->ec > e1->ec)
                        p = &(*p)->rb_right;
                else {
                        ubi_assert(e->pnum != e1->pnum);
                        if (e->pnum < e1->pnum)
                                p = &(*p)->rb_left;
                        else
                                p = &(*p)->rb_right;
                }
        }

        rb_link_node(&e->rb, parent, p);
        rb_insert_color(&e->rb, root);
}
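
/*
 * Ordering example (illustrative): because the key is the (EC, pnum) pair,
 * entries with equal erase counters are still totally ordered. E.g. the
 * entries (EC 10, PEB 3), (EC 10, PEB 7) and (EC 12, PEB 1) appear in exactly
 * that in-order sequence, so rb_first() always yields the least worn PEB
 * with the smallest number.
 */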

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
        int err;
        struct ubi_work *wrk;

        cond_resched();

        /*
         * @ubi->work_sem is used to synchronize with the workers. Workers take
         * it in read mode, so many of them may be doing works at a time. But
         * the queue flush code has to be sure the whole queue of works is
         * done, so it takes @ubi->work_sem in write mode.
         */
        down_read(&ubi->work_sem);
        spin_lock(&ubi->wl_lock);
        if (list_empty(&ubi->works)) {
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->work_sem);
                return 0;
        }

        wrk = list_entry(ubi->works.next, struct ubi_work, list);
        list_del(&wrk->list);
        ubi->works_count -= 1;
        ubi_assert(ubi->works_count >= 0);
        spin_unlock(&ubi->wl_lock);

        /*
         * Call the worker function. Do not touch the work structure
         * after this call as it will have been freed or reused by that
         * time by the worker function.
         */
        err = wrk->func(ubi, wrk, 0);
        if (err)
                ubi_err("work failed with error code %d", err);
        up_read(&ubi->work_sem);

        return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
        int err;

        spin_lock(&ubi->wl_lock);
        while (!ubi->free.rb_node) {
                spin_unlock(&ubi->wl_lock);

                dbg_wl("do one work synchronously");
                err = do_work(ubi);
                if (err)
                        return err;

                spin_lock(&ubi->wl_lock);
        }
        spin_unlock(&ubi->wl_lock);

        return 0;
}
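
/*
 * A minimal usage sketch (hypothetical caller, not part of this file): code
 * that must have a free PEB available before proceeding could do
 *
 *	spin_lock(&ubi->wl_lock);
 *	if (!ubi->free.rb_node) {
 *		spin_unlock(&ubi->wl_lock);
 *		err = produce_free_peb(ubi);
 *		...
 *	}
 *
 * which is essentially what the retry loop in 'ubi_wl_get_peb()' below does.
 */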

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
        struct rb_node *p;

        p = root->rb_node;
        while (p) {
                struct ubi_wl_entry *e1;

                e1 = rb_entry(p, struct ubi_wl_entry, rb);

                if (e->pnum == e1->pnum) {
                        ubi_assert(e == e1);
                        return 1;
                }

                if (e->ec < e1->ec)
                        p = p->rb_left;
                else if (e->ec > e1->ec)
                        p = p->rb_right;
                else {
                        ubi_assert(e->pnum != e1->pnum);
                        if (e->pnum < e1->pnum)
                                p = p->rb_left;
                        else
                                p = p->rb_right;
                }
        }

        return 0;
}

/**
 * prot_tree_add - add a physical eraseblock to the protection trees.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 * @pe: protection entry object to use
 * @abs_ec: absolute erase counter value when this physical eraseblock has
 * to be removed from the protection trees.
 *
 * @ubi->wl_lock has to be held.
 */
static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
                          struct ubi_wl_prot_entry *pe, int abs_ec)
{
        struct rb_node **p, *parent = NULL;
        struct ubi_wl_prot_entry *pe1;

        pe->e = e;
        pe->abs_ec = ubi->abs_ec + abs_ec;

        p = &ubi->prot.pnum.rb_node;
        while (*p) {
                parent = *p;
                pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);

                if (e->pnum < pe1->e->pnum)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        rb_link_node(&pe->rb_pnum, parent, p);
        rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);

        p = &ubi->prot.aec.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);

                if (pe->abs_ec < pe1->abs_ec)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        rb_link_node(&pe->rb_aec, parent, p);
        rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @root: the RB-tree to search
 * @max: highest allowed erase counter increment over the least worn entry
 *
 * This function looks for the wear-leveling entry whose erase counter is
 * closest to, but still below, the lowest erase counter in @root plus @max.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
        struct rb_node *p;
        struct ubi_wl_entry *e;

        e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
        max += e->ec;

        p = root->rb_node;
        while (p) {
                struct ubi_wl_entry *e1;

                e1 = rb_entry(p, struct ubi_wl_entry, rb);
                if (e1->ec >= max)
                        p = p->rb_left;
                else {
                        p = p->rb_right;
                        e = e1;
                }
        }

        return e;
}
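
/*
 * Illustrative trace: with free PEBs of ECs {100, 105, 230} and @max == 50,
 * the effective bound becomes 100 + 50 = 150 and the function returns the
 * EC 105 entry - the greatest erase counter still below the bound. Since the
 * bound is computed relative to the least worn entry, a valid result always
 * exists.
 */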

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure. Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
        int err, protect, medium_ec;
        struct ubi_wl_entry *e, *first, *last;
        struct ubi_wl_prot_entry *pe;

        ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
                   dtype == UBI_UNKNOWN);

        pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
        if (!pe)
                return -ENOMEM;

retry:
        spin_lock(&ubi->wl_lock);
        if (!ubi->free.rb_node) {
                if (ubi->works_count == 0) {
                        ubi_assert(list_empty(&ubi->works));
                        ubi_err("no free eraseblocks");
                        spin_unlock(&ubi->wl_lock);
                        kfree(pe);
                        return -ENOSPC;
                }
                spin_unlock(&ubi->wl_lock);

                err = produce_free_peb(ubi);
                if (err < 0) {
                        kfree(pe);
                        return err;
                }
                goto retry;
        }

        switch (dtype) {
                case UBI_LONGTERM:
                        /*
                         * For long term data we pick a physical eraseblock
                         * with high erase counter. But the highest erase
                         * counter we can pick is bounded by the lowest erase
                         * counter plus %WL_FREE_MAX_DIFF.
                         */
                        e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
                        protect = LT_PROTECTION;
                        break;
                case UBI_UNKNOWN:
                        /*
                         * For unknown data we pick a physical eraseblock with
                         * medium erase counter. But we by no means can pick a
                         * physical eraseblock with erase counter greater than
                         * or equal to the lowest erase counter plus
                         * %WL_FREE_MAX_DIFF.
                         */
                        first = rb_entry(rb_first(&ubi->free),
                                         struct ubi_wl_entry, rb);
                        last = rb_entry(rb_last(&ubi->free),
                                        struct ubi_wl_entry, rb);

                        if (last->ec - first->ec < WL_FREE_MAX_DIFF)
                                e = rb_entry(ubi->free.rb_node,
                                                struct ubi_wl_entry, rb);
                        else {
                                medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
                                e = find_wl_entry(&ubi->free, medium_ec);
                        }
                        protect = U_PROTECTION;
                        break;
                case UBI_SHORTTERM:
                        /*
                         * For short term data we pick a physical eraseblock
                         * with the lowest erase counter as we expect it will
                         * be erased soon.
                         */
                        e = rb_entry(rb_first(&ubi->free),
                                     struct ubi_wl_entry, rb);
                        protect = ST_PROTECTION;
                        break;
                default:
                        protect = 0;
                        e = NULL;
                        BUG();
        }

        /*
         * Move the physical eraseblock to the protection trees where it will
         * be protected from being moved for some time.
         */
        paranoid_check_in_wl_tree(e, &ubi->free);
        rb_erase(&e->rb, &ubi->free);
        prot_tree_add(ubi, e, pe, protect);

        dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
        spin_unlock(&ubi->wl_lock);

        return e->pnum;
}
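
/*
 * Usage sketch (hypothetical, for illustration - the real callers live in
 * the EBA unit): mapping a logical eraseblock roughly looks like
 *
 *	pnum = ubi_wl_get_peb(ubi, UBI_LONGTERM);
 *	if (pnum < 0)
 *		return pnum;
 *	... write a VID header and data to PEB pnum ...
 *	... on failure, return it: ubi_wl_put_peb(ubi, pnum, 0); ...
 */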

/**
 * prot_tree_del - remove a physical eraseblock from the protection trees.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function removes PEB @pnum from the protection trees and returns zero
 * in case of success and %-ENODEV if the PEB was not found in the protection
 * trees.
 */
static int prot_tree_del(struct ubi_device *ubi, int pnum)
{
        struct rb_node *p;
        struct ubi_wl_prot_entry *pe = NULL;

        p = ubi->prot.pnum.rb_node;
        while (p) {
                pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);

                if (pnum == pe->e->pnum)
                        goto found;

                if (pnum < pe->e->pnum)
                        p = p->rb_left;
                else
                        p = p->rb_right;
        }

        return -ENODEV;

found:
        ubi_assert(pe->e->pnum == pnum);
        rb_erase(&pe->rb_aec, &ubi->prot.aec);
        rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
        kfree(pe);
        return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
{
        int err;
        struct ubi_ec_hdr *ec_hdr;
        unsigned long long ec = e->ec;

        dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

        err = paranoid_check_ec(ubi, e->pnum, e->ec);
        if (err > 0)
                return -EINVAL;

        ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
        if (!ec_hdr)
                return -ENOMEM;

        err = ubi_io_sync_erase(ubi, e->pnum, torture);
        if (err < 0)
                goto out_free;

        ec += err;
        if (ec > UBI_MAX_ERASECOUNTER) {
                /*
                 * Erase counter overflow. Upgrade UBI and use 64-bit
                 * erase counters internally.
                 */
                ubi_err("erase counter overflow at PEB %d, EC %llu",
                        e->pnum, ec);
                err = -EINVAL;
                goto out_free;
        }

        dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

        ec_hdr->ec = cpu_to_be64(ec);

        err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
        if (err)
                goto out_free;

        e->ec = ec;
        spin_lock(&ubi->wl_lock);
        if (e->ec > ubi->max_ec)
                ubi->max_ec = e->ec;
        spin_unlock(&ubi->wl_lock);

out_free:
        kfree(ec_hdr);
        return err;
}
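
/*
 * Note on the "ec += err" arithmetic above: 'ubi_io_sync_erase()' returns the
 * number of erase operations it performed (more than one when @torture is
 * set), so a tortured PEB's erase counter advances by several cycles at once.
 * For example, a PEB with EC 41 that survives torturing with 5 erasures ends
 * up with EC 46 written into its new EC header.
 */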

/**
 * check_protection_over - check if it is time to stop protecting some PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation, when the absolute erase
 * counter is incremented, to check if some physical eraseblocks do not have
 * to be protected any longer. These physical eraseblocks are moved from the
 * protection trees to the used tree.
 */
static void check_protection_over(struct ubi_device *ubi)
{
        struct ubi_wl_prot_entry *pe;

        /*
         * There may be several protected physical eraseblocks to remove,
         * process them all.
         */
        while (1) {
                spin_lock(&ubi->wl_lock);
                if (!ubi->prot.aec.rb_node) {
                        spin_unlock(&ubi->wl_lock);
                        break;
                }

                pe = rb_entry(rb_first(&ubi->prot.aec),
                              struct ubi_wl_prot_entry, rb_aec);

                if (pe->abs_ec > ubi->abs_ec) {
                        spin_unlock(&ubi->wl_lock);
                        break;
                }

                dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
                       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
                rb_erase(&pe->rb_aec, &ubi->prot.aec);
                rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
                wl_tree_add(pe->e, &ubi->used);
                spin_unlock(&ubi->wl_lock);

                kfree(pe);
                cond_resched();
        }
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function enqueues a work defined by @wrk to the tail of the pending
 * works list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
        spin_lock(&ubi->wl_lock);
        list_add_tail(&wrk->list, &ubi->works);
        ubi_assert(ubi->works_count >= 0);
        ubi->works_count += 1;

        /*
         * U-Boot special: We have no bgt_thread in U-Boot!
         * So just call do_work() here directly.
         */
        do_work(ubi);

        spin_unlock(&ubi->wl_lock);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        int cancel);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                          int torture)
{
        struct ubi_work *wl_wrk;

        dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
               e->pnum, e->ec, torture);

        wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wl_wrk)
                return -ENOMEM;

        wl_wrk->func = &erase_worker;
        wl_wrk->e = e;
        wl_wrk->torture = torture;

        schedule_ubi_work(ubi, wl_wrk);
        return 0;
}
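
/*
 * Note: because 'schedule_ubi_work()' above runs the queued work immediately
 * in the U-Boot port, a successful 'schedule_erase()' call normally implies
 * the erasure has already been carried out by the time it returns, unlike
 * the Linux implementation where the background thread picks the work up
 * later.
 */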

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                                int cancel)
{
        int err, put = 0, scrubbing = 0, protect = 0;
        struct ubi_wl_prot_entry *uninitialized_var(pe);
        struct ubi_wl_entry *e1, *e2;
        struct ubi_vid_hdr *vid_hdr;

        kfree(wrk);

        if (cancel)
                return 0;

        vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
        if (!vid_hdr)
                return -ENOMEM;

        mutex_lock(&ubi->move_mutex);
        spin_lock(&ubi->wl_lock);
        ubi_assert(!ubi->move_from && !ubi->move_to);
        ubi_assert(!ubi->move_to_put);

        if (!ubi->free.rb_node ||
            (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
                /*
                 * No free physical eraseblocks? Well, they must be waiting in
                 * the queue to be erased. Cancel movement - it will be
                 * triggered again when a free physical eraseblock appears.
                 *
                 * No used physical eraseblocks? They must be temporarily
                 * protected from being moved. They will be moved to the
                 * @ubi->used tree later and the wear-leveling will be
                 * triggered again.
                 */
                dbg_wl("cancel WL, a list is empty: free %d, used %d",
                       !ubi->free.rb_node, !ubi->used.rb_node);
                goto out_cancel;
        }

        if (!ubi->scrub.rb_node) {
                /*
                 * Now pick the least worn-out used physical eraseblock and a
                 * highly worn-out free physical eraseblock. If the erase
                 * counters differ enough, start wear-leveling.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
                e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
                        dbg_wl("no WL needed: min used EC %d, max free EC %d",
                               e1->ec, e2->ec);
                        goto out_cancel;
                }
                paranoid_check_in_wl_tree(e1, &ubi->used);
                rb_erase(&e1->rb, &ubi->used);
                dbg_wl("move PEB %d EC %d to PEB %d EC %d",
                       e1->pnum, e1->ec, e2->pnum, e2->ec);
        } else {
                /* Perform scrubbing */
                scrubbing = 1;
                e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
                e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
                paranoid_check_in_wl_tree(e1, &ubi->scrub);
                rb_erase(&e1->rb, &ubi->scrub);
                dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
        }

        paranoid_check_in_wl_tree(e2, &ubi->free);
        rb_erase(&e2->rb, &ubi->free);
        ubi->move_from = e1;
        ubi->move_to = e2;
        spin_unlock(&ubi->wl_lock);

        /*
         * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
         * We so far do not know which logical eraseblock our physical
         * eraseblock (@e1) belongs to. We have to read the volume identifier
         * header first.
         *
         * Note, we are protected from this PEB being unmapped and erased.
         * 'ubi_wl_put_peb()' would wait for the move to be finished if the
         * PEB being moved were unmapped.
         */

        err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
        if (err && err != UBI_IO_BITFLIPS) {
                if (err == UBI_IO_PEB_FREE) {
                        /*
                         * We are trying to move a PEB without a VID header.
                         * UBI always writes VID headers shortly after the PEB
                         * was given out, so we have a situation when it did
                         * not have a chance to write it down because it was
                         * preempted. Just re-schedule the work, so that next
                         * time it will likely have the VID header in place.
                         */
                        dbg_wl("PEB %d has no VID header", e1->pnum);
                        goto out_not_moved;
                }

                ubi_err("error %d while reading VID header from PEB %d",
                        err, e1->pnum);
                if (err > 0)
                        err = -EIO;
                goto out_error;
        }

        err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
        if (err) {
                if (err < 0)
                        goto out_error;
                if (err == 1)
                        goto out_not_moved;

                /*
                 * For some reason the LEB was not moved - it might be because
                 * the volume is being deleted. We should prevent this PEB from
                 * being selected for wear-leveling movement for some "time",
                 * so put it to the protection tree.
                 */

                dbg_wl("cancelled moving PEB %d", e1->pnum);
                pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
                if (!pe) {
                        err = -ENOMEM;
                        goto out_error;
                }

                protect = 1;
        }

        ubi_free_vid_hdr(ubi, vid_hdr);
        spin_lock(&ubi->wl_lock);
        if (protect)
                prot_tree_add(ubi, e1, pe, protect);
        if (!ubi->move_to_put)
                wl_tree_add(e2, &ubi->used);
        else
                put = 1;
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        if (put) {
                /*
                 * Well, the target PEB was put meanwhile, schedule it for
                 * erasure.
                 */
                dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
                err = schedule_erase(ubi, e2, 0);
                if (err)
                        goto out_error;
        }

        if (!protect) {
                err = schedule_erase(ubi, e1, 0);
                if (err)
                        goto out_error;
        }

        dbg_wl("done");
        mutex_unlock(&ubi->move_mutex);
        return 0;

        /*
         * For some reason the LEB was not moved - it might be an error, or it
         * might be something else. @e1 was not changed, so put it back. @e2
         * might have been changed, schedule it for erasure.
         */
out_not_moved:
        ubi_free_vid_hdr(ubi, vid_hdr);
        spin_lock(&ubi->wl_lock);
        if (scrubbing)
                wl_tree_add(e1, &ubi->scrub);
        else
                wl_tree_add(e1, &ubi->used);
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        err = schedule_erase(ubi, e2, 0);
        if (err)
                goto out_error;

        mutex_unlock(&ubi->move_mutex);
        return 0;

out_error:
        ubi_err("error %d while moving PEB %d to PEB %d",
                err, e1->pnum, e2->pnum);

        ubi_free_vid_hdr(ubi, vid_hdr);
        spin_lock(&ubi->wl_lock);
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        kmem_cache_free(ubi_wl_entry_slab, e1);
        kmem_cache_free(ubi_wl_entry_slab, e2);
        ubi_ro_mode(ubi);

        mutex_unlock(&ubi->move_mutex);
        return err;

out_cancel:
        ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
        mutex_unlock(&ubi->move_mutex);
        ubi_free_vid_hdr(ubi, vid_hdr);
        return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
{
        int err = 0;
        struct ubi_wl_entry *e1;
        struct ubi_wl_entry *e2;
        struct ubi_work *wrk;

        spin_lock(&ubi->wl_lock);
        if (ubi->wl_scheduled)
                /* Wear-leveling is already in the work queue */
                goto out_unlock;

        /*
         * If the ubi->scrub tree is not empty, scrubbing is needed, and the
         * WL worker has to be scheduled anyway.
         */
        if (!ubi->scrub.rb_node) {
                if (!ubi->used.rb_node || !ubi->free.rb_node)
                        /* No physical eraseblocks - no deal */
                        goto out_unlock;

                /*
                 * We schedule wear-leveling only if the difference between the
                 * lowest erase counter of used physical eraseblocks and a high
                 * erase counter of free physical eraseblocks is greater than
                 * %UBI_WL_THRESHOLD.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
                e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
                        goto out_unlock;
                dbg_wl("schedule wear-leveling");
        } else
                dbg_wl("schedule scrubbing");

        ubi->wl_scheduled = 1;
        spin_unlock(&ubi->wl_lock);

        wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wrk) {
                err = -ENOMEM;
                goto out_cancel;
        }

        wrk->func = &wear_leveling_worker;
        schedule_ubi_work(ubi, wrk);
        return err;

out_cancel:
        spin_lock(&ubi->wl_lock);
        ubi->wl_scheduled = 0;
out_unlock:
        spin_unlock(&ubi->wl_lock);
        return err;
}
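
/*
 * Worked example (illustrative, assuming UBI_WL_THRESHOLD == 4096): if the
 * least worn used PEB has EC 100 and the most worn eligible free PEB (as
 * bounded by WL_FREE_MAX_DIFF) has EC 4300, then 4300 - 100 >= 4096 and a
 * wear-leveling work is queued; at EC 4100 the difference is only 4000 and
 * nothing is scheduled.
 */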

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        int cancel)
{
        struct ubi_wl_entry *e = wl_wrk->e;
        int pnum = e->pnum, err, need;

        if (cancel) {
                dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
                kfree(wl_wrk);
                kmem_cache_free(ubi_wl_entry_slab, e);
                return 0;
        }

        dbg_wl("erase PEB %d EC %d", pnum, e->ec);

        err = sync_erase(ubi, e, wl_wrk->torture);
        if (!err) {
                /* Fine, we've erased it successfully */
                kfree(wl_wrk);

                spin_lock(&ubi->wl_lock);
                ubi->abs_ec += 1;
                wl_tree_add(e, &ubi->free);
                spin_unlock(&ubi->wl_lock);

                /*
                 * One more erase operation has happened, take care of
                 * the protected physical eraseblocks.
                 */
                check_protection_over(ubi);

                /* And take care of wear-leveling */
                err = ensure_wear_leveling(ubi);
                return err;
        }

        ubi_err("failed to erase PEB %d, error %d", pnum, err);
        kfree(wl_wrk);

        if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
            err == -EBUSY) {
                int err1;

                /*
                 * Re-schedule the PEB for erasure. Note, @e must not be
                 * freed here: the re-scheduled erase work still uses it.
                 */
                err1 = schedule_erase(ubi, e, 0);
                if (err1) {
                        kmem_cache_free(ubi_wl_entry_slab, e);
                        err = err1;
                        goto out_ro;
                }
                return err;
        }

        kmem_cache_free(ubi_wl_entry_slab, e);

        if (err != -EIO) {
                /*
                 * If this is not %-EIO, we have no idea what to do. Scheduling
                 * this physical eraseblock for erasure again would cause
                 * errors again and again. Well, let's switch to RO mode.
                 */
                goto out_ro;
        }

        /* It is %-EIO, the PEB went bad */

        if (!ubi->bad_allowed) {
                ubi_err("bad physical eraseblock %d detected", pnum);
                goto out_ro;
        }

        spin_lock(&ubi->volumes_lock);
        need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
        if (need > 0) {
                need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
                ubi->avail_pebs -= need;
                ubi->rsvd_pebs += need;
                ubi->beb_rsvd_pebs += need;
                if (need > 0)
                        ubi_msg("reserve more %d PEBs", need);
        }

        if (ubi->beb_rsvd_pebs == 0) {
                spin_unlock(&ubi->volumes_lock);
                ubi_err("no reserved physical eraseblocks");
                goto out_ro;
        }

        spin_unlock(&ubi->volumes_lock);
        ubi_msg("mark PEB %d as bad", pnum);

        err = ubi_io_mark_bad(ubi, pnum);
        if (err)
                goto out_ro;

        spin_lock(&ubi->volumes_lock);
        ubi->beb_rsvd_pebs -= 1;
        ubi->bad_peb_count += 1;
        ubi->good_peb_count -= 1;
        ubi_calculate_reserved(ubi);
        if (ubi->beb_rsvd_pebs == 0)
                ubi_warn("last PEB from the reserved pool was used");
        spin_unlock(&ubi->volumes_lock);

        return err;

out_ro:
        ubi_ro_mode(ubi);
        return err;
}

/**
 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred on this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
        int err;
        struct ubi_wl_entry *e;

        dbg_wl("PEB %d", pnum);
        ubi_assert(pnum >= 0);
        ubi_assert(pnum < ubi->peb_count);

retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
        if (e == ubi->move_from) {
                /*
                 * User is putting the physical eraseblock which was selected
                 * to be moved. It will be scheduled for erasure in the
                 * wear-leveling worker.
                 */
                dbg_wl("PEB %d is being moved, wait", pnum);
                spin_unlock(&ubi->wl_lock);

                /* Wait for the WL worker by taking the @ubi->move_mutex */
                mutex_lock(&ubi->move_mutex);
                mutex_unlock(&ubi->move_mutex);
                goto retry;
        } else if (e == ubi->move_to) {
                /*
                 * User is putting the physical eraseblock which was selected
                 * as the target of a data move. This may happen if the EBA
                 * unit has already re-mapped the LEB in 'ubi_eba_copy_leb()'
                 * but the WL unit has not yet put the PEB to the "used" tree,
                 * although it is about to do this. So we just set a flag which
                 * tells the WL worker that the PEB is not needed anymore and
                 * should be scheduled for erasure.
                 */
                dbg_wl("PEB %d is the target of data moving", pnum);
                ubi_assert(!ubi->move_to_put);
                ubi->move_to_put = 1;
                spin_unlock(&ubi->wl_lock);
                return 0;
        } else {
                if (in_wl_tree(e, &ubi->used)) {
                        paranoid_check_in_wl_tree(e, &ubi->used);
                        rb_erase(&e->rb, &ubi->used);
                } else if (in_wl_tree(e, &ubi->scrub)) {
                        paranoid_check_in_wl_tree(e, &ubi->scrub);
                        rb_erase(&e->rb, &ubi->scrub);
                } else {
                        err = prot_tree_del(ubi, e->pnum);
                        if (err) {
                                ubi_err("PEB %d not found", pnum);
                                ubi_ro_mode(ubi);
                                spin_unlock(&ubi->wl_lock);
                                return err;
                        }
                }
        }
        spin_unlock(&ubi->wl_lock);

        err = schedule_erase(ubi, e, torture);
        if (err) {
                spin_lock(&ubi->wl_lock);
                wl_tree_add(e, &ubi->used);
                spin_unlock(&ubi->wl_lock);
        }

        return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in the background. This function returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
        struct ubi_wl_entry *e;

        ubi_msg("schedule PEB %d for scrubbing", pnum);

retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
        if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }

        if (e == ubi->move_to) {
                /*
                 * This physical eraseblock was used to move data to. The data
                 * was moved but the PEB was not yet inserted to the proper
                 * tree. We should just wait a little and let the WL worker
                 * proceed.
                 */
                spin_unlock(&ubi->wl_lock);
                dbg_wl("the PEB %d is not in proper tree, retry", pnum);
                yield();
                goto retry;
        }

        if (in_wl_tree(e, &ubi->used)) {
                paranoid_check_in_wl_tree(e, &ubi->used);
                rb_erase(&e->rb, &ubi->used);
        } else {
                int err;

                err = prot_tree_del(ubi, e->pnum);
                if (err) {
                        ubi_err("PEB %d not found", pnum);
                        ubi_ro_mode(ubi);
                        spin_unlock(&ubi->wl_lock);
                        return err;
                }
        }

        wl_tree_add(e, &ubi->scrub);
        spin_unlock(&ubi->wl_lock);

        /*
         * Technically scrubbing is the same as wear-leveling, so it is done
         * by the WL worker.
         */
        return ensure_wear_leveling(ubi);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi)
{
        int err;

        /*
         * Erase while the pending works queue is not empty, but not more than
         * the number of currently pending works.
         */
        dbg_wl("flush (%d pending works)", ubi->works_count);
        while (ubi->works_count) {
                err = do_work(ubi);
                if (err)
                        return err;
        }

        /*
         * Make sure all the works which have been done in parallel are
         * finished.
         */
        down_write(&ubi->work_sem);
        up_write(&ubi->work_sem);

        /*
         * And in case the last work was the WL worker and it cancelled the
         * LEB movement, flush again.
         */
        while (ubi->works_count) {
                dbg_wl("flush more (%d pending works)", ubi->works_count);
                err = do_work(ubi);
                if (err)
                        return err;
        }

        return 0;
}
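
/*
 * Usage sketch (hypothetical caller): code that must observe all pending
 * erasures on flash before proceeding - e.g. before detaching the device -
 * would simply do
 *
 *	err = ubi_wl_flush(ubi);
 *	if (err)
 *		return err;
 *
 * after which the @ubi->works list is empty and no worker runs in parallel.
 */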

/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
        struct rb_node *rb;
        struct ubi_wl_entry *e;

        rb = root->rb_node;
        while (rb) {
                if (rb->rb_left)
                        rb = rb->rb_left;
                else if (rb->rb_right)
                        rb = rb->rb_right;
                else {
                        e = rb_entry(rb, struct ubi_wl_entry, rb);

                        rb = rb_parent(rb);
                        if (rb) {
                                if (rb->rb_left == &e->rb)
                                        rb->rb_left = NULL;
                                else
                                        rb->rb_right = NULL;
                        }

                        kmem_cache_free(ubi_wl_entry_slab, e);
                }
        }
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
        int failures = 0;
        struct ubi_device *ubi = u;

        ubi_msg("background thread \"%s\" started, PID %d",
                ubi->bgt_name, task_pid_nr(current));

        set_freezable();
        for (;;) {
                int err;

                if (kthread_should_stop())
                        break;

                if (try_to_freeze())
                        continue;

                spin_lock(&ubi->wl_lock);
                if (list_empty(&ubi->works) || ubi->ro_mode ||
                    !ubi->thread_enabled) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock(&ubi->wl_lock);
                        schedule();
                        continue;
                }
                spin_unlock(&ubi->wl_lock);

                err = do_work(ubi);
                if (err) {
                        ubi_err("%s: work failed with error code %d",
                                ubi->bgt_name, err);
                        if (failures++ > WL_MAX_FAILURES) {
                                /*
                                 * Too many failures, disable the thread and
                                 * switch to read-only mode.
                                 */
                                ubi_msg("%s: %d consecutive failures",
                                        ubi->bgt_name, WL_MAX_FAILURES);
                                ubi_ro_mode(ubi);
                                break;
                        }
                } else
                        failures = 0;

                cond_resched();
        }

        dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
        return 0;
}

/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
        while (!list_empty(&ubi->works)) {
                struct ubi_work *wrk;

                wrk = list_entry(ubi->works.next, struct ubi_work, list);
                list_del(&wrk->list);
                wrk->func(ubi, wrk, 1);
                ubi->works_count -= 1;
                ubi_assert(ubi->works_count >= 0);
        }
}

/**
 * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
 * information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
        int err;
        struct rb_node *rb1, *rb2;
        struct ubi_scan_volume *sv;
        struct ubi_scan_leb *seb, *tmp;
        struct ubi_wl_entry *e;

        ubi->used = ubi->free = ubi->scrub = RB_ROOT;
        ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
        spin_lock_init(&ubi->wl_lock);
        mutex_init(&ubi->move_mutex);
        init_rwsem(&ubi->work_sem);
        ubi->max_ec = si->max_ec;
        INIT_LIST_HEAD(&ubi->works);

        sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

        err = -ENOMEM;
        ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
        if (!ubi->lookuptbl)
                return err;

        list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
                cond_resched();

                e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e)
                        goto out_free;

                e->pnum = seb->pnum;
                e->ec = seb->ec;
                ubi->lookuptbl[e->pnum] = e;
                if (schedule_erase(ubi, e, 0)) {
                        kmem_cache_free(ubi_wl_entry_slab, e);
                        goto out_free;
                }
        }

        list_for_each_entry(seb, &si->free, u.list) {
                cond_resched();

                e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e)
                        goto out_free;

                e->pnum = seb->pnum;
                e->ec = seb->ec;
                ubi_assert(e->ec >= 0);
                wl_tree_add(e, &ubi->free);
                ubi->lookuptbl[e->pnum] = e;
        }

        list_for_each_entry(seb, &si->corr, u.list) {
                cond_resched();

                e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e)
                        goto out_free;

                e->pnum = seb->pnum;
                e->ec = seb->ec;
                ubi->lookuptbl[e->pnum] = e;
                if (schedule_erase(ubi, e, 0)) {
                        kmem_cache_free(ubi_wl_entry_slab, e);
                        goto out_free;
                }
        }

        ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
                ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
                        cond_resched();

                        e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                        if (!e)
                                goto out_free;

                        e->pnum = seb->pnum;
                        e->ec = seb->ec;
                        ubi->lookuptbl[e->pnum] = e;
                        if (!seb->scrub) {
                                dbg_wl("add PEB %d EC %d to the used tree",
                                       e->pnum, e->ec);
                                wl_tree_add(e, &ubi->used);
                        } else {
                                dbg_wl("add PEB %d EC %d to the scrub tree",
                                       e->pnum, e->ec);
                                wl_tree_add(e, &ubi->scrub);
                        }
                }
        }

        if (ubi->avail_pebs < WL_RESERVED_PEBS) {
                ubi_err("not enough physical eraseblocks (%d, need %d)",
                        ubi->avail_pebs, WL_RESERVED_PEBS);
                err = -ENOSPC;
                goto out_free;
        }
        ubi->avail_pebs -= WL_RESERVED_PEBS;
        ubi->rsvd_pebs += WL_RESERVED_PEBS;

        /* Schedule wear-leveling if needed */
        err = ensure_wear_leveling(ubi);
        if (err)
                goto out_free;

        return 0;

out_free:
        cancel_pending(ubi);
        tree_destroy(&ubi->used);
        tree_destroy(&ubi->free);
        tree_destroy(&ubi->scrub);
        kfree(ubi->lookuptbl);
        return err;
}

/**
 * protection_trees_destroy - destroy the protection RB-trees.
 * @ubi: UBI device description object
 */
static void protection_trees_destroy(struct ubi_device *ubi)
{
        struct rb_node *rb;
        struct ubi_wl_prot_entry *pe;

        rb = ubi->prot.aec.rb_node;
        while (rb) {
                if (rb->rb_left)
                        rb = rb->rb_left;
                else if (rb->rb_right)
                        rb = rb->rb_right;
                else {
                        pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);

                        rb = rb_parent(rb);
                        if (rb) {
                                if (rb->rb_left == &pe->rb_aec)
                                        rb->rb_left = NULL;
                                else
                                        rb->rb_right = NULL;
                        }

                        kmem_cache_free(ubi_wl_entry_slab, pe->e);
                        kfree(pe);
                }
        }
}

/**
 * ubi_wl_close - close the wear-leveling unit.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
        dbg_wl("close the UBI wear-leveling unit");

        cancel_pending(ubi);
        protection_trees_destroy(ubi);
        tree_destroy(&ubi->used);
        tree_destroy(&ubi->free);
        tree_destroy(&ubi->scrub);
        kfree(ubi->lookuptbl);
}

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum is equal to @ec, %1 if not, and a negative error code if an error
 * occurred.
 */
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
        int err;
        long long read_ec;
        struct ubi_ec_hdr *ec_hdr;

        ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
        if (!ec_hdr)
                return -ENOMEM;

        err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
        if (err && err != UBI_IO_BITFLIPS) {
                /* The header does not have to exist */
                err = 0;
                goto out_free;
        }

        read_ec = be64_to_cpu(ec_hdr->ec);
        if (ec != read_ec) {
                ubi_err("paranoid check failed for PEB %d", pnum);
                ubi_err("read EC is %lld, should be %d", read_ec, ec);
                ubi_dbg_dump_stack();
                err = 1;
        } else
                err = 0;

out_free:
        kfree(ec_hdr);
        return err;
}

/**
 * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present
 * in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %1 if it
 * is not.
 */
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
                                     struct rb_root *root)
{
        if (in_wl_tree(e, root))
                return 0;

        ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p",
                e->pnum, e->ec, root);
        ubi_dbg_dump_stack();
        return 1;
}

#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */