linux/fs/ubifs/log.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file is part of the UBIFS journal implementation and contains various
 * functions which manipulate the log. The log is a fixed area on the flash
 * which does not contain any data but refers to buds. The log is a part of
 * the journal.
 */

#include "ubifs.h"

static int dbg_check_bud_bytes(struct ubifs_info *c);

/**
 * ubifs_search_bud - search bud LEB.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function searches bud LEB @lnum. Returns bud description object in case
 * of success and %NULL if there is no bud with this LEB number.
 */
struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
{
        struct rb_node *p;
        struct ubifs_bud *bud;

        spin_lock(&c->buds_lock);
        p = c->buds.rb_node;
        while (p) {
                bud = rb_entry(p, struct ubifs_bud, rb);
                if (lnum < bud->lnum)
                        p = p->rb_left;
                else if (lnum > bud->lnum)
                        p = p->rb_right;
                else {
                        spin_unlock(&c->buds_lock);
                        return bud;
                }
        }
        spin_unlock(&c->buds_lock);
        return NULL;
}

/**
 * ubifs_get_wbuf - get the wbuf associated with a LEB, if there is one.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function returns the wbuf for @lnum or %NULL if there is none.
 */
struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
{
        struct rb_node *p;
        struct ubifs_bud *bud;
        int jhead;

        if (!c->jheads)
                return NULL;

        spin_lock(&c->buds_lock);
        p = c->buds.rb_node;
        while (p) {
                bud = rb_entry(p, struct ubifs_bud, rb);
                if (lnum < bud->lnum)
                        p = p->rb_left;
                else if (lnum > bud->lnum)
                        p = p->rb_right;
                else {
                        jhead = bud->jhead;
                        spin_unlock(&c->buds_lock);
                        return &c->jheads[jhead].wbuf;
                }
        }
        spin_unlock(&c->buds_lock);
        return NULL;
}

/**
 * empty_log_bytes - calculate amount of empty space in the log.
 * @c: UBIFS file-system description object
 */
static inline long long empty_log_bytes(const struct ubifs_info *c)
{
        long long h, t;

        h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
        t = (long long)c->ltail_lnum * c->leb_size;

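        /*
         * The log is used as a circular buffer of 'c->log_bytes' bytes: @h is
         * the byte position of the log head and @t is the byte position of
         * the log tail. When the head is ahead of the tail the free space
         * wraps around the end of the log; when the positions coincide, the
         * log is either completely empty (head and tail in the same LEB) or
         * completely full.
         */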
        if (h > t)
                return c->log_bytes - h + t;
        else if (h != t)
                return t - h;
        else if (c->lhead_lnum != c->ltail_lnum)
                return 0;
        else
                return c->log_bytes;
}

/**
 * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
 * @c: UBIFS file-system description object
 * @bud: the bud to add
 */
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
        struct rb_node **p, *parent = NULL;
        struct ubifs_bud *b;
        struct ubifs_jhead *jhead;

        spin_lock(&c->buds_lock);
        p = &c->buds.rb_node;
        while (*p) {
                parent = *p;
                b = rb_entry(parent, struct ubifs_bud, rb);
                ubifs_assert(c, bud->lnum != b->lnum);
                if (bud->lnum < b->lnum)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&bud->rb, parent, p);
        rb_insert_color(&bud->rb, &c->buds);
        if (c->jheads) {
                jhead = &c->jheads[bud->jhead];
                list_add_tail(&bud->list, &jhead->buds_list);
        } else
                ubifs_assert(c, c->replaying && c->ro_mount);

        /*
         * Note, although this is a new bud, we account its space now, before
         * any data has been written to it, because this is about guaranteeing
         * a bounded mount time, and this bud will be read and scanned anyway.
         */
        c->bud_bytes += c->leb_size - bud->start;

        dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
                bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
        spin_unlock(&c->buds_lock);
}

/**
 * ubifs_add_bud_to_log - add a new bud to the log.
 * @c: UBIFS file-system description object
 * @jhead: journal head the bud belongs to
 * @lnum: LEB number of the bud
 * @offs: starting offset of the bud
 *
 * This function writes a reference node for the new bud LEB @lnum to the log,
 * and adds it to the buds tree. It also makes sure that the amount of space
 * used by buds does not exceed the 'c->max_bud_bytes' limit. Returns zero in
 * case of success, %-EAGAIN if commit is required, and a negative error code
 * in case of failure.
 */
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
        int err;
        struct ubifs_bud *bud;
        struct ubifs_ref_node *ref;

        bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
        if (!bud)
                return -ENOMEM;
        ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
        if (!ref) {
                kfree(bud);
                return -ENOMEM;
        }

        mutex_lock(&c->log_mutex);
        ubifs_assert(c, !c->ro_media && !c->ro_mount);
        if (c->ro_error) {
                err = -EROFS;
                goto out_unlock;
        }

        /* Make sure we have enough space in the log */
        if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
                dbg_log("not enough log space - %lld, required %d",
                        empty_log_bytes(c), c->min_log_bytes);
                ubifs_commit_required(c);
                err = -EAGAIN;
                goto out_unlock;
        }

        /*
         * Make sure the amount of space in buds will not exceed the
         * 'c->max_bud_bytes' limit, because we want to guarantee mount time
         * limits.
         *
         * It is not necessary to hold @c->buds_lock when reading @c->bud_bytes
         * because we are holding @c->log_mutex. All @c->bud_bytes changes take
         * place while both @c->log_mutex and @c->buds_lock are held.
         */
        if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
                dbg_log("bud bytes %lld (%lld max), require commit",
                        c->bud_bytes, c->max_bud_bytes);
                ubifs_commit_required(c);
                err = -EAGAIN;
                goto out_unlock;
        }

        /*
         * If the journal is full enough - start background commit. Note, it is
         * OK to read 'c->cmt_state' without spinlock because integer reads
         * are atomic in the kernel.
         */
        if (c->bud_bytes >= c->bg_bud_bytes &&
            c->cmt_state == COMMIT_RESTING) {
                dbg_log("bud bytes %lld (%lld max), initiate BG commit",
                        c->bud_bytes, c->max_bud_bytes);
                ubifs_request_bg_commit(c);
        }

        bud->lnum = lnum;
        bud->start = offs;
        bud->jhead = jhead;
        bud->log_hash = NULL;

        ref->ch.node_type = UBIFS_REF_NODE;
        ref->lnum = cpu_to_le32(bud->lnum);
        ref->offs = cpu_to_le32(bud->start);
        ref->jhead = cpu_to_le32(jhead);

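        /*
         * If there is not enough room left in the current log head LEB for
         * another reference node, move the log head to the next log LEB.
         */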
        if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
                c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
                ubifs_assert(c, c->lhead_lnum != c->ltail_lnum);
                c->lhead_offs = 0;
        }

        if (c->lhead_offs == 0) {
                /* Must ensure next log LEB has been unmapped */
                err = ubifs_leb_unmap(c, c->lhead_lnum);
                if (err)
                        goto out_unlock;
        }

        if (bud->start == 0) {
                /*
                 * Before writing a reference node which refers to an empty
                 * LEB to the log, we have to make sure that LEB is mapped,
                 * because otherwise we would risk referring to an LEB with
                 * garbage in case of an unclean reboot - the target LEB might
                 * have been unmapped, but not yet physically erased.
                 */
                err = ubifs_leb_map(c, bud->lnum);
                if (err)
                        goto out_unlock;
        }

        dbg_log("write ref LEB %d:%d",
                c->lhead_lnum, c->lhead_offs);
        err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
                               c->lhead_offs);
        if (err)
                goto out_unlock;

        err = ubifs_shash_update(c, c->log_hash, ref, UBIFS_REF_NODE_SZ);
        if (err)
                goto out_unlock;

        err = ubifs_shash_copy_state(c, c->log_hash, c->jheads[jhead].log_hash);
        if (err)
                goto out_unlock;

        c->lhead_offs += c->ref_node_alsz;

        ubifs_add_bud(c, bud);

        mutex_unlock(&c->log_mutex);
        kfree(ref);
        return 0;

out_unlock:
        mutex_unlock(&c->log_mutex);
        kfree(ref);
        kfree(bud);
        return err;
}

/**
 * remove_buds - remove used buds.
 * @c: UBIFS file-system description object
 *
 * This function removes used buds from the buds tree. It does not remove the
 * buds which are pointed to by journal heads.
 */
static void remove_buds(struct ubifs_info *c)
{
        struct rb_node *p;

        ubifs_assert(c, list_empty(&c->old_buds));
        c->cmt_bud_bytes = 0;
        spin_lock(&c->buds_lock);
        p = rb_first(&c->buds);
        while (p) {
                struct rb_node *p1 = p;
                struct ubifs_bud *bud;
                struct ubifs_wbuf *wbuf;

                p = rb_next(p);
                bud = rb_entry(p1, struct ubifs_bud, rb);
                wbuf = &c->jheads[bud->jhead].wbuf;

                if (wbuf->lnum == bud->lnum) {
                        /*
                         * Do not remove buds which are pointed to by journal
                         * heads (non-closed buds).
                         */
                        c->cmt_bud_bytes += wbuf->offs - bud->start;
                        dbg_log("preserve %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
                                bud->lnum, bud->start, dbg_jhead(bud->jhead),
                                wbuf->offs - bud->start, c->cmt_bud_bytes);
                        bud->start = wbuf->offs;
                } else {
                        c->cmt_bud_bytes += c->leb_size - bud->start;
                        dbg_log("remove %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
                                bud->lnum, bud->start, dbg_jhead(bud->jhead),
                                c->leb_size - bud->start, c->cmt_bud_bytes);
                        rb_erase(p1, &c->buds);
                        /*
                         * If the commit does not finish, the recovery will need
                         * to replay the journal, in which case the old buds
                         * must be unchanged. Do not release them until post
                         * commit i.e. do not allow them to be garbage
                         * collected.
                         */
                        list_move(&bud->list, &c->old_buds);
                }
        }
        spin_unlock(&c->buds_lock);
}

/**
 * ubifs_log_start_commit - start commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: return new log tail LEB number
 *
 * The commit operation starts with writing a "commit start" node to the log,
 * along with reference nodes for all journal heads, which will define the new
 * journal after the commit has finished. The commit start and reference nodes
 * are written in one go to the nearest empty log LEB (hence, when the commit
 * is finished UBIFS may safely unmap all the previous log LEBs). This function
 * returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
        void *buf;
        struct ubifs_cs_node *cs;
        struct ubifs_ref_node *ref;
        int err, i, max_len, len;

        err = dbg_check_bud_bytes(c);
        if (err)
                return err;

        max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
        max_len = ALIGN(max_len, c->min_io_size);
        buf = cs = kmalloc(max_len, GFP_NOFS);
        if (!buf)
                return -ENOMEM;

        cs->ch.node_type = UBIFS_CS_NODE;
        cs->cmt_no = cpu_to_le64(c->cmt_no);
        ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);

        err = ubifs_shash_init(c, c->log_hash);
        if (err)
                goto out;

        err = ubifs_shash_update(c, c->log_hash, cs, UBIFS_CS_NODE_SZ);
        if (err < 0)
                goto out;

        /*
         * Note, we do not lock 'c->log_mutex' because this is the commit start
         * phase and we are exclusively using the log. And we do not lock
         * write-buffer because nobody can write to the file-system at this
         * phase.
         */

        len = UBIFS_CS_NODE_SZ;
        for (i = 0; i < c->jhead_cnt; i++) {
                int lnum = c->jheads[i].wbuf.lnum;
                int offs = c->jheads[i].wbuf.offs;

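                /*
                 * Skip journal heads which currently have no bud: either the
                 * write-buffer is not assigned to any LEB, or its LEB is
                 * already completely full.
                 */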
                if (lnum == -1 || offs == c->leb_size)
                        continue;

                dbg_log("add ref to LEB %d:%d for jhead %s",
                        lnum, offs, dbg_jhead(i));
                ref = buf + len;
                ref->ch.node_type = UBIFS_REF_NODE;
                ref->lnum = cpu_to_le32(lnum);
                ref->offs = cpu_to_le32(offs);
                ref->jhead = cpu_to_le32(i);

                ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
                len += UBIFS_REF_NODE_SZ;

                err = ubifs_shash_update(c, c->log_hash, ref,
                                         UBIFS_REF_NODE_SZ);
                if (err)
                        goto out;
                ubifs_shash_copy_state(c, c->log_hash, c->jheads[i].log_hash);
        }

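        /*
         * Pad the buffer up to the minimal I/O unit size so that it can be
         * written out in a single operation below.
         */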
        ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);

        /* Switch to the next log LEB */
        if (c->lhead_offs) {
                c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
                ubifs_assert(c, c->lhead_lnum != c->ltail_lnum);
                c->lhead_offs = 0;
        }

        /* Must ensure next LEB has been unmapped */
        err = ubifs_leb_unmap(c, c->lhead_lnum);
        if (err)
                goto out;

        len = ALIGN(len, c->min_io_size);
        dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
        err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len);
        if (err)
                goto out;

        *ltail_lnum = c->lhead_lnum;

        c->lhead_offs += len;
        ubifs_assert(c, c->lhead_offs < c->leb_size);

        remove_buds(c);

        /*
         * We have started the commit and now users may use the rest of the log
         * for new writes.
         */
        c->min_log_bytes = 0;

out:
        kfree(buf);
        return err;
}

/**
 * ubifs_log_end_commit - end commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: new log tail LEB number
 *
 * This function is called when the commit operation has finished. It moves the
 * log tail to its new position and updates the master node so that it stores
 * the new log tail LEB number. Returns zero in case of success and a negative
 * error code in case of failure.
 */
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
        int err;

        /*
         * During this phase we have to take 'c->log_mutex' because UBIFS
         * allows FS writes during the commit. It is only during the short
         * "commit start" phase that writers are blocked.
         */
        mutex_lock(&c->log_mutex);

        dbg_log("old tail was LEB %d:0, new tail is LEB %d:0",
                c->ltail_lnum, ltail_lnum);

        c->ltail_lnum = ltail_lnum;
        /*
         * The commit is finished and from now on it must be guaranteed that
         * there is always enough space for the next commit.
         */
        c->min_log_bytes = c->leb_size;

        spin_lock(&c->buds_lock);
        c->bud_bytes -= c->cmt_bud_bytes;
        spin_unlock(&c->buds_lock);

        err = dbg_check_bud_bytes(c);
        if (err)
                goto out;

        err = ubifs_write_master(c);

out:
        mutex_unlock(&c->log_mutex);
        return err;
}

/**
 * ubifs_log_post_commit - things to do after commit is completed.
 * @c: UBIFS file-system description object
 * @old_ltail_lnum: old log tail LEB number
 *
 * Release buds only after commit is completed, because they must be unchanged
 * if recovery is needed.
 *
 * Unmap log LEBs only after commit is completed, because they may be needed for
 * recovery.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
{
        int lnum, err = 0;

        while (!list_empty(&c->old_buds)) {
                struct ubifs_bud *bud;

                bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
                err = ubifs_return_leb(c, bud->lnum);
                if (err)
                        return err;
                list_del(&bud->list);
                kfree(bud->log_hash);
                kfree(bud);
        }
        mutex_lock(&c->log_mutex);
        for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
             lnum = ubifs_next_log_lnum(c, lnum)) {
                dbg_log("unmap log LEB %d", lnum);
                err = ubifs_leb_unmap(c, lnum);
                if (err)
                        goto out;
        }
out:
        mutex_unlock(&c->log_mutex);
        return err;
}

/**
 * struct done_ref - references that have been done.
 * @rb: rb-tree node
 * @lnum: LEB number
 */
struct done_ref {
        struct rb_node rb;
        int lnum;
};

/**
 * done_already - determine if a reference has been done already.
 * @done_tree: rb-tree to store references that have been done
 * @lnum: LEB number of reference
 *
 * This function returns %1 if the reference has been done, %0 if not, otherwise
 * a negative error code is returned.
 */
static int done_already(struct rb_root *done_tree, int lnum)
{
        struct rb_node **p = &done_tree->rb_node, *parent = NULL;
        struct done_ref *dr;

        while (*p) {
                parent = *p;
                dr = rb_entry(parent, struct done_ref, rb);
                if (lnum < dr->lnum)
                        p = &(*p)->rb_left;
                else if (lnum > dr->lnum)
                        p = &(*p)->rb_right;
                else
                        return 1;
        }

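        /* Not found - insert a new entry so this LEB is now marked as done */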
        dr = kzalloc(sizeof(struct done_ref), GFP_NOFS);
        if (!dr)
                return -ENOMEM;

        dr->lnum = lnum;

        rb_link_node(&dr->rb, parent, p);
        rb_insert_color(&dr->rb, done_tree);

        return 0;
}

/**
 * destroy_done_tree - destroy the done tree.
 * @done_tree: done tree to destroy
 */
static void destroy_done_tree(struct rb_root *done_tree)
{
        struct done_ref *dr, *n;

        rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb)
                kfree(dr);
}

/**
 * add_node - add a node to the consolidated log.
 * @c: UBIFS file-system description object
 * @buf: buffer to which to add
 * @lnum: LEB number to which to write is passed and returned here
 * @offs: offset to where to write is passed and returned here
 * @node: node to add
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
                    void *node)
{
        struct ubifs_ch *ch = node;
        int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs;

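        /*
         * If the node does not fit into the remaining space of the current
         * LEB, pad and flush the buffer and continue in the next log LEB.
         */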
        if (len > remains) {
                int sz = ALIGN(*offs, c->min_io_size), err;

                ubifs_pad(c, buf + *offs, sz - *offs);
                err = ubifs_leb_change(c, *lnum, buf, sz);
                if (err)
                        return err;
                *lnum = ubifs_next_log_lnum(c, *lnum);
                *offs = 0;
        }
        memcpy(buf + *offs, node, len);
        *offs += ALIGN(len, 8);
        return 0;
}

/**
 * ubifs_consolidate_log - consolidate the log.
 * @c: UBIFS file-system description object
 *
 * Repeated failed commits could fill up the log, but at least 1 LEB is needed
 * for commit. This function rewrites the reference nodes in the log, omitting
 * duplicates and failed CS nodes, and leaving no gaps.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_consolidate_log(struct ubifs_info *c)
{
        struct ubifs_scan_leb *sleb;
        struct ubifs_scan_node *snod;
        struct rb_root done_tree = RB_ROOT;
        int lnum, err, first = 1, write_lnum, offs = 0;
        void *buf;

        dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum,
                  c->lhead_lnum);
        buf = vmalloc(c->leb_size);
        if (!buf)
                return -ENOMEM;
        lnum = c->ltail_lnum;
        write_lnum = lnum;
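
        /*
         * Walk the log from the tail to the head, re-writing the nodes into
         * @buf: only the first CS node and one reference node per bud LEB are
         * kept, everything else is dropped.
         */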
        while (1) {
                sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
                if (IS_ERR(sleb)) {
                        err = PTR_ERR(sleb);
                        goto out_free;
                }
                list_for_each_entry(snod, &sleb->nodes, list) {
                        switch (snod->type) {
                        case UBIFS_REF_NODE: {
                                struct ubifs_ref_node *ref = snod->node;
                                int ref_lnum = le32_to_cpu(ref->lnum);

                                err = done_already(&done_tree, ref_lnum);
                                if (err < 0)
                                        goto out_scan;
                                if (err != 1) {
                                        err = add_node(c, buf, &write_lnum,
                                                       &offs, snod->node);
                                        if (err)
                                                goto out_scan;
                                }
                                break;
                        }
                        case UBIFS_CS_NODE:
                                if (!first)
                                        break;
                                err = add_node(c, buf, &write_lnum, &offs,
                                               snod->node);
                                if (err)
                                        goto out_scan;
                                first = 0;
                                break;
                        }
                }
                ubifs_scan_destroy(sleb);
                if (lnum == c->lhead_lnum)
                        break;
                lnum = ubifs_next_log_lnum(c, lnum);
        }
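        /* Write out whatever is left in @buf to the current write LEB */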
        if (offs) {
                int sz = ALIGN(offs, c->min_io_size);

                ubifs_pad(c, buf + offs, sz - offs);
                err = ubifs_leb_change(c, write_lnum, buf, sz);
                if (err)
                        goto out_free;
                offs = ALIGN(offs, c->min_io_size);
        }
        destroy_done_tree(&done_tree);
        vfree(buf);
        if (write_lnum == c->lhead_lnum) {
                ubifs_err(c, "log is too full");
                return -EINVAL;
        }
        /* Unmap remaining LEBs */
        lnum = write_lnum;
        do {
                lnum = ubifs_next_log_lnum(c, lnum);
                err = ubifs_leb_unmap(c, lnum);
                if (err)
                        return err;
        } while (lnum != c->lhead_lnum);
        c->lhead_lnum = write_lnum;
        c->lhead_offs = offs;
        dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs);
        return 0;

out_scan:
        ubifs_scan_destroy(sleb);
out_free:
        destroy_done_tree(&done_tree);
        vfree(buf);
        return err;
}

/**
 * dbg_check_bud_bytes - make sure bud bytes calculation is all right.
 * @c: UBIFS file-system description object
 *
 * This function makes sure the amount of flash space used by closed buds
 * ('c->bud_bytes') is correct. Returns zero in case of success and %-EINVAL in
 * case of failure.
 */
static int dbg_check_bud_bytes(struct ubifs_info *c)
{
        int i, err = 0;
        struct ubifs_bud *bud;
        long long bud_bytes = 0;

        if (!dbg_is_chk_gen(c))
                return 0;

        spin_lock(&c->buds_lock);
        for (i = 0; i < c->jhead_cnt; i++)
                list_for_each_entry(bud, &c->jheads[i].buds_list, list)
                        bud_bytes += c->leb_size - bud->start;

        if (c->bud_bytes != bud_bytes) {
                ubifs_err(c, "bad bud_bytes %lld, calculated %lld",
                          c->bud_bytes, bud_bytes);
                err = -EINVAL;
        }
        spin_unlock(&c->buds_lock);

        return err;
}
 763