linux/fs/nilfs2/segment.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * NILFS segment constructor.
   4 *
   5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
   6 *
   7 * Written by Ryusuke Konishi.
   8 *
   9 */
  10
  11#include <linux/pagemap.h>
  12#include <linux/buffer_head.h>
  13#include <linux/writeback.h>
  14#include <linux/bitops.h>
  15#include <linux/bio.h>
  16#include <linux/completion.h>
  17#include <linux/blkdev.h>
  18#include <linux/backing-dev.h>
  19#include <linux/freezer.h>
  20#include <linux/kthread.h>
  21#include <linux/crc32.h>
  22#include <linux/pagevec.h>
  23#include <linux/slab.h>
  24#include <linux/sched/signal.h>
  25
  26#include "nilfs.h"
  27#include "btnode.h"
  28#include "page.h"
  29#include "segment.h"
  30#include "sufile.h"
  31#include "cpfile.h"
  32#include "ifile.h"
  33#include "segbuf.h"
  34
  35
  36/*
  37 * Segment constructor
  38 */
  39#define SC_N_INODEVEC   16   /* Size of locally allocated inode vector */
  40
  41#define SC_MAX_SEGDELTA 64   /*
  42                              * Upper limit of the number of segments
  43                              * appended in collection retry loop
  44                              */
  45
  46/* Construction mode */
  47enum {
  48        SC_LSEG_SR = 1, /* Make a logical segment having a super root */
  49        SC_LSEG_DSYNC,  /*
  50                         * Flush data blocks of a given file and make
  51                         * a logical segment without a super root.
  52                         */
  53        SC_FLUSH_FILE,  /*
   54                         * Flush data files; this leads to segment writes
   55                         * without creating a checkpoint.
  56                         */
  57        SC_FLUSH_DAT,   /*
  58                         * Flush DAT file.  This also creates segments
  59                         * without a checkpoint.
  60                         */
  61};
  62
  63/* Stage numbers of dirty block collection */
  64enum {
  65        NILFS_ST_INIT = 0,
  66        NILFS_ST_GC,            /* Collecting dirty blocks for GC */
  67        NILFS_ST_FILE,
  68        NILFS_ST_IFILE,
  69        NILFS_ST_CPFILE,
  70        NILFS_ST_SUFILE,
  71        NILFS_ST_DAT,
  72        NILFS_ST_SR,            /* Super root */
  73        NILFS_ST_DSYNC,         /* Data sync blocks */
  74        NILFS_ST_DONE,
  75};
  76
  77#define CREATE_TRACE_POINTS
  78#include <trace/events/nilfs2.h>
  79
  80/*
   81 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), and nilfs_sc_cstage_get() are
   82 * wrappers around the stage count (nilfs_sc_info->sc_stage.scnt).  Users of
   83 * the variable must go through them because every stage transition must emit
   84 * a trace event (trace_nilfs2_collection_stage_transition).
   85 *
   86 * nilfs_sc_cstage_get() isn't strictly needed for that purpose because it
   87 * doesn't produce tracepoint events.  It is provided just to make the
   88 * intention clear.
   89 */
  90static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
  91{
  92        sci->sc_stage.scnt++;
  93        trace_nilfs2_collection_stage_transition(sci);
  94}
  95
  96static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
  97{
  98        sci->sc_stage.scnt = next_scnt;
  99        trace_nilfs2_collection_stage_transition(sci);
 100}
 101
 102static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
 103{
 104        return sci->sc_stage.scnt;
 105}
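
/*
 * Editor's note (illustrative, not in the original source): a caller that
 * advances the collection stage should write, for example,
 *
 *	nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
 *
 * rather than assigning sci->sc_stage.scnt directly, so that the
 * trace_nilfs2_collection_stage_transition() event is not skipped.
 */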
 106
 107/* State flags of collection */
 108#define NILFS_CF_NODE           0x0001  /* Collecting node blocks */
 109#define NILFS_CF_IFILE_STARTED  0x0002  /* IFILE stage has started */
  110#define NILFS_CF_SUFREED        0x0004  /* segment usages have been freed */
 111#define NILFS_CF_HISTORY_MASK   (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
 112
 113/* Operations depending on the construction mode and file type */
 114struct nilfs_sc_operations {
 115        int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
 116                            struct inode *);
 117        int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
 118                            struct inode *);
 119        int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
 120                            struct inode *);
 121        void (*write_data_binfo)(struct nilfs_sc_info *,
 122                                 struct nilfs_segsum_pointer *,
 123                                 union nilfs_binfo *);
 124        void (*write_node_binfo)(struct nilfs_sc_info *,
 125                                 struct nilfs_segsum_pointer *,
 126                                 union nilfs_binfo *);
 127};
 128
 129/*
 130 * Other definitions
 131 */
 132static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
 133static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
 134static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
 135static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
 136
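/*
 * Editor's note: nilfs_cnt32_ge(a, b) is a wrap-around-safe "a >= b" test for
 * 32-bit sequence counters; the difference is evaluated as a signed 32-bit
 * value, so the comparison stays correct while the counters are within 2^31
 * of each other.  For example (illustrative values), with a == 0x00000001 and
 * b == 0xfffffffe the signed difference is 3, so a is treated as being ahead
 * of b even though it is numerically smaller.
 */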
 137#define nilfs_cnt32_ge(a, b)   \
 138        (typecheck(__u32, a) && typecheck(__u32, b) && \
 139         ((__s32)(a) - (__s32)(b) >= 0))
 140
 141static int nilfs_prepare_segment_lock(struct super_block *sb,
 142                                      struct nilfs_transaction_info *ti)
 143{
 144        struct nilfs_transaction_info *cur_ti = current->journal_info;
 145        void *save = NULL;
 146
 147        if (cur_ti) {
 148                if (cur_ti->ti_magic == NILFS_TI_MAGIC)
 149                        return ++cur_ti->ti_count;
 150
 151                /*
  152                 * If the journal_info field is occupied by another FS,
  153                 * it is saved here and will be restored on
  154                 * nilfs_transaction_commit().
 155                 */
 156                nilfs_warn(sb, "journal info from a different FS");
 157                save = current->journal_info;
 158        }
 159        if (!ti) {
 160                ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
 161                if (!ti)
 162                        return -ENOMEM;
 163                ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
 164        } else {
 165                ti->ti_flags = 0;
 166        }
 167        ti->ti_count = 0;
 168        ti->ti_save = save;
 169        ti->ti_magic = NILFS_TI_MAGIC;
 170        current->journal_info = ti;
 171        return 0;
 172}
 173
 174/**
 175 * nilfs_transaction_begin - start indivisible file operations.
 176 * @sb: super block
 177 * @ti: nilfs_transaction_info
 178 * @vacancy_check: flags for vacancy rate checks
 179 *
  180 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
  181 * the segment semaphore, to make segment construction and write tasks
  182 * mutually exclusive.  It is used in pairs with nilfs_transaction_commit().
  183 * The region enclosed by these two functions can be nested.  To avoid
  184 * deadlock, the semaphore is only acquired or released in the outermost call.
  185 *
  186 * This function uses a nilfs_transaction_info struct to keep the transaction
  187 * context.  It is initialized and hooked onto the current task in the
  188 * outermost call.  If a pre-allocated struct is passed via @ti, it is used
  189 * instead; otherwise a new struct is allocated from a slab cache.
  190 *
  191 * When the @vacancy_check flag is set, this function checks the amount of
  192 * free space and waits for the GC to reclaim disk space if capacity is low.
  193 *
  194 * Return Value: On success, 0 is returned.  On error, one of the following
  195 * negative error codes is returned.
 196 *
 197 * %-ENOMEM - Insufficient memory available.
 198 *
 199 * %-ENOSPC - No space left on device
 200 */
 201int nilfs_transaction_begin(struct super_block *sb,
 202                            struct nilfs_transaction_info *ti,
 203                            int vacancy_check)
 204{
 205        struct the_nilfs *nilfs;
 206        int ret = nilfs_prepare_segment_lock(sb, ti);
 207        struct nilfs_transaction_info *trace_ti;
 208
 209        if (unlikely(ret < 0))
 210                return ret;
 211        if (ret > 0) {
 212                trace_ti = current->journal_info;
 213
 214                trace_nilfs2_transaction_transition(sb, trace_ti,
 215                                    trace_ti->ti_count, trace_ti->ti_flags,
 216                                    TRACE_NILFS2_TRANSACTION_BEGIN);
 217                return 0;
 218        }
 219
 220        sb_start_intwrite(sb);
 221
 222        nilfs = sb->s_fs_info;
 223        down_read(&nilfs->ns_segctor_sem);
 224        if (vacancy_check && nilfs_near_disk_full(nilfs)) {
 225                up_read(&nilfs->ns_segctor_sem);
 226                ret = -ENOSPC;
 227                goto failed;
 228        }
 229
 230        trace_ti = current->journal_info;
 231        trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
 232                                            trace_ti->ti_flags,
 233                                            TRACE_NILFS2_TRANSACTION_BEGIN);
 234        return 0;
 235
 236 failed:
 237        ti = current->journal_info;
 238        current->journal_info = ti->ti_save;
 239        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
 240                kmem_cache_free(nilfs_transaction_cachep, ti);
 241        sb_end_intwrite(sb);
 242        return ret;
 243}
 244
 245/**
 246 * nilfs_transaction_commit - commit indivisible file operations.
 247 * @sb: super block
 248 *
 249 * nilfs_transaction_commit() releases the read semaphore which is
  250 * acquired by nilfs_transaction_begin().  This is only done in the
  251 * outermost call of this function.  If a commit flag is set,
 252 * nilfs_transaction_commit() sets a timer to start the segment
 253 * constructor.  If a sync flag is set, it starts construction
 254 * directly.
 255 */
 256int nilfs_transaction_commit(struct super_block *sb)
 257{
 258        struct nilfs_transaction_info *ti = current->journal_info;
 259        struct the_nilfs *nilfs = sb->s_fs_info;
 260        int err = 0;
 261
 262        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
 263        ti->ti_flags |= NILFS_TI_COMMIT;
 264        if (ti->ti_count > 0) {
 265                ti->ti_count--;
 266                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 267                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
 268                return 0;
 269        }
 270        if (nilfs->ns_writer) {
 271                struct nilfs_sc_info *sci = nilfs->ns_writer;
 272
 273                if (ti->ti_flags & NILFS_TI_COMMIT)
 274                        nilfs_segctor_start_timer(sci);
 275                if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
 276                        nilfs_segctor_do_flush(sci, 0);
 277        }
 278        up_read(&nilfs->ns_segctor_sem);
 279        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 280                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
 281
 282        current->journal_info = ti->ti_save;
 283
 284        if (ti->ti_flags & NILFS_TI_SYNC)
 285                err = nilfs_construct_segment(sb);
 286        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
 287                kmem_cache_free(nilfs_transaction_cachep, ti);
 288        sb_end_intwrite(sb);
 289        return err;
 290}
 291
 292void nilfs_transaction_abort(struct super_block *sb)
 293{
 294        struct nilfs_transaction_info *ti = current->journal_info;
 295        struct the_nilfs *nilfs = sb->s_fs_info;
 296
 297        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
 298        if (ti->ti_count > 0) {
 299                ti->ti_count--;
 300                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 301                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
 302                return;
 303        }
 304        up_read(&nilfs->ns_segctor_sem);
 305
 306        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 307                    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
 308
 309        current->journal_info = ti->ti_save;
 310        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
 311                kmem_cache_free(nilfs_transaction_cachep, ti);
 312        sb_end_intwrite(sb);
 313}
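
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a typical caller pairs nilfs_transaction_begin() with
 * nilfs_transaction_commit() and falls back to nilfs_transaction_abort() on
 * failure.  The update helper below is hypothetical.
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (err)
 *		return err;	// e.g. -ENOSPC when the vacancy check fails
 *
 *	err = nilfs_do_some_update(inode);	// hypothetical operation
 *	if (unlikely(err)) {
 *		nilfs_transaction_abort(sb);
 *		return err;
 *	}
 *	return nilfs_transaction_commit(sb);
 */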
 314
 315void nilfs_relax_pressure_in_lock(struct super_block *sb)
 316{
 317        struct the_nilfs *nilfs = sb->s_fs_info;
 318        struct nilfs_sc_info *sci = nilfs->ns_writer;
 319
 320        if (!sci || !sci->sc_flush_request)
 321                return;
 322
 323        set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
 324        up_read(&nilfs->ns_segctor_sem);
 325
 326        down_write(&nilfs->ns_segctor_sem);
 327        if (sci->sc_flush_request &&
 328            test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
 329                struct nilfs_transaction_info *ti = current->journal_info;
 330
 331                ti->ti_flags |= NILFS_TI_WRITER;
 332                nilfs_segctor_do_immediate_flush(sci);
 333                ti->ti_flags &= ~NILFS_TI_WRITER;
 334        }
 335        downgrade_write(&nilfs->ns_segctor_sem);
 336}
 337
 338static void nilfs_transaction_lock(struct super_block *sb,
 339                                   struct nilfs_transaction_info *ti,
 340                                   int gcflag)
 341{
 342        struct nilfs_transaction_info *cur_ti = current->journal_info;
 343        struct the_nilfs *nilfs = sb->s_fs_info;
 344        struct nilfs_sc_info *sci = nilfs->ns_writer;
 345
 346        WARN_ON(cur_ti);
 347        ti->ti_flags = NILFS_TI_WRITER;
 348        ti->ti_count = 0;
 349        ti->ti_save = cur_ti;
 350        ti->ti_magic = NILFS_TI_MAGIC;
 351        current->journal_info = ti;
 352
 353        for (;;) {
 354                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 355                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);
 356
 357                down_write(&nilfs->ns_segctor_sem);
 358                if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
 359                        break;
 360
 361                nilfs_segctor_do_immediate_flush(sci);
 362
 363                up_write(&nilfs->ns_segctor_sem);
 364                cond_resched();
 365        }
 366        if (gcflag)
 367                ti->ti_flags |= NILFS_TI_GC;
 368
 369        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 370                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
 371}
 372
 373static void nilfs_transaction_unlock(struct super_block *sb)
 374{
 375        struct nilfs_transaction_info *ti = current->journal_info;
 376        struct the_nilfs *nilfs = sb->s_fs_info;
 377
 378        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
 379        BUG_ON(ti->ti_count > 0);
 380
 381        up_write(&nilfs->ns_segctor_sem);
 382        current->journal_info = ti->ti_save;
 383
 384        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 385                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
 386}
 387
 388static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
 389                                            struct nilfs_segsum_pointer *ssp,
 390                                            unsigned int bytes)
 391{
 392        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
 393        unsigned int blocksize = sci->sc_super->s_blocksize;
 394        void *p;
 395
 396        if (unlikely(ssp->offset + bytes > blocksize)) {
 397                ssp->offset = 0;
 398                BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
 399                                               &segbuf->sb_segsum_buffers));
 400                ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
 401        }
 402        p = ssp->bh->b_data + ssp->offset;
 403        ssp->offset += bytes;
 404        return p;
 405}
 406
 407/**
 408 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 409 * @sci: nilfs_sc_info
 410 */
 411static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
 412{
 413        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
 414        struct buffer_head *sumbh;
 415        unsigned int sumbytes;
 416        unsigned int flags = 0;
 417        int err;
 418
 419        if (nilfs_doing_gc())
 420                flags = NILFS_SS_GC;
 421        err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
 422        if (unlikely(err))
 423                return err;
 424
 425        sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
 426        sumbytes = segbuf->sb_sum.sumbytes;
 427        sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
 428        sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
 429        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
 430        return 0;
 431}
 432
 433static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
 434{
 435        sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
 436        if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
 437                return -E2BIG; /*
 438                                * The current segment is filled up
 439                                * (internal code)
 440                                */
 441        sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
 442        return nilfs_segctor_reset_segment_buffer(sci);
 443}
 444
 445static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
 446{
 447        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
 448        int err;
 449
 450        if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
 451                err = nilfs_segctor_feed_segment(sci);
 452                if (err)
 453                        return err;
 454                segbuf = sci->sc_curseg;
 455        }
 456        err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
 457        if (likely(!err))
 458                segbuf->sb_sum.flags |= NILFS_SS_SR;
 459        return err;
 460}
 461
 462/*
 463 * Functions for making segment summary and payloads
 464 */
 465static int nilfs_segctor_segsum_block_required(
 466        struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
 467        unsigned int binfo_size)
 468{
 469        unsigned int blocksize = sci->sc_super->s_blocksize;
  470        /* finfo and binfo are small enough relative to the block size */
 471
 472        return ssp->offset + binfo_size +
 473                (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
 474                blocksize;
 475}
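
/*
 * Editor's illustration: with a 4096-byte block size and ssp->offset == 4090,
 * even the smallest binfo entry (a single __le64) would cross the block
 * boundary, so the function returns true and the caller extends the segment
 * summary area by one block before mapping the entry.
 */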
 476
 477static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
 478                                      struct inode *inode)
 479{
 480        sci->sc_curseg->sb_sum.nfinfo++;
 481        sci->sc_binfo_ptr = sci->sc_finfo_ptr;
 482        nilfs_segctor_map_segsum_entry(
 483                sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
 484
 485        if (NILFS_I(inode)->i_root &&
 486            !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
 487                set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
 488        /* skip finfo */
 489}
 490
 491static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
 492                                    struct inode *inode)
 493{
 494        struct nilfs_finfo *finfo;
 495        struct nilfs_inode_info *ii;
 496        struct nilfs_segment_buffer *segbuf;
 497        __u64 cno;
 498
 499        if (sci->sc_blk_cnt == 0)
 500                return;
 501
 502        ii = NILFS_I(inode);
 503
 504        if (test_bit(NILFS_I_GCINODE, &ii->i_state))
 505                cno = ii->i_cno;
 506        else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
 507                cno = 0;
 508        else
 509                cno = sci->sc_cno;
 510
 511        finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
 512                                                 sizeof(*finfo));
 513        finfo->fi_ino = cpu_to_le64(inode->i_ino);
 514        finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
 515        finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
 516        finfo->fi_cno = cpu_to_le64(cno);
 517
 518        segbuf = sci->sc_curseg;
 519        segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
 520                sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
 521        sci->sc_finfo_ptr = sci->sc_binfo_ptr;
 522        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
 523}
 524
 525static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
 526                                        struct buffer_head *bh,
 527                                        struct inode *inode,
 528                                        unsigned int binfo_size)
 529{
 530        struct nilfs_segment_buffer *segbuf;
 531        int required, err = 0;
 532
 533 retry:
 534        segbuf = sci->sc_curseg;
 535        required = nilfs_segctor_segsum_block_required(
 536                sci, &sci->sc_binfo_ptr, binfo_size);
 537        if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
 538                nilfs_segctor_end_finfo(sci, inode);
 539                err = nilfs_segctor_feed_segment(sci);
 540                if (err)
 541                        return err;
 542                goto retry;
 543        }
 544        if (unlikely(required)) {
 545                err = nilfs_segbuf_extend_segsum(segbuf);
 546                if (unlikely(err))
 547                        goto failed;
 548        }
 549        if (sci->sc_blk_cnt == 0)
 550                nilfs_segctor_begin_finfo(sci, inode);
 551
 552        nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
 553        /* Substitution to vblocknr is delayed until update_blocknr() */
 554        nilfs_segbuf_add_file_buffer(segbuf, bh);
 555        sci->sc_blk_cnt++;
 556 failed:
 557        return err;
 558}
 559
 560/*
 561 * Callback functions that enumerate, mark, and collect dirty blocks
 562 */
 563static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
 564                                   struct buffer_head *bh, struct inode *inode)
 565{
 566        int err;
 567
 568        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
 569        if (err < 0)
 570                return err;
 571
 572        err = nilfs_segctor_add_file_block(sci, bh, inode,
 573                                           sizeof(struct nilfs_binfo_v));
 574        if (!err)
 575                sci->sc_datablk_cnt++;
 576        return err;
 577}
 578
 579static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
 580                                   struct buffer_head *bh,
 581                                   struct inode *inode)
 582{
 583        return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
 584}
 585
 586static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
 587                                   struct buffer_head *bh,
 588                                   struct inode *inode)
 589{
 590        WARN_ON(!buffer_dirty(bh));
 591        return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
 592}
 593
 594static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
 595                                        struct nilfs_segsum_pointer *ssp,
 596                                        union nilfs_binfo *binfo)
 597{
 598        struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
 599                sci, ssp, sizeof(*binfo_v));
 600        *binfo_v = binfo->bi_v;
 601}
 602
 603static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
 604                                        struct nilfs_segsum_pointer *ssp,
 605                                        union nilfs_binfo *binfo)
 606{
 607        __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
 608                sci, ssp, sizeof(*vblocknr));
 609        *vblocknr = binfo->bi_v.bi_vblocknr;
 610}
 611
 612static const struct nilfs_sc_operations nilfs_sc_file_ops = {
 613        .collect_data = nilfs_collect_file_data,
 614        .collect_node = nilfs_collect_file_node,
 615        .collect_bmap = nilfs_collect_file_bmap,
 616        .write_data_binfo = nilfs_write_file_data_binfo,
 617        .write_node_binfo = nilfs_write_file_node_binfo,
 618};
 619
 620static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
 621                                  struct buffer_head *bh, struct inode *inode)
 622{
 623        int err;
 624
 625        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
 626        if (err < 0)
 627                return err;
 628
 629        err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
 630        if (!err)
 631                sci->sc_datablk_cnt++;
 632        return err;
 633}
 634
 635static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
 636                                  struct buffer_head *bh, struct inode *inode)
 637{
 638        WARN_ON(!buffer_dirty(bh));
 639        return nilfs_segctor_add_file_block(sci, bh, inode,
 640                                            sizeof(struct nilfs_binfo_dat));
 641}
 642
 643static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
 644                                       struct nilfs_segsum_pointer *ssp,
 645                                       union nilfs_binfo *binfo)
 646{
 647        __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
 648                                                          sizeof(*blkoff));
 649        *blkoff = binfo->bi_dat.bi_blkoff;
 650}
 651
 652static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
 653                                       struct nilfs_segsum_pointer *ssp,
 654                                       union nilfs_binfo *binfo)
 655{
 656        struct nilfs_binfo_dat *binfo_dat =
 657                nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
 658        *binfo_dat = binfo->bi_dat;
 659}
 660
 661static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
 662        .collect_data = nilfs_collect_dat_data,
 663        .collect_node = nilfs_collect_file_node,
 664        .collect_bmap = nilfs_collect_dat_bmap,
 665        .write_data_binfo = nilfs_write_dat_data_binfo,
 666        .write_node_binfo = nilfs_write_dat_node_binfo,
 667};
 668
 669static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
 670        .collect_data = nilfs_collect_file_data,
 671        .collect_node = NULL,
 672        .collect_bmap = NULL,
 673        .write_data_binfo = nilfs_write_file_data_binfo,
 674        .write_node_binfo = NULL,
 675};
 676
 677static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
 678                                              struct list_head *listp,
 679                                              size_t nlimit,
 680                                              loff_t start, loff_t end)
 681{
 682        struct address_space *mapping = inode->i_mapping;
 683        struct pagevec pvec;
 684        pgoff_t index = 0, last = ULONG_MAX;
 685        size_t ndirties = 0;
 686        int i;
 687
 688        if (unlikely(start != 0 || end != LLONG_MAX)) {
 689                /*
  690                 * A valid range is given for sync-ing data pages.  The
  691                 * range is rounded to page boundaries, so extra dirty
  692                 * buffers may be included if blocksize < pagesize.
 693                 */
 694                index = start >> PAGE_SHIFT;
 695                last = end >> PAGE_SHIFT;
 696        }
 697        pagevec_init(&pvec);
 698 repeat:
 699        if (unlikely(index > last) ||
 700            !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
 701                                PAGECACHE_TAG_DIRTY))
 702                return ndirties;
 703
 704        for (i = 0; i < pagevec_count(&pvec); i++) {
 705                struct buffer_head *bh, *head;
 706                struct page *page = pvec.pages[i];
 707
 708                lock_page(page);
 709                if (!page_has_buffers(page))
 710                        create_empty_buffers(page, i_blocksize(inode), 0);
 711                unlock_page(page);
 712
 713                bh = head = page_buffers(page);
 714                do {
 715                        if (!buffer_dirty(bh) || buffer_async_write(bh))
 716                                continue;
 717                        get_bh(bh);
 718                        list_add_tail(&bh->b_assoc_buffers, listp);
 719                        ndirties++;
 720                        if (unlikely(ndirties >= nlimit)) {
 721                                pagevec_release(&pvec);
 722                                cond_resched();
 723                                return ndirties;
 724                        }
 725                } while (bh = bh->b_this_page, bh != head);
 726        }
 727        pagevec_release(&pvec);
 728        cond_resched();
 729        goto repeat;
 730}
 731
 732static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
 733                                            struct list_head *listp)
 734{
 735        struct nilfs_inode_info *ii = NILFS_I(inode);
 736        struct address_space *mapping = &ii->i_btnode_cache;
 737        struct pagevec pvec;
 738        struct buffer_head *bh, *head;
 739        unsigned int i;
 740        pgoff_t index = 0;
 741
 742        pagevec_init(&pvec);
 743
 744        while (pagevec_lookup_tag(&pvec, mapping, &index,
 745                                        PAGECACHE_TAG_DIRTY)) {
 746                for (i = 0; i < pagevec_count(&pvec); i++) {
 747                        bh = head = page_buffers(pvec.pages[i]);
 748                        do {
 749                                if (buffer_dirty(bh) &&
 750                                                !buffer_async_write(bh)) {
 751                                        get_bh(bh);
 752                                        list_add_tail(&bh->b_assoc_buffers,
 753                                                      listp);
 754                                }
 755                                bh = bh->b_this_page;
 756                        } while (bh != head);
 757                }
 758                pagevec_release(&pvec);
 759                cond_resched();
 760        }
 761}
 762
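/*
 * Editor's note: nilfs_dispose_list() drops the references to the inodes
 * queued on @head.  The per-filesystem ns_inode_lock is a spinlock, so inodes
 * are first collected into a small on-stack vector (SC_N_INODEVEC entries)
 * and iput(), which may sleep, is only called after the lock is released.
 * When @force is zero, inodes still flagged NILFS_I_DIRTY are requeued onto
 * ns_dirty_files instead of being released.
 */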
 763static void nilfs_dispose_list(struct the_nilfs *nilfs,
 764                               struct list_head *head, int force)
 765{
 766        struct nilfs_inode_info *ii, *n;
 767        struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
 768        unsigned int nv = 0;
 769
 770        while (!list_empty(head)) {
 771                spin_lock(&nilfs->ns_inode_lock);
 772                list_for_each_entry_safe(ii, n, head, i_dirty) {
 773                        list_del_init(&ii->i_dirty);
 774                        if (force) {
 775                                if (unlikely(ii->i_bh)) {
 776                                        brelse(ii->i_bh);
 777                                        ii->i_bh = NULL;
 778                                }
 779                        } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
 780                                set_bit(NILFS_I_QUEUED, &ii->i_state);
 781                                list_add_tail(&ii->i_dirty,
 782                                              &nilfs->ns_dirty_files);
 783                                continue;
 784                        }
 785                        ivec[nv++] = ii;
 786                        if (nv == SC_N_INODEVEC)
 787                                break;
 788                }
 789                spin_unlock(&nilfs->ns_inode_lock);
 790
 791                for (pii = ivec; nv > 0; pii++, nv--)
 792                        iput(&(*pii)->vfs_inode);
 793        }
 794}
 795
 796static void nilfs_iput_work_func(struct work_struct *work)
 797{
 798        struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
 799                                                 sc_iput_work);
 800        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 801
 802        nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
 803}
 804
 805static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
 806                                     struct nilfs_root *root)
 807{
 808        int ret = 0;
 809
 810        if (nilfs_mdt_fetch_dirty(root->ifile))
 811                ret++;
 812        if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
 813                ret++;
 814        if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
 815                ret++;
 816        if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
 817                ret++;
 818        return ret;
 819}
 820
 821static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
 822{
 823        return list_empty(&sci->sc_dirty_files) &&
 824                !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
 825                sci->sc_nfreesegs == 0 &&
 826                (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
 827}
 828
 829static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
 830{
 831        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 832        int ret = 0;
 833
 834        if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
 835                set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
 836
 837        spin_lock(&nilfs->ns_inode_lock);
 838        if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
 839                ret++;
 840
 841        spin_unlock(&nilfs->ns_inode_lock);
 842        return ret;
 843}
 844
 845static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
 846{
 847        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 848
 849        nilfs_mdt_clear_dirty(sci->sc_root->ifile);
 850        nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
 851        nilfs_mdt_clear_dirty(nilfs->ns_sufile);
 852        nilfs_mdt_clear_dirty(nilfs->ns_dat);
 853}
 854
 855static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
 856{
 857        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 858        struct buffer_head *bh_cp;
 859        struct nilfs_checkpoint *raw_cp;
 860        int err;
 861
 862        /* XXX: this interface will be changed */
 863        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
 864                                          &raw_cp, &bh_cp);
 865        if (likely(!err)) {
 866                /*
  867                 * The following code duplicates part of cpfile, but it is
  868                 * needed to collect the checkpoint even if it was not newly
  869                 * created.
 870                 */
 871                mark_buffer_dirty(bh_cp);
 872                nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
 873                nilfs_cpfile_put_checkpoint(
 874                        nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
 875        } else
 876                WARN_ON(err == -EINVAL || err == -ENOENT);
 877
 878        return err;
 879}
 880
 881static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
 882{
 883        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 884        struct buffer_head *bh_cp;
 885        struct nilfs_checkpoint *raw_cp;
 886        int err;
 887
 888        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
 889                                          &raw_cp, &bh_cp);
 890        if (unlikely(err)) {
 891                WARN_ON(err == -EINVAL || err == -ENOENT);
 892                goto failed_ibh;
 893        }
 894        raw_cp->cp_snapshot_list.ssl_next = 0;
 895        raw_cp->cp_snapshot_list.ssl_prev = 0;
 896        raw_cp->cp_inodes_count =
 897                cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
 898        raw_cp->cp_blocks_count =
 899                cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
 900        raw_cp->cp_nblk_inc =
 901                cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
 902        raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
 903        raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);
 904
 905        if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
 906                nilfs_checkpoint_clear_minor(raw_cp);
 907        else
 908                nilfs_checkpoint_set_minor(raw_cp);
 909
 910        nilfs_write_inode_common(sci->sc_root->ifile,
 911                                 &raw_cp->cp_ifile_inode, 1);
 912        nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
 913        return 0;
 914
 915 failed_ibh:
 916        return err;
 917}
 918
 919static void nilfs_fill_in_file_bmap(struct inode *ifile,
 920                                    struct nilfs_inode_info *ii)
 921
 922{
 923        struct buffer_head *ibh;
 924        struct nilfs_inode *raw_inode;
 925
 926        if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
 927                ibh = ii->i_bh;
 928                BUG_ON(!ibh);
 929                raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
 930                                                  ibh);
 931                nilfs_bmap_write(ii->i_bmap, raw_inode);
 932                nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
 933        }
 934}
 935
 936static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
 937{
 938        struct nilfs_inode_info *ii;
 939
 940        list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
 941                nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
 942                set_bit(NILFS_I_COLLECTED, &ii->i_state);
 943        }
 944}
 945
 946static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
 947                                             struct the_nilfs *nilfs)
 948{
 949        struct buffer_head *bh_sr;
 950        struct nilfs_super_root *raw_sr;
 951        unsigned int isz, srsz;
 952
 953        bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
 954        raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
 955        isz = nilfs->ns_inode_size;
 956        srsz = NILFS_SR_BYTES(isz);
 957
 958        raw_sr->sr_bytes = cpu_to_le16(srsz);
 959        raw_sr->sr_nongc_ctime
 960                = cpu_to_le64(nilfs_doing_gc() ?
 961                              nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
 962        raw_sr->sr_flags = 0;
 963
 964        nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
 965                                 NILFS_SR_DAT_OFFSET(isz), 1);
 966        nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
 967                                 NILFS_SR_CPFILE_OFFSET(isz), 1);
 968        nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
 969                                 NILFS_SR_SUFILE_OFFSET(isz), 1);
 970        memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
 971}
 972
 973static void nilfs_redirty_inodes(struct list_head *head)
 974{
 975        struct nilfs_inode_info *ii;
 976
 977        list_for_each_entry(ii, head, i_dirty) {
 978                if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
 979                        clear_bit(NILFS_I_COLLECTED, &ii->i_state);
 980        }
 981}
 982
 983static void nilfs_drop_collected_inodes(struct list_head *head)
 984{
 985        struct nilfs_inode_info *ii;
 986
 987        list_for_each_entry(ii, head, i_dirty) {
 988                if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
 989                        continue;
 990
 991                clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
 992                set_bit(NILFS_I_UPDATED, &ii->i_state);
 993        }
 994}
 995
 996static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
 997                                       struct inode *inode,
 998                                       struct list_head *listp,
 999                                       int (*collect)(struct nilfs_sc_info *,
1000                                                      struct buffer_head *,
1001                                                      struct inode *))
1002{
1003        struct buffer_head *bh, *n;
1004        int err = 0;
1005
1006        if (collect) {
1007                list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
1008                        list_del_init(&bh->b_assoc_buffers);
1009                        err = collect(sci, bh, inode);
1010                        brelse(bh);
1011                        if (unlikely(err))
1012                                goto dispose_buffers;
1013                }
1014                return 0;
1015        }
1016
1017 dispose_buffers:
1018        while (!list_empty(listp)) {
1019                bh = list_first_entry(listp, struct buffer_head,
1020                                      b_assoc_buffers);
1021                list_del_init(&bh->b_assoc_buffers);
1022                brelse(bh);
1023        }
1024        return err;
1025}
1026
1027static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
1028{
1029        /* Remaining number of blocks within segment buffer */
1030        return sci->sc_segbuf_nblocks -
1031                (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
1032}
1033
1034static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
1035                                   struct inode *inode,
1036                                   const struct nilfs_sc_operations *sc_ops)
1037{
1038        LIST_HEAD(data_buffers);
1039        LIST_HEAD(node_buffers);
1040        int err;
1041
1042        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1043                size_t n, rest = nilfs_segctor_buffer_rest(sci);
1044
1045                n = nilfs_lookup_dirty_data_buffers(
1046                        inode, &data_buffers, rest + 1, 0, LLONG_MAX);
1047                if (n > rest) {
1048                        err = nilfs_segctor_apply_buffers(
1049                                sci, inode, &data_buffers,
1050                                sc_ops->collect_data);
1051                        BUG_ON(!err); /* always receive -E2BIG or true error */
1052                        goto break_or_fail;
1053                }
1054        }
1055        nilfs_lookup_dirty_node_buffers(inode, &node_buffers);
1056
1057        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1058                err = nilfs_segctor_apply_buffers(
1059                        sci, inode, &data_buffers, sc_ops->collect_data);
1060                if (unlikely(err)) {
1061                        /* dispose node list */
1062                        nilfs_segctor_apply_buffers(
1063                                sci, inode, &node_buffers, NULL);
1064                        goto break_or_fail;
1065                }
1066                sci->sc_stage.flags |= NILFS_CF_NODE;
1067        }
1068        /* Collect node */
1069        err = nilfs_segctor_apply_buffers(
1070                sci, inode, &node_buffers, sc_ops->collect_node);
1071        if (unlikely(err))
1072                goto break_or_fail;
1073
1074        nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
1075        err = nilfs_segctor_apply_buffers(
1076                sci, inode, &node_buffers, sc_ops->collect_bmap);
1077        if (unlikely(err))
1078                goto break_or_fail;
1079
1080        nilfs_segctor_end_finfo(sci, inode);
1081        sci->sc_stage.flags &= ~NILFS_CF_NODE;
1082
1083 break_or_fail:
1084        return err;
1085}
1086
1087static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
1088                                         struct inode *inode)
1089{
1090        LIST_HEAD(data_buffers);
1091        size_t n, rest = nilfs_segctor_buffer_rest(sci);
1092        int err;
1093
1094        n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
1095                                            sci->sc_dsync_start,
1096                                            sci->sc_dsync_end);
1097
1098        err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
1099                                          nilfs_collect_file_data);
1100        if (!err) {
1101                nilfs_segctor_end_finfo(sci, inode);
1102                BUG_ON(n > rest);
1103                /* always receive -E2BIG or true error if n > rest */
1104        }
1105        return err;
1106}
1107
1108static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
1109{
1110        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1111        struct list_head *head;
1112        struct nilfs_inode_info *ii;
1113        size_t ndone;
1114        int err = 0;
1115
1116        switch (nilfs_sc_cstage_get(sci)) {
1117        case NILFS_ST_INIT:
1118                /* Pre-processes */
1119                sci->sc_stage.flags = 0;
1120
1121                if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
1122                        sci->sc_nblk_inc = 0;
1123                        sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
1124                        if (mode == SC_LSEG_DSYNC) {
1125                                nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
1126                                goto dsync_mode;
1127                        }
1128                }
1129
1130                sci->sc_stage.dirty_file_ptr = NULL;
1131                sci->sc_stage.gc_inode_ptr = NULL;
1132                if (mode == SC_FLUSH_DAT) {
1133                        nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
1134                        goto dat_stage;
1135                }
1136                nilfs_sc_cstage_inc(sci);
1137                fallthrough;
1138        case NILFS_ST_GC:
1139                if (nilfs_doing_gc()) {
1140                        head = &sci->sc_gc_inodes;
1141                        ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
1142                                                head, i_dirty);
1143                        list_for_each_entry_continue(ii, head, i_dirty) {
1144                                err = nilfs_segctor_scan_file(
1145                                        sci, &ii->vfs_inode,
1146                                        &nilfs_sc_file_ops);
1147                                if (unlikely(err)) {
1148                                        sci->sc_stage.gc_inode_ptr = list_entry(
1149                                                ii->i_dirty.prev,
1150                                                struct nilfs_inode_info,
1151                                                i_dirty);
1152                                        goto break_or_fail;
1153                                }
1154                                set_bit(NILFS_I_COLLECTED, &ii->i_state);
1155                        }
1156                        sci->sc_stage.gc_inode_ptr = NULL;
1157                }
1158                nilfs_sc_cstage_inc(sci);
1159                fallthrough;
1160        case NILFS_ST_FILE:
1161                head = &sci->sc_dirty_files;
1162                ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
1163                                        i_dirty);
1164                list_for_each_entry_continue(ii, head, i_dirty) {
1165                        clear_bit(NILFS_I_DIRTY, &ii->i_state);
1166
1167                        err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
1168                                                      &nilfs_sc_file_ops);
1169                        if (unlikely(err)) {
1170                                sci->sc_stage.dirty_file_ptr =
1171                                        list_entry(ii->i_dirty.prev,
1172                                                   struct nilfs_inode_info,
1173                                                   i_dirty);
1174                                goto break_or_fail;
1175                        }
1176                        /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
1177                        /* XXX: required ? */
1178                }
1179                sci->sc_stage.dirty_file_ptr = NULL;
1180                if (mode == SC_FLUSH_FILE) {
1181                        nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1182                        return 0;
1183                }
1184                nilfs_sc_cstage_inc(sci);
1185                sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
1186                fallthrough;
1187        case NILFS_ST_IFILE:
1188                err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
1189                                              &nilfs_sc_file_ops);
1190                if (unlikely(err))
1191                        break;
1192                nilfs_sc_cstage_inc(sci);
1193                /* Creating a checkpoint */
1194                err = nilfs_segctor_create_checkpoint(sci);
1195                if (unlikely(err))
1196                        break;
1197                fallthrough;
1198        case NILFS_ST_CPFILE:
1199                err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
1200                                              &nilfs_sc_file_ops);
1201                if (unlikely(err))
1202                        break;
1203                nilfs_sc_cstage_inc(sci);
1204                fallthrough;
1205        case NILFS_ST_SUFILE:
1206                err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
1207                                         sci->sc_nfreesegs, &ndone);
1208                if (unlikely(err)) {
1209                        nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1210                                                  sci->sc_freesegs, ndone,
1211                                                  NULL);
1212                        break;
1213                }
1214                sci->sc_stage.flags |= NILFS_CF_SUFREED;
1215
1216                err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
1217                                              &nilfs_sc_file_ops);
1218                if (unlikely(err))
1219                        break;
1220                nilfs_sc_cstage_inc(sci);
1221                fallthrough;
1222        case NILFS_ST_DAT:
1223 dat_stage:
1224                err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
1225                                              &nilfs_sc_dat_ops);
1226                if (unlikely(err))
1227                        break;
1228                if (mode == SC_FLUSH_DAT) {
1229                        nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1230                        return 0;
1231                }
1232                nilfs_sc_cstage_inc(sci);
1233                fallthrough;
1234        case NILFS_ST_SR:
1235                if (mode == SC_LSEG_SR) {
1236                        /* Appending a super root */
1237                        err = nilfs_segctor_add_super_root(sci);
1238                        if (unlikely(err))
1239                                break;
1240                }
1241                /* End of a logical segment */
1242                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1243                nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1244                return 0;
1245        case NILFS_ST_DSYNC:
1246 dsync_mode:
1247                sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
1248                ii = sci->sc_dsync_inode;
1249                if (!test_bit(NILFS_I_BUSY, &ii->i_state))
1250                        break;
1251
1252                err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1253                if (unlikely(err))
1254                        break;
1255                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1256                nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1257                return 0;
1258        case NILFS_ST_DONE:
1259                return 0;
1260        default:
1261                BUG();
1262        }
1263
1264 break_or_fail:
1265        return err;
1266}
1267
1268/**
1269 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
1270 * @sci: nilfs_sc_info
1271 * @nilfs: nilfs object
1272 */
1273static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
1274                                            struct the_nilfs *nilfs)
1275{
1276        struct nilfs_segment_buffer *segbuf, *prev;
1277        __u64 nextnum;
1278        int err, alloc = 0;
1279
1280        segbuf = nilfs_segbuf_new(sci->sc_super);
1281        if (unlikely(!segbuf))
1282                return -ENOMEM;
1283
1284        if (list_empty(&sci->sc_write_logs)) {
1285                nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
1286                                 nilfs->ns_pseg_offset, nilfs);
1287                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1288                        nilfs_shift_to_next_segment(nilfs);
1289                        nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
1290                }
1291
1292                segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
1293                nextnum = nilfs->ns_nextnum;
1294
1295                if (nilfs->ns_segnum == nilfs->ns_nextnum)
1296                        /* Start from the head of a new full segment */
1297                        alloc++;
1298        } else {
1299                /* Continue logs */
1300                prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1301                nilfs_segbuf_map_cont(segbuf, prev);
1302                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
1303                nextnum = prev->sb_nextnum;
1304
1305                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1306                        nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1307                        segbuf->sb_sum.seg_seq++;
1308                        alloc++;
1309                }
1310        }
1311
1312        err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
1313        if (err)
1314                goto failed;
1315
1316        if (alloc) {
1317                err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
1318                if (err)
1319                        goto failed;
1320        }
1321        nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
1322
1323        BUG_ON(!list_empty(&sci->sc_segbufs));
1324        list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
1325        sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
1326        return 0;
1327
1328 failed:
1329        nilfs_segbuf_free(segbuf);
1330        return err;
1331}
1332
1333static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
1334                                         struct the_nilfs *nilfs, int nadd)
1335{
1336        struct nilfs_segment_buffer *segbuf, *prev;
1337        struct inode *sufile = nilfs->ns_sufile;
1338        __u64 nextnextnum;
1339        LIST_HEAD(list);
1340        int err, ret, i;
1341
1342        prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
1343        /*
 1344         * Since the segment specified by nextnum might have been allocated
 1345         * during the previous construction, the buffer containing its segusage
 1346         * may not be dirty.  The following call ensures that the buffer is
 1347         * dirty and pins it in memory until the sufile is written.
1348         */
1349        err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
1350        if (unlikely(err))
1351                return err;
1352
1353        for (i = 0; i < nadd; i++) {
1354                /* extend segment info */
1355                err = -ENOMEM;
1356                segbuf = nilfs_segbuf_new(sci->sc_super);
1357                if (unlikely(!segbuf))
1358                        goto failed;
1359
 1360                /* map this buffer to the on-disk region of the segment */
1361                nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1362                sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1363
1364                /* allocate the next next full segment */
1365                err = nilfs_sufile_alloc(sufile, &nextnextnum);
1366                if (unlikely(err))
1367                        goto failed_segbuf;
1368
1369                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
1370                nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
1371
1372                list_add_tail(&segbuf->sb_list, &list);
1373                prev = segbuf;
1374        }
1375        list_splice_tail(&list, &sci->sc_segbufs);
1376        return 0;
1377
1378 failed_segbuf:
1379        nilfs_segbuf_free(segbuf);
1380 failed:
1381        list_for_each_entry(segbuf, &list, sb_list) {
1382                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1383                WARN_ON(ret); /* never fails */
1384        }
1385        nilfs_destroy_logs(&list);
1386        return err;
1387}
1388
1389static void nilfs_free_incomplete_logs(struct list_head *logs,
1390                                       struct the_nilfs *nilfs)
1391{
1392        struct nilfs_segment_buffer *segbuf, *prev;
1393        struct inode *sufile = nilfs->ns_sufile;
1394        int ret;
1395
1396        segbuf = NILFS_FIRST_SEGBUF(logs);
1397        if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
1398                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1399                WARN_ON(ret); /* never fails */
1400        }
1401        if (atomic_read(&segbuf->sb_err)) {
1402                /* Case 1: The first segment failed */
1403                if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
1404                        /*
1405                         * Case 1a:  Partial segment appended into an existing
1406                         * segment
1407                         */
1408                        nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1409                                                segbuf->sb_fseg_end);
1410                else /* Case 1b:  New full segment */
1411                        set_nilfs_discontinued(nilfs);
1412        }
1413
1414        prev = segbuf;
1415        list_for_each_entry_continue(segbuf, logs, sb_list) {
1416                if (prev->sb_nextnum != segbuf->sb_nextnum) {
1417                        ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1418                        WARN_ON(ret); /* never fails */
1419                }
1420                if (atomic_read(&segbuf->sb_err) &&
1421                    segbuf->sb_segnum != nilfs->ns_nextnum)
1422                        /* Case 2: extended segment (!= next) failed */
1423                        nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1424                prev = segbuf;
1425        }
1426}
1427
1428static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1429                                          struct inode *sufile)
1430{
1431        struct nilfs_segment_buffer *segbuf;
1432        unsigned long live_blocks;
1433        int ret;
1434
1435        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1436                live_blocks = segbuf->sb_sum.nblocks +
1437                        (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
1438                ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1439                                                     live_blocks,
1440                                                     sci->sc_seg_ctime);
1441                WARN_ON(ret); /* always succeeds because the segusage is dirty */
1442        }
1443}
1444
1445static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
1446{
1447        struct nilfs_segment_buffer *segbuf;
1448        int ret;
1449
1450        segbuf = NILFS_FIRST_SEGBUF(logs);
1451        ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1452                                             segbuf->sb_pseg_start -
1453                                             segbuf->sb_fseg_start, 0);
1454        WARN_ON(ret); /* always succeeds because the segusage is dirty */
1455
1456        list_for_each_entry_continue(segbuf, logs, sb_list) {
1457                ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1458                                                     0, 0);
1459                WARN_ON(ret); /* always succeeds */
1460        }
1461}
1462
1463static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1464                                            struct nilfs_segment_buffer *last,
1465                                            struct inode *sufile)
1466{
1467        struct nilfs_segment_buffer *segbuf = last;
1468        int ret;
1469
1470        list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1471                sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1472                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1473                WARN_ON(ret);
1474        }
1475        nilfs_truncate_logs(&sci->sc_segbufs, last);
1476}
1477
1478
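/*
 * Collection retry policy: when a log fills up (-E2BIG) during a full
 * construction (SC_LSEG_SR) that has reached the CPFILE stage, the
 * collected logs are discarded, the segment chain is extended by nadd
 * new segments, the saved collection stage is restored, and collection
 * starts over.  nadd starts at 1 and doubles after every retry
 * (1, 2, 4, ...), capped at SC_MAX_SEGDELTA segments per retry.
 */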
1479static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1480                                 struct the_nilfs *nilfs, int mode)
1481{
1482        struct nilfs_cstage prev_stage = sci->sc_stage;
1483        int err, nadd = 1;
1484
1485        /* Collection retry loop */
1486        for (;;) {
1487                sci->sc_nblk_this_inc = 0;
1488                sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1489
1490                err = nilfs_segctor_reset_segment_buffer(sci);
1491                if (unlikely(err))
1492                        goto failed;
1493
1494                err = nilfs_segctor_collect_blocks(sci, mode);
1495                sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1496                if (!err)
1497                        break;
1498
1499                if (unlikely(err != -E2BIG))
1500                        goto failed;
1501
1502                /* The current segment is filled up */
1503                if (mode != SC_LSEG_SR ||
1504                    nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
1505                        break;
1506
1507                nilfs_clear_logs(&sci->sc_segbufs);
1508
1509                if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1510                        err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1511                                                        sci->sc_freesegs,
1512                                                        sci->sc_nfreesegs,
1513                                                        NULL);
1514                        WARN_ON(err); /* should not happen */
1515                        sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
1516                }
1517
1518                err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1519                if (unlikely(err))
1520                        return err;
1521
1522                nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1523                sci->sc_stage = prev_stage;
1524        }
1525        nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1526        return 0;
1527
1528 failed:
1529        return err;
1530}
1531
1532static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1533                                      struct buffer_head *new_bh)
1534{
1535        BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1536
1537        list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1538        /* The caller must release old_bh */
1539}
1540
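/*
 * Assign the final on-disk block numbers to the payload blocks of a
 * segment buffer and record the matching binfo entries in the segment
 * summary.  For each file, a finfo header giving the inode number, the
 * block count and the data block count is read from the summary first;
 * the first fi_ndatablk blocks are described by data binfos and the
 * remaining blocks by node binfos.
 */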
1541static int
1542nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1543                                     struct nilfs_segment_buffer *segbuf,
1544                                     int mode)
1545{
1546        struct inode *inode = NULL;
1547        sector_t blocknr;
1548        unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1549        unsigned long nblocks = 0, ndatablk = 0;
1550        const struct nilfs_sc_operations *sc_op = NULL;
1551        struct nilfs_segsum_pointer ssp;
1552        struct nilfs_finfo *finfo = NULL;
1553        union nilfs_binfo binfo;
1554        struct buffer_head *bh, *bh_org;
1555        ino_t ino = 0;
1556        int err = 0;
1557
1558        if (!nfinfo)
1559                goto out;
1560
1561        blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1562        ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1563        ssp.offset = sizeof(struct nilfs_segment_summary);
1564
1565        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1566                if (bh == segbuf->sb_super_root)
1567                        break;
1568                if (!finfo) {
1569                        finfo = nilfs_segctor_map_segsum_entry(
1570                                sci, &ssp, sizeof(*finfo));
1571                        ino = le64_to_cpu(finfo->fi_ino);
1572                        nblocks = le32_to_cpu(finfo->fi_nblocks);
1573                        ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1574
1575                        inode = bh->b_page->mapping->host;
1576
1577                        if (mode == SC_LSEG_DSYNC)
1578                                sc_op = &nilfs_sc_dsync_ops;
1579                        else if (ino == NILFS_DAT_INO)
1580                                sc_op = &nilfs_sc_dat_ops;
1581                        else /* file blocks */
1582                                sc_op = &nilfs_sc_file_ops;
1583                }
1584                bh_org = bh;
1585                get_bh(bh_org);
1586                err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1587                                        &binfo);
1588                if (bh != bh_org)
1589                        nilfs_list_replace_buffer(bh_org, bh);
1590                brelse(bh_org);
1591                if (unlikely(err))
1592                        goto failed_bmap;
1593
1594                if (ndatablk > 0)
1595                        sc_op->write_data_binfo(sci, &ssp, &binfo);
1596                else
1597                        sc_op->write_node_binfo(sci, &ssp, &binfo);
1598
1599                blocknr++;
1600                if (--nblocks == 0) {
1601                        finfo = NULL;
1602                        if (--nfinfo == 0)
1603                                break;
1604                } else if (ndatablk > 0)
1605                        ndatablk--;
1606        }
1607 out:
1608        return 0;
1609
1610 failed_bmap:
1611        return err;
1612}
1613
1614static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1615{
1616        struct nilfs_segment_buffer *segbuf;
1617        int err;
1618
1619        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1620                err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1621                if (unlikely(err))
1622                        return err;
1623                nilfs_segbuf_fill_in_segsum(segbuf);
1624        }
1625        return 0;
1626}
1627
1628static void nilfs_begin_page_io(struct page *page)
1629{
1630        if (!page || PageWriteback(page))
1631                /*
1632                 * For split b-tree node pages, this function may be called
1633                 * more than once; this check skips the second and later calls.
1634                 */
1635                return;
1636
1637        lock_page(page);
1638        clear_page_dirty_for_io(page);
1639        set_page_writeback(page);
1640        unlock_page(page);
1641}
1642
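/*
 * In nilfs_segctor_prepare_write(), bd_page tracks pages of the block
 * device mapping (segment summary blocks and the super root block) and
 * fs_page tracks pages of file and metadata-file mappings (payload
 * blocks).  Writeback is started directly on the former and via
 * nilfs_begin_page_io() on the latter.
 */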
1643static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1644{
1645        struct nilfs_segment_buffer *segbuf;
1646        struct page *bd_page = NULL, *fs_page = NULL;
1647
1648        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1649                struct buffer_head *bh;
1650
1651                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1652                                    b_assoc_buffers) {
1653                        if (bh->b_page != bd_page) {
1654                                if (bd_page) {
1655                                        lock_page(bd_page);
1656                                        clear_page_dirty_for_io(bd_page);
1657                                        set_page_writeback(bd_page);
1658                                        unlock_page(bd_page);
1659                                }
1660                                bd_page = bh->b_page;
1661                        }
1662                }
1663
1664                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1665                                    b_assoc_buffers) {
1666                        set_buffer_async_write(bh);
1667                        if (bh == segbuf->sb_super_root) {
1668                                if (bh->b_page != bd_page) {
1669                                        lock_page(bd_page);
1670                                        clear_page_dirty_for_io(bd_page);
1671                                        set_page_writeback(bd_page);
1672                                        unlock_page(bd_page);
1673                                        bd_page = bh->b_page;
1674                                }
1675                                break;
1676                        }
1677                        if (bh->b_page != fs_page) {
1678                                nilfs_begin_page_io(fs_page);
1679                                fs_page = bh->b_page;
1680                        }
1681                }
1682        }
1683        if (bd_page) {
1684                lock_page(bd_page);
1685                clear_page_dirty_for_io(bd_page);
1686                set_page_writeback(bd_page);
1687                unlock_page(bd_page);
1688        }
1689        nilfs_begin_page_io(fs_page);
1690}
1691
1692static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1693                               struct the_nilfs *nilfs)
1694{
1695        int ret;
1696
1697        ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1698        list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1699        return ret;
1700}
1701
1702static void nilfs_end_page_io(struct page *page, int err)
1703{
1704        if (!page)
1705                return;
1706
1707        if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1708                /*
1709                 * For b-tree node pages, this function may be called twice
1710                 * or more because they might be split in a segment.
1711                 */
1712                if (PageDirty(page)) {
1713                        /*
1714                         * For pages holding split b-tree node buffers, dirty
1715                         * flag on the buffers may be cleared discretely.
1716                         * In that case, the page is once redirtied for
1717                         * remaining buffers, and it must be cancelled if
1718                         * all the buffers get cleaned later.
1719                         */
1720                        lock_page(page);
1721                        if (nilfs_page_buffers_clean(page))
1722                                __nilfs_clear_page_dirty(page);
1723                        unlock_page(page);
1724                }
1725                return;
1726        }
1727
1728        if (!err) {
1729                if (!nilfs_page_buffers_clean(page))
1730                        __set_page_dirty_nobuffers(page);
1731                ClearPageError(page);
1732        } else {
1733                __set_page_dirty_nobuffers(page);
1734                SetPageError(page);
1735        }
1736
1737        end_page_writeback(page);
1738}
1739
1740static void nilfs_abort_logs(struct list_head *logs, int err)
1741{
1742        struct nilfs_segment_buffer *segbuf;
1743        struct page *bd_page = NULL, *fs_page = NULL;
1744        struct buffer_head *bh;
1745
1746        if (list_empty(logs))
1747                return;
1748
1749        list_for_each_entry(segbuf, logs, sb_list) {
1750                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1751                                    b_assoc_buffers) {
1752                        if (bh->b_page != bd_page) {
1753                                if (bd_page)
1754                                        end_page_writeback(bd_page);
1755                                bd_page = bh->b_page;
1756                        }
1757                }
1758
1759                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1760                                    b_assoc_buffers) {
1761                        clear_buffer_async_write(bh);
1762                        if (bh == segbuf->sb_super_root) {
1763                                if (bh->b_page != bd_page) {
1764                                        end_page_writeback(bd_page);
1765                                        bd_page = bh->b_page;
1766                                }
1767                                break;
1768                        }
1769                        if (bh->b_page != fs_page) {
1770                                nilfs_end_page_io(fs_page, err);
1771                                fs_page = bh->b_page;
1772                        }
1773                }
1774        }
1775        if (bd_page)
1776                end_page_writeback(bd_page);
1777
1778        nilfs_end_page_io(fs_page, err);
1779}
1780
1781static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1782                                             struct the_nilfs *nilfs, int err)
1783{
1784        LIST_HEAD(logs);
1785        int ret;
1786
1787        list_splice_tail_init(&sci->sc_write_logs, &logs);
1788        ret = nilfs_wait_on_logs(&logs);
1789        nilfs_abort_logs(&logs, ret ? : err);
1790
1791        list_splice_tail_init(&sci->sc_segbufs, &logs);
1792        nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1793        nilfs_free_incomplete_logs(&logs, nilfs);
1794
1795        if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1796                ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1797                                                sci->sc_freesegs,
1798                                                sci->sc_nfreesegs,
1799                                                NULL);
1800                WARN_ON(ret); /* should not happen */
1801        }
1802
1803        nilfs_destroy_logs(&logs);
1804}
1805
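/*
 * Record in the_nilfs where the next log will start: ns_pseg_offset is
 * the block offset within the current full segment, i.e. the offset of
 * the last written log plus its length in blocks.
 */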
1806static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1807                                   struct nilfs_segment_buffer *segbuf)
1808{
1809        nilfs->ns_segnum = segbuf->sb_segnum;
1810        nilfs->ns_nextnum = segbuf->sb_nextnum;
1811        nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1812                + segbuf->sb_sum.nblocks;
1813        nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1814        nilfs->ns_ctime = segbuf->sb_sum.ctime;
1815}
1816
1817static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1818{
1819        struct nilfs_segment_buffer *segbuf;
1820        struct page *bd_page = NULL, *fs_page = NULL;
1821        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1822        int update_sr = false;
1823
1824        list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
1825                struct buffer_head *bh;
1826
1827                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1828                                    b_assoc_buffers) {
1829                        set_buffer_uptodate(bh);
1830                        clear_buffer_dirty(bh);
1831                        if (bh->b_page != bd_page) {
1832                                if (bd_page)
1833                                        end_page_writeback(bd_page);
1834                                bd_page = bh->b_page;
1835                        }
1836                }
1837                /*
1838                 * We assume that buffers belonging to the same page are
1839                 * contiguous in the buffer list.
1840                 * Under this assumption, the last BH of each page is
1841                 * identifiable by the discontinuity of bh->b_page
1842                 * (page != fs_page).
1843                 *
1844                 * For B-tree node blocks, however, this assumption is not
1845                 * guaranteed.  The cleanup code of B-tree node pages needs
1846                 * special care.
1847                 */
1848                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1849                                    b_assoc_buffers) {
1850                        const unsigned long set_bits = BIT(BH_Uptodate);
1851                        const unsigned long clear_bits =
1852                                (BIT(BH_Dirty) | BIT(BH_Async_Write) |
1853                                 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
1854                                 BIT(BH_NILFS_Redirected));
1855
1856                        set_mask_bits(&bh->b_state, clear_bits, set_bits);
1857                        if (bh == segbuf->sb_super_root) {
1858                                if (bh->b_page != bd_page) {
1859                                        end_page_writeback(bd_page);
1860                                        bd_page = bh->b_page;
1861                                }
1862                                update_sr = true;
1863                                break;
1864                        }
1865                        if (bh->b_page != fs_page) {
1866                                nilfs_end_page_io(fs_page, 0);
1867                                fs_page = bh->b_page;
1868                        }
1869                }
1870
1871                if (!nilfs_segbuf_simplex(segbuf)) {
1872                        if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1873                                set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1874                                sci->sc_lseg_stime = jiffies;
1875                        }
1876                        if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1877                                clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1878                }
1879        }
1880        /*
1881         * Since pages may span multiple segment buffers, the end of
1882         * the last page must be checked outside of the loop.
1883         */
1884        if (bd_page)
1885                end_page_writeback(bd_page);
1886
1887        nilfs_end_page_io(fs_page, 0);
1888
1889        nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1890
1891        if (nilfs_doing_gc())
1892                nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1893        else
1894                nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1895
1896        sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1897
1898        segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1899        nilfs_set_next_segment(nilfs, segbuf);
1900
1901        if (update_sr) {
1902                nilfs->ns_flushed_device = 0;
1903                nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
1904                                       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
1905
1906                clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1907                clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1908                set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1909                nilfs_segctor_clear_metadata_dirty(sci);
1910        } else
1911                clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1912}
1913
1914static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1915{
1916        int ret;
1917
1918        ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1919        if (!ret) {
1920                nilfs_segctor_complete_write(sci);
1921                nilfs_destroy_logs(&sci->sc_write_logs);
1922        }
1923        return ret;
1924}
1925
1926static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1927                                             struct the_nilfs *nilfs)
1928{
1929        struct nilfs_inode_info *ii, *n;
1930        struct inode *ifile = sci->sc_root->ifile;
1931
1932        spin_lock(&nilfs->ns_inode_lock);
1933 retry:
1934        list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
1935                if (!ii->i_bh) {
1936                        struct buffer_head *ibh;
1937                        int err;
1938
1939                        spin_unlock(&nilfs->ns_inode_lock);
1940                        err = nilfs_ifile_get_inode_block(
1941                                ifile, ii->vfs_inode.i_ino, &ibh);
1942                        if (unlikely(err)) {
1943                                nilfs_warn(sci->sc_super,
1944                                           "log writer: error %d getting inode block (ino=%lu)",
1945                                           err, ii->vfs_inode.i_ino);
1946                                return err;
1947                        }
1948                        spin_lock(&nilfs->ns_inode_lock);
1949                        if (likely(!ii->i_bh))
1950                                ii->i_bh = ibh;
1951                        else
1952                                brelse(ibh);
1953                        goto retry;
1954                }
1955
1956                /* Always redirty the buffer to avoid race condition */
1957                mark_buffer_dirty(ii->i_bh);
1958                nilfs_mdt_mark_dirty(ifile);
1959
1960                clear_bit(NILFS_I_QUEUED, &ii->i_state);
1961                set_bit(NILFS_I_BUSY, &ii->i_state);
1962                list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
1963        }
1964        spin_unlock(&nilfs->ns_inode_lock);
1965
1966        return 0;
1967}
1968
1969static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1970                                             struct the_nilfs *nilfs)
1971{
1972        struct nilfs_inode_info *ii, *n;
1973        int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
1974        int defer_iput = false;
1975
1976        spin_lock(&nilfs->ns_inode_lock);
1977        list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
1978                if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
1979                    test_bit(NILFS_I_DIRTY, &ii->i_state))
1980                        continue;
1981
1982                clear_bit(NILFS_I_BUSY, &ii->i_state);
1983                brelse(ii->i_bh);
1984                ii->i_bh = NULL;
1985                list_del_init(&ii->i_dirty);
1986                if (!ii->vfs_inode.i_nlink || during_mount) {
1987                        /*
1988                         * Defer calling iput() to avoid deadlocks if
1989                         * i_nlink == 0 or mount is not yet finished.
1990                         */
1991                        list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
1992                        defer_iput = true;
1993                } else {
1994                        spin_unlock(&nilfs->ns_inode_lock);
1995                        iput(&ii->vfs_inode);
1996                        spin_lock(&nilfs->ns_inode_lock);
1997                }
1998        }
1999        spin_unlock(&nilfs->ns_inode_lock);
2000
2001        if (defer_iput)
2002                schedule_work(&sci->sc_iput_work);
2003}
2004
2005/*
2006 * Main procedure of segment constructor
2007 */
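/*
 * Each pass of the loop below performs, in order: begin construction,
 * collect dirty blocks, assign on-disk block numbers and fill in the
 * segment summaries, fill in file bmaps, the checkpoint and the super
 * root (full constructions only), update segment usage, prepare pages
 * for writeback, add checksums, submit the logs, and wait for write
 * completion.  The loop repeats until the collection stage reaches
 * NILFS_ST_DONE.
 */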
2008static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2009{
2010        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2011        int err;
2012
2013        nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
2014        sci->sc_cno = nilfs->ns_cno;
2015
2016        err = nilfs_segctor_collect_dirty_files(sci, nilfs);
2017        if (unlikely(err))
2018                goto out;
2019
2020        if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
2021                set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2022
2023        if (nilfs_segctor_clean(sci))
2024                goto out;
2025
2026        do {
2027                sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2028
2029                err = nilfs_segctor_begin_construction(sci, nilfs);
2030                if (unlikely(err))
2031                        goto out;
2032
2033                /* Update time stamp */
2034                sci->sc_seg_ctime = ktime_get_real_seconds();
2035
2036                err = nilfs_segctor_collect(sci, nilfs, mode);
2037                if (unlikely(err))
2038                        goto failed;
2039
2040                /* Avoid empty segment */
2041                if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
2042                    nilfs_segbuf_empty(sci->sc_curseg)) {
2043                        nilfs_segctor_abort_construction(sci, nilfs, 1);
2044                        goto out;
2045                }
2046
2047                err = nilfs_segctor_assign(sci, mode);
2048                if (unlikely(err))
2049                        goto failed;
2050
2051                if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2052                        nilfs_segctor_fill_in_file_bmap(sci);
2053
2054                if (mode == SC_LSEG_SR &&
2055                    nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
2056                        err = nilfs_segctor_fill_in_checkpoint(sci);
2057                        if (unlikely(err))
2058                                goto failed_to_write;
2059
2060                        nilfs_segctor_fill_in_super_root(sci, nilfs);
2061                }
2062                nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2063
2064                /* Write partial segments */
2065                nilfs_segctor_prepare_write(sci);
2066
2067                nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2068                                            nilfs->ns_crc_seed);
2069
2070                err = nilfs_segctor_write(sci, nilfs);
2071                if (unlikely(err))
2072                        goto failed_to_write;
2073
2074                if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
2075                    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
2076                        /*
2077                         * At this point, we avoid double buffering
2078                         * for blocksize < pagesize because the page dirty
2079                         * flag is turned off during write and dirty
2080                         * buffers are not properly collected for
2081                         * pages crossing over segments.
2082                         */
2083                        err = nilfs_segctor_wait(sci);
2084                        if (err)
2085                                goto failed_to_write;
2086                }
2087        } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
2088
2089 out:
2090        nilfs_segctor_drop_written_files(sci, nilfs);
2091        return err;
2092
2093 failed_to_write:
2094        if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2095                nilfs_redirty_inodes(&sci->sc_dirty_files);
2096
2097 failed:
2098        if (nilfs_doing_gc())
2099                nilfs_redirty_inodes(&sci->sc_gc_inodes);
2100        nilfs_segctor_abort_construction(sci, nilfs, err);
2101        goto out;
2102}
2103
2104/**
2105 * nilfs_segctor_start_timer - set timer of background write
2106 * @sci: nilfs_sc_info
2107 *
2108 * If the timer has already been set, it ignores the new request.
2109 * This function MUST be called while holding the segment
2110 * semaphore.
2111 */
2112static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2113{
2114        spin_lock(&sci->sc_state_lock);
2115        if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2116                sci->sc_timer.expires = jiffies + sci->sc_interval;
2117                add_timer(&sci->sc_timer);
2118                sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2119        }
2120        spin_unlock(&sci->sc_state_lock);
2121}
2122
2123static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2124{
2125        spin_lock(&sci->sc_state_lock);
2126        if (!(sci->sc_flush_request & BIT(bn))) {
2127                unsigned long prev_req = sci->sc_flush_request;
2128
2129                sci->sc_flush_request |= BIT(bn);
2130                if (!prev_req)
2131                        wake_up(&sci->sc_wait_daemon);
2132        }
2133        spin_unlock(&sci->sc_state_lock);
2134}
2135
2136/**
2137 * nilfs_flush_segment - trigger a segment construction for resource control
2138 * @sb: super block
2139 * @ino: inode number of the file to be flushed out.
2140 */
2141void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2142{
2143        struct the_nilfs *nilfs = sb->s_fs_info;
2144        struct nilfs_sc_info *sci = nilfs->ns_writer;
2145
2146        if (!sci || nilfs_doing_construction())
2147                return;
2148        nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2149                                        /* assign bit 0 to data files */
2150}
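/*
 * Illustrative (hypothetical) call sites:
 *
 *	nilfs_flush_segment(sb, inode->i_ino);   flushes a regular data file
 *						 (mapped to request bit 0)
 *	nilfs_flush_segment(sb, NILFS_DAT_INO);  flushes the DAT
 *						 (mapped to FLUSH_DAT_BIT)
 */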
2151
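/*
 * Handshake between synchronous requesters and the log writer daemon:
 * nilfs_segctor_sync() bumps sc_seq_request, queues itself on
 * sc_wait_request and wakes the daemon; the daemon latches the request
 * count into sc_seq_accepted in nilfs_segctor_accept(), and after the
 * construction nilfs_segctor_notify() advances sc_seq_done and calls
 * nilfs_segctor_wakeup() to complete every waiter whose sequence number
 * has been reached.
 */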
2152struct nilfs_segctor_wait_request {
2153        wait_queue_entry_t      wq;
2154        __u32           seq;
2155        int             err;
2156        atomic_t        done;
2157};
2158
2159static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2160{
2161        struct nilfs_segctor_wait_request wait_req;
2162        int err = 0;
2163
2164        spin_lock(&sci->sc_state_lock);
2165        init_wait(&wait_req.wq);
2166        wait_req.err = 0;
2167        atomic_set(&wait_req.done, 0);
2168        wait_req.seq = ++sci->sc_seq_request;
2169        spin_unlock(&sci->sc_state_lock);
2170
2171        init_waitqueue_entry(&wait_req.wq, current);
2172        add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2173        set_current_state(TASK_INTERRUPTIBLE);
2174        wake_up(&sci->sc_wait_daemon);
2175
2176        for (;;) {
2177                if (atomic_read(&wait_req.done)) {
2178                        err = wait_req.err;
2179                        break;
2180                }
2181                if (!signal_pending(current)) {
2182                        schedule();
2183                        continue;
2184                }
2185                err = -ERESTARTSYS;
2186                break;
2187        }
2188        finish_wait(&sci->sc_wait_request, &wait_req.wq);
2189        return err;
2190}
2191
2192static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2193{
2194        struct nilfs_segctor_wait_request *wrq, *n;
2195        unsigned long flags;
2196
2197        spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2198        list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
2199                if (!atomic_read(&wrq->done) &&
2200                    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2201                        wrq->err = err;
2202                        atomic_set(&wrq->done, 1);
2203                }
2204                if (atomic_read(&wrq->done)) {
2205                        wrq->wq.func(&wrq->wq,
2206                                     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2207                                     0, NULL);
2208                }
2209        }
2210        spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2211}
2212
2213/**
2214 * nilfs_construct_segment - construct a logical segment
2215 * @sb: super block
2216 *
2217 * Return Value: On success, 0 is returned. On errors, one of the following
2218 * negative error codes is returned.
2219 *
2220 * %-EROFS - Read only filesystem.
2221 *
2222 * %-EIO - I/O error
2223 *
2224 * %-ENOSPC - No space left on device (only in a panic state).
2225 *
2226 * %-ERESTARTSYS - Interrupted.
2227 *
2228 * %-ENOMEM - Insufficient memory available.
2229 */
2230int nilfs_construct_segment(struct super_block *sb)
2231{
2232        struct the_nilfs *nilfs = sb->s_fs_info;
2233        struct nilfs_sc_info *sci = nilfs->ns_writer;
2234        struct nilfs_transaction_info *ti;
2235        int err;
2236
2237        if (!sci)
2238                return -EROFS;
2239
2240        /* A call inside transactions causes a deadlock. */
2241        BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2242
2243        err = nilfs_segctor_sync(sci);
2244        return err;
2245}
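/*
 * A minimal, hypothetical sync_fs-style caller of nilfs_construct_segment()
 * (example only, not part of this file):
 */
#if 0
static int example_sync_fs(struct super_block *sb, int wait)
{
	int err = 0;

	if (wait)	/* write out dirty data and create a checkpoint */
		err = nilfs_construct_segment(sb);
	return err;
}
#endif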
2246
2247/**
2248 * nilfs_construct_dsync_segment - construct a data-only logical segment
2249 * @sb: super block
2250 * @inode: inode whose data blocks should be written out
2251 * @start: start byte offset
2252 * @end: end byte offset (inclusive)
2253 *
2254 * Return Value: On success, 0 is returned. On errors, one of the following
2255 * negative error codes is returned.
2256 *
2257 * %-EROFS - Read only filesystem.
2258 *
2259 * %-EIO - I/O error
2260 *
2261 * %-ENOSPC - No space left on device (only in a panic state).
2262 *
2263 * %-ERESTARTSYS - Interrupted.
2264 *
2265 * %-ENOMEM - Insufficient memory available.
2266 */
2267int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2268                                  loff_t start, loff_t end)
2269{
2270        struct the_nilfs *nilfs = sb->s_fs_info;
2271        struct nilfs_sc_info *sci = nilfs->ns_writer;
2272        struct nilfs_inode_info *ii;
2273        struct nilfs_transaction_info ti;
2274        int err = 0;
2275
2276        if (!sci)
2277                return -EROFS;
2278
2279        nilfs_transaction_lock(sb, &ti, 0);
2280
2281        ii = NILFS_I(inode);
2282        if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
2283            nilfs_test_opt(nilfs, STRICT_ORDER) ||
2284            test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2285            nilfs_discontinued(nilfs)) {
2286                nilfs_transaction_unlock(sb);
2287                err = nilfs_segctor_sync(sci);
2288                return err;
2289        }
2290
2291        spin_lock(&nilfs->ns_inode_lock);
2292        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2293            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2294                spin_unlock(&nilfs->ns_inode_lock);
2295                nilfs_transaction_unlock(sb);
2296                return 0;
2297        }
2298        spin_unlock(&nilfs->ns_inode_lock);
2299        sci->sc_dsync_inode = ii;
2300        sci->sc_dsync_start = start;
2301        sci->sc_dsync_end = end;
2302
2303        err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2304        if (!err)
2305                nilfs->ns_flushed_device = 0;
2306
2307        nilfs_transaction_unlock(sb);
2308        return err;
2309}
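/*
 * A minimal, hypothetical fsync-style caller of
 * nilfs_construct_dsync_segment() (example only, not part of this file):
 */
#if 0
static int example_fsync_range(struct inode *inode, loff_t start, loff_t end)
{
	/* write out only the data blocks of @inode in [start, end] */
	return nilfs_construct_dsync_segment(inode->i_sb, inode, start, end);
}
#endif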
2310
2311#define FLUSH_FILE_BIT  (0x1) /* data file only */
2312#define FLUSH_DAT_BIT   BIT(NILFS_DAT_INO) /* DAT only */
2313
2314/**
2315 * nilfs_segctor_accept - record accepted sequence count of log-write requests
2316 * @sci: segment constructor object
2317 */
2318static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2319{
2320        spin_lock(&sci->sc_state_lock);
2321        sci->sc_seq_accepted = sci->sc_seq_request;
2322        spin_unlock(&sci->sc_state_lock);
2323        del_timer_sync(&sci->sc_timer);
2324}
2325
2326/**
2327 * nilfs_segctor_notify - notify the result of request to caller threads
2328 * @sci: segment constructor object
2329 * @mode: mode of log forming
2330 * @err: error code to be notified
2331 */
2332static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2333{
2334        /* Clear requests (even when the construction failed) */
2335        spin_lock(&sci->sc_state_lock);
2336
2337        if (mode == SC_LSEG_SR) {
2338                sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2339                sci->sc_seq_done = sci->sc_seq_accepted;
2340                nilfs_segctor_wakeup(sci, err);
2341                sci->sc_flush_request = 0;
2342        } else {
2343                if (mode == SC_FLUSH_FILE)
2344                        sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2345                else if (mode == SC_FLUSH_DAT)
2346                        sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2347
2348                /* re-enable timer if checkpoint creation was not done */
2349                if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2350                    time_before(jiffies, sci->sc_timer.expires))
2351                        add_timer(&sci->sc_timer);
2352        }
2353        spin_unlock(&sci->sc_state_lock);
2354}
2355
2356/**
2357 * nilfs_segctor_construct - form logs and write them to disk
2358 * @sci: segment constructor object
2359 * @mode: mode of log forming
2360 */
2361static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2362{
2363        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2364        struct nilfs_super_block **sbp;
2365        int err = 0;
2366
2367        nilfs_segctor_accept(sci);
2368
2369        if (nilfs_discontinued(nilfs))
2370                mode = SC_LSEG_SR;
2371        if (!nilfs_segctor_confirm(sci))
2372                err = nilfs_segctor_do_construct(sci, mode);
2373
2374        if (likely(!err)) {
2375                if (mode != SC_FLUSH_DAT)
2376                        atomic_set(&nilfs->ns_ndirtyblks, 0);
2377                if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2378                    nilfs_discontinued(nilfs)) {
2379                        down_write(&nilfs->ns_sem);
2380                        err = -EIO;
2381                        sbp = nilfs_prepare_super(sci->sc_super,
2382                                                  nilfs_sb_will_flip(nilfs));
2383                        if (likely(sbp)) {
2384                                nilfs_set_log_cursor(sbp[0], nilfs);
2385                                err = nilfs_commit_super(sci->sc_super,
2386                                                         NILFS_SB_COMMIT);
2387                        }
2388                        up_write(&nilfs->ns_sem);
2389                }
2390        }
2391
2392        nilfs_segctor_notify(sci, mode, err);
2393        return err;
2394}
2395
2396static void nilfs_construction_timeout(struct timer_list *t)
2397{
2398        struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
2399
2400        wake_up_process(sci->sc_timer_task);
2401}
2402
2403static void
2404nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2405{
2406        struct nilfs_inode_info *ii, *n;
2407
2408        list_for_each_entry_safe(ii, n, head, i_dirty) {
2409                if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2410                        continue;
2411                list_del_init(&ii->i_dirty);
2412                truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2413                nilfs_btnode_cache_clear(&ii->i_btnode_cache);
2414                iput(&ii->vfs_inode);
2415        }
2416}
2417
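/*
 * nilfs_clean_segments() is the entry point used by the garbage collector
 * (the NILFS_IOCTL_CLEAN_SEGMENTS path): it writes the blocks handed in
 * through @argv/@kbufs into new logs and frees the victim segments listed
 * in kbufs[4], retrying the construction until it succeeds.
 */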
2418int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2419                         void **kbufs)
2420{
2421        struct the_nilfs *nilfs = sb->s_fs_info;
2422        struct nilfs_sc_info *sci = nilfs->ns_writer;
2423        struct nilfs_transaction_info ti;
2424        int err;
2425
2426        if (unlikely(!sci))
2427                return -EROFS;
2428
2429        nilfs_transaction_lock(sb, &ti, 1);
2430
2431        err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2432        if (unlikely(err))
2433                goto out_unlock;
2434
2435        err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2436        if (unlikely(err)) {
2437                nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
2438                goto out_unlock;
2439        }
2440
2441        sci->sc_freesegs = kbufs[4];
2442        sci->sc_nfreesegs = argv[4].v_nmembs;
2443        list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2444
2445        for (;;) {
2446                err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2447                nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2448
2449                if (likely(!err))
2450                        break;
2451
2452                nilfs_warn(sb, "error %d cleaning segments", err);
2453                set_current_state(TASK_INTERRUPTIBLE);
2454                schedule_timeout(sci->sc_interval);
2455        }
2456        if (nilfs_test_opt(nilfs, DISCARD)) {
2457                int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2458                                                 sci->sc_nfreesegs);
2459                if (ret) {
2460                        nilfs_warn(sb,
2461                                   "error %d on discard request, turning discards off for the device",
2462                                   ret);
2463                        nilfs_clear_opt(nilfs, DISCARD);
2464                }
2465        }
2466
2467 out_unlock:
2468        sci->sc_freesegs = NULL;
2469        sci->sc_nfreesegs = 0;
2470        nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2471        nilfs_transaction_unlock(sb);
2472        return err;
2473}
2474
2475static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2476{
2477        struct nilfs_transaction_info ti;
2478
2479        nilfs_transaction_lock(sci->sc_super, &ti, 0);
2480        nilfs_segctor_construct(sci, mode);
2481
2482        /*
2483         * An unclosed segment should be retried.  We do this using sc_timer.
2484         * A timeout of sc_timer invokes a complete construction, which
2485         * closes the current logical segment.
2486         */
2487        if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2488                nilfs_segctor_start_timer(sci);
2489
2490        nilfs_transaction_unlock(sci->sc_super);
2491}
2492
2493static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2494{
2495        int mode = 0;
2496
2497        spin_lock(&sci->sc_state_lock);
2498        mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2499                SC_FLUSH_DAT : SC_FLUSH_FILE;
2500        spin_unlock(&sci->sc_state_lock);
2501
2502        if (mode) {
2503                nilfs_segctor_do_construct(sci, mode);
2504
2505                spin_lock(&sci->sc_state_lock);
2506                sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2507                        ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2508                spin_unlock(&sci->sc_state_lock);
2509        }
2510        clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2511}
2512
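/*
 * Choose how to honour pending flush requests: a file-only or DAT-only
 * flush is acceptable as long as no logical segment has stayed unclosed
 * longer than sc_mjcp_freq; otherwise a full construction with a
 * checkpoint (SC_LSEG_SR) is forced.
 */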
2513static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2514{
2515        if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2516            time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2517                if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2518                        return SC_FLUSH_FILE;
2519                else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2520                        return SC_FLUSH_DAT;
2521        }
2522        return SC_LSEG_SR;
2523}
2524
2525/**
2526 * nilfs_segctor_thread - main loop of the segment constructor thread.
2527 * @arg: pointer to a struct nilfs_sc_info.
2528 *
2529 * nilfs_segctor_thread() initializes a timer and serves as a daemon
2530 * to execute segment constructions.
2531 */
2532static int nilfs_segctor_thread(void *arg)
2533{
2534        struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2535        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2536        int timeout = 0;
2537
2538        sci->sc_timer_task = current;
2539
2540        /* start sync. */
2541        sci->sc_task = current;
2542        wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2543        nilfs_info(sci->sc_super,
2544                   "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
2545                   sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2546
2547        spin_lock(&sci->sc_state_lock);
2548 loop:
2549        for (;;) {
2550                int mode;
2551
2552                if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2553                        goto end_thread;
2554
2555                if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2556                        mode = SC_LSEG_SR;
2557                else if (sci->sc_flush_request)
2558                        mode = nilfs_segctor_flush_mode(sci);
2559                else
2560                        break;
2561
2562                spin_unlock(&sci->sc_state_lock);
2563                nilfs_segctor_thread_construct(sci, mode);
2564                spin_lock(&sci->sc_state_lock);
2565                timeout = 0;
2566        }
2567
2568
2569        if (freezing(current)) {
2570                spin_unlock(&sci->sc_state_lock);
2571                try_to_freeze();
2572                spin_lock(&sci->sc_state_lock);
2573        } else {
2574                DEFINE_WAIT(wait);
2575                int should_sleep = 1;
2576
2577                prepare_to_wait(&sci->sc_wait_daemon, &wait,
2578                                TASK_INTERRUPTIBLE);
2579
2580                if (sci->sc_seq_request != sci->sc_seq_done)
2581                        should_sleep = 0;
2582                else if (sci->sc_flush_request)
2583                        should_sleep = 0;
2584                else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2585                        should_sleep = time_before(jiffies,
2586                                        sci->sc_timer.expires);
2587
2588                if (should_sleep) {
2589                        spin_unlock(&sci->sc_state_lock);
2590                        schedule();
2591                        spin_lock(&sci->sc_state_lock);
2592                }
2593                finish_wait(&sci->sc_wait_daemon, &wait);
2594                timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2595                           time_after_eq(jiffies, sci->sc_timer.expires));
2596
2597                if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2598                        set_nilfs_discontinued(nilfs);
2599        }
2600        goto loop;
2601
2602 end_thread:
2603        spin_unlock(&sci->sc_state_lock);
2604
2605        /* end sync. */
2606        sci->sc_task = NULL;
2607        wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2608        return 0;
2609}
2610
2611static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2612{
2613        struct task_struct *t;
2614
2615        t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2616        if (IS_ERR(t)) {
2617                int err = PTR_ERR(t);
2618
2619                nilfs_err(sci->sc_super, "error %d creating segctord thread",
2620                          err);
2621                return err;
2622        }
2623        wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2624        return 0;
2625}
2626
2627static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2628        __acquires(&sci->sc_state_lock)
2629        __releases(&sci->sc_state_lock)
2630{
2631        sci->sc_state |= NILFS_SEGCTOR_QUIT;
2632
2633        while (sci->sc_task) {
2634                wake_up(&sci->sc_wait_daemon);
2635                spin_unlock(&sci->sc_state_lock);
2636                wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2637                spin_lock(&sci->sc_state_lock);
2638        }
2639}
2640
2641/*
2642 * Setup & clean-up functions
2643 */
2644static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2645                                               struct nilfs_root *root)
2646{
2647        struct the_nilfs *nilfs = sb->s_fs_info;
2648        struct nilfs_sc_info *sci;
2649
2650        sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2651        if (!sci)
2652                return NULL;
2653
2654        sci->sc_super = sb;
2655
2656        nilfs_get_root(root);
2657        sci->sc_root = root;
2658
2659        init_waitqueue_head(&sci->sc_wait_request);
2660        init_waitqueue_head(&sci->sc_wait_daemon);
2661        init_waitqueue_head(&sci->sc_wait_task);
2662        spin_lock_init(&sci->sc_state_lock);
2663        INIT_LIST_HEAD(&sci->sc_dirty_files);
2664        INIT_LIST_HEAD(&sci->sc_segbufs);
2665        INIT_LIST_HEAD(&sci->sc_write_logs);
2666        INIT_LIST_HEAD(&sci->sc_gc_inodes);
2667        INIT_LIST_HEAD(&sci->sc_iput_queue);
2668        INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
2669        timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
2670
2671        sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2672        sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2673        sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2674
2675        if (nilfs->ns_interval)
2676                sci->sc_interval = HZ * nilfs->ns_interval;
2677        if (nilfs->ns_watermark)
2678                sci->sc_watermark = nilfs->ns_watermark;
2679        return sci;
2680}
2681
2682static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2683{
2684        int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2685
2686        /*
2687         * The segctord thread was stopped and its timer was removed.
2688         * But some tasks remain.
2689         */
2690        do {
2691                struct nilfs_transaction_info ti;
2692
2693                nilfs_transaction_lock(sci->sc_super, &ti, 0);
2694                ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2695                nilfs_transaction_unlock(sci->sc_super);
2696
2697                flush_work(&sci->sc_iput_work);
2698
2699        } while (ret && retrycount-- > 0);
2700}
2701
2702/**
2703 * nilfs_segctor_destroy - destroy the segment constructor.
2704 * @sci: nilfs_sc_info
2705 *
2706 * nilfs_segctor_destroy() kills the segctord thread and frees
2707 * the nilfs_sc_info struct.
2708 * Caller must hold the segment semaphore.
2709 */
2710static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2711{
2712        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2713        int flag;
2714
2715        up_write(&nilfs->ns_segctor_sem);
2716
2717        spin_lock(&sci->sc_state_lock);
2718        nilfs_segctor_kill_thread(sci);
2719        flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2720                || sci->sc_seq_request != sci->sc_seq_done);
2721        spin_unlock(&sci->sc_state_lock);
2722
2723        if (flush_work(&sci->sc_iput_work))
2724                flag = true;
2725
2726        if (flag || !nilfs_segctor_confirm(sci))
2727                nilfs_segctor_write_out(sci);
2728
2729        if (!list_empty(&sci->sc_dirty_files)) {
2730                nilfs_warn(sci->sc_super,
2731                           "disposed unprocessed dirty file(s) when stopping log writer");
2732                nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2733        }
2734
2735        if (!list_empty(&sci->sc_iput_queue)) {
2736                nilfs_warn(sci->sc_super,
2737                           "disposed unprocessed inode(s) in iput queue when stopping log writer");
2738                nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
2739        }
2740
2741        WARN_ON(!list_empty(&sci->sc_segbufs));
2742        WARN_ON(!list_empty(&sci->sc_write_logs));
2743
2744        nilfs_put_root(sci->sc_root);
2745
2746        down_write(&nilfs->ns_segctor_sem);
2747
2748        del_timer_sync(&sci->sc_timer);
2749        kfree(sci);
2750}
2751
2752/**
2753 * nilfs_attach_log_writer - attach log writer
2754 * @sb: super block instance
2755 * @root: root object of the current filesystem tree
2756 *
2757 * This allocates a log writer object, initializes it, and starts the
2758 * log writer.
2759 *
2760 * Return Value: On success, 0 is returned. On error, one of the following
2761 * negative error codes is returned.
2762 *
2763 * %-ENOMEM - Insufficient memory available.
2764 */
2765int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
2766{
2767        struct the_nilfs *nilfs = sb->s_fs_info;
2768        int err;
2769
2770        if (nilfs->ns_writer) {
2771                /*
2772                 * This happens if the filesystem was remounted
2773                 * read/write after nilfs_error degraded it to a
2774                 * read-only mount.
2775                 */
2776                nilfs_detach_log_writer(sb);
2777        }
2778
2779        nilfs->ns_writer = nilfs_segctor_new(sb, root);
2780        if (!nilfs->ns_writer)
2781                return -ENOMEM;
2782
2783        inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
2784
2785        err = nilfs_segctor_start_thread(nilfs->ns_writer);
2786        if (err) {
2787                kfree(nilfs->ns_writer);
2788                nilfs->ns_writer = NULL;
2789        }
2790        return err;
2791}
2792
2793/**
2794 * nilfs_detach_log_writer - destroy log writer
2795 * @sb: super block instance
2796 *
2797 * This kills the log writer daemon, frees the log writer object, and
2798 * destroys the list of dirty files.
2799 */
2800void nilfs_detach_log_writer(struct super_block *sb)
2801{
2802        struct the_nilfs *nilfs = sb->s_fs_info;
2803        LIST_HEAD(garbage_list);
2804
2805        down_write(&nilfs->ns_segctor_sem);
2806        if (nilfs->ns_writer) {
2807                nilfs_segctor_destroy(nilfs->ns_writer);
2808                nilfs->ns_writer = NULL;
2809        }
2810
2811        /* Forcibly free the list of dirty files */
2812        spin_lock(&nilfs->ns_inode_lock);
2813        if (!list_empty(&nilfs->ns_dirty_files)) {
2814                list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
2815                nilfs_warn(sb,
2816                           "disposed unprocessed dirty file(s) when detaching log writer");
2817        }
2818        spin_unlock(&nilfs->ns_inode_lock);
2819        up_write(&nilfs->ns_segctor_sem);
2820
2821        nilfs_dispose_list(nilfs, &garbage_list, 1);
2822}
2823