linux/fs/nilfs2/segment.c
   1/*
   2 * segment.c - NILFS segment constructor.
   3 *
   4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * Written by Ryusuke Konishi.
  17 *
  18 */
  19
  20#include <linux/pagemap.h>
  21#include <linux/buffer_head.h>
  22#include <linux/writeback.h>
  23#include <linux/bitops.h>
  24#include <linux/bio.h>
  25#include <linux/completion.h>
  26#include <linux/blkdev.h>
  27#include <linux/backing-dev.h>
  28#include <linux/freezer.h>
  29#include <linux/kthread.h>
  30#include <linux/crc32.h>
  31#include <linux/pagevec.h>
  32#include <linux/slab.h>
  33#include "nilfs.h"
  34#include "btnode.h"
  35#include "page.h"
  36#include "segment.h"
  37#include "sufile.h"
  38#include "cpfile.h"
  39#include "ifile.h"
  40#include "segbuf.h"
  41
  42
  43/*
  44 * Segment constructor
  45 */
  46#define SC_N_INODEVEC   16   /* Size of locally allocated inode vector */
  47
  48#define SC_MAX_SEGDELTA 64   /*
  49                              * Upper limit of the number of segments
   50                              * appended in the collection retry loop
  51                              */
  52
  53/* Construction mode */
  54enum {
  55        SC_LSEG_SR = 1, /* Make a logical segment having a super root */
  56        SC_LSEG_DSYNC,  /*
  57                         * Flush data blocks of a given file and make
  58                         * a logical segment without a super root.
  59                         */
  60        SC_FLUSH_FILE,  /*
   61                         * Flush data files; this leads to segment writes without
  62                         * creating a checkpoint.
  63                         */
  64        SC_FLUSH_DAT,   /*
  65                         * Flush DAT file.  This also creates segments
  66                         * without a checkpoint.
  67                         */
  68};
  69
  70/* Stage numbers of dirty block collection */
  71enum {
  72        NILFS_ST_INIT = 0,
  73        NILFS_ST_GC,            /* Collecting dirty blocks for GC */
  74        NILFS_ST_FILE,
  75        NILFS_ST_IFILE,
  76        NILFS_ST_CPFILE,
  77        NILFS_ST_SUFILE,
  78        NILFS_ST_DAT,
  79        NILFS_ST_SR,            /* Super root */
  80        NILFS_ST_DSYNC,         /* Data sync blocks */
  81        NILFS_ST_DONE,
  82};
  83
  84#define CREATE_TRACE_POINTS
  85#include <trace/events/nilfs2.h>
  86
  87/*
  88 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are
   89 * wrapper functions for the stage counter (nilfs_sc_info->sc_stage.scnt).
   90 * Users of the counter must go through them because every stage transition
   91 * must emit a trace event (trace_nilfs2_collection_stage_transition).
   92 *
   93 * nilfs_sc_cstage_get() is not required for that purpose because it does not
   94 * produce tracepoint events; it is provided just to make the intention
   95 * clear.
  96 */
  97static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
  98{
  99        sci->sc_stage.scnt++;
 100        trace_nilfs2_collection_stage_transition(sci);
 101}
 102
 103static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
 104{
 105        sci->sc_stage.scnt = next_scnt;
 106        trace_nilfs2_collection_stage_transition(sci);
 107}
 108
 109static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
 110{
 111        return sci->sc_stage.scnt;
 112}
 113
 114/* State flags of collection */
 115#define NILFS_CF_NODE           0x0001  /* Collecting node blocks */
 116#define NILFS_CF_IFILE_STARTED  0x0002  /* IFILE stage has started */
  117#define NILFS_CF_SUFREED        0x0004  /* segment usages have been freed */
 118#define NILFS_CF_HISTORY_MASK   (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
 119
 120/* Operations depending on the construction mode and file type */
 121struct nilfs_sc_operations {
 122        int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
 123                            struct inode *);
 124        int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
 125                            struct inode *);
 126        int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
 127                            struct inode *);
 128        void (*write_data_binfo)(struct nilfs_sc_info *,
 129                                 struct nilfs_segsum_pointer *,
 130                                 union nilfs_binfo *);
 131        void (*write_node_binfo)(struct nilfs_sc_info *,
 132                                 struct nilfs_segsum_pointer *,
 133                                 union nilfs_binfo *);
 134};
 135
 136/*
 137 * Other definitions
 138 */
 139static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
 140static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
 141static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
 142static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
 143
 144#define nilfs_cnt32_gt(a, b)   \
 145        (typecheck(__u32, a) && typecheck(__u32, b) && \
 146         ((__s32)(b) - (__s32)(a) < 0))
 147#define nilfs_cnt32_ge(a, b)   \
 148        (typecheck(__u32, a) && typecheck(__u32, b) && \
 149         ((__s32)(a) - (__s32)(b) >= 0))
 150#define nilfs_cnt32_lt(a, b)  nilfs_cnt32_gt(b, a)
 151#define nilfs_cnt32_le(a, b)  nilfs_cnt32_ge(b, a)
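
/*
 * Example (illustrative only): with 32-bit wraparound, nilfs_cnt32_gt(1,
 * 0xfffffffe) evaluates (__s32)0xfffffffe - (__s32)1 = -3 < 0, so a counter
 * that has just wrapped around to 1 still compares as newer than 0xfffffffe.
 */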
 152
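/*
 * nilfs_prepare_segment_lock - install transaction context on the current task
 *
 * Returns a positive nesting count if the task already holds a NILFS
 * transaction, 0 after installing @ti (or a freshly allocated context when
 * @ti is NULL) as the outermost transaction, or -ENOMEM on allocation failure.
 */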
 153static int nilfs_prepare_segment_lock(struct super_block *sb,
 154                                      struct nilfs_transaction_info *ti)
 155{
 156        struct nilfs_transaction_info *cur_ti = current->journal_info;
 157        void *save = NULL;
 158
 159        if (cur_ti) {
 160                if (cur_ti->ti_magic == NILFS_TI_MAGIC)
 161                        return ++cur_ti->ti_count;
 162
  163                /*
  164                 * If the journal_info field is occupied by another FS,
  165                 * it is saved here and will be restored on
  166                 * nilfs_transaction_commit().
  167                 */
 168                nilfs_msg(sb, KERN_WARNING, "journal info from a different FS");
 169                save = current->journal_info;
 170        }
 171        if (!ti) {
 172                ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
 173                if (!ti)
 174                        return -ENOMEM;
 175                ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
 176        } else {
 177                ti->ti_flags = 0;
 178        }
 179        ti->ti_count = 0;
 180        ti->ti_save = save;
 181        ti->ti_magic = NILFS_TI_MAGIC;
 182        current->journal_info = ti;
 183        return 0;
 184}
 185
 186/**
 187 * nilfs_transaction_begin - start indivisible file operations.
 188 * @sb: super block
 189 * @ti: nilfs_transaction_info
 190 * @vacancy_check: flags for vacancy rate checks
 191 *
  192 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
  193 * the segment semaphore, to make segment construction and write tasks
  194 * mutually exclusive.  The function is paired with nilfs_transaction_commit().
  195 * The region enclosed by these two functions can be nested.  To avoid a
  196 * deadlock, the semaphore is only acquired or released in the outermost call.
  197 *
  198 * This function allocates a nilfs_transaction_info struct to keep the
  199 * transaction context.  It is initialized and hooked onto the current task
  200 * in the outermost call.  If a pre-allocated struct is given in @ti, it is
  201 * used instead; otherwise a new struct is allocated from the slab cache.
  202 *
  203 * When the @vacancy_check flag is set, this function checks the amount of
  204 * free space and fails with %-ENOSPC if the partition is nearly full.
  205 *
  206 * Return Value: On success, 0 is returned. On error, one of the following
  207 * negative error codes is returned.
  208 *
  209 * %-ENOMEM - Insufficient memory available.
  210 *
  211 * %-ENOSPC - No space left on device.
  212 */
 213int nilfs_transaction_begin(struct super_block *sb,
 214                            struct nilfs_transaction_info *ti,
 215                            int vacancy_check)
 216{
 217        struct the_nilfs *nilfs;
 218        int ret = nilfs_prepare_segment_lock(sb, ti);
 219        struct nilfs_transaction_info *trace_ti;
 220
 221        if (unlikely(ret < 0))
 222                return ret;
 223        if (ret > 0) {
 224                trace_ti = current->journal_info;
 225
 226                trace_nilfs2_transaction_transition(sb, trace_ti,
 227                                    trace_ti->ti_count, trace_ti->ti_flags,
 228                                    TRACE_NILFS2_TRANSACTION_BEGIN);
 229                return 0;
 230        }
 231
 232        sb_start_intwrite(sb);
 233
 234        nilfs = sb->s_fs_info;
 235        down_read(&nilfs->ns_segctor_sem);
 236        if (vacancy_check && nilfs_near_disk_full(nilfs)) {
 237                up_read(&nilfs->ns_segctor_sem);
 238                ret = -ENOSPC;
 239                goto failed;
 240        }
 241
 242        trace_ti = current->journal_info;
 243        trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
 244                                            trace_ti->ti_flags,
 245                                            TRACE_NILFS2_TRANSACTION_BEGIN);
 246        return 0;
 247
 248 failed:
 249        ti = current->journal_info;
 250        current->journal_info = ti->ti_save;
 251        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
 252                kmem_cache_free(nilfs_transaction_cachep, ti);
 253        sb_end_intwrite(sb);
 254        return ret;
 255}
 256
 257/**
 258 * nilfs_transaction_commit - commit indivisible file operations.
 259 * @sb: super block
 260 *
  261 * nilfs_transaction_commit() releases the read semaphore which is
  262 * acquired by nilfs_transaction_begin().  This is performed only in
  263 * the outermost call of this function.  If the commit flag is set,
  264 * nilfs_transaction_commit() sets a timer to start the segment
  265 * constructor.  If the sync flag is set, it starts construction
  266 * directly.
 267 */
 268int nilfs_transaction_commit(struct super_block *sb)
 269{
 270        struct nilfs_transaction_info *ti = current->journal_info;
 271        struct the_nilfs *nilfs = sb->s_fs_info;
 272        int err = 0;
 273
 274        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
 275        ti->ti_flags |= NILFS_TI_COMMIT;
 276        if (ti->ti_count > 0) {
 277                ti->ti_count--;
 278                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 279                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
 280                return 0;
 281        }
 282        if (nilfs->ns_writer) {
 283                struct nilfs_sc_info *sci = nilfs->ns_writer;
 284
 285                if (ti->ti_flags & NILFS_TI_COMMIT)
 286                        nilfs_segctor_start_timer(sci);
 287                if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
 288                        nilfs_segctor_do_flush(sci, 0);
 289        }
 290        up_read(&nilfs->ns_segctor_sem);
 291        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 292                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
 293
 294        current->journal_info = ti->ti_save;
 295
 296        if (ti->ti_flags & NILFS_TI_SYNC)
 297                err = nilfs_construct_segment(sb);
 298        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
 299                kmem_cache_free(nilfs_transaction_cachep, ti);
 300        sb_end_intwrite(sb);
 301        return err;
 302}
 303
 304void nilfs_transaction_abort(struct super_block *sb)
 305{
 306        struct nilfs_transaction_info *ti = current->journal_info;
 307        struct the_nilfs *nilfs = sb->s_fs_info;
 308
 309        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
 310        if (ti->ti_count > 0) {
 311                ti->ti_count--;
 312                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 313                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
 314                return;
 315        }
 316        up_read(&nilfs->ns_segctor_sem);
 317
 318        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 319                    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
 320
 321        current->journal_info = ti->ti_save;
 322        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
 323                kmem_cache_free(nilfs_transaction_cachep, ti);
 324        sb_end_intwrite(sb);
 325}
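
/*
 * Typical caller-side usage of the three functions above (a sketch only;
 * do_file_operation() stands in for the caller's actual work and is not
 * part of this file):
 *
 *        struct nilfs_transaction_info ti;
 *        int err;
 *
 *        err = nilfs_transaction_begin(sb, &ti, 1);
 *        if (unlikely(err))
 *                return err;
 *        err = do_file_operation();
 *        if (unlikely(err)) {
 *                nilfs_transaction_abort(sb);
 *                return err;
 *        }
 *        return nilfs_transaction_commit(sb);
 */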
 326
 327void nilfs_relax_pressure_in_lock(struct super_block *sb)
 328{
 329        struct the_nilfs *nilfs = sb->s_fs_info;
 330        struct nilfs_sc_info *sci = nilfs->ns_writer;
 331
 332        if (!sci || !sci->sc_flush_request)
 333                return;
 334
 335        set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
 336        up_read(&nilfs->ns_segctor_sem);
 337
 338        down_write(&nilfs->ns_segctor_sem);
 339        if (sci->sc_flush_request &&
 340            test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
 341                struct nilfs_transaction_info *ti = current->journal_info;
 342
 343                ti->ti_flags |= NILFS_TI_WRITER;
 344                nilfs_segctor_do_immediate_flush(sci);
 345                ti->ti_flags &= ~NILFS_TI_WRITER;
 346        }
 347        downgrade_write(&nilfs->ns_segctor_sem);
 348}
 349
 350static void nilfs_transaction_lock(struct super_block *sb,
 351                                   struct nilfs_transaction_info *ti,
 352                                   int gcflag)
 353{
 354        struct nilfs_transaction_info *cur_ti = current->journal_info;
 355        struct the_nilfs *nilfs = sb->s_fs_info;
 356        struct nilfs_sc_info *sci = nilfs->ns_writer;
 357
 358        WARN_ON(cur_ti);
 359        ti->ti_flags = NILFS_TI_WRITER;
 360        ti->ti_count = 0;
 361        ti->ti_save = cur_ti;
 362        ti->ti_magic = NILFS_TI_MAGIC;
 363        current->journal_info = ti;
 364
 365        for (;;) {
 366                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 367                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);
 368
 369                down_write(&nilfs->ns_segctor_sem);
 370                if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
 371                        break;
 372
 373                nilfs_segctor_do_immediate_flush(sci);
 374
 375                up_write(&nilfs->ns_segctor_sem);
 376                cond_resched();
 377        }
 378        if (gcflag)
 379                ti->ti_flags |= NILFS_TI_GC;
 380
 381        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 382                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
 383}
 384
 385static void nilfs_transaction_unlock(struct super_block *sb)
 386{
 387        struct nilfs_transaction_info *ti = current->journal_info;
 388        struct the_nilfs *nilfs = sb->s_fs_info;
 389
 390        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
 391        BUG_ON(ti->ti_count > 0);
 392
 393        up_write(&nilfs->ns_segctor_sem);
 394        current->journal_info = ti->ti_save;
 395
 396        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
 397                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
 398}
 399
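/*
 * nilfs_segctor_map_segsum_entry - map the next entry in the segment summary
 *
 * Returns a pointer to @bytes bytes of space at the current segment summary
 * position @ssp and advances the position.  If the entry does not fit in the
 * remaining space of the current summary block, the position is moved to the
 * head of the next summary buffer first.
 */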
 400static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
 401                                            struct nilfs_segsum_pointer *ssp,
 402                                            unsigned int bytes)
 403{
 404        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
 405        unsigned int blocksize = sci->sc_super->s_blocksize;
 406        void *p;
 407
 408        if (unlikely(ssp->offset + bytes > blocksize)) {
 409                ssp->offset = 0;
 410                BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
 411                                               &segbuf->sb_segsum_buffers));
 412                ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
 413        }
 414        p = ssp->bh->b_data + ssp->offset;
 415        ssp->offset += bytes;
 416        return p;
 417}
 418
 419/**
 420 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 421 * @sci: nilfs_sc_info
 422 */
 423static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
 424{
 425        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
 426        struct buffer_head *sumbh;
 427        unsigned int sumbytes;
 428        unsigned int flags = 0;
 429        int err;
 430
 431        if (nilfs_doing_gc())
 432                flags = NILFS_SS_GC;
 433        err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
 434        if (unlikely(err))
 435                return err;
 436
 437        sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
 438        sumbytes = segbuf->sb_sum.sumbytes;
 439        sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
 440        sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
 441        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
 442        return 0;
 443}
 444
 445static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
 446{
 447        sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
 448        if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
 449                return -E2BIG; /*
 450                                * The current segment is filled up
 451                                * (internal code)
 452                                */
 453        sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
 454        return nilfs_segctor_reset_segment_buffer(sci);
 455}
 456
 457static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
 458{
 459        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
 460        int err;
 461
 462        if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
 463                err = nilfs_segctor_feed_segment(sci);
 464                if (err)
 465                        return err;
 466                segbuf = sci->sc_curseg;
 467        }
 468        err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
 469        if (likely(!err))
 470                segbuf->sb_sum.flags |= NILFS_SS_SR;
 471        return err;
 472}
 473
 474/*
 475 * Functions for making segment summary and payloads
 476 */
 477static int nilfs_segctor_segsum_block_required(
 478        struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
 479        unsigned int binfo_size)
 480{
 481        unsigned int blocksize = sci->sc_super->s_blocksize;
  482        /* The sizes of finfo and binfo are small enough relative to the blocksize */
 483
 484        return ssp->offset + binfo_size +
 485                (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
 486                blocksize;
 487}
 488
 489static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
 490                                      struct inode *inode)
 491{
 492        sci->sc_curseg->sb_sum.nfinfo++;
 493        sci->sc_binfo_ptr = sci->sc_finfo_ptr;
 494        nilfs_segctor_map_segsum_entry(
 495                sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
 496
 497        if (NILFS_I(inode)->i_root &&
 498            !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
 499                set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
 500        /* skip finfo */
 501}
 502
 503static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
 504                                    struct inode *inode)
 505{
 506        struct nilfs_finfo *finfo;
 507        struct nilfs_inode_info *ii;
 508        struct nilfs_segment_buffer *segbuf;
 509        __u64 cno;
 510
 511        if (sci->sc_blk_cnt == 0)
 512                return;
 513
 514        ii = NILFS_I(inode);
 515
 516        if (test_bit(NILFS_I_GCINODE, &ii->i_state))
 517                cno = ii->i_cno;
 518        else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
 519                cno = 0;
 520        else
 521                cno = sci->sc_cno;
 522
 523        finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
 524                                                 sizeof(*finfo));
 525        finfo->fi_ino = cpu_to_le64(inode->i_ino);
 526        finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
 527        finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
 528        finfo->fi_cno = cpu_to_le64(cno);
 529
 530        segbuf = sci->sc_curseg;
 531        segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
 532                sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
 533        sci->sc_finfo_ptr = sci->sc_binfo_ptr;
 534        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
 535}
 536
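/*
 * nilfs_segctor_add_file_block - register a dirty buffer in the current log
 *
 * Reserves a binfo entry of @binfo_size bytes in the segment summary (opening
 * a new finfo for @inode if this is its first block), then adds @bh to the
 * payload of the current segment buffer.  When the block no longer fits, the
 * current finfo is closed and the next segment buffer is fed before retrying.
 */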
 537static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
 538                                        struct buffer_head *bh,
 539                                        struct inode *inode,
 540                                        unsigned int binfo_size)
 541{
 542        struct nilfs_segment_buffer *segbuf;
 543        int required, err = 0;
 544
 545 retry:
 546        segbuf = sci->sc_curseg;
 547        required = nilfs_segctor_segsum_block_required(
 548                sci, &sci->sc_binfo_ptr, binfo_size);
 549        if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
 550                nilfs_segctor_end_finfo(sci, inode);
 551                err = nilfs_segctor_feed_segment(sci);
 552                if (err)
 553                        return err;
 554                goto retry;
 555        }
 556        if (unlikely(required)) {
 557                err = nilfs_segbuf_extend_segsum(segbuf);
 558                if (unlikely(err))
 559                        goto failed;
 560        }
 561        if (sci->sc_blk_cnt == 0)
 562                nilfs_segctor_begin_finfo(sci, inode);
 563
 564        nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
  565        /* Substitution of the virtual block number is delayed until update_blocknr() */
 566        nilfs_segbuf_add_file_buffer(segbuf, bh);
 567        sci->sc_blk_cnt++;
 568 failed:
 569        return err;
 570}
 571
 572/*
 573 * Callback functions that enumerate, mark, and collect dirty blocks
 574 */
 575static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
 576                                   struct buffer_head *bh, struct inode *inode)
 577{
 578        int err;
 579
 580        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
 581        if (err < 0)
 582                return err;
 583
 584        err = nilfs_segctor_add_file_block(sci, bh, inode,
 585                                           sizeof(struct nilfs_binfo_v));
 586        if (!err)
 587                sci->sc_datablk_cnt++;
 588        return err;
 589}
 590
 591static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
 592                                   struct buffer_head *bh,
 593                                   struct inode *inode)
 594{
 595        return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
 596}
 597
 598static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
 599                                   struct buffer_head *bh,
 600                                   struct inode *inode)
 601{
 602        WARN_ON(!buffer_dirty(bh));
 603        return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
 604}
 605
 606static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
 607                                        struct nilfs_segsum_pointer *ssp,
 608                                        union nilfs_binfo *binfo)
 609{
 610        struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
 611                sci, ssp, sizeof(*binfo_v));
 612        *binfo_v = binfo->bi_v;
 613}
 614
 615static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
 616                                        struct nilfs_segsum_pointer *ssp,
 617                                        union nilfs_binfo *binfo)
 618{
 619        __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
 620                sci, ssp, sizeof(*vblocknr));
 621        *vblocknr = binfo->bi_v.bi_vblocknr;
 622}
 623
 624static const struct nilfs_sc_operations nilfs_sc_file_ops = {
 625        .collect_data = nilfs_collect_file_data,
 626        .collect_node = nilfs_collect_file_node,
 627        .collect_bmap = nilfs_collect_file_bmap,
 628        .write_data_binfo = nilfs_write_file_data_binfo,
 629        .write_node_binfo = nilfs_write_file_node_binfo,
 630};
 631
 632static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
 633                                  struct buffer_head *bh, struct inode *inode)
 634{
 635        int err;
 636
 637        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
 638        if (err < 0)
 639                return err;
 640
 641        err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
 642        if (!err)
 643                sci->sc_datablk_cnt++;
 644        return err;
 645}
 646
 647static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
 648                                  struct buffer_head *bh, struct inode *inode)
 649{
 650        WARN_ON(!buffer_dirty(bh));
 651        return nilfs_segctor_add_file_block(sci, bh, inode,
 652                                            sizeof(struct nilfs_binfo_dat));
 653}
 654
 655static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
 656                                       struct nilfs_segsum_pointer *ssp,
 657                                       union nilfs_binfo *binfo)
 658{
 659        __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
 660                                                          sizeof(*blkoff));
 661        *blkoff = binfo->bi_dat.bi_blkoff;
 662}
 663
 664static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
 665                                       struct nilfs_segsum_pointer *ssp,
 666                                       union nilfs_binfo *binfo)
 667{
 668        struct nilfs_binfo_dat *binfo_dat =
 669                nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
 670        *binfo_dat = binfo->bi_dat;
 671}
 672
 673static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
 674        .collect_data = nilfs_collect_dat_data,
 675        .collect_node = nilfs_collect_file_node,
 676        .collect_bmap = nilfs_collect_dat_bmap,
 677        .write_data_binfo = nilfs_write_dat_data_binfo,
 678        .write_node_binfo = nilfs_write_dat_node_binfo,
 679};
 680
 681static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
 682        .collect_data = nilfs_collect_file_data,
 683        .collect_node = NULL,
 684        .collect_bmap = NULL,
 685        .write_data_binfo = nilfs_write_file_data_binfo,
 686        .write_node_binfo = NULL,
 687};
 688
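/*
 * nilfs_lookup_dirty_data_buffers - collect dirty data buffers of an inode
 *
 * Scans the page cache of @inode for dirty pages within the byte range
 * [@start, @end], queues their dirty buffers on @listp (taking a reference on
 * each), and returns the number gathered.  Collection stops early once
 * @nlimit buffers have been queued.
 */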
 689static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
 690                                              struct list_head *listp,
 691                                              size_t nlimit,
 692                                              loff_t start, loff_t end)
 693{
 694        struct address_space *mapping = inode->i_mapping;
 695        struct pagevec pvec;
 696        pgoff_t index = 0, last = ULONG_MAX;
 697        size_t ndirties = 0;
 698        int i;
 699
 700        if (unlikely(start != 0 || end != LLONG_MAX)) {
  701                /*
  702                 * A valid range is given for syncing data pages.  The
  703                 * range is rounded to page boundaries, so extra dirty
  704                 * buffers may be included if blocksize < pagesize.
  705                 */
 706                index = start >> PAGE_SHIFT;
 707                last = end >> PAGE_SHIFT;
 708        }
 709        pagevec_init(&pvec, 0);
 710 repeat:
 711        if (unlikely(index > last) ||
 712            !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
 713                                min_t(pgoff_t, last - index,
 714                                      PAGEVEC_SIZE - 1) + 1))
 715                return ndirties;
 716
 717        for (i = 0; i < pagevec_count(&pvec); i++) {
 718                struct buffer_head *bh, *head;
 719                struct page *page = pvec.pages[i];
 720
 721                if (unlikely(page->index > last))
 722                        break;
 723
 724                lock_page(page);
 725                if (!page_has_buffers(page))
 726                        create_empty_buffers(page, 1 << inode->i_blkbits, 0);
 727                unlock_page(page);
 728
 729                bh = head = page_buffers(page);
 730                do {
 731                        if (!buffer_dirty(bh) || buffer_async_write(bh))
 732                                continue;
 733                        get_bh(bh);
 734                        list_add_tail(&bh->b_assoc_buffers, listp);
 735                        ndirties++;
 736                        if (unlikely(ndirties >= nlimit)) {
 737                                pagevec_release(&pvec);
 738                                cond_resched();
 739                                return ndirties;
 740                        }
 741                } while (bh = bh->b_this_page, bh != head);
 742        }
 743        pagevec_release(&pvec);
 744        cond_resched();
 745        goto repeat;
 746}
 747
 748static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
 749                                            struct list_head *listp)
 750{
 751        struct nilfs_inode_info *ii = NILFS_I(inode);
 752        struct address_space *mapping = &ii->i_btnode_cache;
 753        struct pagevec pvec;
 754        struct buffer_head *bh, *head;
 755        unsigned int i;
 756        pgoff_t index = 0;
 757
 758        pagevec_init(&pvec, 0);
 759
 760        while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
 761                                  PAGEVEC_SIZE)) {
 762                for (i = 0; i < pagevec_count(&pvec); i++) {
 763                        bh = head = page_buffers(pvec.pages[i]);
 764                        do {
 765                                if (buffer_dirty(bh) &&
 766                                                !buffer_async_write(bh)) {
 767                                        get_bh(bh);
 768                                        list_add_tail(&bh->b_assoc_buffers,
 769                                                      listp);
 770                                }
 771                                bh = bh->b_this_page;
 772                        } while (bh != head);
 773                }
 774                pagevec_release(&pvec);
 775                cond_resched();
 776        }
 777}
 778
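/*
 * nilfs_dispose_list - release inodes queued on a temporary dirty-file list
 *
 * Detaches each inode from @head and drops its reference in batches of
 * SC_N_INODEVEC.  Unless @force is set, inodes that are still marked
 * NILFS_I_DIRTY are moved back to the nilfs->ns_dirty_files list instead of
 * being released.
 */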
 779static void nilfs_dispose_list(struct the_nilfs *nilfs,
 780                               struct list_head *head, int force)
 781{
 782        struct nilfs_inode_info *ii, *n;
 783        struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
 784        unsigned int nv = 0;
 785
 786        while (!list_empty(head)) {
 787                spin_lock(&nilfs->ns_inode_lock);
 788                list_for_each_entry_safe(ii, n, head, i_dirty) {
 789                        list_del_init(&ii->i_dirty);
 790                        if (force) {
 791                                if (unlikely(ii->i_bh)) {
 792                                        brelse(ii->i_bh);
 793                                        ii->i_bh = NULL;
 794                                }
 795                        } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
 796                                set_bit(NILFS_I_QUEUED, &ii->i_state);
 797                                list_add_tail(&ii->i_dirty,
 798                                              &nilfs->ns_dirty_files);
 799                                continue;
 800                        }
 801                        ivec[nv++] = ii;
 802                        if (nv == SC_N_INODEVEC)
 803                                break;
 804                }
 805                spin_unlock(&nilfs->ns_inode_lock);
 806
 807                for (pii = ivec; nv > 0; pii++, nv--)
 808                        iput(&(*pii)->vfs_inode);
 809        }
 810}
 811
 812static void nilfs_iput_work_func(struct work_struct *work)
 813{
 814        struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
 815                                                 sc_iput_work);
 816        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 817
 818        nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
 819}
 820
 821static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
 822                                     struct nilfs_root *root)
 823{
 824        int ret = 0;
 825
 826        if (nilfs_mdt_fetch_dirty(root->ifile))
 827                ret++;
 828        if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
 829                ret++;
 830        if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
 831                ret++;
 832        if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
 833                ret++;
 834        return ret;
 835}
 836
 837static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
 838{
 839        return list_empty(&sci->sc_dirty_files) &&
 840                !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
 841                sci->sc_nfreesegs == 0 &&
 842                (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
 843}
 844
 845static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
 846{
 847        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 848        int ret = 0;
 849
 850        if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
 851                set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
 852
 853        spin_lock(&nilfs->ns_inode_lock);
 854        if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
 855                ret++;
 856
 857        spin_unlock(&nilfs->ns_inode_lock);
 858        return ret;
 859}
 860
 861static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
 862{
 863        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 864
 865        nilfs_mdt_clear_dirty(sci->sc_root->ifile);
 866        nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
 867        nilfs_mdt_clear_dirty(nilfs->ns_sufile);
 868        nilfs_mdt_clear_dirty(nilfs->ns_dat);
 869}
 870
 871static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
 872{
 873        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 874        struct buffer_head *bh_cp;
 875        struct nilfs_checkpoint *raw_cp;
 876        int err;
 877
 878        /* XXX: this interface will be changed */
 879        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
 880                                          &raw_cp, &bh_cp);
 881        if (likely(!err)) {
  882                /*
  883                 * The following code duplicates part of cpfile, but it is
  884                 * needed to collect the checkpoint even if it was not newly
  885                 * created.
  886                 */
 887                mark_buffer_dirty(bh_cp);
 888                nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
 889                nilfs_cpfile_put_checkpoint(
 890                        nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
 891        } else
 892                WARN_ON(err == -EINVAL || err == -ENOENT);
 893
 894        return err;
 895}
 896
 897static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
 898{
 899        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 900        struct buffer_head *bh_cp;
 901        struct nilfs_checkpoint *raw_cp;
 902        int err;
 903
 904        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
 905                                          &raw_cp, &bh_cp);
 906        if (unlikely(err)) {
 907                WARN_ON(err == -EINVAL || err == -ENOENT);
 908                goto failed_ibh;
 909        }
 910        raw_cp->cp_snapshot_list.ssl_next = 0;
 911        raw_cp->cp_snapshot_list.ssl_prev = 0;
 912        raw_cp->cp_inodes_count =
 913                cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
 914        raw_cp->cp_blocks_count =
 915                cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
 916        raw_cp->cp_nblk_inc =
 917                cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
 918        raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
 919        raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);
 920
 921        if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
 922                nilfs_checkpoint_clear_minor(raw_cp);
 923        else
 924                nilfs_checkpoint_set_minor(raw_cp);
 925
 926        nilfs_write_inode_common(sci->sc_root->ifile,
 927                                 &raw_cp->cp_ifile_inode, 1);
 928        nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
 929        return 0;
 930
 931 failed_ibh:
 932        return err;
 933}
 934
 935static void nilfs_fill_in_file_bmap(struct inode *ifile,
 936                                    struct nilfs_inode_info *ii)
 937
 938{
 939        struct buffer_head *ibh;
 940        struct nilfs_inode *raw_inode;
 941
 942        if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
 943                ibh = ii->i_bh;
 944                BUG_ON(!ibh);
 945                raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
 946                                                  ibh);
 947                nilfs_bmap_write(ii->i_bmap, raw_inode);
 948                nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
 949        }
 950}
 951
 952static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
 953{
 954        struct nilfs_inode_info *ii;
 955
 956        list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
 957                nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
 958                set_bit(NILFS_I_COLLECTED, &ii->i_state);
 959        }
 960}
 961
 962static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
 963                                             struct the_nilfs *nilfs)
 964{
 965        struct buffer_head *bh_sr;
 966        struct nilfs_super_root *raw_sr;
 967        unsigned int isz, srsz;
 968
 969        bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
 970        raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
 971        isz = nilfs->ns_inode_size;
 972        srsz = NILFS_SR_BYTES(isz);
 973
 974        raw_sr->sr_bytes = cpu_to_le16(srsz);
 975        raw_sr->sr_nongc_ctime
 976                = cpu_to_le64(nilfs_doing_gc() ?
 977                              nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
 978        raw_sr->sr_flags = 0;
 979
 980        nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
 981                                 NILFS_SR_DAT_OFFSET(isz), 1);
 982        nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
 983                                 NILFS_SR_CPFILE_OFFSET(isz), 1);
 984        nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
 985                                 NILFS_SR_SUFILE_OFFSET(isz), 1);
 986        memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
 987}
 988
 989static void nilfs_redirty_inodes(struct list_head *head)
 990{
 991        struct nilfs_inode_info *ii;
 992
 993        list_for_each_entry(ii, head, i_dirty) {
 994                if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
 995                        clear_bit(NILFS_I_COLLECTED, &ii->i_state);
 996        }
 997}
 998
 999static void nilfs_drop_collected_inodes(struct list_head *head)
1000{
1001        struct nilfs_inode_info *ii;
1002
1003        list_for_each_entry(ii, head, i_dirty) {
1004                if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
1005                        continue;
1006
1007                clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
1008                set_bit(NILFS_I_UPDATED, &ii->i_state);
1009        }
1010}
1011
1012static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
1013                                       struct inode *inode,
1014                                       struct list_head *listp,
1015                                       int (*collect)(struct nilfs_sc_info *,
1016                                                      struct buffer_head *,
1017                                                      struct inode *))
1018{
1019        struct buffer_head *bh, *n;
1020        int err = 0;
1021
1022        if (collect) {
1023                list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
1024                        list_del_init(&bh->b_assoc_buffers);
1025                        err = collect(sci, bh, inode);
1026                        brelse(bh);
1027                        if (unlikely(err))
1028                                goto dispose_buffers;
1029                }
1030                return 0;
1031        }
1032
1033 dispose_buffers:
1034        while (!list_empty(listp)) {
1035                bh = list_first_entry(listp, struct buffer_head,
1036                                      b_assoc_buffers);
1037                list_del_init(&bh->b_assoc_buffers);
1038                brelse(bh);
1039        }
1040        return err;
1041}
1042
1043static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
1044{
1045        /* Remaining number of blocks within segment buffer */
1046        return sci->sc_segbuf_nblocks -
1047                (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
1048}
1049
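/*
 * nilfs_segctor_scan_file - collect the dirty blocks of a single file
 *
 * Gathers the dirty data buffers, node buffers, and bmap buffers of @inode
 * and feeds them to the collect callbacks in @sc_ops.  If the blocks no
 * longer fit in the remaining space of the segment buffers, collection stops
 * with -E2BIG so that the caller can extend the log and retry.
 */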
1050static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
1051                                   struct inode *inode,
1052                                   const struct nilfs_sc_operations *sc_ops)
1053{
1054        LIST_HEAD(data_buffers);
1055        LIST_HEAD(node_buffers);
1056        int err;
1057
1058        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1059                size_t n, rest = nilfs_segctor_buffer_rest(sci);
1060
1061                n = nilfs_lookup_dirty_data_buffers(
1062                        inode, &data_buffers, rest + 1, 0, LLONG_MAX);
1063                if (n > rest) {
1064                        err = nilfs_segctor_apply_buffers(
1065                                sci, inode, &data_buffers,
1066                                sc_ops->collect_data);
1067                        BUG_ON(!err); /* always receive -E2BIG or true error */
1068                        goto break_or_fail;
1069                }
1070        }
1071        nilfs_lookup_dirty_node_buffers(inode, &node_buffers);
1072
1073        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1074                err = nilfs_segctor_apply_buffers(
1075                        sci, inode, &data_buffers, sc_ops->collect_data);
1076                if (unlikely(err)) {
1077                        /* dispose node list */
1078                        nilfs_segctor_apply_buffers(
1079                                sci, inode, &node_buffers, NULL);
1080                        goto break_or_fail;
1081                }
1082                sci->sc_stage.flags |= NILFS_CF_NODE;
1083        }
1084        /* Collect node */
1085        err = nilfs_segctor_apply_buffers(
1086                sci, inode, &node_buffers, sc_ops->collect_node);
1087        if (unlikely(err))
1088                goto break_or_fail;
1089
1090        nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
1091        err = nilfs_segctor_apply_buffers(
1092                sci, inode, &node_buffers, sc_ops->collect_bmap);
1093        if (unlikely(err))
1094                goto break_or_fail;
1095
1096        nilfs_segctor_end_finfo(sci, inode);
1097        sci->sc_stage.flags &= ~NILFS_CF_NODE;
1098
1099 break_or_fail:
1100        return err;
1101}
1102
1103static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
1104                                         struct inode *inode)
1105{
1106        LIST_HEAD(data_buffers);
1107        size_t n, rest = nilfs_segctor_buffer_rest(sci);
1108        int err;
1109
1110        n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
1111                                            sci->sc_dsync_start,
1112                                            sci->sc_dsync_end);
1113
1114        err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
1115                                          nilfs_collect_file_data);
1116        if (!err) {
1117                nilfs_segctor_end_finfo(sci, inode);
1118                BUG_ON(n > rest);
1119                /* always receive -E2BIG or true error if n > rest */
1120        }
1121        return err;
1122}
1123
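/*
 * nilfs_segctor_collect_blocks - run the dirty block collection stage machine
 *
 * Walks the NILFS_ST_* stages recorded in sci->sc_stage, collecting the dirty
 * blocks of GC inodes, regular files, and the metadata files in order.  Most
 * stages deliberately fall through to the next one; on failure the current
 * position is saved in sc_stage so that a retried construction can resume
 * where it left off.
 */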
1124static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
1125{
1126        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1127        struct list_head *head;
1128        struct nilfs_inode_info *ii;
1129        size_t ndone;
1130        int err = 0;
1131
1132        switch (nilfs_sc_cstage_get(sci)) {
1133        case NILFS_ST_INIT:
1134                /* Pre-processes */
1135                sci->sc_stage.flags = 0;
1136
1137                if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
1138                        sci->sc_nblk_inc = 0;
1139                        sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
1140                        if (mode == SC_LSEG_DSYNC) {
1141                                nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
1142                                goto dsync_mode;
1143                        }
1144                }
1145
1146                sci->sc_stage.dirty_file_ptr = NULL;
1147                sci->sc_stage.gc_inode_ptr = NULL;
1148                if (mode == SC_FLUSH_DAT) {
1149                        nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
1150                        goto dat_stage;
1151                }
1152                nilfs_sc_cstage_inc(sci);  /* Fall through */
1153        case NILFS_ST_GC:
1154                if (nilfs_doing_gc()) {
1155                        head = &sci->sc_gc_inodes;
1156                        ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
1157                                                head, i_dirty);
1158                        list_for_each_entry_continue(ii, head, i_dirty) {
1159                                err = nilfs_segctor_scan_file(
1160                                        sci, &ii->vfs_inode,
1161                                        &nilfs_sc_file_ops);
1162                                if (unlikely(err)) {
1163                                        sci->sc_stage.gc_inode_ptr = list_entry(
1164                                                ii->i_dirty.prev,
1165                                                struct nilfs_inode_info,
1166                                                i_dirty);
1167                                        goto break_or_fail;
1168                                }
1169                                set_bit(NILFS_I_COLLECTED, &ii->i_state);
1170                        }
1171                        sci->sc_stage.gc_inode_ptr = NULL;
1172                }
1173                nilfs_sc_cstage_inc(sci);  /* Fall through */
1174        case NILFS_ST_FILE:
1175                head = &sci->sc_dirty_files;
1176                ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
1177                                        i_dirty);
1178                list_for_each_entry_continue(ii, head, i_dirty) {
1179                        clear_bit(NILFS_I_DIRTY, &ii->i_state);
1180
1181                        err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
1182                                                      &nilfs_sc_file_ops);
1183                        if (unlikely(err)) {
1184                                sci->sc_stage.dirty_file_ptr =
1185                                        list_entry(ii->i_dirty.prev,
1186                                                   struct nilfs_inode_info,
1187                                                   i_dirty);
1188                                goto break_or_fail;
1189                        }
1190                        /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
1191                        /* XXX: required ? */
1192                }
1193                sci->sc_stage.dirty_file_ptr = NULL;
1194                if (mode == SC_FLUSH_FILE) {
1195                        nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1196                        return 0;
1197                }
1198                nilfs_sc_cstage_inc(sci);
1199                sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
1200                /* Fall through */
1201        case NILFS_ST_IFILE:
1202                err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
1203                                              &nilfs_sc_file_ops);
1204                if (unlikely(err))
1205                        break;
1206                nilfs_sc_cstage_inc(sci);
1207                /* Creating a checkpoint */
1208                err = nilfs_segctor_create_checkpoint(sci);
1209                if (unlikely(err))
1210                        break;
1211                /* Fall through */
1212        case NILFS_ST_CPFILE:
1213                err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
1214                                              &nilfs_sc_file_ops);
1215                if (unlikely(err))
1216                        break;
1217                nilfs_sc_cstage_inc(sci);  /* Fall through */
1218        case NILFS_ST_SUFILE:
1219                err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
1220                                         sci->sc_nfreesegs, &ndone);
1221                if (unlikely(err)) {
1222                        nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1223                                                  sci->sc_freesegs, ndone,
1224                                                  NULL);
1225                        break;
1226                }
1227                sci->sc_stage.flags |= NILFS_CF_SUFREED;
1228
1229                err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
1230                                              &nilfs_sc_file_ops);
1231                if (unlikely(err))
1232                        break;
1233                nilfs_sc_cstage_inc(sci);  /* Fall through */
1234        case NILFS_ST_DAT:
1235 dat_stage:
1236                err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
1237                                              &nilfs_sc_dat_ops);
1238                if (unlikely(err))
1239                        break;
1240                if (mode == SC_FLUSH_DAT) {
1241                        nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1242                        return 0;
1243                }
1244                nilfs_sc_cstage_inc(sci);  /* Fall through */
1245        case NILFS_ST_SR:
1246                if (mode == SC_LSEG_SR) {
1247                        /* Appending a super root */
1248                        err = nilfs_segctor_add_super_root(sci);
1249                        if (unlikely(err))
1250                                break;
1251                }
1252                /* End of a logical segment */
1253                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1254                nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1255                return 0;
1256        case NILFS_ST_DSYNC:
1257 dsync_mode:
1258                sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
1259                ii = sci->sc_dsync_inode;
1260                if (!test_bit(NILFS_I_BUSY, &ii->i_state))
1261                        break;
1262
1263                err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1264                if (unlikely(err))
1265                        break;
1266                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1267                nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1268                return 0;
1269        case NILFS_ST_DONE:
1270                return 0;
1271        default:
1272                BUG();
1273        }
1274
1275 break_or_fail:
1276        return err;
1277}
1278
1279/**
 1280 * nilfs_segctor_begin_construction - set up a segment buffer to make a new log
1281 * @sci: nilfs_sc_info
1282 * @nilfs: nilfs object
1283 */
1284static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
1285                                            struct the_nilfs *nilfs)
1286{
1287        struct nilfs_segment_buffer *segbuf, *prev;
1288        __u64 nextnum;
1289        int err, alloc = 0;
1290
1291        segbuf = nilfs_segbuf_new(sci->sc_super);
1292        if (unlikely(!segbuf))
1293                return -ENOMEM;
1294
1295        if (list_empty(&sci->sc_write_logs)) {
1296                nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
1297                                 nilfs->ns_pseg_offset, nilfs);
1298                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1299                        nilfs_shift_to_next_segment(nilfs);
1300                        nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
1301                }
1302
1303                segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
1304                nextnum = nilfs->ns_nextnum;
1305
1306                if (nilfs->ns_segnum == nilfs->ns_nextnum)
1307                        /* Start from the head of a new full segment */
1308                        alloc++;
1309        } else {
1310                /* Continue logs */
1311                prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1312                nilfs_segbuf_map_cont(segbuf, prev);
1313                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
1314                nextnum = prev->sb_nextnum;
1315
1316                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1317                        nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1318                        segbuf->sb_sum.seg_seq++;
1319                        alloc++;
1320                }
1321        }
1322
1323        err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
1324        if (err)
1325                goto failed;
1326
1327        if (alloc) {
1328                err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
1329                if (err)
1330                        goto failed;
1331        }
1332        nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
1333
1334        BUG_ON(!list_empty(&sci->sc_segbufs));
1335        list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
1336        sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
1337        return 0;
1338
1339 failed:
1340        nilfs_segbuf_free(segbuf);
1341        return err;
1342}
1343
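/*
 * nilfs_segctor_extend_segments - append extra segment buffers to the log
 *
 * Adds up to @nadd segment buffers to sci->sc_segbufs, allocating a new full
 * segment from the sufile for each buffer added, so that a construction that
 * overflowed the current segment can continue.  On error, every segment
 * allocated here is given back to the sufile.
 */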
1344static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
1345                                         struct the_nilfs *nilfs, int nadd)
1346{
1347        struct nilfs_segment_buffer *segbuf, *prev;
1348        struct inode *sufile = nilfs->ns_sufile;
1349        __u64 nextnextnum;
1350        LIST_HEAD(list);
1351        int err, ret, i;
1352
1353        prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
1354        /*
 1355         * Since the segment specified by nextnum might have been allocated during
 1356         * the previous construction, the buffer including its segusage may
 1357         * not be dirty.  The following call ensures that the buffer is dirty
 1358         * and pins it in memory until the sufile is written.
1359         */
1360        err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
1361        if (unlikely(err))
1362                return err;
1363
1364        for (i = 0; i < nadd; i++) {
1365                /* extend segment info */
1366                err = -ENOMEM;
1367                segbuf = nilfs_segbuf_new(sci->sc_super);
1368                if (unlikely(!segbuf))
1369                        goto failed;
1370
 1371                /* map this buffer to the on-disk region of the segment */
1372                nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1373                sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1374
1375                /* allocate the next next full segment */
1376                err = nilfs_sufile_alloc(sufile, &nextnextnum);
1377                if (unlikely(err))
1378                        goto failed_segbuf;
1379
1380                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
1381                nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
1382
1383                list_add_tail(&segbuf->sb_list, &list);
1384                prev = segbuf;
1385        }
1386        list_splice_tail(&list, &sci->sc_segbufs);
1387        return 0;
1388
1389 failed_segbuf:
1390        nilfs_segbuf_free(segbuf);
1391 failed:
1392        list_for_each_entry(segbuf, &list, sb_list) {
1393                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1394                WARN_ON(ret); /* never fails */
1395        }
1396        nilfs_destroy_logs(&list);
1397        return err;
1398}
1399
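/**
 * nilfs_free_incomplete_logs - clean up segments of logs not written out
 * @logs: list of segment buffers whose logs failed or were discarded
 * @nilfs: nilfs object
 *
 * Returns to the sufile the segments that were reserved ahead for the
 * logs on @logs but will no longer be used.  If the first log failed,
 * the partial segment area is terminated when it was appended to an
 * existing segment, or the write position is simply marked discontinued
 * when it started a new full segment.  Later extended segments whose
 * write failed are marked as erroneous in the sufile.
 */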
1400static void nilfs_free_incomplete_logs(struct list_head *logs,
1401                                       struct the_nilfs *nilfs)
1402{
1403        struct nilfs_segment_buffer *segbuf, *prev;
1404        struct inode *sufile = nilfs->ns_sufile;
1405        int ret;
1406
1407        segbuf = NILFS_FIRST_SEGBUF(logs);
1408        if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
1409                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1410                WARN_ON(ret); /* never fails */
1411        }
1412        if (atomic_read(&segbuf->sb_err)) {
1413                /* Case 1: The first segment failed */
1414                if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
1415                        /*
1416                         * Case 1a:  Partial segment appended into an existing
1417                         * segment
1418                         */
1419                        nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1420                                                segbuf->sb_fseg_end);
1421                else /* Case 1b:  New full segment */
1422                        set_nilfs_discontinued(nilfs);
1423        }
1424
1425        prev = segbuf;
1426        list_for_each_entry_continue(segbuf, logs, sb_list) {
1427                if (prev->sb_nextnum != segbuf->sb_nextnum) {
1428                        ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1429                        WARN_ON(ret); /* never fails */
1430                }
1431                if (atomic_read(&segbuf->sb_err) &&
1432                    segbuf->sb_segnum != nilfs->ns_nextnum)
1433                        /* Case 2: extended segment (!= next) failed */
1434                        nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1435                prev = segbuf;
1436        }
1437}
1438
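/**
 * nilfs_segctor_update_segusage - record usage of the segments being written
 * @sci: segment constructor object
 * @sufile: sufile inode
 *
 * Writes the number of live blocks and the construction time into the
 * segment usage entry of every segment buffer queued for this
 * construction.  The live block count is the block count of the log
 * plus the offset of the partial segment within its full segment; for
 * example, a 10-block log starting 5 blocks into its segment is
 * recorded as 15 live blocks.
 */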
1439static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1440                                          struct inode *sufile)
1441{
1442        struct nilfs_segment_buffer *segbuf;
1443        unsigned long live_blocks;
1444        int ret;
1445
1446        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1447                live_blocks = segbuf->sb_sum.nblocks +
1448                        (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
1449                ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1450                                                     live_blocks,
1451                                                     sci->sc_seg_ctime);
1452                WARN_ON(ret); /* always succeeds because the segusage is dirty */
1453        }
1454}
1455
1456static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
1457{
1458        struct nilfs_segment_buffer *segbuf;
1459        int ret;
1460
1461        segbuf = NILFS_FIRST_SEGBUF(logs);
1462        ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1463                                             segbuf->sb_pseg_start -
1464                                             segbuf->sb_fseg_start, 0);
1465        WARN_ON(ret); /* always succeeds because the segusage is dirty */
1466
1467        list_for_each_entry_continue(segbuf, logs, sb_list) {
1468                ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1469                                                     0, 0);
1470                WARN_ON(ret); /* always succeeds */
1471        }
1472}
1473
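/**
 * nilfs_segctor_truncate_segments - drop segment buffers after the last used one
 * @sci: segment constructor object
 * @last: last segment buffer actually used by the collection
 * @sufile: sufile inode
 *
 * Removes the segment buffers following @last from the construction
 * list, returns the segments reserved as their successors to the
 * sufile, and subtracts their capacity from sc_segbuf_nblocks.
 */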
1474static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1475                                            struct nilfs_segment_buffer *last,
1476                                            struct inode *sufile)
1477{
1478        struct nilfs_segment_buffer *segbuf = last;
1479        int ret;
1480
1481        list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1482                sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1483                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1484                WARN_ON(ret);
1485        }
1486        nilfs_truncate_logs(&sci->sc_segbufs, last);
1487}
1488
1489
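/**
 * nilfs_segctor_collect - run dirty block collection with segment extension
 * @sci: segment constructor object
 * @nilfs: nilfs object
 * @mode: construction mode
 *
 * Collects dirty blocks into the prepared segment buffers.  When the
 * buffers fill up (-E2BIG) during a full construction that has not yet
 * reached the cpfile stage, the partially built logs are cleared, the
 * buffer list is extended, and the collection is retried from the
 * saved stage.  The number of segments added per retry doubles each
 * round (1, 2, 4, ...) and is capped at SC_MAX_SEGDELTA.  Segment
 * buffers left unused are truncated before returning.
 *
 * Return: 0 on success, or a negative error code on failure.
 */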
1490static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1491                                 struct the_nilfs *nilfs, int mode)
1492{
1493        struct nilfs_cstage prev_stage = sci->sc_stage;
1494        int err, nadd = 1;
1495
1496        /* Collection retry loop */
1497        for (;;) {
1498                sci->sc_nblk_this_inc = 0;
1499                sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1500
1501                err = nilfs_segctor_reset_segment_buffer(sci);
1502                if (unlikely(err))
1503                        goto failed;
1504
1505                err = nilfs_segctor_collect_blocks(sci, mode);
1506                sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1507                if (!err)
1508                        break;
1509
1510                if (unlikely(err != -E2BIG))
1511                        goto failed;
1512
1513                /* The current segment is filled up */
1514                if (mode != SC_LSEG_SR ||
1515                    nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
1516                        break;
1517
1518                nilfs_clear_logs(&sci->sc_segbufs);
1519
1520                if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1521                        err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1522                                                        sci->sc_freesegs,
1523                                                        sci->sc_nfreesegs,
1524                                                        NULL);
1525                        WARN_ON(err); /* does not happen */
1526                        sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
1527                }
1528
1529                err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1530                if (unlikely(err))
1531                        return err;
1532
1533                nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1534                sci->sc_stage = prev_stage;
1535        }
1536        nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1537        return 0;
1538
1539 failed:
1540        return err;
1541}
1542
1543static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1544                                      struct buffer_head *new_bh)
1545{
1546        BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1547
1548        list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1549        /* The caller must release old_bh */
1550}
1551
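/**
 * nilfs_segctor_update_payload_blocknr - assign disk addresses to payload blocks
 * @sci: segment constructor object
 * @segbuf: segment buffer to process
 * @mode: construction mode
 *
 * Walks the payload buffers of @segbuf in order, assigns each block its
 * new on-disk block number through the owning inode's bmap, and writes
 * the matching binfo entry into the segment summary.  The binfo format
 * is chosen per finfo: dsync operations for SC_LSEG_DSYNC mode, DAT
 * operations for the DAT inode, and regular file operations otherwise.
 * Buffers replaced by the bmap during assignment are swapped in place
 * on the payload list.
 *
 * Return: 0 on success, or a negative error code if a bmap assignment
 * fails.
 */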
1552static int
1553nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1554                                     struct nilfs_segment_buffer *segbuf,
1555                                     int mode)
1556{
1557        struct inode *inode = NULL;
1558        sector_t blocknr;
1559        unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1560        unsigned long nblocks = 0, ndatablk = 0;
1561        const struct nilfs_sc_operations *sc_op = NULL;
1562        struct nilfs_segsum_pointer ssp;
1563        struct nilfs_finfo *finfo = NULL;
1564        union nilfs_binfo binfo;
1565        struct buffer_head *bh, *bh_org;
1566        ino_t ino = 0;
1567        int err = 0;
1568
1569        if (!nfinfo)
1570                goto out;
1571
1572        blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1573        ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1574        ssp.offset = sizeof(struct nilfs_segment_summary);
1575
1576        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1577                if (bh == segbuf->sb_super_root)
1578                        break;
1579                if (!finfo) {
1580                        finfo = nilfs_segctor_map_segsum_entry(
1581                                sci, &ssp, sizeof(*finfo));
1582                        ino = le64_to_cpu(finfo->fi_ino);
1583                        nblocks = le32_to_cpu(finfo->fi_nblocks);
1584                        ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1585
1586                        inode = bh->b_page->mapping->host;
1587
1588                        if (mode == SC_LSEG_DSYNC)
1589                                sc_op = &nilfs_sc_dsync_ops;
1590                        else if (ino == NILFS_DAT_INO)
1591                                sc_op = &nilfs_sc_dat_ops;
1592                        else /* file blocks */
1593                                sc_op = &nilfs_sc_file_ops;
1594                }
1595                bh_org = bh;
1596                get_bh(bh_org);
1597                err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1598                                        &binfo);
1599                if (bh != bh_org)
1600                        nilfs_list_replace_buffer(bh_org, bh);
1601                brelse(bh_org);
1602                if (unlikely(err))
1603                        goto failed_bmap;
1604
1605                if (ndatablk > 0)
1606                        sc_op->write_data_binfo(sci, &ssp, &binfo);
1607                else
1608                        sc_op->write_node_binfo(sci, &ssp, &binfo);
1609
1610                blocknr++;
1611                if (--nblocks == 0) {
1612                        finfo = NULL;
1613                        if (--nfinfo == 0)
1614                                break;
1615                } else if (ndatablk > 0)
1616                        ndatablk--;
1617        }
1618 out:
1619        return 0;
1620
1621 failed_bmap:
1622        return err;
1623}
1624
1625static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1626{
1627        struct nilfs_segment_buffer *segbuf;
1628        int err;
1629
1630        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1631                err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1632                if (unlikely(err))
1633                        return err;
1634                nilfs_segbuf_fill_in_segsum(segbuf);
1635        }
1636        return 0;
1637}
1638
1639static void nilfs_begin_page_io(struct page *page)
1640{
1641        if (!page || PageWriteback(page))
1642                /*
1643                 * For split b-tree node pages, this function may be called
1644                 * twice.  We ignore the 2nd or later calls by this check.
1645                 */
1646                return;
1647
1648        lock_page(page);
1649        clear_page_dirty_for_io(page);
1650        set_page_writeback(page);
1651        unlock_page(page);
1652}
1653
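/**
 * nilfs_segctor_prepare_write - put the pages of the logs under writeback
 * @sci: segment constructor object
 *
 * Walks the segment summary and payload buffers of every queued segment
 * buffer, clears the dirty flag of their pages and marks the pages as
 * being written back.  Payload buffers are additionally flagged as
 * being under asynchronous write; the super root buffer, if any, ends
 * the payload scan of its segment buffer.
 */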
1654static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1655{
1656        struct nilfs_segment_buffer *segbuf;
1657        struct page *bd_page = NULL, *fs_page = NULL;
1658
1659        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1660                struct buffer_head *bh;
1661
1662                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1663                                    b_assoc_buffers) {
1664                        if (bh->b_page != bd_page) {
1665                                if (bd_page) {
1666                                        lock_page(bd_page);
1667                                        clear_page_dirty_for_io(bd_page);
1668                                        set_page_writeback(bd_page);
1669                                        unlock_page(bd_page);
1670                                }
1671                                bd_page = bh->b_page;
1672                        }
1673                }
1674
1675                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1676                                    b_assoc_buffers) {
1677                        set_buffer_async_write(bh);
1678                        if (bh == segbuf->sb_super_root) {
1679                                if (bh->b_page != bd_page) {
1680                                        lock_page(bd_page);
1681                                        clear_page_dirty_for_io(bd_page);
1682                                        set_page_writeback(bd_page);
1683                                        unlock_page(bd_page);
1684                                        bd_page = bh->b_page;
1685                                }
1686                                break;
1687                        }
1688                        if (bh->b_page != fs_page) {
1689                                nilfs_begin_page_io(fs_page);
1690                                fs_page = bh->b_page;
1691                        }
1692                }
1693        }
1694        if (bd_page) {
1695                lock_page(bd_page);
1696                clear_page_dirty_for_io(bd_page);
1697                set_page_writeback(bd_page);
1698                unlock_page(bd_page);
1699        }
1700        nilfs_begin_page_io(fs_page);
1701}
1702
1703static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1704                               struct the_nilfs *nilfs)
1705{
1706        int ret;
1707
1708        ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1709        list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1710        return ret;
1711}
1712
1713static void nilfs_end_page_io(struct page *page, int err)
1714{
1715        if (!page)
1716                return;
1717
1718        if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1719                /*
1720                 * For b-tree node pages, this function may be called twice
1721                 * or more because they might be split in a segment.
1722                 */
1723                if (PageDirty(page)) {
1724                        /*
1725                         * For pages holding split b-tree node buffers, dirty
1726                         * flag on the buffers may be cleared discretely.
1727                         * In that case, the page is once redirtied for
1728                         * remaining buffers, and it must be cancelled if
1729                         * all the buffers get cleaned later.
1730                         */
1731                        lock_page(page);
1732                        if (nilfs_page_buffers_clean(page))
1733                                __nilfs_clear_page_dirty(page);
1734                        unlock_page(page);
1735                }
1736                return;
1737        }
1738
1739        if (!err) {
1740                if (!nilfs_page_buffers_clean(page))
1741                        __set_page_dirty_nobuffers(page);
1742                ClearPageError(page);
1743        } else {
1744                __set_page_dirty_nobuffers(page);
1745                SetPageError(page);
1746        }
1747
1748        end_page_writeback(page);
1749}
1750
1751static void nilfs_abort_logs(struct list_head *logs, int err)
1752{
1753        struct nilfs_segment_buffer *segbuf;
1754        struct page *bd_page = NULL, *fs_page = NULL;
1755        struct buffer_head *bh;
1756
1757        if (list_empty(logs))
1758                return;
1759
1760        list_for_each_entry(segbuf, logs, sb_list) {
1761                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1762                                    b_assoc_buffers) {
1763                        if (bh->b_page != bd_page) {
1764                                if (bd_page)
1765                                        end_page_writeback(bd_page);
1766                                bd_page = bh->b_page;
1767                        }
1768                }
1769
1770                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1771                                    b_assoc_buffers) {
1772                        clear_buffer_async_write(bh);
1773                        if (bh == segbuf->sb_super_root) {
1774                                if (bh->b_page != bd_page) {
1775                                        end_page_writeback(bd_page);
1776                                        bd_page = bh->b_page;
1777                                }
1778                                break;
1779                        }
1780                        if (bh->b_page != fs_page) {
1781                                nilfs_end_page_io(fs_page, err);
1782                                fs_page = bh->b_page;
1783                        }
1784                }
1785        }
1786        if (bd_page)
1787                end_page_writeback(bd_page);
1788
1789        nilfs_end_page_io(fs_page, err);
1790}
1791
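/**
 * nilfs_segctor_abort_construction - roll back a failed construction
 * @sci: segment constructor object
 * @nilfs: nilfs object
 * @err: error status propagated to the pages of the aborted logs
 *
 * Waits for the logs already submitted to the device, aborts the
 * writeback of their pages, cancels the segment usage updates made for
 * this construction, releases the segments reserved for incomplete
 * logs, and undoes any segment freeing requested by the GC path.
 */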
1792static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1793                                             struct the_nilfs *nilfs, int err)
1794{
1795        LIST_HEAD(logs);
1796        int ret;
1797
1798        list_splice_tail_init(&sci->sc_write_logs, &logs);
1799        ret = nilfs_wait_on_logs(&logs);
1800        nilfs_abort_logs(&logs, ret ? : err);
1801
1802        list_splice_tail_init(&sci->sc_segbufs, &logs);
1803        nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1804        nilfs_free_incomplete_logs(&logs, nilfs);
1805
1806        if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1807                ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1808                                                sci->sc_freesegs,
1809                                                sci->sc_nfreesegs,
1810                                                NULL);
1811                WARN_ON(ret); /* does not happen */
1812        }
1813
1814        nilfs_destroy_logs(&logs);
1815}
1816
1817static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1818                                   struct nilfs_segment_buffer *segbuf)
1819{
1820        nilfs->ns_segnum = segbuf->sb_segnum;
1821        nilfs->ns_nextnum = segbuf->sb_nextnum;
1822        nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1823                + segbuf->sb_sum.nblocks;
1824        nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1825        nilfs->ns_ctime = segbuf->sb_sum.ctime;
1826}
1827
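/**
 * nilfs_segctor_complete_write - finish processing of successfully written logs
 * @sci: segment constructor object
 *
 * Cleans up the buffer and page state of the written logs, drops the
 * collected inodes, and updates the log writing position recorded in
 * the_nilfs.  When a super root was written, the last segment and the
 * checkpoint number are advanced and the metadata dirty state is
 * cleared.  The NILFS_SC_UNCLOSED flag is maintained across logs that
 * begin or end a logical segment.
 */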
1828static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1829{
1830        struct nilfs_segment_buffer *segbuf;
1831        struct page *bd_page = NULL, *fs_page = NULL;
1832        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1833        int update_sr = false;
1834
1835        list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
1836                struct buffer_head *bh;
1837
1838                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1839                                    b_assoc_buffers) {
1840                        set_buffer_uptodate(bh);
1841                        clear_buffer_dirty(bh);
1842                        if (bh->b_page != bd_page) {
1843                                if (bd_page)
1844                                        end_page_writeback(bd_page);
1845                                bd_page = bh->b_page;
1846                        }
1847                }
1848                /*
1849                 * We assume that the buffers which belong to the same page
1850                 * continue over the buffer list.
1851                 * Under this assumption, the last BH of each page is
1852                 * identifiable by the discontinuity of bh->b_page
1853                 * (page != fs_page).
1854                 *
1855                 * For B-tree node blocks, however, this assumption is not
1856                 * guaranteed.  The cleanup code of B-tree node pages needs
1857                 * special care.
1858                 */
1859                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1860                                    b_assoc_buffers) {
1861                        const unsigned long set_bits = BIT(BH_Uptodate);
1862                        const unsigned long clear_bits =
1863                                (BIT(BH_Dirty) | BIT(BH_Async_Write) |
1864                                 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
1865                                 BIT(BH_NILFS_Redirected));
1866
1867                        set_mask_bits(&bh->b_state, clear_bits, set_bits);
1868                        if (bh == segbuf->sb_super_root) {
1869                                if (bh->b_page != bd_page) {
1870                                        end_page_writeback(bd_page);
1871                                        bd_page = bh->b_page;
1872                                }
1873                                update_sr = true;
1874                                break;
1875                        }
1876                        if (bh->b_page != fs_page) {
1877                                nilfs_end_page_io(fs_page, 0);
1878                                fs_page = bh->b_page;
1879                        }
1880                }
1881
1882                if (!nilfs_segbuf_simplex(segbuf)) {
1883                        if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1884                                set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1885                                sci->sc_lseg_stime = jiffies;
1886                        }
1887                        if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1888                                clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1889                }
1890        }
1891        /*
1892         * Since pages may continue over multiple segment buffers,
1893          * the end of the last page must be checked outside of the loop.
1894         */
1895        if (bd_page)
1896                end_page_writeback(bd_page);
1897
1898        nilfs_end_page_io(fs_page, 0);
1899
1900        nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1901
1902        if (nilfs_doing_gc())
1903                nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1904        else
1905                nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1906
1907        sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1908
1909        segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1910        nilfs_set_next_segment(nilfs, segbuf);
1911
1912        if (update_sr) {
1913                nilfs->ns_flushed_device = 0;
1914                nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
1915                                       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
1916
1917                clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1918                clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1919                set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1920                nilfs_segctor_clear_metadata_dirty(sci);
1921        } else
1922                clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1923}
1924
1925static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1926{
1927        int ret;
1928
1929        ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1930        if (!ret) {
1931                nilfs_segctor_complete_write(sci);
1932                nilfs_destroy_logs(&sci->sc_write_logs);
1933        }
1934        return ret;
1935}
1936
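/**
 * nilfs_segctor_collect_dirty_files - take dirty inodes over for writing
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Moves the inodes queued on nilfs->ns_dirty_files to the constructor's
 * sc_dirty_files list, reading and dirtying the ifile block of each
 * inode on the way so that its on-disk inode can be updated by this
 * construction.
 *
 * Return: 0 on success, or a negative error code if an ifile block
 * cannot be read.
 */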
1937static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1938                                             struct the_nilfs *nilfs)
1939{
1940        struct nilfs_inode_info *ii, *n;
1941        struct inode *ifile = sci->sc_root->ifile;
1942
1943        spin_lock(&nilfs->ns_inode_lock);
1944 retry:
1945        list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
1946                if (!ii->i_bh) {
1947                        struct buffer_head *ibh;
1948                        int err;
1949
1950                        spin_unlock(&nilfs->ns_inode_lock);
1951                        err = nilfs_ifile_get_inode_block(
1952                                ifile, ii->vfs_inode.i_ino, &ibh);
1953                        if (unlikely(err)) {
1954                                nilfs_msg(sci->sc_super, KERN_WARNING,
1955                                          "log writer: error %d getting inode block (ino=%lu)",
1956                                          err, ii->vfs_inode.i_ino);
1957                                return err;
1958                        }
1959                        mark_buffer_dirty(ibh);
1960                        nilfs_mdt_mark_dirty(ifile);
1961                        spin_lock(&nilfs->ns_inode_lock);
1962                        if (likely(!ii->i_bh))
1963                                ii->i_bh = ibh;
1964                        else
1965                                brelse(ibh);
1966                        goto retry;
1967                }
1968
1969                clear_bit(NILFS_I_QUEUED, &ii->i_state);
1970                set_bit(NILFS_I_BUSY, &ii->i_state);
1971                list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
1972        }
1973        spin_unlock(&nilfs->ns_inode_lock);
1974
1975        return 0;
1976}
1977
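/**
 * nilfs_segctor_drop_written_files - release inodes written by this construction
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Detaches fully written (NILFS_I_UPDATED) inodes from sc_dirty_files,
 * releases their ifile block buffers, and drops the references held on
 * them.  The final iput() is deferred to the sc_iput_work work item for
 * unlinked inodes and while the mount is not yet finished, to avoid
 * deadlocks.
 */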
1978static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1979                                             struct the_nilfs *nilfs)
1980{
1981        struct nilfs_inode_info *ii, *n;
1982        int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
1983        int defer_iput = false;
1984
1985        spin_lock(&nilfs->ns_inode_lock);
1986        list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
1987                if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
1988                    test_bit(NILFS_I_DIRTY, &ii->i_state))
1989                        continue;
1990
1991                clear_bit(NILFS_I_BUSY, &ii->i_state);
1992                brelse(ii->i_bh);
1993                ii->i_bh = NULL;
1994                list_del_init(&ii->i_dirty);
1995                if (!ii->vfs_inode.i_nlink || during_mount) {
1996                        /*
1997                         * Defer calling iput() to avoid deadlocks if
1998                         * i_nlink == 0 or mount is not yet finished.
1999                         */
2000                        list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
2001                        defer_iput = true;
2002                } else {
2003                        spin_unlock(&nilfs->ns_inode_lock);
2004                        iput(&ii->vfs_inode);
2005                        spin_lock(&nilfs->ns_inode_lock);
2006                }
2007        }
2008        spin_unlock(&nilfs->ns_inode_lock);
2009
2010        if (defer_iput)
2011                schedule_work(&sci->sc_iput_work);
2012}
2013
2014/*
2015 * Main procedure of segment constructor
2016 */
2017static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2018{
2019        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2020        int err;
2021
2022        nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
2023        sci->sc_cno = nilfs->ns_cno;
2024
2025        err = nilfs_segctor_collect_dirty_files(sci, nilfs);
2026        if (unlikely(err))
2027                goto out;
2028
2029        if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
2030                set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2031
2032        if (nilfs_segctor_clean(sci))
2033                goto out;
2034
2035        do {
2036                sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2037
2038                err = nilfs_segctor_begin_construction(sci, nilfs);
2039                if (unlikely(err))
2040                        goto out;
2041
2042                /* Update time stamp */
2043                sci->sc_seg_ctime = get_seconds();
2044
2045                err = nilfs_segctor_collect(sci, nilfs, mode);
2046                if (unlikely(err))
2047                        goto failed;
2048
2049                /* Avoid empty segment */
2050                if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
2051                    nilfs_segbuf_empty(sci->sc_curseg)) {
2052                        nilfs_segctor_abort_construction(sci, nilfs, 1);
2053                        goto out;
2054                }
2055
2056                err = nilfs_segctor_assign(sci, mode);
2057                if (unlikely(err))
2058                        goto failed;
2059
2060                if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2061                        nilfs_segctor_fill_in_file_bmap(sci);
2062
2063                if (mode == SC_LSEG_SR &&
2064                    nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
2065                        err = nilfs_segctor_fill_in_checkpoint(sci);
2066                        if (unlikely(err))
2067                                goto failed_to_write;
2068
2069                        nilfs_segctor_fill_in_super_root(sci, nilfs);
2070                }
2071                nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2072
2073                /* Write partial segments */
2074                nilfs_segctor_prepare_write(sci);
2075
2076                nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2077                                            nilfs->ns_crc_seed);
2078
2079                err = nilfs_segctor_write(sci, nilfs);
2080                if (unlikely(err))
2081                        goto failed_to_write;
2082
2083                if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
2084                    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
2085                        /*
2086                         * At this point, we avoid double buffering
2087                         * for blocksize < pagesize because page dirty
2088                         * flag is turned off during write and dirty
2089                         * buffers are not properly collected for
2090                         * pages crossing over segments.
2091                         */
2092                        err = nilfs_segctor_wait(sci);
2093                        if (err)
2094                                goto failed_to_write;
2095                }
2096        } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
2097
2098 out:
2099        nilfs_segctor_drop_written_files(sci, nilfs);
2100        return err;
2101
2102 failed_to_write:
2103        if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2104                nilfs_redirty_inodes(&sci->sc_dirty_files);
2105
2106 failed:
2107        if (nilfs_doing_gc())
2108                nilfs_redirty_inodes(&sci->sc_gc_inodes);
2109        nilfs_segctor_abort_construction(sci, nilfs, err);
2110        goto out;
2111}
2112
2113/**
2114 * nilfs_segctor_start_timer - set timer of background write
2115 * @sci: nilfs_sc_info
2116 *
2117 * If the timer has already been set, it ignores the new request.
2118 * This function MUST be called within a section locking the segment
2119 * semaphore.
2120 */
2121static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2122{
2123        spin_lock(&sci->sc_state_lock);
2124        if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2125                sci->sc_timer.expires = jiffies + sci->sc_interval;
2126                add_timer(&sci->sc_timer);
2127                sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2128        }
2129        spin_unlock(&sci->sc_state_lock);
2130}
2131
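/**
 * nilfs_segctor_do_flush - set a flush request bit and wake up segctord
 * @sci: segment constructor object
 * @bn: bit number encoding the flush target; 0 selects ordinary data
 *	files, and a metadata inode number such as NILFS_DAT_INO selects
 *	that metadata file (see FLUSH_FILE_BIT and FLUSH_DAT_BIT below)
 *
 * Records the request in sci->sc_flush_request and wakes up the
 * segment constructor thread if no other flush request was pending.
 */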
2132static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2133{
2134        spin_lock(&sci->sc_state_lock);
2135        if (!(sci->sc_flush_request & BIT(bn))) {
2136                unsigned long prev_req = sci->sc_flush_request;
2137
2138                sci->sc_flush_request |= BIT(bn);
2139                if (!prev_req)
2140                        wake_up(&sci->sc_wait_daemon);
2141        }
2142        spin_unlock(&sci->sc_state_lock);
2143}
2144
2145/**
2146 * nilfs_flush_segment - trigger a segment construction for resource control
2147 * @sb: super block
2148 * @ino: inode number of the file to be flushed out.
2149 */
2150void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2151{
2152        struct the_nilfs *nilfs = sb->s_fs_info;
2153        struct nilfs_sc_info *sci = nilfs->ns_writer;
2154
2155        if (!sci || nilfs_doing_construction())
2156                return;
2157        nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2158                                        /* assign bit 0 to data files */
2159}
2160
2161struct nilfs_segctor_wait_request {
2162        wait_queue_t    wq;
2163        __u32           seq;
2164        int             err;
2165        atomic_t        done;
2166};
2167
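/**
 * nilfs_segctor_sync - wait for the completion of a full construction
 * @sci: segment constructor object
 *
 * Registers a wait request with the next sequence number
 * (sc_seq_request), wakes up the segment constructor thread, and sleeps
 * until nilfs_segctor_wakeup() marks the request done or a signal is
 * received.
 *
 * Return: 0 on success, the error recorded for the construction, or
 * %-ERESTARTSYS if interrupted by a signal.
 */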
2168static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2169{
2170        struct nilfs_segctor_wait_request wait_req;
2171        int err = 0;
2172
2173        spin_lock(&sci->sc_state_lock);
2174        init_wait(&wait_req.wq);
2175        wait_req.err = 0;
2176        atomic_set(&wait_req.done, 0);
2177        wait_req.seq = ++sci->sc_seq_request;
2178        spin_unlock(&sci->sc_state_lock);
2179
2180        init_waitqueue_entry(&wait_req.wq, current);
2181        add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2182        set_current_state(TASK_INTERRUPTIBLE);
2183        wake_up(&sci->sc_wait_daemon);
2184
2185        for (;;) {
2186                if (atomic_read(&wait_req.done)) {
2187                        err = wait_req.err;
2188                        break;
2189                }
2190                if (!signal_pending(current)) {
2191                        schedule();
2192                        continue;
2193                }
2194                err = -ERESTARTSYS;
2195                break;
2196        }
2197        finish_wait(&sci->sc_wait_request, &wait_req.wq);
2198        return err;
2199}
2200
2201static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2202{
2203        struct nilfs_segctor_wait_request *wrq, *n;
2204        unsigned long flags;
2205
2206        spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2207        list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
2208                                 wq.task_list) {
2209                if (!atomic_read(&wrq->done) &&
2210                    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2211                        wrq->err = err;
2212                        atomic_set(&wrq->done, 1);
2213                }
2214                if (atomic_read(&wrq->done)) {
2215                        wrq->wq.func(&wrq->wq,
2216                                     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2217                                     0, NULL);
2218                }
2219        }
2220        spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2221}
2222
2223/**
2224 * nilfs_construct_segment - construct a logical segment
2225 * @sb: super block
2226 *
2227 * Return Value: On success, 0 is returned. On errors, one of the following
2228 * negative error codes is returned.
2229 *
2230 * %-EROFS - Read only filesystem.
2231 *
2232 * %-EIO - I/O error
2233 *
2234 * %-ENOSPC - No space left on device (only in a panic state).
2235 *
2236 * %-ERESTARTSYS - Interrupted.
2237 *
2238 * %-ENOMEM - Insufficient memory available.
2239 */
2240int nilfs_construct_segment(struct super_block *sb)
2241{
2242        struct the_nilfs *nilfs = sb->s_fs_info;
2243        struct nilfs_sc_info *sci = nilfs->ns_writer;
2244        struct nilfs_transaction_info *ti;
2245        int err;
2246
2247        if (!sci)
2248                return -EROFS;
2249
2250        /* A call inside transactions causes a deadlock. */
2251        BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2252
2253        err = nilfs_segctor_sync(sci);
2254        return err;
2255}
2256
2257/**
2258 * nilfs_construct_dsync_segment - construct a data-only logical segment
2259 * @sb: super block
2260 * @inode: inode whose data blocks should be written out
2261 * @start: start byte offset
2262 * @end: end byte offset (inclusive)
2263 *
2264 * Return Value: On success, 0 is returned. On errors, one of the following
2265 * negative error codes is returned.
2266 *
2267 * %-EROFS - Read only filesystem.
2268 *
2269 * %-EIO - I/O error
2270 *
2271 * %-ENOSPC - No space left on device (only in a panic state).
2272 *
2273 * %-ERESTARTSYS - Interrupted.
2274 *
2275 * %-ENOMEM - Insufficient memory available.
2276 */
2277int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2278                                  loff_t start, loff_t end)
2279{
2280        struct the_nilfs *nilfs = sb->s_fs_info;
2281        struct nilfs_sc_info *sci = nilfs->ns_writer;
2282        struct nilfs_inode_info *ii;
2283        struct nilfs_transaction_info ti;
2284        int err = 0;
2285
2286        if (!sci)
2287                return -EROFS;
2288
2289        nilfs_transaction_lock(sb, &ti, 0);
2290
2291        ii = NILFS_I(inode);
2292        if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
2293            nilfs_test_opt(nilfs, STRICT_ORDER) ||
2294            test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2295            nilfs_discontinued(nilfs)) {
2296                nilfs_transaction_unlock(sb);
2297                err = nilfs_segctor_sync(sci);
2298                return err;
2299        }
2300
2301        spin_lock(&nilfs->ns_inode_lock);
2302        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2303            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2304                spin_unlock(&nilfs->ns_inode_lock);
2305                nilfs_transaction_unlock(sb);
2306                return 0;
2307        }
2308        spin_unlock(&nilfs->ns_inode_lock);
2309        sci->sc_dsync_inode = ii;
2310        sci->sc_dsync_start = start;
2311        sci->sc_dsync_end = end;
2312
2313        err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2314        if (!err)
2315                nilfs->ns_flushed_device = 0;
2316
2317        nilfs_transaction_unlock(sb);
2318        return err;
2319}
2320
2321#define FLUSH_FILE_BIT  (0x1) /* data file only */
2322#define FLUSH_DAT_BIT   BIT(NILFS_DAT_INO) /* DAT only */
2323
2324/**
2325 * nilfs_segctor_accept - record accepted sequence count of log-write requests
2326 * @sci: segment constructor object
2327 */
2328static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2329{
2330        spin_lock(&sci->sc_state_lock);
2331        sci->sc_seq_accepted = sci->sc_seq_request;
2332        spin_unlock(&sci->sc_state_lock);
2333        del_timer_sync(&sci->sc_timer);
2334}
2335
2336/**
2337 * nilfs_segctor_notify - notify the result of request to caller threads
2338 * @sci: segment constructor object
2339 * @mode: mode of log forming
2340 * @err: error code to be notified
2341 */
2342static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2343{
2344        /* Clear requests (even when the construction failed) */
2345        spin_lock(&sci->sc_state_lock);
2346
2347        if (mode == SC_LSEG_SR) {
2348                sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2349                sci->sc_seq_done = sci->sc_seq_accepted;
2350                nilfs_segctor_wakeup(sci, err);
2351                sci->sc_flush_request = 0;
2352        } else {
2353                if (mode == SC_FLUSH_FILE)
2354                        sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2355                else if (mode == SC_FLUSH_DAT)
2356                        sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2357
2358                /* re-enable timer if checkpoint creation was not done */
2359                if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2360                    time_before(jiffies, sci->sc_timer.expires))
2361                        add_timer(&sci->sc_timer);
2362        }
2363        spin_unlock(&sci->sc_state_lock);
2364}
2365
2366/**
2367 * nilfs_segctor_construct - form logs and write them to disk
2368 * @sci: segment constructor object
2369 * @mode: mode of log forming
2370 */
2371static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2372{
2373        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2374        struct nilfs_super_block **sbp;
2375        int err = 0;
2376
2377        nilfs_segctor_accept(sci);
2378
2379        if (nilfs_discontinued(nilfs))
2380                mode = SC_LSEG_SR;
2381        if (!nilfs_segctor_confirm(sci))
2382                err = nilfs_segctor_do_construct(sci, mode);
2383
2384        if (likely(!err)) {
2385                if (mode != SC_FLUSH_DAT)
2386                        atomic_set(&nilfs->ns_ndirtyblks, 0);
2387                if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2388                    nilfs_discontinued(nilfs)) {
2389                        down_write(&nilfs->ns_sem);
2390                        err = -EIO;
2391                        sbp = nilfs_prepare_super(sci->sc_super,
2392                                                  nilfs_sb_will_flip(nilfs));
2393                        if (likely(sbp)) {
2394                                nilfs_set_log_cursor(sbp[0], nilfs);
2395                                err = nilfs_commit_super(sci->sc_super,
2396                                                         NILFS_SB_COMMIT);
2397                        }
2398                        up_write(&nilfs->ns_sem);
2399                }
2400        }
2401
2402        nilfs_segctor_notify(sci, mode, err);
2403        return err;
2404}
2405
2406static void nilfs_construction_timeout(unsigned long data)
2407{
2408        struct task_struct *p = (struct task_struct *)data;
2409
2410        wake_up_process(p);
2411}
2412
2413static void
2414nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2415{
2416        struct nilfs_inode_info *ii, *n;
2417
2418        list_for_each_entry_safe(ii, n, head, i_dirty) {
2419                if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2420                        continue;
2421                list_del_init(&ii->i_dirty);
2422                truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2423                nilfs_btnode_cache_clear(&ii->i_btnode_cache);
2424                iput(&ii->vfs_inode);
2425        }
2426}
2427
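/**
 * nilfs_clean_segments - write logs on behalf of the garbage collector
 * @sb: super block instance
 * @argv: argument vectors of the cleaning request; argv[4] describes
 *	the vector of segments to be freed
 * @kbufs: array of kernel buffers holding the request payload; kbufs[4]
 *	holds the numbers of the segments to be freed
 *
 * Saves the DAT to its shadow map, prepares the blocks moved by the
 * cleaner, and repeatedly runs a full construction (SC_LSEG_SR) until
 * it succeeds, sleeping sc_interval between retries.  If the DISCARD
 * mount option is set, discard requests are issued afterwards for the
 * freed segments, and the option is turned off when a discard fails.
 *
 * Return: 0 on success, or a negative error code on failure.
 */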
2428int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2429                         void **kbufs)
2430{
2431        struct the_nilfs *nilfs = sb->s_fs_info;
2432        struct nilfs_sc_info *sci = nilfs->ns_writer;
2433        struct nilfs_transaction_info ti;
2434        int err;
2435
2436        if (unlikely(!sci))
2437                return -EROFS;
2438
2439        nilfs_transaction_lock(sb, &ti, 1);
2440
2441        err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2442        if (unlikely(err))
2443                goto out_unlock;
2444
2445        err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2446        if (unlikely(err)) {
2447                nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
2448                goto out_unlock;
2449        }
2450
2451        sci->sc_freesegs = kbufs[4];
2452        sci->sc_nfreesegs = argv[4].v_nmembs;
2453        list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2454
2455        for (;;) {
2456                err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2457                nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2458
2459                if (likely(!err))
2460                        break;
2461
2462                nilfs_msg(sb, KERN_WARNING, "error %d cleaning segments", err);
2463                set_current_state(TASK_INTERRUPTIBLE);
2464                schedule_timeout(sci->sc_interval);
2465        }
2466        if (nilfs_test_opt(nilfs, DISCARD)) {
2467                int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2468                                                 sci->sc_nfreesegs);
2469                if (ret) {
2470                        nilfs_msg(sb, KERN_WARNING,
2471                                  "error %d on discard request, turning discards off for the device",
2472                                  ret);
2473                        nilfs_clear_opt(nilfs, DISCARD);
2474                }
2475        }
2476
2477 out_unlock:
2478        sci->sc_freesegs = NULL;
2479        sci->sc_nfreesegs = 0;
2480        nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2481        nilfs_transaction_unlock(sb);
2482        return err;
2483}
2484
2485static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2486{
2487        struct nilfs_transaction_info ti;
2488
2489        nilfs_transaction_lock(sci->sc_super, &ti, 0);
2490        nilfs_segctor_construct(sci, mode);
2491
2492        /*
2493         * An unclosed segment should be retried.  We do this using sc_timer.
2494         * A timeout of sc_timer will invoke a complete construction, which
2495         * closes the current logical segment.
2496         */
2497        if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2498                nilfs_segctor_start_timer(sci);
2499
2500        nilfs_transaction_unlock(sci->sc_super);
2501}
2502
2503static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2504{
2505        int mode = 0;
2506
2507        spin_lock(&sci->sc_state_lock);
2508        mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2509                SC_FLUSH_DAT : SC_FLUSH_FILE;
2510        spin_unlock(&sci->sc_state_lock);
2511
2512        if (mode) {
2513                nilfs_segctor_do_construct(sci, mode);
2514
2515                spin_lock(&sci->sc_state_lock);
2516                sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2517                        ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2518                spin_unlock(&sci->sc_state_lock);
2519        }
2520        clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2521}
2522
2523static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2524{
2525        if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2526            time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2527                if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2528                        return SC_FLUSH_FILE;
2529                else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2530                        return SC_FLUSH_DAT;
2531        }
2532        return SC_LSEG_SR;
2533}
2534
2535/**
2536 * nilfs_segctor_thread - main loop of the segment constructor thread.
2537 * @arg: pointer to a struct nilfs_sc_info.
2538 *
2539 * nilfs_segctor_thread() initializes a timer and serves as a daemon
2540 * to execute segment constructions.
2541 */
2542static int nilfs_segctor_thread(void *arg)
2543{
2544        struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2545        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2546        int timeout = 0;
2547
2548        sci->sc_timer.data = (unsigned long)current;
2549        sci->sc_timer.function = nilfs_construction_timeout;
2550
2551        /* start sync. */
2552        sci->sc_task = current;
2553        wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2554        nilfs_msg(sci->sc_super, KERN_INFO,
2555                  "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
2556                  sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2557
2558        spin_lock(&sci->sc_state_lock);
2559 loop:
2560        for (;;) {
2561                int mode;
2562
2563                if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2564                        goto end_thread;
2565
2566                if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2567                        mode = SC_LSEG_SR;
2568                else if (sci->sc_flush_request)
2569                        mode = nilfs_segctor_flush_mode(sci);
2570                else
2571                        break;
2572
2573                spin_unlock(&sci->sc_state_lock);
2574                nilfs_segctor_thread_construct(sci, mode);
2575                spin_lock(&sci->sc_state_lock);
2576                timeout = 0;
2577        }
2578
2579
2580        if (freezing(current)) {
2581                spin_unlock(&sci->sc_state_lock);
2582                try_to_freeze();
2583                spin_lock(&sci->sc_state_lock);
2584        } else {
2585                DEFINE_WAIT(wait);
2586                int should_sleep = 1;
2587
2588                prepare_to_wait(&sci->sc_wait_daemon, &wait,
2589                                TASK_INTERRUPTIBLE);
2590
2591                if (sci->sc_seq_request != sci->sc_seq_done)
2592                        should_sleep = 0;
2593                else if (sci->sc_flush_request)
2594                        should_sleep = 0;
2595                else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2596                        should_sleep = time_before(jiffies,
2597                                        sci->sc_timer.expires);
2598
2599                if (should_sleep) {
2600                        spin_unlock(&sci->sc_state_lock);
2601                        schedule();
2602                        spin_lock(&sci->sc_state_lock);
2603                }
2604                finish_wait(&sci->sc_wait_daemon, &wait);
2605                timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2606                           time_after_eq(jiffies, sci->sc_timer.expires));
2607
2608                if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2609                        set_nilfs_discontinued(nilfs);
2610        }
2611        goto loop;
2612
2613 end_thread:
2614        spin_unlock(&sci->sc_state_lock);
2615
2616        /* end sync. */
2617        sci->sc_task = NULL;
2618        wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2619        return 0;
2620}
2621
2622static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2623{
2624        struct task_struct *t;
2625
2626        t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2627        if (IS_ERR(t)) {
2628                int err = PTR_ERR(t);
2629
2630                nilfs_msg(sci->sc_super, KERN_ERR,
2631                          "error %d creating segctord thread", err);
2632                return err;
2633        }
2634        wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2635        return 0;
2636}
2637
2638static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2639        __acquires(&sci->sc_state_lock)
2640        __releases(&sci->sc_state_lock)
2641{
2642        sci->sc_state |= NILFS_SEGCTOR_QUIT;
2643
2644        while (sci->sc_task) {
2645                wake_up(&sci->sc_wait_daemon);
2646                spin_unlock(&sci->sc_state_lock);
2647                wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2648                spin_lock(&sci->sc_state_lock);
2649        }
2650}
2651
2652/*
2653 * Setup & clean-up functions
2654 */
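/**
 * nilfs_segctor_new - allocate and initialize a segment constructor object
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * Allocates a nilfs_sc_info, takes a reference on @root, and sets up
 * the wait queues, buffer lists, iput work item, and timer.  The
 * construction interval and watermark default to
 * NILFS_SC_DEFAULT_TIMEOUT and NILFS_SC_DEFAULT_WATERMARK and are
 * overridden by the values recorded in the_nilfs when those were set.
 *
 * Return: a pointer to the new object, or NULL on allocation failure.
 */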
2655static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2656                                               struct nilfs_root *root)
2657{
2658        struct the_nilfs *nilfs = sb->s_fs_info;
2659        struct nilfs_sc_info *sci;
2660
2661        sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2662        if (!sci)
2663                return NULL;
2664
2665        sci->sc_super = sb;
2666
2667        nilfs_get_root(root);
2668        sci->sc_root = root;
2669
2670        init_waitqueue_head(&sci->sc_wait_request);
2671        init_waitqueue_head(&sci->sc_wait_daemon);
2672        init_waitqueue_head(&sci->sc_wait_task);
2673        spin_lock_init(&sci->sc_state_lock);
2674        INIT_LIST_HEAD(&sci->sc_dirty_files);
2675        INIT_LIST_HEAD(&sci->sc_segbufs);
2676        INIT_LIST_HEAD(&sci->sc_write_logs);
2677        INIT_LIST_HEAD(&sci->sc_gc_inodes);
2678        INIT_LIST_HEAD(&sci->sc_iput_queue);
2679        INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
2680        init_timer(&sci->sc_timer);
2681
2682        sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2683        sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2684        sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2685
2686        if (nilfs->ns_interval)
2687                sci->sc_interval = HZ * nilfs->ns_interval;
2688        if (nilfs->ns_watermark)
2689                sci->sc_watermark = nilfs->ns_watermark;
2690        return sci;
2691}
2692
2693static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2694{
2695        int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2696
2697        /*
2698         * The segctord thread was stopped and its timer was removed.
2699         * But some tasks remain.
2700         */
2701        do {
2702                struct nilfs_transaction_info ti;
2703
2704                nilfs_transaction_lock(sci->sc_super, &ti, 0);
2705                ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2706                nilfs_transaction_unlock(sci->sc_super);
2707
2708                flush_work(&sci->sc_iput_work);
2709
2710        } while (ret && retrycount-- > 0);
2711}
2712
2713/**
2714 * nilfs_segctor_destroy - destroy the segment constructor.
2715 * @sci: nilfs_sc_info
2716 *
2717 * nilfs_segctor_destroy() kills the segctord thread and frees
2718 * the nilfs_sc_info struct.
2719 * Caller must hold the segment semaphore.
2720 */
2721static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2722{
2723        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2724        int flag;
2725
2726        up_write(&nilfs->ns_segctor_sem);
2727
2728        spin_lock(&sci->sc_state_lock);
2729        nilfs_segctor_kill_thread(sci);
2730        flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2731                || sci->sc_seq_request != sci->sc_seq_done);
2732        spin_unlock(&sci->sc_state_lock);
2733
2734        if (flush_work(&sci->sc_iput_work))
2735                flag = true;
2736
2737        if (flag || !nilfs_segctor_confirm(sci))
2738                nilfs_segctor_write_out(sci);
2739
2740        if (!list_empty(&sci->sc_dirty_files)) {
2741                nilfs_msg(sci->sc_super, KERN_WARNING,
2742                          "disposed unprocessed dirty file(s) when stopping log writer");
2743                nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2744        }
2745
2746        if (!list_empty(&sci->sc_iput_queue)) {
2747                nilfs_msg(sci->sc_super, KERN_WARNING,
2748                          "disposed unprocessed inode(s) in iput queue when stopping log writer");
2749                nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
2750        }
2751
2752        WARN_ON(!list_empty(&sci->sc_segbufs));
2753        WARN_ON(!list_empty(&sci->sc_write_logs));
2754
2755        nilfs_put_root(sci->sc_root);
2756
2757        down_write(&nilfs->ns_segctor_sem);
2758
2759        del_timer_sync(&sci->sc_timer);
2760        kfree(sci);
2761}
2762
2763/**
2764 * nilfs_attach_log_writer - attach log writer
2765 * @sb: super block instance
2766 * @root: root object of the current filesystem tree
2767 *
2768 * This allocates a log writer object, initializes it, and starts the
2769 * log writer.
2770 *
2771 * Return Value: On success, 0 is returned. On error, one of the following
2772 * negative error codes is returned.
2773 *
2774 * %-ENOMEM - Insufficient memory available.
2775 */
2776int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
2777{
2778        struct the_nilfs *nilfs = sb->s_fs_info;
2779        int err;
2780
2781        if (nilfs->ns_writer) {
2782                /*
2783                 * This happens if the filesystem was remounted
2784                 * read/write after nilfs_error degenerated it into a
2785                 * read-only mount.
2786                 */
2787                nilfs_detach_log_writer(sb);
2788        }
2789
2790        nilfs->ns_writer = nilfs_segctor_new(sb, root);
2791        if (!nilfs->ns_writer)
2792                return -ENOMEM;
2793
2794        err = nilfs_segctor_start_thread(nilfs->ns_writer);
2795        if (err) {
2796                kfree(nilfs->ns_writer);
2797                nilfs->ns_writer = NULL;
2798        }
2799        return err;
2800}
2801
2802/**
2803 * nilfs_detach_log_writer - destroy log writer
2804 * @sb: super block instance
2805 *
2806 * This kills log writer daemon, frees the log writer object, and
2807 * destroys list of dirty files.
2808 */
2809void nilfs_detach_log_writer(struct super_block *sb)
2810{
2811        struct the_nilfs *nilfs = sb->s_fs_info;
2812        LIST_HEAD(garbage_list);
2813
2814        down_write(&nilfs->ns_segctor_sem);
2815        if (nilfs->ns_writer) {
2816                nilfs_segctor_destroy(nilfs->ns_writer);
2817                nilfs->ns_writer = NULL;
2818        }
2819
2820        /* Forcibly free the list of dirty files */
2821        spin_lock(&nilfs->ns_inode_lock);
2822        if (!list_empty(&nilfs->ns_dirty_files)) {
2823                list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
2824                nilfs_msg(sb, KERN_WARNING,
2825                          "disposed unprocessed dirty file(s) when detaching log writer");
2826        }
2827        spin_unlock(&nilfs->ns_inode_lock);
2828        up_write(&nilfs->ns_segctor_sem);
2829
2830        nilfs_dispose_list(nilfs, &garbage_list, 1);
2831}
2832