linux/fs/nilfs2/segment.c
   1/*
   2 * segment.c - NILFS segment constructor.
   3 *
   4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  19 *
  20 * Written by Ryusuke Konishi <ryusuke@osrg.net>
  21 *
  22 */
  23
  24#include <linux/pagemap.h>
  25#include <linux/buffer_head.h>
  26#include <linux/writeback.h>
  27#include <linux/bio.h>
  28#include <linux/completion.h>
  29#include <linux/blkdev.h>
  30#include <linux/backing-dev.h>
  31#include <linux/freezer.h>
  32#include <linux/kthread.h>
  33#include <linux/crc32.h>
  34#include <linux/pagevec.h>
  35#include <linux/slab.h>
  36#include "nilfs.h"
  37#include "btnode.h"
  38#include "page.h"
  39#include "segment.h"
  40#include "sufile.h"
  41#include "cpfile.h"
  42#include "ifile.h"
  43#include "segbuf.h"
  44
  45
  46/*
  47 * Segment constructor
  48 */
  49#define SC_N_INODEVEC   16   /* Size of locally allocated inode vector */
  50
  51#define SC_MAX_SEGDELTA 64   /* Upper limit of the number of segments
   52                                appended in the collection retry loop */
  53
  54/* Construction mode */
  55enum {
  56        SC_LSEG_SR = 1, /* Make a logical segment having a super root */
  57        SC_LSEG_DSYNC,  /* Flush data blocks of a given file and make
  58                           a logical segment without a super root */
   59        SC_FLUSH_FILE,  /* Flush data files; leads to segment writes without
  60                           creating a checkpoint */
  61        SC_FLUSH_DAT,   /* Flush DAT file. This also creates segments without
  62                           a checkpoint */
  63};
  64
  65/* Stage numbers of dirty block collection */
  66enum {
  67        NILFS_ST_INIT = 0,
  68        NILFS_ST_GC,            /* Collecting dirty blocks for GC */
  69        NILFS_ST_FILE,
  70        NILFS_ST_IFILE,
  71        NILFS_ST_CPFILE,
  72        NILFS_ST_SUFILE,
  73        NILFS_ST_DAT,
  74        NILFS_ST_SR,            /* Super root */
  75        NILFS_ST_DSYNC,         /* Data sync blocks */
  76        NILFS_ST_DONE,
  77};
  78
  79/* State flags of collection */
  80#define NILFS_CF_NODE           0x0001  /* Collecting node blocks */
  81#define NILFS_CF_IFILE_STARTED  0x0002  /* IFILE stage has started */
   82#define NILFS_CF_SUFREED        0x0004  /* segment usages have been freed */
  83#define NILFS_CF_HISTORY_MASK   (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
  84
  85/* Operations depending on the construction mode and file type */
  86struct nilfs_sc_operations {
  87        int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
  88                            struct inode *);
  89        int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
  90                            struct inode *);
  91        int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
  92                            struct inode *);
  93        void (*write_data_binfo)(struct nilfs_sc_info *,
  94                                 struct nilfs_segsum_pointer *,
  95                                 union nilfs_binfo *);
  96        void (*write_node_binfo)(struct nilfs_sc_info *,
  97                                 struct nilfs_segsum_pointer *,
  98                                 union nilfs_binfo *);
  99};
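
/*
 * Editorial note (not part of the original source): three instances of
 * these operations are defined later in this file: nilfs_sc_file_ops for
 * ordinary and metadata files, nilfs_sc_dat_ops for the DAT file, and
 * nilfs_sc_dsync_ops for data-sync writes, which collects data blocks
 * only (its node and bmap callbacks are NULL).
 */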
 100
 101/*
 102 * Other definitions
 103 */
 104static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
 105static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
 106static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
 107static void nilfs_dispose_list(struct nilfs_sb_info *, struct list_head *,
 108                               int);
 109
 110#define nilfs_cnt32_gt(a, b)   \
 111        (typecheck(__u32, a) && typecheck(__u32, b) && \
 112         ((__s32)(b) - (__s32)(a) < 0))
 113#define nilfs_cnt32_ge(a, b)   \
 114        (typecheck(__u32, a) && typecheck(__u32, b) && \
 115         ((__s32)(a) - (__s32)(b) >= 0))
 116#define nilfs_cnt32_lt(a, b)  nilfs_cnt32_gt(b, a)
 117#define nilfs_cnt32_le(a, b)  nilfs_cnt32_ge(b, a)
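
/*
 * Editorial note (not part of the original source): like the kernel's
 * time_after() helpers, these macros compare 32-bit sequence counters
 * modulo 2^32, so they stay correct across wraparound.  For example,
 * nilfs_cnt32_gt(3, 0xfffffffe) is true because
 * (__s32)0xfffffffe - (__s32)3 == -5, which is negative.
 */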
 118
 119static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
 120{
 121        struct nilfs_transaction_info *cur_ti = current->journal_info;
 122        void *save = NULL;
 123
 124        if (cur_ti) {
 125                if (cur_ti->ti_magic == NILFS_TI_MAGIC)
 126                        return ++cur_ti->ti_count;
 127                else {
 128                        /*
 129                         * If journal_info field is occupied by other FS,
 130                         * it is saved and will be restored on
 131                         * nilfs_transaction_commit().
 132                         */
 133                        printk(KERN_WARNING
 134                               "NILFS warning: journal info from a different "
 135                               "FS\n");
 136                        save = current->journal_info;
 137                }
 138        }
 139        if (!ti) {
 140                ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
 141                if (!ti)
 142                        return -ENOMEM;
 143                ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
 144        } else {
 145                ti->ti_flags = 0;
 146        }
 147        ti->ti_count = 0;
 148        ti->ti_save = save;
 149        ti->ti_magic = NILFS_TI_MAGIC;
 150        current->journal_info = ti;
 151        return 0;
 152}
 153
 154/**
 155 * nilfs_transaction_begin - start indivisible file operations.
 156 * @sb: super block
 157 * @ti: nilfs_transaction_info
 158 * @vacancy_check: flags for vacancy rate checks
 159 *
  160 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
  161 * the segment semaphore, to make segment construction and write tasks
  162 * mutually exclusive.  It is used in pairs with nilfs_transaction_commit().
 163 * The region enclosed by these two functions can be nested.  To avoid a
 164 * deadlock, the semaphore is only acquired or released in the outermost call.
 165 *
 166 * This function allocates a nilfs_transaction_info struct to keep context
 167 * information on it.  It is initialized and hooked onto the current task in
 168 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 169 * instead; otherwise a new struct is assigned from a slab.
 170 *
  171 * When the @vacancy_check flag is set, this function checks the amount of
  172 * free space and waits for the GC to reclaim disk space if capacity is low.
 173 *
  174 * Return Value: On success, 0 is returned.  On error, one of the following
  175 * negative error codes is returned.
 176 *
 177 * %-ENOMEM - Insufficient memory available.
 178 *
 179 * %-ENOSPC - No space left on device
 180 */
 181int nilfs_transaction_begin(struct super_block *sb,
 182                            struct nilfs_transaction_info *ti,
 183                            int vacancy_check)
 184{
 185        struct nilfs_sb_info *sbi;
 186        struct the_nilfs *nilfs;
 187        int ret = nilfs_prepare_segment_lock(ti);
 188
 189        if (unlikely(ret < 0))
 190                return ret;
 191        if (ret > 0)
 192                return 0;
 193
 194        vfs_check_frozen(sb, SB_FREEZE_WRITE);
 195
 196        sbi = NILFS_SB(sb);
 197        nilfs = sbi->s_nilfs;
 198        down_read(&nilfs->ns_segctor_sem);
 199        if (vacancy_check && nilfs_near_disk_full(nilfs)) {
 200                up_read(&nilfs->ns_segctor_sem);
 201                ret = -ENOSPC;
 202                goto failed;
 203        }
 204        return 0;
 205
 206 failed:
 207        ti = current->journal_info;
 208        current->journal_info = ti->ti_save;
 209        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
 210                kmem_cache_free(nilfs_transaction_cachep, ti);
 211        return ret;
 212}
 213
 214/**
 215 * nilfs_transaction_commit - commit indivisible file operations.
 216 * @sb: super block
 217 *
 218 * nilfs_transaction_commit() releases the read semaphore which is
  219 * acquired by nilfs_transaction_begin().  This is only performed
  220 * in the outermost call of this function.  If a commit flag is set,
 221 * nilfs_transaction_commit() sets a timer to start the segment
 222 * constructor.  If a sync flag is set, it starts construction
 223 * directly.
 224 */
 225int nilfs_transaction_commit(struct super_block *sb)
 226{
 227        struct nilfs_transaction_info *ti = current->journal_info;
 228        struct nilfs_sb_info *sbi;
 229        struct nilfs_sc_info *sci;
 230        int err = 0;
 231
 232        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
 233        ti->ti_flags |= NILFS_TI_COMMIT;
 234        if (ti->ti_count > 0) {
 235                ti->ti_count--;
 236                return 0;
 237        }
 238        sbi = NILFS_SB(sb);
 239        sci = NILFS_SC(sbi);
 240        if (sci != NULL) {
 241                if (ti->ti_flags & NILFS_TI_COMMIT)
 242                        nilfs_segctor_start_timer(sci);
 243                if (atomic_read(&sbi->s_nilfs->ns_ndirtyblks) >
 244                    sci->sc_watermark)
 245                        nilfs_segctor_do_flush(sci, 0);
 246        }
 247        up_read(&sbi->s_nilfs->ns_segctor_sem);
 248        current->journal_info = ti->ti_save;
 249
 250        if (ti->ti_flags & NILFS_TI_SYNC)
 251                err = nilfs_construct_segment(sb);
 252        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
 253                kmem_cache_free(nilfs_transaction_cachep, ti);
 254        return err;
 255}
 256
 257void nilfs_transaction_abort(struct super_block *sb)
 258{
 259        struct nilfs_transaction_info *ti = current->journal_info;
 260
 261        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
 262        if (ti->ti_count > 0) {
 263                ti->ti_count--;
 264                return;
 265        }
 266        up_read(&NILFS_SB(sb)->s_nilfs->ns_segctor_sem);
 267
 268        current->journal_info = ti->ti_save;
 269        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
 270                kmem_cache_free(nilfs_transaction_cachep, ti);
 271}
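
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * a typical metadata update wraps its work in a transaction.  The helper
 * names nilfs_example_update() and do_the_metadata_change() are
 * hypothetical and used only for this sketch.
 */
#if 0   /* illustration only; not compiled */
static int nilfs_example_update(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        int err;

        err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
        if (unlikely(err))
                return err;     /* e.g. -ENOMEM or -ENOSPC */

        err = do_the_metadata_change(inode);    /* hypothetical helper */
        if (unlikely(err)) {
                nilfs_transaction_abort(inode->i_sb);
                return err;
        }
        return nilfs_transaction_commit(inode->i_sb);
}
#endif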
 272
 273void nilfs_relax_pressure_in_lock(struct super_block *sb)
 274{
 275        struct nilfs_sb_info *sbi = NILFS_SB(sb);
 276        struct nilfs_sc_info *sci = NILFS_SC(sbi);
 277        struct the_nilfs *nilfs = sbi->s_nilfs;
 278
 279        if (!sci || !sci->sc_flush_request)
 280                return;
 281
 282        set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
 283        up_read(&nilfs->ns_segctor_sem);
 284
 285        down_write(&nilfs->ns_segctor_sem);
 286        if (sci->sc_flush_request &&
 287            test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
 288                struct nilfs_transaction_info *ti = current->journal_info;
 289
 290                ti->ti_flags |= NILFS_TI_WRITER;
 291                nilfs_segctor_do_immediate_flush(sci);
 292                ti->ti_flags &= ~NILFS_TI_WRITER;
 293        }
 294        downgrade_write(&nilfs->ns_segctor_sem);
 295}
 296
 297static void nilfs_transaction_lock(struct nilfs_sb_info *sbi,
 298                                   struct nilfs_transaction_info *ti,
 299                                   int gcflag)
 300{
 301        struct nilfs_transaction_info *cur_ti = current->journal_info;
 302
 303        WARN_ON(cur_ti);
 304        ti->ti_flags = NILFS_TI_WRITER;
 305        ti->ti_count = 0;
 306        ti->ti_save = cur_ti;
 307        ti->ti_magic = NILFS_TI_MAGIC;
 308        INIT_LIST_HEAD(&ti->ti_garbage);
 309        current->journal_info = ti;
 310
 311        for (;;) {
 312                down_write(&sbi->s_nilfs->ns_segctor_sem);
 313                if (!test_bit(NILFS_SC_PRIOR_FLUSH, &NILFS_SC(sbi)->sc_flags))
 314                        break;
 315
 316                nilfs_segctor_do_immediate_flush(NILFS_SC(sbi));
 317
 318                up_write(&sbi->s_nilfs->ns_segctor_sem);
 319                yield();
 320        }
 321        if (gcflag)
 322                ti->ti_flags |= NILFS_TI_GC;
 323}
 324
 325static void nilfs_transaction_unlock(struct nilfs_sb_info *sbi)
 326{
 327        struct nilfs_transaction_info *ti = current->journal_info;
 328
 329        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
 330        BUG_ON(ti->ti_count > 0);
 331
 332        up_write(&sbi->s_nilfs->ns_segctor_sem);
 333        current->journal_info = ti->ti_save;
 334        if (!list_empty(&ti->ti_garbage))
 335                nilfs_dispose_list(sbi, &ti->ti_garbage, 0);
 336}
 337
 338static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
 339                                            struct nilfs_segsum_pointer *ssp,
 340                                            unsigned bytes)
 341{
 342        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
 343        unsigned blocksize = sci->sc_super->s_blocksize;
 344        void *p;
 345
 346        if (unlikely(ssp->offset + bytes > blocksize)) {
 347                ssp->offset = 0;
 348                BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
 349                                               &segbuf->sb_segsum_buffers));
 350                ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
 351        }
 352        p = ssp->bh->b_data + ssp->offset;
 353        ssp->offset += bytes;
 354        return p;
 355}
 356
 357/**
 358 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 359 * @sci: nilfs_sc_info
 360 */
 361static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
 362{
 363        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
 364        struct buffer_head *sumbh;
 365        unsigned sumbytes;
 366        unsigned flags = 0;
 367        int err;
 368
 369        if (nilfs_doing_gc())
 370                flags = NILFS_SS_GC;
 371        err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
 372        if (unlikely(err))
 373                return err;
 374
 375        sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
 376        sumbytes = segbuf->sb_sum.sumbytes;
 377        sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
 378        sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
 379        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
 380        return 0;
 381}
 382
 383static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
 384{
 385        sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
 386        if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
 387                return -E2BIG; /* The current segment is filled up
 388                                  (internal code) */
 389        sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
 390        return nilfs_segctor_reset_segment_buffer(sci);
 391}
 392
 393static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
 394{
 395        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
 396        int err;
 397
 398        if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
 399                err = nilfs_segctor_feed_segment(sci);
 400                if (err)
 401                        return err;
 402                segbuf = sci->sc_curseg;
 403        }
 404        err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
 405        if (likely(!err))
 406                segbuf->sb_sum.flags |= NILFS_SS_SR;
 407        return err;
 408}
 409
 410/*
 411 * Functions for making segment summary and payloads
 412 */
 413static int nilfs_segctor_segsum_block_required(
 414        struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
 415        unsigned binfo_size)
 416{
 417        unsigned blocksize = sci->sc_super->s_blocksize;
  418        /* Sizes of finfo and binfo are small enough compared to blocksize */
 419
 420        return ssp->offset + binfo_size +
 421                (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
 422                blocksize;
 423}
 424
 425static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
 426                                      struct inode *inode)
 427{
 428        sci->sc_curseg->sb_sum.nfinfo++;
 429        sci->sc_binfo_ptr = sci->sc_finfo_ptr;
 430        nilfs_segctor_map_segsum_entry(
 431                sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
 432
 433        if (NILFS_I(inode)->i_root &&
 434            !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
 435                set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
 436        /* skip finfo */
 437}
 438
 439static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
 440                                    struct inode *inode)
 441{
 442        struct nilfs_finfo *finfo;
 443        struct nilfs_inode_info *ii;
 444        struct nilfs_segment_buffer *segbuf;
 445        __u64 cno;
 446
 447        if (sci->sc_blk_cnt == 0)
 448                return;
 449
 450        ii = NILFS_I(inode);
 451
 452        if (test_bit(NILFS_I_GCINODE, &ii->i_state))
 453                cno = ii->i_cno;
 454        else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
 455                cno = 0;
 456        else
 457                cno = sci->sc_cno;
 458
 459        finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
 460                                                 sizeof(*finfo));
 461        finfo->fi_ino = cpu_to_le64(inode->i_ino);
 462        finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
 463        finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
 464        finfo->fi_cno = cpu_to_le64(cno);
 465
 466        segbuf = sci->sc_curseg;
 467        segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
 468                sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
 469        sci->sc_finfo_ptr = sci->sc_binfo_ptr;
 470        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
 471}
 472
 473static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
 474                                        struct buffer_head *bh,
 475                                        struct inode *inode,
 476                                        unsigned binfo_size)
 477{
 478        struct nilfs_segment_buffer *segbuf;
 479        int required, err = 0;
 480
 481 retry:
 482        segbuf = sci->sc_curseg;
 483        required = nilfs_segctor_segsum_block_required(
 484                sci, &sci->sc_binfo_ptr, binfo_size);
 485        if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
 486                nilfs_segctor_end_finfo(sci, inode);
 487                err = nilfs_segctor_feed_segment(sci);
 488                if (err)
 489                        return err;
 490                goto retry;
 491        }
 492        if (unlikely(required)) {
 493                err = nilfs_segbuf_extend_segsum(segbuf);
 494                if (unlikely(err))
 495                        goto failed;
 496        }
 497        if (sci->sc_blk_cnt == 0)
 498                nilfs_segctor_begin_finfo(sci, inode);
 499
 500        nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
  501        /* Filling in the vblocknr is delayed until update_blocknr() */
 502        nilfs_segbuf_add_file_buffer(segbuf, bh);
 503        sci->sc_blk_cnt++;
 504 failed:
 505        return err;
 506}
 507
 508/*
 509 * Callback functions that enumerate, mark, and collect dirty blocks
 510 */
 511static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
 512                                   struct buffer_head *bh, struct inode *inode)
 513{
 514        int err;
 515
 516        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
 517        if (err < 0)
 518                return err;
 519
 520        err = nilfs_segctor_add_file_block(sci, bh, inode,
 521                                           sizeof(struct nilfs_binfo_v));
 522        if (!err)
 523                sci->sc_datablk_cnt++;
 524        return err;
 525}
 526
 527static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
 528                                   struct buffer_head *bh,
 529                                   struct inode *inode)
 530{
 531        return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
 532}
 533
 534static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
 535                                   struct buffer_head *bh,
 536                                   struct inode *inode)
 537{
 538        WARN_ON(!buffer_dirty(bh));
 539        return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
 540}
 541
 542static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
 543                                        struct nilfs_segsum_pointer *ssp,
 544                                        union nilfs_binfo *binfo)
 545{
 546        struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
 547                sci, ssp, sizeof(*binfo_v));
 548        *binfo_v = binfo->bi_v;
 549}
 550
 551static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
 552                                        struct nilfs_segsum_pointer *ssp,
 553                                        union nilfs_binfo *binfo)
 554{
 555        __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
 556                sci, ssp, sizeof(*vblocknr));
 557        *vblocknr = binfo->bi_v.bi_vblocknr;
 558}
 559
 560static struct nilfs_sc_operations nilfs_sc_file_ops = {
 561        .collect_data = nilfs_collect_file_data,
 562        .collect_node = nilfs_collect_file_node,
 563        .collect_bmap = nilfs_collect_file_bmap,
 564        .write_data_binfo = nilfs_write_file_data_binfo,
 565        .write_node_binfo = nilfs_write_file_node_binfo,
 566};
 567
 568static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
 569                                  struct buffer_head *bh, struct inode *inode)
 570{
 571        int err;
 572
 573        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
 574        if (err < 0)
 575                return err;
 576
 577        err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
 578        if (!err)
 579                sci->sc_datablk_cnt++;
 580        return err;
 581}
 582
 583static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
 584                                  struct buffer_head *bh, struct inode *inode)
 585{
 586        WARN_ON(!buffer_dirty(bh));
 587        return nilfs_segctor_add_file_block(sci, bh, inode,
 588                                            sizeof(struct nilfs_binfo_dat));
 589}
 590
 591static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
 592                                       struct nilfs_segsum_pointer *ssp,
 593                                       union nilfs_binfo *binfo)
 594{
 595        __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
 596                                                          sizeof(*blkoff));
 597        *blkoff = binfo->bi_dat.bi_blkoff;
 598}
 599
 600static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
 601                                       struct nilfs_segsum_pointer *ssp,
 602                                       union nilfs_binfo *binfo)
 603{
 604        struct nilfs_binfo_dat *binfo_dat =
 605                nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
 606        *binfo_dat = binfo->bi_dat;
 607}
 608
 609static struct nilfs_sc_operations nilfs_sc_dat_ops = {
 610        .collect_data = nilfs_collect_dat_data,
 611        .collect_node = nilfs_collect_file_node,
 612        .collect_bmap = nilfs_collect_dat_bmap,
 613        .write_data_binfo = nilfs_write_dat_data_binfo,
 614        .write_node_binfo = nilfs_write_dat_node_binfo,
 615};
 616
 617static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
 618        .collect_data = nilfs_collect_file_data,
 619        .collect_node = NULL,
 620        .collect_bmap = NULL,
 621        .write_data_binfo = nilfs_write_file_data_binfo,
 622        .write_node_binfo = NULL,
 623};
 624
 625static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
 626                                              struct list_head *listp,
 627                                              size_t nlimit,
 628                                              loff_t start, loff_t end)
 629{
 630        struct address_space *mapping = inode->i_mapping;
 631        struct pagevec pvec;
 632        pgoff_t index = 0, last = ULONG_MAX;
 633        size_t ndirties = 0;
 634        int i;
 635
 636        if (unlikely(start != 0 || end != LLONG_MAX)) {
 637                /*
  638                 * A valid range is given for syncing data pages.  The
  639                 * range is rounded to per-page boundaries; extra dirty
  640                 * buffers may be included if blocksize < pagesize.
 641                 */
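                /*
                 * Editorial example (not from the original source): with
                 * 4 KiB pages (PAGE_SHIFT == 12), start == 0x3100 and
                 * end == 0x5fff give index == 3 and last == 5, so pages
                 * 3 through 5 are scanned even though the byte range does
                 * not begin on a page boundary.
                 */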
 642                index = start >> PAGE_SHIFT;
 643                last = end >> PAGE_SHIFT;
 644        }
 645        pagevec_init(&pvec, 0);
 646 repeat:
 647        if (unlikely(index > last) ||
 648            !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
 649                                min_t(pgoff_t, last - index,
 650                                      PAGEVEC_SIZE - 1) + 1))
 651                return ndirties;
 652
 653        for (i = 0; i < pagevec_count(&pvec); i++) {
 654                struct buffer_head *bh, *head;
 655                struct page *page = pvec.pages[i];
 656
 657                if (unlikely(page->index > last))
 658                        break;
 659
 660                if (mapping->host) {
 661                        lock_page(page);
 662                        if (!page_has_buffers(page))
 663                                create_empty_buffers(page,
 664                                                     1 << inode->i_blkbits, 0);
 665                        unlock_page(page);
 666                }
 667
 668                bh = head = page_buffers(page);
 669                do {
 670                        if (!buffer_dirty(bh))
 671                                continue;
 672                        get_bh(bh);
 673                        list_add_tail(&bh->b_assoc_buffers, listp);
 674                        ndirties++;
 675                        if (unlikely(ndirties >= nlimit)) {
 676                                pagevec_release(&pvec);
 677                                cond_resched();
 678                                return ndirties;
 679                        }
 680                } while (bh = bh->b_this_page, bh != head);
 681        }
 682        pagevec_release(&pvec);
 683        cond_resched();
 684        goto repeat;
 685}
 686
 687static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
 688                                            struct list_head *listp)
 689{
 690        struct nilfs_inode_info *ii = NILFS_I(inode);
 691        struct address_space *mapping = &ii->i_btnode_cache;
 692        struct pagevec pvec;
 693        struct buffer_head *bh, *head;
 694        unsigned int i;
 695        pgoff_t index = 0;
 696
 697        pagevec_init(&pvec, 0);
 698
 699        while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
 700                                  PAGEVEC_SIZE)) {
 701                for (i = 0; i < pagevec_count(&pvec); i++) {
 702                        bh = head = page_buffers(pvec.pages[i]);
 703                        do {
 704                                if (buffer_dirty(bh)) {
 705                                        get_bh(bh);
 706                                        list_add_tail(&bh->b_assoc_buffers,
 707                                                      listp);
 708                                }
 709                                bh = bh->b_this_page;
 710                        } while (bh != head);
 711                }
 712                pagevec_release(&pvec);
 713                cond_resched();
 714        }
 715}
 716
 717static void nilfs_dispose_list(struct nilfs_sb_info *sbi,
 718                               struct list_head *head, int force)
 719{
 720        struct nilfs_inode_info *ii, *n;
 721        struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
 722        unsigned nv = 0;
 723
 724        while (!list_empty(head)) {
 725                spin_lock(&sbi->s_inode_lock);
 726                list_for_each_entry_safe(ii, n, head, i_dirty) {
 727                        list_del_init(&ii->i_dirty);
 728                        if (force) {
 729                                if (unlikely(ii->i_bh)) {
 730                                        brelse(ii->i_bh);
 731                                        ii->i_bh = NULL;
 732                                }
 733                        } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
 734                                set_bit(NILFS_I_QUEUED, &ii->i_state);
 735                                list_add_tail(&ii->i_dirty,
 736                                              &sbi->s_dirty_files);
 737                                continue;
 738                        }
 739                        ivec[nv++] = ii;
 740                        if (nv == SC_N_INODEVEC)
 741                                break;
 742                }
 743                spin_unlock(&sbi->s_inode_lock);
 744
 745                for (pii = ivec; nv > 0; pii++, nv--)
 746                        iput(&(*pii)->vfs_inode);
 747        }
 748}
 749
 750static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
 751                                     struct nilfs_root *root)
 752{
 753        int ret = 0;
 754
 755        if (nilfs_mdt_fetch_dirty(root->ifile))
 756                ret++;
 757        if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
 758                ret++;
 759        if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
 760                ret++;
 761        if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
 762                ret++;
 763        return ret;
 764}
 765
 766static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
 767{
 768        return list_empty(&sci->sc_dirty_files) &&
 769                !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
 770                sci->sc_nfreesegs == 0 &&
 771                (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
 772}
 773
 774static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
 775{
 776        struct nilfs_sb_info *sbi = sci->sc_sbi;
 777        int ret = 0;
 778
 779        if (nilfs_test_metadata_dirty(sbi->s_nilfs, sci->sc_root))
 780                set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
 781
 782        spin_lock(&sbi->s_inode_lock);
 783        if (list_empty(&sbi->s_dirty_files) && nilfs_segctor_clean(sci))
 784                ret++;
 785
 786        spin_unlock(&sbi->s_inode_lock);
 787        return ret;
 788}
 789
 790static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
 791{
 792        struct nilfs_sb_info *sbi = sci->sc_sbi;
 793        struct the_nilfs *nilfs = sbi->s_nilfs;
 794
 795        nilfs_mdt_clear_dirty(sci->sc_root->ifile);
 796        nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
 797        nilfs_mdt_clear_dirty(nilfs->ns_sufile);
 798        nilfs_mdt_clear_dirty(nilfs->ns_dat);
 799}
 800
 801static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
 802{
 803        struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
 804        struct buffer_head *bh_cp;
 805        struct nilfs_checkpoint *raw_cp;
 806        int err;
 807
 808        /* XXX: this interface will be changed */
 809        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
 810                                          &raw_cp, &bh_cp);
 811        if (likely(!err)) {
  812                /* The following code duplicates part of cpfile, but it is
  813                   needed to collect the checkpoint even if it was not newly
  814                   created */
 815                nilfs_mdt_mark_buffer_dirty(bh_cp);
 816                nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
 817                nilfs_cpfile_put_checkpoint(
 818                        nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
 819        } else
 820                WARN_ON(err == -EINVAL || err == -ENOENT);
 821
 822        return err;
 823}
 824
 825static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
 826{
 827        struct nilfs_sb_info *sbi = sci->sc_sbi;
 828        struct the_nilfs *nilfs = sbi->s_nilfs;
 829        struct buffer_head *bh_cp;
 830        struct nilfs_checkpoint *raw_cp;
 831        int err;
 832
 833        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
 834                                          &raw_cp, &bh_cp);
 835        if (unlikely(err)) {
 836                WARN_ON(err == -EINVAL || err == -ENOENT);
 837                goto failed_ibh;
 838        }
 839        raw_cp->cp_snapshot_list.ssl_next = 0;
 840        raw_cp->cp_snapshot_list.ssl_prev = 0;
 841        raw_cp->cp_inodes_count =
 842                cpu_to_le64(atomic_read(&sci->sc_root->inodes_count));
 843        raw_cp->cp_blocks_count =
 844                cpu_to_le64(atomic_read(&sci->sc_root->blocks_count));
 845        raw_cp->cp_nblk_inc =
 846                cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
 847        raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
 848        raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);
 849
 850        if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
 851                nilfs_checkpoint_clear_minor(raw_cp);
 852        else
 853                nilfs_checkpoint_set_minor(raw_cp);
 854
 855        nilfs_write_inode_common(sci->sc_root->ifile,
 856                                 &raw_cp->cp_ifile_inode, 1);
 857        nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
 858        return 0;
 859
 860 failed_ibh:
 861        return err;
 862}
 863
 864static void nilfs_fill_in_file_bmap(struct inode *ifile,
 865                                    struct nilfs_inode_info *ii)
 866
 867{
 868        struct buffer_head *ibh;
 869        struct nilfs_inode *raw_inode;
 870
 871        if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
 872                ibh = ii->i_bh;
 873                BUG_ON(!ibh);
 874                raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
 875                                                  ibh);
 876                nilfs_bmap_write(ii->i_bmap, raw_inode);
 877                nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
 878        }
 879}
 880
 881static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
 882{
 883        struct nilfs_inode_info *ii;
 884
 885        list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
 886                nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
 887                set_bit(NILFS_I_COLLECTED, &ii->i_state);
 888        }
 889}
 890
 891static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
 892                                             struct the_nilfs *nilfs)
 893{
 894        struct buffer_head *bh_sr;
 895        struct nilfs_super_root *raw_sr;
 896        unsigned isz = nilfs->ns_inode_size;
 897
 898        bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
 899        raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
 900
 901        raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES);
 902        raw_sr->sr_nongc_ctime
 903                = cpu_to_le64(nilfs_doing_gc() ?
 904                              nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
 905        raw_sr->sr_flags = 0;
 906
 907        nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
 908                                 NILFS_SR_DAT_OFFSET(isz), 1);
 909        nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
 910                                 NILFS_SR_CPFILE_OFFSET(isz), 1);
 911        nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
 912                                 NILFS_SR_SUFILE_OFFSET(isz), 1);
 913}
 914
 915static void nilfs_redirty_inodes(struct list_head *head)
 916{
 917        struct nilfs_inode_info *ii;
 918
 919        list_for_each_entry(ii, head, i_dirty) {
 920                if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
 921                        clear_bit(NILFS_I_COLLECTED, &ii->i_state);
 922        }
 923}
 924
 925static void nilfs_drop_collected_inodes(struct list_head *head)
 926{
 927        struct nilfs_inode_info *ii;
 928
 929        list_for_each_entry(ii, head, i_dirty) {
 930                if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
 931                        continue;
 932
 933                clear_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
 934                set_bit(NILFS_I_UPDATED, &ii->i_state);
 935        }
 936}
 937
 938static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
 939                                       struct inode *inode,
 940                                       struct list_head *listp,
 941                                       int (*collect)(struct nilfs_sc_info *,
 942                                                      struct buffer_head *,
 943                                                      struct inode *))
 944{
 945        struct buffer_head *bh, *n;
 946        int err = 0;
 947
 948        if (collect) {
 949                list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
 950                        list_del_init(&bh->b_assoc_buffers);
 951                        err = collect(sci, bh, inode);
 952                        brelse(bh);
 953                        if (unlikely(err))
 954                                goto dispose_buffers;
 955                }
 956                return 0;
 957        }
 958
 959 dispose_buffers:
 960        while (!list_empty(listp)) {
 961                bh = list_entry(listp->next, struct buffer_head,
 962                                b_assoc_buffers);
 963                list_del_init(&bh->b_assoc_buffers);
 964                brelse(bh);
 965        }
 966        return err;
 967}
 968
 969static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
 970{
 971        /* Remaining number of blocks within segment buffer */
 972        return sci->sc_segbuf_nblocks -
 973                (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
 974}
 975
 976static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
 977                                   struct inode *inode,
 978                                   struct nilfs_sc_operations *sc_ops)
 979{
 980        LIST_HEAD(data_buffers);
 981        LIST_HEAD(node_buffers);
 982        int err;
 983
 984        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
 985                size_t n, rest = nilfs_segctor_buffer_rest(sci);
 986
 987                n = nilfs_lookup_dirty_data_buffers(
 988                        inode, &data_buffers, rest + 1, 0, LLONG_MAX);
 989                if (n > rest) {
 990                        err = nilfs_segctor_apply_buffers(
 991                                sci, inode, &data_buffers,
 992                                sc_ops->collect_data);
  993                        BUG_ON(!err); /* always receive -E2BIG or a true error */
 994                        goto break_or_fail;
 995                }
 996        }
 997        nilfs_lookup_dirty_node_buffers(inode, &node_buffers);
 998
 999        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1000                err = nilfs_segctor_apply_buffers(
1001                        sci, inode, &data_buffers, sc_ops->collect_data);
1002                if (unlikely(err)) {
1003                        /* dispose node list */
1004                        nilfs_segctor_apply_buffers(
1005                                sci, inode, &node_buffers, NULL);
1006                        goto break_or_fail;
1007                }
1008                sci->sc_stage.flags |= NILFS_CF_NODE;
1009        }
1010        /* Collect node */
1011        err = nilfs_segctor_apply_buffers(
1012                sci, inode, &node_buffers, sc_ops->collect_node);
1013        if (unlikely(err))
1014                goto break_or_fail;
1015
1016        nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
1017        err = nilfs_segctor_apply_buffers(
1018                sci, inode, &node_buffers, sc_ops->collect_bmap);
1019        if (unlikely(err))
1020                goto break_or_fail;
1021
1022        nilfs_segctor_end_finfo(sci, inode);
1023        sci->sc_stage.flags &= ~NILFS_CF_NODE;
1024
1025 break_or_fail:
1026        return err;
1027}
1028
1029static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
1030                                         struct inode *inode)
1031{
1032        LIST_HEAD(data_buffers);
1033        size_t n, rest = nilfs_segctor_buffer_rest(sci);
1034        int err;
1035
1036        n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
1037                                            sci->sc_dsync_start,
1038                                            sci->sc_dsync_end);
1039
1040        err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
1041                                          nilfs_collect_file_data);
1042        if (!err) {
1043                nilfs_segctor_end_finfo(sci, inode);
1044                BUG_ON(n > rest);
 1045                /* always receive -E2BIG or a true error if n > rest */
1046        }
1047        return err;
1048}
1049
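/*
 * Editorial summary (not part of the original source): this routine steps
 * through the collection stages in order (INIT, GC, FILE, IFILE, CPFILE,
 * SUFILE, DAT, SR, DONE), falling through the switch cases; SC_LSEG_DSYNC
 * jumps from INIT straight to the DSYNC stage and SC_FLUSH_DAT to the DAT
 * stage.  The position is kept in sci->sc_stage so an interrupted pass can
 * resume, and -E2BIG is propagated when the current segment buffer fills.
 */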
1050static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
1051{
1052        struct nilfs_sb_info *sbi = sci->sc_sbi;
1053        struct the_nilfs *nilfs = sbi->s_nilfs;
1054        struct list_head *head;
1055        struct nilfs_inode_info *ii;
1056        size_t ndone;
1057        int err = 0;
1058
1059        switch (sci->sc_stage.scnt) {
1060        case NILFS_ST_INIT:
1061                /* Pre-processes */
1062                sci->sc_stage.flags = 0;
1063
1064                if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
1065                        sci->sc_nblk_inc = 0;
1066                        sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
1067                        if (mode == SC_LSEG_DSYNC) {
1068                                sci->sc_stage.scnt = NILFS_ST_DSYNC;
1069                                goto dsync_mode;
1070                        }
1071                }
1072
1073                sci->sc_stage.dirty_file_ptr = NULL;
1074                sci->sc_stage.gc_inode_ptr = NULL;
1075                if (mode == SC_FLUSH_DAT) {
1076                        sci->sc_stage.scnt = NILFS_ST_DAT;
1077                        goto dat_stage;
1078                }
1079                sci->sc_stage.scnt++;  /* Fall through */
1080        case NILFS_ST_GC:
1081                if (nilfs_doing_gc()) {
1082                        head = &sci->sc_gc_inodes;
1083                        ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
1084                                                head, i_dirty);
1085                        list_for_each_entry_continue(ii, head, i_dirty) {
1086                                err = nilfs_segctor_scan_file(
1087                                        sci, &ii->vfs_inode,
1088                                        &nilfs_sc_file_ops);
1089                                if (unlikely(err)) {
1090                                        sci->sc_stage.gc_inode_ptr = list_entry(
1091                                                ii->i_dirty.prev,
1092                                                struct nilfs_inode_info,
1093                                                i_dirty);
1094                                        goto break_or_fail;
1095                                }
1096                                set_bit(NILFS_I_COLLECTED, &ii->i_state);
1097                        }
1098                        sci->sc_stage.gc_inode_ptr = NULL;
1099                }
1100                sci->sc_stage.scnt++;  /* Fall through */
1101        case NILFS_ST_FILE:
1102                head = &sci->sc_dirty_files;
1103                ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
1104                                        i_dirty);
1105                list_for_each_entry_continue(ii, head, i_dirty) {
1106                        clear_bit(NILFS_I_DIRTY, &ii->i_state);
1107
1108                        err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
1109                                                      &nilfs_sc_file_ops);
1110                        if (unlikely(err)) {
1111                                sci->sc_stage.dirty_file_ptr =
1112                                        list_entry(ii->i_dirty.prev,
1113                                                   struct nilfs_inode_info,
1114                                                   i_dirty);
1115                                goto break_or_fail;
1116                        }
1117                        /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
1118                        /* XXX: required ? */
1119                }
1120                sci->sc_stage.dirty_file_ptr = NULL;
1121                if (mode == SC_FLUSH_FILE) {
1122                        sci->sc_stage.scnt = NILFS_ST_DONE;
1123                        return 0;
1124                }
1125                sci->sc_stage.scnt++;
1126                sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
1127                /* Fall through */
1128        case NILFS_ST_IFILE:
1129                err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
1130                                              &nilfs_sc_file_ops);
1131                if (unlikely(err))
1132                        break;
1133                sci->sc_stage.scnt++;
1134                /* Creating a checkpoint */
1135                err = nilfs_segctor_create_checkpoint(sci);
1136                if (unlikely(err))
1137                        break;
1138                /* Fall through */
1139        case NILFS_ST_CPFILE:
1140                err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
1141                                              &nilfs_sc_file_ops);
1142                if (unlikely(err))
1143                        break;
1144                sci->sc_stage.scnt++;  /* Fall through */
1145        case NILFS_ST_SUFILE:
1146                err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
1147                                         sci->sc_nfreesegs, &ndone);
1148                if (unlikely(err)) {
1149                        nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1150                                                  sci->sc_freesegs, ndone,
1151                                                  NULL);
1152                        break;
1153                }
1154                sci->sc_stage.flags |= NILFS_CF_SUFREED;
1155
1156                err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
1157                                              &nilfs_sc_file_ops);
1158                if (unlikely(err))
1159                        break;
1160                sci->sc_stage.scnt++;  /* Fall through */
1161        case NILFS_ST_DAT:
1162 dat_stage:
1163                err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
1164                                              &nilfs_sc_dat_ops);
1165                if (unlikely(err))
1166                        break;
1167                if (mode == SC_FLUSH_DAT) {
1168                        sci->sc_stage.scnt = NILFS_ST_DONE;
1169                        return 0;
1170                }
1171                sci->sc_stage.scnt++;  /* Fall through */
1172        case NILFS_ST_SR:
1173                if (mode == SC_LSEG_SR) {
1174                        /* Appending a super root */
1175                        err = nilfs_segctor_add_super_root(sci);
1176                        if (unlikely(err))
1177                                break;
1178                }
1179                /* End of a logical segment */
1180                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1181                sci->sc_stage.scnt = NILFS_ST_DONE;
1182                return 0;
1183        case NILFS_ST_DSYNC:
1184 dsync_mode:
1185                sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
1186                ii = sci->sc_dsync_inode;
1187                if (!test_bit(NILFS_I_BUSY, &ii->i_state))
1188                        break;
1189
1190                err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1191                if (unlikely(err))
1192                        break;
1193                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1194                sci->sc_stage.scnt = NILFS_ST_DONE;
1195                return 0;
1196        case NILFS_ST_DONE:
1197                return 0;
1198        default:
1199                BUG();
1200        }
1201
1202 break_or_fail:
1203        return err;
1204}
1205
1206/**
1207 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
1208 * @sci: nilfs_sc_info
1209 * @nilfs: nilfs object
1210 */
1211static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
1212                                            struct the_nilfs *nilfs)
1213{
1214        struct nilfs_segment_buffer *segbuf, *prev;
1215        __u64 nextnum;
1216        int err, alloc = 0;
1217
1218        segbuf = nilfs_segbuf_new(sci->sc_super);
1219        if (unlikely(!segbuf))
1220                return -ENOMEM;
1221
1222        if (list_empty(&sci->sc_write_logs)) {
1223                nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
1224                                 nilfs->ns_pseg_offset, nilfs);
1225                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1226                        nilfs_shift_to_next_segment(nilfs);
1227                        nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
1228                }
1229
1230                segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
1231                nextnum = nilfs->ns_nextnum;
1232
1233                if (nilfs->ns_segnum == nilfs->ns_nextnum)
1234                        /* Start from the head of a new full segment */
1235                        alloc++;
1236        } else {
1237                /* Continue logs */
1238                prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1239                nilfs_segbuf_map_cont(segbuf, prev);
1240                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
1241                nextnum = prev->sb_nextnum;
1242
1243                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1244                        nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1245                        segbuf->sb_sum.seg_seq++;
1246                        alloc++;
1247                }
1248        }
1249
1250        err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
1251        if (err)
1252                goto failed;
1253
1254        if (alloc) {
1255                err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
1256                if (err)
1257                        goto failed;
1258        }
1259        nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
1260
1261        BUG_ON(!list_empty(&sci->sc_segbufs));
1262        list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
1263        sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
1264        return 0;
1265
1266 failed:
1267        nilfs_segbuf_free(segbuf);
1268        return err;
1269}
1270
1271static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
1272                                         struct the_nilfs *nilfs, int nadd)
1273{
1274        struct nilfs_segment_buffer *segbuf, *prev;
1275        struct inode *sufile = nilfs->ns_sufile;
1276        __u64 nextnextnum;
1277        LIST_HEAD(list);
1278        int err, ret, i;
1279
1280        prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
1281        /*
1282         * Since the segment specified with nextnum might be allocated during
1283         * the previous construction, the buffer including its segusage may
1284         * not be dirty.  The following call ensures that the buffer is dirty
1285         * and will pin the buffer on memory until the sufile is written.
1286         */
1287        err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
1288        if (unlikely(err))
1289                return err;
1290
1291        for (i = 0; i < nadd; i++) {
1292                /* extend segment info */
1293                err = -ENOMEM;
1294                segbuf = nilfs_segbuf_new(sci->sc_super);
1295                if (unlikely(!segbuf))
1296                        goto failed;
1297
1298                /* map this buffer to region of segment on-disk */
1299                nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1300                sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1301
1302                /* allocate the next next full segment */
1303                err = nilfs_sufile_alloc(sufile, &nextnextnum);
1304                if (unlikely(err))
1305                        goto failed_segbuf;
1306
1307                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
1308                nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
1309
1310                list_add_tail(&segbuf->sb_list, &list);
1311                prev = segbuf;
1312        }
1313        list_splice_tail(&list, &sci->sc_segbufs);
1314        return 0;
1315
1316 failed_segbuf:
1317        nilfs_segbuf_free(segbuf);
1318 failed:
1319        list_for_each_entry(segbuf, &list, sb_list) {
1320                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1321                WARN_ON(ret); /* never fails */
1322        }
1323        nilfs_destroy_logs(&list);
1324        return err;
1325}
1326
1327static void nilfs_free_incomplete_logs(struct list_head *logs,
1328                                       struct the_nilfs *nilfs)
1329{
1330        struct nilfs_segment_buffer *segbuf, *prev;
1331        struct inode *sufile = nilfs->ns_sufile;
1332        int ret;
1333
1334        segbuf = NILFS_FIRST_SEGBUF(logs);
1335        if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
1336                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1337                WARN_ON(ret); /* never fails */
1338        }
1339        if (atomic_read(&segbuf->sb_err)) {
1340                /* Case 1: The first segment failed */
1341                if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
1342                        /* Case 1a:  Partial segment appended into an existing
1343                           segment */
1344                        nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1345                                                segbuf->sb_fseg_end);
1346                else /* Case 1b:  New full segment */
1347                        set_nilfs_discontinued(nilfs);
1348        }
1349
1350        prev = segbuf;
1351        list_for_each_entry_continue(segbuf, logs, sb_list) {
1352                if (prev->sb_nextnum != segbuf->sb_nextnum) {
1353                        ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1354                        WARN_ON(ret); /* never fails */
1355                }
1356                if (atomic_read(&segbuf->sb_err) &&
1357                    segbuf->sb_segnum != nilfs->ns_nextnum)
1358                        /* Case 2: extended segment (!= next) failed */
1359                        nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1360                prev = segbuf;
1361        }
1362}
1363
1364static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1365                                          struct inode *sufile)
1366{
1367        struct nilfs_segment_buffer *segbuf;
1368        unsigned long live_blocks;
1369        int ret;
1370
1371        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1372                live_blocks = segbuf->sb_sum.nblocks +
1373                        (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
1374                ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1375                                                     live_blocks,
1376                                                     sci->sc_seg_ctime);
1377                WARN_ON(ret); /* always succeeds because the segment usage is dirty */
1378        }
1379}
1380
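/**
 * nilfs_cancel_segusage - revert segment usage of aborted logs
 * @logs: list of segment buffers of the aborted logs
 * @sufile: inode of the segment usage file
 *
 * The first segment is reset to the block count it had before the
 * partial segment was appended; the usage of every following segment
 * is cleared.
 */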
1381static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
1382{
1383        struct nilfs_segment_buffer *segbuf;
1384        int ret;
1385
1386        segbuf = NILFS_FIRST_SEGBUF(logs);
1387        ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1388                                             segbuf->sb_pseg_start -
1389                                             segbuf->sb_fseg_start, 0);
1390        WARN_ON(ret); /* always succeeds because the segment usage is dirty */
1391
1392        list_for_each_entry_continue(segbuf, logs, sb_list) {
1393                ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1394                                                     0, 0);
1395                WARN_ON(ret); /* always succeeds */
1396        }
1397}
1398
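/**
 * nilfs_segctor_truncate_segments - drop unused trailing segment buffers
 * @sci: segment constructor object
 * @last: last segment buffer actually used
 * @sufile: inode of the segment usage file
 *
 * Segment buffers following @last were prepared for the collection
 * retry loop but remained unused; their reserved next segments are
 * given back to the sufile and the buffers are removed from the list.
 */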
1399static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1400                                            struct nilfs_segment_buffer *last,
1401                                            struct inode *sufile)
1402{
1403        struct nilfs_segment_buffer *segbuf = last;
1404        int ret;
1405
1406        list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1407                sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1408                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1409                WARN_ON(ret);
1410        }
1411        nilfs_truncate_logs(&sci->sc_segbufs, last);
1412}
1413
1414
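/**
 * nilfs_segctor_collect - collect dirty blocks into segment buffers
 * @sci: segment constructor object
 * @nilfs: nilfs object
 * @mode: mode of log forming
 *
 * Dirty blocks are collected stage by stage into the current chain of
 * segment buffers.  If they do not fit (-E2BIG) while making a segment
 * with a super root, more segments are appended (their number doubles
 * on each retry, capped at SC_MAX_SEGDELTA) and the collection restarts
 * from the saved stage; otherwise the filled segments are written as
 * they are.  Surplus segment buffers are truncated on success.
 */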
1415static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1416                                 struct the_nilfs *nilfs, int mode)
1417{
1418        struct nilfs_cstage prev_stage = sci->sc_stage;
1419        int err, nadd = 1;
1420
1421        /* Collection retry loop */
1422        for (;;) {
1423                sci->sc_nblk_this_inc = 0;
1424                sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1425
1426                err = nilfs_segctor_reset_segment_buffer(sci);
1427                if (unlikely(err))
1428                        goto failed;
1429
1430                err = nilfs_segctor_collect_blocks(sci, mode);
1431                sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1432                if (!err)
1433                        break;
1434
1435                if (unlikely(err != -E2BIG))
1436                        goto failed;
1437
1438                /* The current segment is filled up */
1439                if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
1440                        break;
1441
1442                nilfs_clear_logs(&sci->sc_segbufs);
1443
1444                err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1445                if (unlikely(err))
1446                        return err;
1447
1448                if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1449                        err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1450                                                        sci->sc_freesegs,
1451                                                        sci->sc_nfreesegs,
1452                                                        NULL);
1453                        WARN_ON(err); /* should not happen */
1454                }
1455                nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1456                sci->sc_stage = prev_stage;
1457        }
1458        nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1459        return 0;
1460
1461 failed:
1462        return err;
1463}
1464
1465static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1466                                      struct buffer_head *new_bh)
1467{
1468        BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1469
1470        list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1471        /* The caller must release old_bh */
1472}
1473
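/**
 * nilfs_segctor_update_payload_blocknr - assign disk block numbers to payload blocks
 * @sci: segment constructor object
 * @segbuf: segment buffer holding the payload blocks
 * @mode: mode of log forming
 *
 * Every payload buffer gets its new on-disk block number assigned
 * through the bmap of the owning inode, and the resulting binfo is
 * written into the segment summary using the operations selected for
 * the file type (dsync, DAT or regular file).  The scan stops at the
 * super root buffer.
 */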
1474static int
1475nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1476                                     struct nilfs_segment_buffer *segbuf,
1477                                     int mode)
1478{
1479        struct inode *inode = NULL;
1480        sector_t blocknr;
1481        unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1482        unsigned long nblocks = 0, ndatablk = 0;
1483        struct nilfs_sc_operations *sc_op = NULL;
1484        struct nilfs_segsum_pointer ssp;
1485        struct nilfs_finfo *finfo = NULL;
1486        union nilfs_binfo binfo;
1487        struct buffer_head *bh, *bh_org;
1488        ino_t ino = 0;
1489        int err = 0;
1490
1491        if (!nfinfo)
1492                goto out;
1493
1494        blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1495        ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1496        ssp.offset = sizeof(struct nilfs_segment_summary);
1497
1498        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1499                if (bh == segbuf->sb_super_root)
1500                        break;
1501                if (!finfo) {
1502                        finfo = nilfs_segctor_map_segsum_entry(
1503                                sci, &ssp, sizeof(*finfo));
1504                        ino = le64_to_cpu(finfo->fi_ino);
1505                        nblocks = le32_to_cpu(finfo->fi_nblocks);
1506                        ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1507
1508                        if (buffer_nilfs_node(bh))
1509                                inode = NILFS_BTNC_I(bh->b_page->mapping);
1510                        else
1511                                inode = NILFS_AS_I(bh->b_page->mapping);
1512
1513                        if (mode == SC_LSEG_DSYNC)
1514                                sc_op = &nilfs_sc_dsync_ops;
1515                        else if (ino == NILFS_DAT_INO)
1516                                sc_op = &nilfs_sc_dat_ops;
1517                        else /* file blocks */
1518                                sc_op = &nilfs_sc_file_ops;
1519                }
1520                bh_org = bh;
1521                get_bh(bh_org);
1522                err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1523                                        &binfo);
1524                if (bh != bh_org)
1525                        nilfs_list_replace_buffer(bh_org, bh);
1526                brelse(bh_org);
1527                if (unlikely(err))
1528                        goto failed_bmap;
1529
1530                if (ndatablk > 0)
1531                        sc_op->write_data_binfo(sci, &ssp, &binfo);
1532                else
1533                        sc_op->write_node_binfo(sci, &ssp, &binfo);
1534
1535                blocknr++;
1536                if (--nblocks == 0) {
1537                        finfo = NULL;
1538                        if (--nfinfo == 0)
1539                                break;
1540                } else if (ndatablk > 0)
1541                        ndatablk--;
1542        }
1543 out:
1544        return 0;
1545
1546 failed_bmap:
1547        return err;
1548}
1549
1550static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1551{
1552        struct nilfs_segment_buffer *segbuf;
1553        int err;
1554
1555        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1556                err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1557                if (unlikely(err))
1558                        return err;
1559                nilfs_segbuf_fill_in_segsum(segbuf);
1560        }
1561        return 0;
1562}
1563
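/**
 * nilfs_copy_replace_page_buffers - freeze page contents for log writing
 * @page: page whose buffers are queued for writeback
 * @out: list to which the original buffer heads are moved
 *
 * A private clone page is allocated and the data of the queued buffers
 * is copied into it.  The clone buffers take the place of the originals
 * on the payload list, the original buffers are collected on @out, and
 * the clone page is put under writeback so that the original page can
 * be modified while the log is written.
 */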
1564static int
1565nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
1566{
1567        struct page *clone_page;
1568        struct buffer_head *bh, *head, *bh2;
1569        void *kaddr;
1570
1571        bh = head = page_buffers(page);
1572
1573        clone_page = nilfs_alloc_private_page(bh->b_bdev, bh->b_size, 0);
1574        if (unlikely(!clone_page))
1575                return -ENOMEM;
1576
1577        bh2 = page_buffers(clone_page);
1578        kaddr = kmap_atomic(page, KM_USER0);
1579        do {
1580                if (list_empty(&bh->b_assoc_buffers))
1581                        continue;
1582                get_bh(bh2);
1583                page_cache_get(clone_page); /* for each bh */
1584                memcpy(bh2->b_data, kaddr + bh_offset(bh), bh2->b_size);
1585                bh2->b_blocknr = bh->b_blocknr;
1586                list_replace(&bh->b_assoc_buffers, &bh2->b_assoc_buffers);
1587                list_add_tail(&bh->b_assoc_buffers, out);
1588        } while (bh = bh->b_this_page, bh2 = bh2->b_this_page, bh != head);
1589        kunmap_atomic(kaddr, KM_USER0);
1590
1591        if (!TestSetPageWriteback(clone_page))
1592                account_page_writeback(clone_page);
1593        unlock_page(clone_page);
1594
1595        return 0;
1596}
1597
1598static int nilfs_test_page_to_be_frozen(struct page *page)
1599{
1600        struct address_space *mapping = page->mapping;
1601
1602        if (!mapping || !mapping->host || S_ISDIR(mapping->host->i_mode))
1603                return 0;
1604
1605        if (page_mapped(page)) {
1606                ClearPageChecked(page);
1607                return 1;
1608        }
1609        return PageChecked(page);
1610}
1611
1612static int nilfs_begin_page_io(struct page *page, struct list_head *out)
1613{
1614        if (!page || PageWriteback(page))
1615                /* For split b-tree node pages, this function may be called
1616                   twice or more; this check ignores the second and later calls. */
1617                return 0;
1618
1619        lock_page(page);
1620        clear_page_dirty_for_io(page);
1621        set_page_writeback(page);
1622        unlock_page(page);
1623
1624        if (nilfs_test_page_to_be_frozen(page)) {
1625                int err = nilfs_copy_replace_page_buffers(page, out);
1626                if (unlikely(err))
1627                        return err;
1628        }
1629        return 0;
1630}
1631
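/**
 * nilfs_segctor_prepare_write - put pages of the logs under writeback
 * @sci: segment constructor object
 * @failed_page: place to return the page that could not be prepared
 *
 * The pages holding segment summary blocks and payload blocks of all
 * segment buffers get their dirty flag cleared and are marked as being
 * under writeback.  File pages that may still be modified are frozen by
 * copying their buffers beforehand.
 */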
1632static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
1633                                       struct page **failed_page)
1634{
1635        struct nilfs_segment_buffer *segbuf;
1636        struct page *bd_page = NULL, *fs_page = NULL;
1637        struct list_head *list = &sci->sc_copied_buffers;
1638        int err;
1639
1640        *failed_page = NULL;
1641        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1642                struct buffer_head *bh;
1643
1644                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1645                                    b_assoc_buffers) {
1646                        if (bh->b_page != bd_page) {
1647                                if (bd_page) {
1648                                        lock_page(bd_page);
1649                                        clear_page_dirty_for_io(bd_page);
1650                                        set_page_writeback(bd_page);
1651                                        unlock_page(bd_page);
1652                                }
1653                                bd_page = bh->b_page;
1654                        }
1655                }
1656
1657                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1658                                    b_assoc_buffers) {
1659                        if (bh == segbuf->sb_super_root) {
1660                                if (bh->b_page != bd_page) {
1661                                        lock_page(bd_page);
1662                                        clear_page_dirty_for_io(bd_page);
1663                                        set_page_writeback(bd_page);
1664                                        unlock_page(bd_page);
1665                                        bd_page = bh->b_page;
1666                                }
1667                                break;
1668                        }
1669                        if (bh->b_page != fs_page) {
1670                                err = nilfs_begin_page_io(fs_page, list);
1671                                if (unlikely(err)) {
1672                                        *failed_page = fs_page;
1673                                        goto out;
1674                                }
1675                                fs_page = bh->b_page;
1676                        }
1677                }
1678        }
1679        if (bd_page) {
1680                lock_page(bd_page);
1681                clear_page_dirty_for_io(bd_page);
1682                set_page_writeback(bd_page);
1683                unlock_page(bd_page);
1684        }
1685        err = nilfs_begin_page_io(fs_page, list);
1686        if (unlikely(err))
1687                *failed_page = fs_page;
1688 out:
1689        return err;
1690}
1691
1692static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1693                               struct the_nilfs *nilfs)
1694{
1695        int ret;
1696
1697        ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1698        list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1699        return ret;
1700}
1701
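/**
 * __nilfs_end_page_io - finish writeback of a page written into a log
 * @page: page to complete
 * @err: 0 on success, otherwise the error of the log write
 *
 * On success the page stays clean unless some of its buffers are still
 * dirty; on error it is redirtied and flagged with an error.  For pages
 * backed by privately allocated (copied) buffers, only the writeback
 * accounting is adjusted instead of ending page writeback.
 */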
1702static void __nilfs_end_page_io(struct page *page, int err)
1703{
1704        if (!err) {
1705                if (!nilfs_page_buffers_clean(page))
1706                        __set_page_dirty_nobuffers(page);
1707                ClearPageError(page);
1708        } else {
1709                __set_page_dirty_nobuffers(page);
1710                SetPageError(page);
1711        }
1712
1713        if (buffer_nilfs_allocated(page_buffers(page))) {
1714                if (TestClearPageWriteback(page))
1715                        dec_zone_page_state(page, NR_WRITEBACK);
1716        } else
1717                end_page_writeback(page);
1718}
1719
1720static void nilfs_end_page_io(struct page *page, int err)
1721{
1722        if (!page)
1723                return;
1724
1725        if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1726                /*
1727                 * For b-tree node pages, this function may be called twice
1728                 * or more because they might be split in a segment.
1729                 */
1730                if (PageDirty(page)) {
1731                        /*
1732                         * For pages holding split b-tree node buffers, the
1733                         * dirty flag may be cleared on individual buffers at
1734                         * different times.  In that case, the page is redirtied
1735                         * once for the remaining buffers, and that redirtying
1736                         * must be cancelled if all the buffers become clean later.
1737                         */
1738                        lock_page(page);
1739                        if (nilfs_page_buffers_clean(page))
1740                                __nilfs_clear_page_dirty(page);
1741                        unlock_page(page);
1742                }
1743                return;
1744        }
1745
1746        __nilfs_end_page_io(page, err);
1747}
1748
1749static void nilfs_clear_copied_buffers(struct list_head *list, int err)
1750{
1751        struct buffer_head *bh, *head;
1752        struct page *page;
1753
1754        while (!list_empty(list)) {
1755                bh = list_entry(list->next, struct buffer_head,
1756                                b_assoc_buffers);
1757                page = bh->b_page;
1758                page_cache_get(page);
1759                head = bh = page_buffers(page);
1760                do {
1761                        if (!list_empty(&bh->b_assoc_buffers)) {
1762                                list_del_init(&bh->b_assoc_buffers);
1763                                if (!err) {
1764                                        set_buffer_uptodate(bh);
1765                                        clear_buffer_dirty(bh);
1766                                        clear_buffer_delay(bh);
1767                                        clear_buffer_nilfs_volatile(bh);
1768                                }
1769                                brelse(bh); /* for b_assoc_buffers */
1770                        }
1771                } while ((bh = bh->b_this_page) != head);
1772
1773                __nilfs_end_page_io(page, err);
1774                page_cache_release(page);
1775        }
1776}
1777
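/**
 * nilfs_abort_logs - end writeback on pages of logs that were not written
 * @logs: list of segment buffers of the failed logs
 * @failed_page: page on which preparation failed, or NULL
 * @err: error code propagated to the file pages
 *
 * Writeback is terminated on the block device pages and the file pages
 * belonging to the given logs; the scan stops once @failed_page has
 * been handled.
 */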
1778static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
1779                             int err)
1780{
1781        struct nilfs_segment_buffer *segbuf;
1782        struct page *bd_page = NULL, *fs_page = NULL;
1783        struct buffer_head *bh;
1784
1785        if (list_empty(logs))
1786                return;
1787
1788        list_for_each_entry(segbuf, logs, sb_list) {
1789                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1790                                    b_assoc_buffers) {
1791                        if (bh->b_page != bd_page) {
1792                                if (bd_page)
1793                                        end_page_writeback(bd_page);
1794                                bd_page = bh->b_page;
1795                        }
1796                }
1797
1798                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1799                                    b_assoc_buffers) {
1800                        if (bh == segbuf->sb_super_root) {
1801                                if (bh->b_page != bd_page) {
1802                                        end_page_writeback(bd_page);
1803                                        bd_page = bh->b_page;
1804                                }
1805                                break;
1806                        }
1807                        if (bh->b_page != fs_page) {
1808                                nilfs_end_page_io(fs_page, err);
1809                                if (fs_page && fs_page == failed_page)
1810                                        return;
1811                                fs_page = bh->b_page;
1812                        }
1813                }
1814        }
1815        if (bd_page)
1816                end_page_writeback(bd_page);
1817
1818        nilfs_end_page_io(fs_page, err);
1819}
1820
1821static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1822                                             struct the_nilfs *nilfs, int err)
1823{
1824        LIST_HEAD(logs);
1825        int ret;
1826
1827        list_splice_tail_init(&sci->sc_write_logs, &logs);
1828        ret = nilfs_wait_on_logs(&logs);
1829        nilfs_abort_logs(&logs, NULL, ret ? : err);
1830
1831        list_splice_tail_init(&sci->sc_segbufs, &logs);
1832        nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1833        nilfs_free_incomplete_logs(&logs, nilfs);
1834        nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);
1835
1836        if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1837                ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1838                                                sci->sc_freesegs,
1839                                                sci->sc_nfreesegs,
1840                                                NULL);
1841                WARN_ON(ret); /* should not happen */
1842        }
1843
1844        nilfs_destroy_logs(&logs);
1845}
1846
1847static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1848                                   struct nilfs_segment_buffer *segbuf)
1849{
1850        nilfs->ns_segnum = segbuf->sb_segnum;
1851        nilfs->ns_nextnum = segbuf->sb_nextnum;
1852        nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1853                + segbuf->sb_sum.nblocks;
1854        nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1855        nilfs->ns_ctime = segbuf->sb_sum.ctime;
1856}
1857
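/**
 * nilfs_segctor_complete_write - clean up after a successful log write
 * @sci: segment constructor object
 *
 * Buffer and page states of the written logs are cleared, the collected
 * inodes are dropped, and the position of the next segment (and of the
 * latest super root, if one was written) is recorded in the nilfs
 * object.
 */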
1858static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1859{
1860        struct nilfs_segment_buffer *segbuf;
1861        struct page *bd_page = NULL, *fs_page = NULL;
1862        struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
1863        int update_sr = false;
1864
1865        list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
1866                struct buffer_head *bh;
1867
1868                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1869                                    b_assoc_buffers) {
1870                        set_buffer_uptodate(bh);
1871                        clear_buffer_dirty(bh);
1872                        if (bh->b_page != bd_page) {
1873                                if (bd_page)
1874                                        end_page_writeback(bd_page);
1875                                bd_page = bh->b_page;
1876                        }
1877                }
1878                /*
1879                 * We assume that buffers belonging to the same page are
1880                 * contiguous in the buffer list.
1881                 * Under this assumption, the last BH of each page is
1882                 * identifiable by the discontinuity of bh->b_page
1883                 * (page != fs_page).
1884                 *
1885                 * For B-tree node blocks, however, this assumption is not
1886                 * guaranteed.  The cleanup code of B-tree node pages needs
1887                 * special care.
1888                 */
1889                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1890                                    b_assoc_buffers) {
1891                        set_buffer_uptodate(bh);
1892                        clear_buffer_dirty(bh);
1893                        clear_buffer_delay(bh);
1894                        clear_buffer_nilfs_volatile(bh);
1895                        clear_buffer_nilfs_redirected(bh);
1896                        if (bh == segbuf->sb_super_root) {
1897                                if (bh->b_page != bd_page) {
1898                                        end_page_writeback(bd_page);
1899                                        bd_page = bh->b_page;
1900                                }
1901                                update_sr = true;
1902                                break;
1903                        }
1904                        if (bh->b_page != fs_page) {
1905                                nilfs_end_page_io(fs_page, 0);
1906                                fs_page = bh->b_page;
1907                        }
1908                }
1909
1910                if (!nilfs_segbuf_simplex(segbuf)) {
1911                        if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1912                                set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1913                                sci->sc_lseg_stime = jiffies;
1914                        }
1915                        if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1916                                clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1917                }
1918        }
1919        /*
1920         * Since pages may continue over multiple segment buffers,
1921         * end of the last page must be checked outside of the loop.
1922         */
1923        if (bd_page)
1924                end_page_writeback(bd_page);
1925
1926        nilfs_end_page_io(fs_page, 0);
1927
1928        nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0);
1929
1930        nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1931
1932        if (nilfs_doing_gc())
1933                nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1934        else
1935                nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1936
1937        sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1938
1939        segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1940        nilfs_set_next_segment(nilfs, segbuf);
1941
1942        if (update_sr) {
1943                nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
1944                                       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
1945
1946                clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1947                clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1948                set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1949                nilfs_segctor_clear_metadata_dirty(sci);
1950        } else
1951                clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1952}
1953
1954static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1955{
1956        int ret;
1957
1958        ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1959        if (!ret) {
1960                nilfs_segctor_complete_write(sci);
1961                nilfs_destroy_logs(&sci->sc_write_logs);
1962        }
1963        return ret;
1964}
1965
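/**
 * nilfs_segctor_check_in_files - take dirty inodes into the construction
 * @sci: segment constructor object
 * @sbi: nilfs_sb_info
 *
 * Dirty inodes queued on the super block are moved onto the
 * constructor's dirty file list.  For each of them the corresponding
 * ifile inode block is read in, if not yet cached, and marked dirty.
 */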
1966static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
1967                                        struct nilfs_sb_info *sbi)
1968{
1969        struct nilfs_inode_info *ii, *n;
1970        struct inode *ifile = sci->sc_root->ifile;
1971
1972        spin_lock(&sbi->s_inode_lock);
1973 retry:
1974        list_for_each_entry_safe(ii, n, &sbi->s_dirty_files, i_dirty) {
1975                if (!ii->i_bh) {
1976                        struct buffer_head *ibh;
1977                        int err;
1978
1979                        spin_unlock(&sbi->s_inode_lock);
1980                        err = nilfs_ifile_get_inode_block(
1981                                ifile, ii->vfs_inode.i_ino, &ibh);
1982                        if (unlikely(err)) {
1983                                nilfs_warning(sbi->s_super, __func__,
1984                                              "failed to get inode block.\n");
1985                                return err;
1986                        }
1987                        nilfs_mdt_mark_buffer_dirty(ibh);
1988                        nilfs_mdt_mark_dirty(ifile);
1989                        spin_lock(&sbi->s_inode_lock);
1990                        if (likely(!ii->i_bh))
1991                                ii->i_bh = ibh;
1992                        else
1993                                brelse(ibh);
1994                        goto retry;
1995                }
1996
1997                clear_bit(NILFS_I_QUEUED, &ii->i_state);
1998                set_bit(NILFS_I_BUSY, &ii->i_state);
1999                list_del(&ii->i_dirty);
2000                list_add_tail(&ii->i_dirty, &sci->sc_dirty_files);
2001        }
2002        spin_unlock(&sbi->s_inode_lock);
2003
2004        return 0;
2005}
2006
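/**
 * nilfs_segctor_check_out_files - hand back fully written inodes
 * @sci: segment constructor object
 * @sbi: nilfs_sb_info
 *
 * Inodes that were completely written out and not re-dirtied are
 * removed from the constructor's dirty file list and moved to the
 * transaction's garbage list for later disposal.
 */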
2007static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci,
2008                                          struct nilfs_sb_info *sbi)
2009{
2010        struct nilfs_transaction_info *ti = current->journal_info;
2011        struct nilfs_inode_info *ii, *n;
2012
2013        spin_lock(&sbi->s_inode_lock);
2014        list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
2015                if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
2016                    test_bit(NILFS_I_DIRTY, &ii->i_state))
2017                        continue;
2018
2019                clear_bit(NILFS_I_BUSY, &ii->i_state);
2020                brelse(ii->i_bh);
2021                ii->i_bh = NULL;
2022                list_del(&ii->i_dirty);
2023                list_add_tail(&ii->i_dirty, &ti->ti_garbage);
2024        }
2025        spin_unlock(&sbi->s_inode_lock);
2026}
2027
2028/*
2029 * Main procedure of segment constructor
2030 */
2031static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2032{
2033        struct nilfs_sb_info *sbi = sci->sc_sbi;
2034        struct the_nilfs *nilfs = sbi->s_nilfs;
2035        struct page *failed_page;
2036        int err;
2037
2038        sci->sc_stage.scnt = NILFS_ST_INIT;
2039        sci->sc_cno = nilfs->ns_cno;
2040
2041        err = nilfs_segctor_check_in_files(sci, sbi);
2042        if (unlikely(err))
2043                goto out;
2044
2045        if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
2046                set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2047
2048        if (nilfs_segctor_clean(sci))
2049                goto out;
2050
2051        do {
2052                sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2053
2054                err = nilfs_segctor_begin_construction(sci, nilfs);
2055                if (unlikely(err))
2056                        goto out;
2057
2058                /* Update time stamp */
2059                sci->sc_seg_ctime = get_seconds();
2060
2061                err = nilfs_segctor_collect(sci, nilfs, mode);
2062                if (unlikely(err))
2063                        goto failed;
2064
2065                /* Avoid empty segment */
2066                if (sci->sc_stage.scnt == NILFS_ST_DONE &&
2067                    nilfs_segbuf_empty(sci->sc_curseg)) {
2068                        nilfs_segctor_abort_construction(sci, nilfs, 1);
2069                        goto out;
2070                }
2071
2072                err = nilfs_segctor_assign(sci, mode);
2073                if (unlikely(err))
2074                        goto failed;
2075
2076                if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2077                        nilfs_segctor_fill_in_file_bmap(sci);
2078
2079                if (mode == SC_LSEG_SR &&
2080                    sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
2081                        err = nilfs_segctor_fill_in_checkpoint(sci);
2082                        if (unlikely(err))
2083                                goto failed_to_write;
2084
2085                        nilfs_segctor_fill_in_super_root(sci, nilfs);
2086                }
2087                nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2088
2089                /* Write partial segments */
2090                err = nilfs_segctor_prepare_write(sci, &failed_page);
2091                if (err) {
2092                        nilfs_abort_logs(&sci->sc_segbufs, failed_page, err);
2093                        goto failed_to_write;
2094                }
2095
2096                nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2097                                            nilfs->ns_crc_seed);
2098
2099                err = nilfs_segctor_write(sci, nilfs);
2100                if (unlikely(err))
2101                        goto failed_to_write;
2102
2103                if (sci->sc_stage.scnt == NILFS_ST_DONE ||
2104                    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
2105                        /*
2106                         * At this point, we avoid double buffering
2107                         * for blocksize < pagesize because the page dirty
2108                         * flag is turned off during writes and dirty
2109                         * buffers are not properly collected for
2110                         * pages crossing over segments.
2111                         */
2112                        err = nilfs_segctor_wait(sci);
2113                        if (err)
2114                                goto failed_to_write;
2115                }
2116        } while (sci->sc_stage.scnt != NILFS_ST_DONE);
2117
2118 out:
2119        nilfs_segctor_check_out_files(sci, sbi);
2120        return err;
2121
2122 failed_to_write:
2123        if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2124                nilfs_redirty_inodes(&sci->sc_dirty_files);
2125
2126 failed:
2127        if (nilfs_doing_gc())
2128                nilfs_redirty_inodes(&sci->sc_gc_inodes);
2129        nilfs_segctor_abort_construction(sci, nilfs, err);
2130        goto out;
2131}
2132
2133/**
2134 * nilfs_segctor_start_timer - set timer of background write
2135 * @sci: nilfs_sc_info
2136 *
2137 * If the timer has already been set, it ignores the new request.
2138 * This function MUST be called within a section locking the segment
2139 * semaphore.
2140 */
2141static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2142{
2143        spin_lock(&sci->sc_state_lock);
2144        if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2145                sci->sc_timer.expires = jiffies + sci->sc_interval;
2146                add_timer(&sci->sc_timer);
2147                sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2148        }
2149        spin_unlock(&sci->sc_state_lock);
2150}
2151
2152static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2153{
2154        spin_lock(&sci->sc_state_lock);
2155        if (!(sci->sc_flush_request & (1 << bn))) {
2156                unsigned long prev_req = sci->sc_flush_request;
2157
2158                sci->sc_flush_request |= (1 << bn);
2159                if (!prev_req)
2160                        wake_up(&sci->sc_wait_daemon);
2161        }
2162        spin_unlock(&sci->sc_state_lock);
2163}
2164
2165/**
2166 * nilfs_flush_segment - trigger a segment construction for resource control
2167 * @sb: super block
2168 * @ino: inode number of the file to be flushed out.
2169 */
2170void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2171{
2172        struct nilfs_sb_info *sbi = NILFS_SB(sb);
2173        struct nilfs_sc_info *sci = NILFS_SC(sbi);
2174
2175        if (!sci || nilfs_doing_construction())
2176                return;
2177        nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2178                                        /* assign bit 0 to data files */
2179}
2180
2181struct nilfs_segctor_wait_request {
2182        wait_queue_t    wq;
2183        __u32           seq;
2184        int             err;
2185        atomic_t        done;
2186};
2187
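/**
 * nilfs_segctor_sync - queue a log-write request and wait for completion
 * @sci: segment constructor object
 *
 * A new request sequence number is taken, the segctord thread is woken
 * up, and the caller sleeps interruptibly until the request completes
 * or a signal is received.
 */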
2188static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2189{
2190        struct nilfs_segctor_wait_request wait_req;
2191        int err = 0;
2192
2193        spin_lock(&sci->sc_state_lock);
2194        init_wait(&wait_req.wq);
2195        wait_req.err = 0;
2196        atomic_set(&wait_req.done, 0);
2197        wait_req.seq = ++sci->sc_seq_request;
2198        spin_unlock(&sci->sc_state_lock);
2199
2200        init_waitqueue_entry(&wait_req.wq, current);
2201        add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2202        set_current_state(TASK_INTERRUPTIBLE);
2203        wake_up(&sci->sc_wait_daemon);
2204
2205        for (;;) {
2206                if (atomic_read(&wait_req.done)) {
2207                        err = wait_req.err;
2208                        break;
2209                }
2210                if (!signal_pending(current)) {
2211                        schedule();
2212                        continue;
2213                }
2214                err = -ERESTARTSYS;
2215                break;
2216        }
2217        finish_wait(&sci->sc_wait_request, &wait_req.wq);
2218        return err;
2219}
2220
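/**
 * nilfs_segctor_wakeup - complete waiting log-write requests
 * @sci: segment constructor object
 * @err: result code passed to the waiters
 *
 * Every queued request whose sequence number has been reached by
 * sc_seq_done is marked done with @err and its waiter is woken up.
 */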
2221static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2222{
2223        struct nilfs_segctor_wait_request *wrq, *n;
2224        unsigned long flags;
2225
2226        spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2227        list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
2228                                 wq.task_list) {
2229                if (!atomic_read(&wrq->done) &&
2230                    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2231                        wrq->err = err;
2232                        atomic_set(&wrq->done, 1);
2233                }
2234                if (atomic_read(&wrq->done)) {
2235                        wrq->wq.func(&wrq->wq,
2236                                     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2237                                     0, NULL);
2238                }
2239        }
2240        spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2241}
2242
2243/**
2244 * nilfs_construct_segment - construct a logical segment
2245 * @sb: super block
2246 *
2247 * Return Value: On success, 0 is returned. On errors, one of the following
2248 * negative error codes is returned.
2249 *
2250 * %-EROFS - Read only filesystem.
2251 *
2252 * %-EIO - I/O error
2253 *
2254 * %-ENOSPC - No space left on device (only in a panic state).
2255 *
2256 * %-ERESTARTSYS - Interrupted.
2257 *
2258 * %-ENOMEM - Insufficient memory available.
2259 */
2260int nilfs_construct_segment(struct super_block *sb)
2261{
2262        struct nilfs_sb_info *sbi = NILFS_SB(sb);
2263        struct nilfs_sc_info *sci = NILFS_SC(sbi);
2264        struct nilfs_transaction_info *ti;
2265        int err;
2266
2267        if (!sci)
2268                return -EROFS;
2269
2270        /* A call inside transactions causes a deadlock. */
2271        BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2272
2273        err = nilfs_segctor_sync(sci);
2274        return err;
2275}
2276
2277/**
2278 * nilfs_construct_dsync_segment - construct a data-only logical segment
2279 * @sb: super block
2280 * @inode: inode whose data blocks should be written out
2281 * @start: start byte offset
2282 * @end: end byte offset (inclusive)
2283 *
2284 * Return Value: On success, 0 is returned. On errors, one of the following
2285 * negative error codes is returned.
2286 *
2287 * %-EROFS - Read only filesystem.
2288 *
2289 * %-EIO - I/O error
2290 *
2291 * %-ENOSPC - No space left on device (only in a panic state).
2292 *
2293 * %-ERESTARTSYS - Interrupted.
2294 *
2295 * %-ENOMEM - Insufficient memory available.
2296 */
2297int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2298                                  loff_t start, loff_t end)
2299{
2300        struct nilfs_sb_info *sbi = NILFS_SB(sb);
2301        struct nilfs_sc_info *sci = NILFS_SC(sbi);
2302        struct nilfs_inode_info *ii;
2303        struct nilfs_transaction_info ti;
2304        int err = 0;
2305
2306        if (!sci)
2307                return -EROFS;
2308
2309        nilfs_transaction_lock(sbi, &ti, 0);
2310
2311        ii = NILFS_I(inode);
2312        if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) ||
2313            nilfs_test_opt(sbi, STRICT_ORDER) ||
2314            test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2315            nilfs_discontinued(sbi->s_nilfs)) {
2316                nilfs_transaction_unlock(sbi);
2317                err = nilfs_segctor_sync(sci);
2318                return err;
2319        }
2320
2321        spin_lock(&sbi->s_inode_lock);
2322        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2323            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2324                spin_unlock(&sbi->s_inode_lock);
2325                nilfs_transaction_unlock(sbi);
2326                return 0;
2327        }
2328        spin_unlock(&sbi->s_inode_lock);
2329        sci->sc_dsync_inode = ii;
2330        sci->sc_dsync_start = start;
2331        sci->sc_dsync_end = end;
2332
2333        err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2334
2335        nilfs_transaction_unlock(sbi);
2336        return err;
2337}
2338
2339#define FLUSH_FILE_BIT  (0x1) /* data file only */
2340#define FLUSH_DAT_BIT   (1 << NILFS_DAT_INO) /* DAT only */
2341
2342/**
2343 * nilfs_segctor_accept - record accepted sequence count of log-write requests
2344 * @sci: segment constructor object
2345 */
2346static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2347{
2348        spin_lock(&sci->sc_state_lock);
2349        sci->sc_seq_accepted = sci->sc_seq_request;
2350        spin_unlock(&sci->sc_state_lock);
2351        del_timer_sync(&sci->sc_timer);
2352}
2353
2354/**
2355 * nilfs_segctor_notify - notify the result of request to caller threads
2356 * @sci: segment constructor object
2357 * @mode: mode of log forming
2358 * @err: error code to be notified
2359 */
2360static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2361{
2362        /* Clear requests (even when the construction failed) */
2363        spin_lock(&sci->sc_state_lock);
2364
2365        if (mode == SC_LSEG_SR) {
2366                sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2367                sci->sc_seq_done = sci->sc_seq_accepted;
2368                nilfs_segctor_wakeup(sci, err);
2369                sci->sc_flush_request = 0;
2370        } else {
2371                if (mode == SC_FLUSH_FILE)
2372                        sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2373                else if (mode == SC_FLUSH_DAT)
2374                        sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2375
2376                /* re-enable timer if checkpoint creation was not done */
2377                if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2378                    time_before(jiffies, sci->sc_timer.expires))
2379                        add_timer(&sci->sc_timer);
2380        }
2381        spin_unlock(&sci->sc_state_lock);
2382}
2383
2384/**
2385 * nilfs_segctor_construct - form logs and write them to disk
2386 * @sci: segment constructor object
2387 * @mode: mode of log forming
2388 */
2389static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2390{
2391        struct nilfs_sb_info *sbi = sci->sc_sbi;
2392        struct the_nilfs *nilfs = sbi->s_nilfs;
2393        struct nilfs_super_block **sbp;
2394        int err = 0;
2395
2396        nilfs_segctor_accept(sci);
2397
2398        if (nilfs_discontinued(nilfs))
2399                mode = SC_LSEG_SR;
2400        if (!nilfs_segctor_confirm(sci))
2401                err = nilfs_segctor_do_construct(sci, mode);
2402
2403        if (likely(!err)) {
2404                if (mode != SC_FLUSH_DAT)
2405                        atomic_set(&nilfs->ns_ndirtyblks, 0);
2406                if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2407                    nilfs_discontinued(nilfs)) {
2408                        down_write(&nilfs->ns_sem);
2409                        err = -EIO;
2410                        sbp = nilfs_prepare_super(sbi,
2411                                                  nilfs_sb_will_flip(nilfs));
2412                        if (likely(sbp)) {
2413                                nilfs_set_log_cursor(sbp[0], nilfs);
2414                                err = nilfs_commit_super(sbi, NILFS_SB_COMMIT);
2415                        }
2416                        up_write(&nilfs->ns_sem);
2417                }
2418        }
2419
2420        nilfs_segctor_notify(sci, mode, err);
2421        return err;
2422}
2423
2424static void nilfs_construction_timeout(unsigned long data)
2425{
2426        struct task_struct *p = (struct task_struct *)data;
2427        wake_up_process(p);
2428}
2429
2430static void
2431nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2432{
2433        struct nilfs_inode_info *ii, *n;
2434
2435        list_for_each_entry_safe(ii, n, head, i_dirty) {
2436                if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2437                        continue;
2438                list_del_init(&ii->i_dirty);
2439                iput(&ii->vfs_inode);
2440        }
2441}
2442
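/**
 * nilfs_clean_segments - make segments reclaimable on behalf of the GC
 * @sb: super block
 * @argv: argument vectors of the clean-segments ioctl
 * @kbufs: kernel buffers holding the vector payloads
 *
 * The DAT state is saved to its shadow map, the segments selected by
 * the cleaner are prepared for reclamation, and a construction with a
 * super root is run, retrying on failure.  Freed segments are also
 * discarded when the "discard" mount option is enabled.
 */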
2443int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2444                         void **kbufs)
2445{
2446        struct nilfs_sb_info *sbi = NILFS_SB(sb);
2447        struct nilfs_sc_info *sci = NILFS_SC(sbi);
2448        struct the_nilfs *nilfs = sbi->s_nilfs;
2449        struct nilfs_transaction_info ti;
2450        int err;
2451
2452        if (unlikely(!sci))
2453                return -EROFS;
2454
2455        nilfs_transaction_lock(sbi, &ti, 1);
2456
2457        err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2458        if (unlikely(err))
2459                goto out_unlock;
2460
2461        err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2462        if (unlikely(err)) {
2463                nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
2464                goto out_unlock;
2465        }
2466
2467        sci->sc_freesegs = kbufs[4];
2468        sci->sc_nfreesegs = argv[4].v_nmembs;
2469        list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2470
2471        for (;;) {
2472                err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2473                nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2474
2475                if (likely(!err))
2476                        break;
2477
2478                nilfs_warning(sb, __func__,
2479                              "segment construction failed. (err=%d)", err);
2480                set_current_state(TASK_INTERRUPTIBLE);
2481                schedule_timeout(sci->sc_interval);
2482        }
2483        if (nilfs_test_opt(sbi, DISCARD)) {
2484                int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2485                                                 sci->sc_nfreesegs);
2486                if (ret) {
2487                        printk(KERN_WARNING
2488                               "NILFS warning: error %d on discard request, "
2489                               "turning discards off for the device\n", ret);
2490                        nilfs_clear_opt(sbi, DISCARD);
2491                }
2492        }
2493
2494 out_unlock:
2495        sci->sc_freesegs = NULL;
2496        sci->sc_nfreesegs = 0;
2497        nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2498        nilfs_transaction_unlock(sbi);
2499        return err;
2500}
2501
2502static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2503{
2504        struct nilfs_sb_info *sbi = sci->sc_sbi;
2505        struct nilfs_transaction_info ti;
2506
2507        nilfs_transaction_lock(sbi, &ti, 0);
2508        nilfs_segctor_construct(sci, mode);
2509
2510        /*
2511         * An unclosed segment should be retried.  We do this using sc_timer.
2512         * A timeout of sc_timer invokes a complete construction, which
2513         * closes the current logical segment.
2514         */
2515        if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2516                nilfs_segctor_start_timer(sci);
2517
2518        nilfs_transaction_unlock(sbi);
2519}
2520
2521static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2522{
2523        int mode = 0;
2524        int err;
2525
2526        spin_lock(&sci->sc_state_lock);
2527        mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2528                SC_FLUSH_DAT : SC_FLUSH_FILE;
2529        spin_unlock(&sci->sc_state_lock);
2530
2531        if (mode) {
2532                err = nilfs_segctor_do_construct(sci, mode);
2533
2534                spin_lock(&sci->sc_state_lock);
2535                sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2536                        ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2537                spin_unlock(&sci->sc_state_lock);
2538        }
2539        clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2540}
2541
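/**
 * nilfs_segctor_flush_mode - choose the construction mode for flush requests
 * @sci: segment constructor object
 *
 * A data-file or DAT flush is chosen when only one kind of flush is
 * requested and no logical segment has stayed unclosed longer than the
 * checkpoint frequency; otherwise a full construction with a super root
 * is selected.
 */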
2542static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2543{
2544        if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2545            time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2546                if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2547                        return SC_FLUSH_FILE;
2548                else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2549                        return SC_FLUSH_DAT;
2550        }
2551        return SC_LSEG_SR;
2552}
2553
2554/**
2555 * nilfs_segctor_thread - main loop of the segment constructor thread.
2556 * @arg: pointer to a struct nilfs_sc_info.
2557 *
2558 * nilfs_segctor_thread() initializes a timer and serves as a daemon
2559 * to execute segment constructions.
2560 */
2561static int nilfs_segctor_thread(void *arg)
2562{
2563        struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2564        struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
2565        int timeout = 0;
2566
2567        sci->sc_timer.data = (unsigned long)current;
2568        sci->sc_timer.function = nilfs_construction_timeout;
2569
2570        /* start sync. */
2571        sci->sc_task = current;
2572        wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2573        printk(KERN_INFO
2574               "segctord starting. Construction interval = %lu seconds, "
2575               "CP frequency < %lu seconds\n",
2576               sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2577
2578        spin_lock(&sci->sc_state_lock);
2579 loop:
2580        for (;;) {
2581                int mode;
2582
2583                if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2584                        goto end_thread;
2585
2586                if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2587                        mode = SC_LSEG_SR;
2588                else if (!sci->sc_flush_request)
2589                        break;
2590                else
2591                        mode = nilfs_segctor_flush_mode(sci);
2592
2593                spin_unlock(&sci->sc_state_lock);
2594                nilfs_segctor_thread_construct(sci, mode);
2595                spin_lock(&sci->sc_state_lock);
2596                timeout = 0;
2597        }
2598
2599
2600        if (freezing(current)) {
2601                spin_unlock(&sci->sc_state_lock);
2602                refrigerator();
2603                spin_lock(&sci->sc_state_lock);
2604        } else {
2605                DEFINE_WAIT(wait);
2606                int should_sleep = 1;
2607
2608                prepare_to_wait(&sci->sc_wait_daemon, &wait,
2609                                TASK_INTERRUPTIBLE);
2610
2611                if (sci->sc_seq_request != sci->sc_seq_done)
2612                        should_sleep = 0;
2613                else if (sci->sc_flush_request)
2614                        should_sleep = 0;
2615                else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2616                        should_sleep = time_before(jiffies,
2617                                        sci->sc_timer.expires);
2618
2619                if (should_sleep) {
2620                        spin_unlock(&sci->sc_state_lock);
2621                        schedule();
2622                        spin_lock(&sci->sc_state_lock);
2623                }
2624                finish_wait(&sci->sc_wait_daemon, &wait);
2625                timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2626                           time_after_eq(jiffies, sci->sc_timer.expires));
2627
2628                if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2629                        set_nilfs_discontinued(nilfs);
2630        }
2631        goto loop;
2632
2633 end_thread:
2634        spin_unlock(&sci->sc_state_lock);
2635
2636        /* end sync. */
2637        sci->sc_task = NULL;
2638        wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2639        return 0;
2640}
2641
2642static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2643{
2644        struct task_struct *t;
2645
2646        t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2647        if (IS_ERR(t)) {
2648                int err = PTR_ERR(t);
2649
2650                printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
2651                       err);
2652                return err;
2653        }
2654        wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2655        return 0;
2656}
2657
2658static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2659        __acquires(&sci->sc_state_lock)
2660        __releases(&sci->sc_state_lock)
2661{
2662        sci->sc_state |= NILFS_SEGCTOR_QUIT;
2663
2664        while (sci->sc_task) {
2665                wake_up(&sci->sc_wait_daemon);
2666                spin_unlock(&sci->sc_state_lock);
2667                wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2668                spin_lock(&sci->sc_state_lock);
2669        }
2670}
2671
2672/*
2673 * Setup & clean-up functions
2674 */
2675static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi,
2676                                               struct nilfs_root *root)
2677{
2678        struct nilfs_sc_info *sci;
2679
2680        sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2681        if (!sci)
2682                return NULL;
2683
2684        sci->sc_sbi = sbi;
2685        sci->sc_super = sbi->s_super;
2686
2687        nilfs_get_root(root);
2688        sci->sc_root = root;
2689
2690        init_waitqueue_head(&sci->sc_wait_request);
2691        init_waitqueue_head(&sci->sc_wait_daemon);
2692        init_waitqueue_head(&sci->sc_wait_task);
2693        spin_lock_init(&sci->sc_state_lock);
2694        INIT_LIST_HEAD(&sci->sc_dirty_files);
2695        INIT_LIST_HEAD(&sci->sc_segbufs);
2696        INIT_LIST_HEAD(&sci->sc_write_logs);
2697        INIT_LIST_HEAD(&sci->sc_gc_inodes);
2698        INIT_LIST_HEAD(&sci->sc_copied_buffers);
2699        init_timer(&sci->sc_timer);
2700
2701        sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2702        sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2703        sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2704
2705        if (sbi->s_interval)
2706                sci->sc_interval = sbi->s_interval;
2707        if (sbi->s_watermark)
2708                sci->sc_watermark = sbi->s_watermark;
2709        return sci;
2710}
2711
2712static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2713{
2714        int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2715
2716        /* The segctord thread was stopped and its timer was removed,
2717           but some requests may remain and must be written out. */
2718        do {
2719                struct nilfs_sb_info *sbi = sci->sc_sbi;
2720                struct nilfs_transaction_info ti;
2721
2722                nilfs_transaction_lock(sbi, &ti, 0);
2723                ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2724                nilfs_transaction_unlock(sbi);
2725
2726        } while (ret && retrycount-- > 0);
2727}
2728
2729/**
2730 * nilfs_segctor_destroy - destroy the segment constructor.
2731 * @sci: nilfs_sc_info
2732 *
2733 * nilfs_segctor_destroy() kills the segctord thread and frees
2734 * the nilfs_sc_info struct.
2735 * Caller must hold the segment semaphore.
2736 */
2737static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2738{
2739        struct nilfs_sb_info *sbi = sci->sc_sbi;
2740        int flag;
2741
2742        up_write(&sbi->s_nilfs->ns_segctor_sem);
2743
2744        spin_lock(&sci->sc_state_lock);
2745        nilfs_segctor_kill_thread(sci);
2746        flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2747                || sci->sc_seq_request != sci->sc_seq_done);
2748        spin_unlock(&sci->sc_state_lock);
2749
2750        if (flag || !nilfs_segctor_confirm(sci))
2751                nilfs_segctor_write_out(sci);
2752
2753        WARN_ON(!list_empty(&sci->sc_copied_buffers));
2754
2755        if (!list_empty(&sci->sc_dirty_files)) {
2756                nilfs_warning(sbi->s_super, __func__,
2757                              "dirty file(s) after the final construction\n");
2758                nilfs_dispose_list(sbi, &sci->sc_dirty_files, 1);
2759        }
2760
2761        WARN_ON(!list_empty(&sci->sc_segbufs));
2762        WARN_ON(!list_empty(&sci->sc_write_logs));
2763
2764        nilfs_put_root(sci->sc_root);
2765
2766        down_write(&sbi->s_nilfs->ns_segctor_sem);
2767
2768        del_timer_sync(&sci->sc_timer);
2769        kfree(sci);
2770}
2771
2772/**
2773 * nilfs_attach_segment_constructor - attach a segment constructor
2774 * @sbi: nilfs_sb_info
2775 * @root: root object of the current filesystem tree
2776 *
2777 * nilfs_attach_segment_constructor() allocates a struct nilfs_sc_info,
2778 * initializes it, and starts the segment constructor.
2779 *
2780 * Return Value: On success, 0 is returned. On error, one of the following
2781 * negative error codes is returned.
2782 *
2783 * %-ENOMEM - Insufficient memory available.
2784 */
2785int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi,
2786                                     struct nilfs_root *root)
2787{
2788        int err;
2789
2790        if (NILFS_SC(sbi)) {
2791                /*
2792                 * This happens if the filesystem was remounted
2793                 * read/write after nilfs_error degenerated it into a
2794                 * read-only mount.
2795                 */
2796                nilfs_detach_segment_constructor(sbi);
2797        }
2798
2799        sbi->s_sc_info = nilfs_segctor_new(sbi, root);
2800        if (!sbi->s_sc_info)
2801                return -ENOMEM;
2802
2803        err = nilfs_segctor_start_thread(NILFS_SC(sbi));
2804        if (err) {
2805                kfree(sbi->s_sc_info);
2806                sbi->s_sc_info = NULL;
2807        }
2808        return err;
2809}
2810
2811/**
2812 * nilfs_detach_segment_constructor - destroy the segment constructor
2813 * @sbi: nilfs_sb_info
2814 *
2815 * nilfs_detach_segment_constructor() kills the segment constructor daemon,
2816 * frees the struct nilfs_sc_info, and destroys the dirty file list.
2817 */
2818void nilfs_detach_segment_constructor(struct nilfs_sb_info *sbi)
2819{
2820        struct the_nilfs *nilfs = sbi->s_nilfs;
2821        LIST_HEAD(garbage_list);
2822
2823        down_write(&nilfs->ns_segctor_sem);
2824        if (NILFS_SC(sbi)) {
2825                nilfs_segctor_destroy(NILFS_SC(sbi));
2826                sbi->s_sc_info = NULL;
2827        }
2828
2829        /* Forcibly free the list of dirty files */
2830        spin_lock(&sbi->s_inode_lock);
2831        if (!list_empty(&sbi->s_dirty_files)) {
2832                list_splice_init(&sbi->s_dirty_files, &garbage_list);
2833                nilfs_warning(sbi->s_super, __func__,
2834                              "Non empty dirty list after the last "
2835                              "segment construction\n");
2836        }
2837        spin_unlock(&sbi->s_inode_lock);
2838        up_write(&nilfs->ns_segctor_sem);
2839
2840        nilfs_dispose_list(sbi, &garbage_list, 1);
2841}
2842