linux/fs/nilfs2/segment.c
/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"

/*
 * Segment constructor
 */
#define SC_N_INODEVEC   16   /* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64   /*
                              * Upper limit of the number of segments
                              * appended in collection retry loop
                              */

/* Construction mode */
enum {
        SC_LSEG_SR = 1, /* Make a logical segment having a super root */
        SC_LSEG_DSYNC,  /*
                         * Flush data blocks of a given file and make
                         * a logical segment without a super root.
                         */
        SC_FLUSH_FILE,  /*
                         * Flush data files; this leads to segment writes
                         * without creating a checkpoint.
                         */
        SC_FLUSH_DAT,   /*
                         * Flush DAT file.  This also creates segments
                         * without a checkpoint.
                         */
};

/* Stage numbers of dirty block collection */
enum {
        NILFS_ST_INIT = 0,
        NILFS_ST_GC,            /* Collecting dirty blocks for GC */
        NILFS_ST_FILE,
        NILFS_ST_IFILE,
        NILFS_ST_CPFILE,
        NILFS_ST_SUFILE,
        NILFS_ST_DAT,
        NILFS_ST_SR,            /* Super root */
        NILFS_ST_DSYNC,         /* Data sync blocks */
        NILFS_ST_DONE,
};

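/*
 * Defining CREATE_TRACE_POINTS before including the trace header below
 * instantiates the nilfs2 tracepoints; this must be done in exactly one
 * compilation unit.
 */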
#define CREATE_TRACE_POINTS
#include <trace/events/nilfs2.h>

/*
 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are
 * wrapper functions for the stage count (nilfs_sc_info->sc_stage.scnt).
 * Users of the field must go through these wrappers because every stage
 * transition must emit a trace event
 * (trace_nilfs2_collection_stage_transition).
 *
 * nilfs_sc_cstage_get() isn't required for the above purpose because it
 * doesn't produce tracepoint events.  It is provided just for making the
 * intention clear.
 */
static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
{
        sci->sc_stage.scnt++;
        trace_nilfs2_collection_stage_transition(sci);
}

static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
{
        sci->sc_stage.scnt = next_scnt;
        trace_nilfs2_collection_stage_transition(sci);
}

static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
{
        return sci->sc_stage.scnt;
}

/* State flags of collection */
#define NILFS_CF_NODE           0x0001  /* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED  0x0002  /* IFILE stage has started */
#define NILFS_CF_SUFREED        0x0004  /* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK   (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
        int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
                            struct inode *);
        int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
                            struct inode *);
        int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
                            struct inode *);
        void (*write_data_binfo)(struct nilfs_sc_info *,
                                 struct nilfs_segsum_pointer *,
                                 union nilfs_binfo *);
        void (*write_node_binfo)(struct nilfs_sc_info *,
                                 struct nilfs_segsum_pointer *,
                                 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

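/*
 * Wrap-safe comparisons of 32-bit sequence counters, in the style of
 * time_after()/time_before(): the difference is evaluated as a signed
 * 32-bit value, so e.g. nilfs_cnt32_gt(0x10, 0xfffffff0) is true because
 * 0x10 comes logically after 0xfffffff0 across the wraparound.
 */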
#define nilfs_cnt32_gt(a, b)   \
        (typecheck(__u32, a) && typecheck(__u32, b) && \
         ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
        (typecheck(__u32, a) && typecheck(__u32, b) && \
         ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)  nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)  nilfs_cnt32_ge(b, a)

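/*
 * nilfs_prepare_segment_lock() returns a positive value (the new nesting
 * depth) when the caller is already inside a nilfs transaction, zero when
 * a fresh transaction context has been installed on the current task, and
 * a negative error code on failure.
 */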
static int nilfs_prepare_segment_lock(struct super_block *sb,
                                      struct nilfs_transaction_info *ti)
{
        struct nilfs_transaction_info *cur_ti = current->journal_info;
        void *save = NULL;

        if (cur_ti) {
                if (cur_ti->ti_magic == NILFS_TI_MAGIC)
                        return ++cur_ti->ti_count;

                /*
                 * If the journal_info field is occupied by another FS,
                 * it is saved and will be restored on
                 * nilfs_transaction_commit().
                 */
                nilfs_msg(sb, KERN_WARNING, "journal info from a different FS");
                save = current->journal_info;
        }
        if (!ti) {
                ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
                if (!ti)
                        return -ENOMEM;
                ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
        } else {
                ti->ti_flags = 0;
        }
        ti->ti_count = 0;
        ti->ti_save = save;
        ti->ti_magic = NILFS_TI_MAGIC;
        current->journal_info = ti;
        return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive.  The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if low capacity.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device.
 */
int nilfs_transaction_begin(struct super_block *sb,
                            struct nilfs_transaction_info *ti,
                            int vacancy_check)
{
        struct the_nilfs *nilfs;
        int ret = nilfs_prepare_segment_lock(sb, ti);
        struct nilfs_transaction_info *trace_ti;

        if (unlikely(ret < 0))
                return ret;
        if (ret > 0) {
                trace_ti = current->journal_info;

                trace_nilfs2_transaction_transition(sb, trace_ti,
                                    trace_ti->ti_count, trace_ti->ti_flags,
                                    TRACE_NILFS2_TRANSACTION_BEGIN);
                return 0;
        }

        sb_start_intwrite(sb);

        nilfs = sb->s_fs_info;
        down_read(&nilfs->ns_segctor_sem);
        if (vacancy_check && nilfs_near_disk_full(nilfs)) {
                up_read(&nilfs->ns_segctor_sem);
                ret = -ENOSPC;
                goto failed;
        }

        trace_ti = current->journal_info;
        trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
                                            trace_ti->ti_flags,
                                            TRACE_NILFS2_TRANSACTION_BEGIN);
        return 0;

 failed:
        ti = current->journal_info;
        current->journal_info = ti->ti_save;
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
        sb_end_intwrite(sb);
        return ret;
}

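/*
 * Example of pairing the calls above (an illustrative sketch, not a
 * caller taken from the kernel; update_some_file() is hypothetical and
 * error handling is abbreviated):
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (unlikely(err))
 *		return err;
 *	err = update_some_file(inode);
 *	if (unlikely(err)) {
 *		nilfs_transaction_abort(sb);
 *		return err;
 *	}
 *	return nilfs_transaction_commit(sb);
 */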
/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This release is performed only
 * in the outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
        struct nilfs_transaction_info *ti = current->journal_info;
        struct the_nilfs *nilfs = sb->s_fs_info;
        int err = 0;

        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        ti->ti_flags |= NILFS_TI_COMMIT;
        if (ti->ti_count > 0) {
                ti->ti_count--;
                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
                return 0;
        }
        if (nilfs->ns_writer) {
                struct nilfs_sc_info *sci = nilfs->ns_writer;

                if (ti->ti_flags & NILFS_TI_COMMIT)
                        nilfs_segctor_start_timer(sci);
                if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
                        nilfs_segctor_do_flush(sci, 0);
        }
        up_read(&nilfs->ns_segctor_sem);
        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

        current->journal_info = ti->ti_save;

        if (ti->ti_flags & NILFS_TI_SYNC)
                err = nilfs_construct_segment(sb);
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
        sb_end_intwrite(sb);
        return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
        struct nilfs_transaction_info *ti = current->journal_info;
        struct the_nilfs *nilfs = sb->s_fs_info;

        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        if (ti->ti_count > 0) {
                ti->ti_count--;
                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
                return;
        }
        up_read(&nilfs->ns_segctor_sem);

        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

        current->journal_info = ti->ti_save;
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
        sb_end_intwrite(sb);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_sc_info *sci = nilfs->ns_writer;

        if (!sci || !sci->sc_flush_request)
                return;

        set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
        up_read(&nilfs->ns_segctor_sem);

        down_write(&nilfs->ns_segctor_sem);
        if (sci->sc_flush_request &&
            test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
                struct nilfs_transaction_info *ti = current->journal_info;

                ti->ti_flags |= NILFS_TI_WRITER;
                nilfs_segctor_do_immediate_flush(sci);
                ti->ti_flags &= ~NILFS_TI_WRITER;
        }
        downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct super_block *sb,
                                   struct nilfs_transaction_info *ti,
                                   int gcflag)
{
        struct nilfs_transaction_info *cur_ti = current->journal_info;
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_sc_info *sci = nilfs->ns_writer;

        WARN_ON(cur_ti);
        ti->ti_flags = NILFS_TI_WRITER;
        ti->ti_count = 0;
        ti->ti_save = cur_ti;
        ti->ti_magic = NILFS_TI_MAGIC;
        current->journal_info = ti;

        for (;;) {
                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

                down_write(&nilfs->ns_segctor_sem);
                if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
                        break;

                nilfs_segctor_do_immediate_flush(sci);

                up_write(&nilfs->ns_segctor_sem);
                cond_resched();
        }
        if (gcflag)
                ti->ti_flags |= NILFS_TI_GC;

        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
        struct nilfs_transaction_info *ti = current->journal_info;
        struct the_nilfs *nilfs = sb->s_fs_info;

        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        BUG_ON(ti->ti_count > 0);

        up_write(&nilfs->ns_segctor_sem);
        current->journal_info = ti->ti_save;

        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}

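/*
 * nilfs_segctor_map_segsum_entry - reserve the next @bytes of segment
 * summary space and advance @ssp past it, stepping to the next summary
 * buffer when the current block cannot hold the entry.
 */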
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
                                            struct nilfs_segsum_pointer *ssp,
                                            unsigned int bytes)
{
        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
        unsigned int blocksize = sci->sc_super->s_blocksize;
        void *p;

        if (unlikely(ssp->offset + bytes > blocksize)) {
                ssp->offset = 0;
                BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
                                               &segbuf->sb_segsum_buffers));
                ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
        }
        p = ssp->bh->b_data + ssp->offset;
        ssp->offset += bytes;
        return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
        struct buffer_head *sumbh;
        unsigned int sumbytes;
        unsigned int flags = 0;
        int err;

        if (nilfs_doing_gc())
                flags = NILFS_SS_GC;
        err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
        if (unlikely(err))
                return err;

        sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
        sumbytes = segbuf->sb_sum.sumbytes;
        sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
        sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
        return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
        sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
        if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
                return -E2BIG; /*
                                * The current segment is filled up
                                * (internal code)
                                */
        sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
        return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
        int err;

        if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
                err = nilfs_segctor_feed_segment(sci);
                if (err)
                        return err;
                segbuf = sci->sc_curseg;
        }
        err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
        if (likely(!err))
                segbuf->sb_sum.flags |= NILFS_SS_SR;
        return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
        struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
        unsigned int binfo_size)
{
        unsigned int blocksize = sci->sc_super->s_blocksize;
        /* Both finfo and binfo are small compared with blocksize */

        return ssp->offset + binfo_size +
                (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
                blocksize;
}

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
                                      struct inode *inode)
{
        sci->sc_curseg->sb_sum.nfinfo++;
        sci->sc_binfo_ptr = sci->sc_finfo_ptr;
        nilfs_segctor_map_segsum_entry(
                sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

        if (NILFS_I(inode)->i_root &&
            !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
                set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
        /* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
                                    struct inode *inode)
{
        struct nilfs_finfo *finfo;
        struct nilfs_inode_info *ii;
        struct nilfs_segment_buffer *segbuf;
        __u64 cno;

        if (sci->sc_blk_cnt == 0)
                return;

        ii = NILFS_I(inode);

        if (test_bit(NILFS_I_GCINODE, &ii->i_state))
                cno = ii->i_cno;
        else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
                cno = 0;
        else
                cno = sci->sc_cno;

        finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
                                                 sizeof(*finfo));
        finfo->fi_ino = cpu_to_le64(inode->i_ino);
        finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
        finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
        finfo->fi_cno = cpu_to_le64(cno);

        segbuf = sci->sc_curseg;
        segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
                sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
        sci->sc_finfo_ptr = sci->sc_binfo_ptr;
        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

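/*
 * nilfs_segctor_add_file_block - append a dirty buffer to the current
 * log, extending the segment summary area or feeding the next segment
 * buffer when the current one has no room left.  The first block added
 * for a file opens a new finfo entry.
 */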
static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
                                        struct buffer_head *bh,
                                        struct inode *inode,
                                        unsigned int binfo_size)
{
        struct nilfs_segment_buffer *segbuf;
        int required, err = 0;

 retry:
        segbuf = sci->sc_curseg;
        required = nilfs_segctor_segsum_block_required(
                sci, &sci->sc_binfo_ptr, binfo_size);
        if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
                nilfs_segctor_end_finfo(sci, inode);
                err = nilfs_segctor_feed_segment(sci);
                if (err)
                        return err;
                goto retry;
        }
        if (unlikely(required)) {
                err = nilfs_segbuf_extend_segsum(segbuf);
                if (unlikely(err))
                        goto failed;
        }
        if (sci->sc_blk_cnt == 0)
                nilfs_segctor_begin_finfo(sci, inode);

        nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
        /* Substitution to vblocknr is delayed until update_blocknr() */
        nilfs_segbuf_add_file_buffer(segbuf, bh);
        sci->sc_blk_cnt++;
 failed:
        return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
                                   struct buffer_head *bh, struct inode *inode)
{
        int err;

        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
        if (err < 0)
                return err;

        err = nilfs_segctor_add_file_block(sci, bh, inode,
                                           sizeof(struct nilfs_binfo_v));
        if (!err)
                sci->sc_datablk_cnt++;
        return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
                                   struct buffer_head *bh,
                                   struct inode *inode)
{
        return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
                                   struct buffer_head *bh,
                                   struct inode *inode)
{
        WARN_ON(!buffer_dirty(bh));
        return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
                                        struct nilfs_segsum_pointer *ssp,
                                        union nilfs_binfo *binfo)
{
        struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
                sci, ssp, sizeof(*binfo_v));
        *binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
                                        struct nilfs_segsum_pointer *ssp,
                                        union nilfs_binfo *binfo)
{
        __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
                sci, ssp, sizeof(*vblocknr));
        *vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
        .collect_data = nilfs_collect_file_data,
        .collect_node = nilfs_collect_file_node,
        .collect_bmap = nilfs_collect_file_bmap,
        .write_data_binfo = nilfs_write_file_data_binfo,
        .write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
                                  struct buffer_head *bh, struct inode *inode)
{
        int err;

        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
        if (err < 0)
                return err;

        err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
        if (!err)
                sci->sc_datablk_cnt++;
        return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
                                  struct buffer_head *bh, struct inode *inode)
{
        WARN_ON(!buffer_dirty(bh));
        return nilfs_segctor_add_file_block(sci, bh, inode,
                                            sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
                                       struct nilfs_segsum_pointer *ssp,
                                       union nilfs_binfo *binfo)
{
        __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
                                                          sizeof(*blkoff));
        *blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
                                       struct nilfs_segsum_pointer *ssp,
                                       union nilfs_binfo *binfo)
{
        struct nilfs_binfo_dat *binfo_dat =
                nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
        *binfo_dat = binfo->bi_dat;
}

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
        .collect_data = nilfs_collect_dat_data,
        .collect_node = nilfs_collect_file_node,
        .collect_bmap = nilfs_collect_dat_bmap,
        .write_data_binfo = nilfs_write_dat_data_binfo,
        .write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
        .collect_data = nilfs_collect_file_data,
        .collect_node = NULL,
        .collect_bmap = NULL,
        .write_data_binfo = nilfs_write_file_data_binfo,
        .write_node_binfo = NULL,
};

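/*
 * nilfs_lookup_dirty_data_buffers - gather dirty data buffers of @inode
 * within the byte range [@start, @end] onto @listp, creating buffer
 * heads for pages that do not yet have them.  Scanning stops once
 * @nlimit buffers have been collected; the return value is the number
 * of buffers gathered.
 */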
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
                                              struct list_head *listp,
                                              size_t nlimit,
                                              loff_t start, loff_t end)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t index = 0, last = ULONG_MAX;
        size_t ndirties = 0;
        int i;

        if (unlikely(start != 0 || end != LLONG_MAX)) {
                /*
                 * A valid range is given for sync-ing data pages.  The
                 * range is rounded to page boundaries; extra dirty
                 * buffers may be included if blocksize < pagesize.
                 */
                index = start >> PAGE_SHIFT;
                last = end >> PAGE_SHIFT;
        }
        pagevec_init(&pvec);
 repeat:
        if (unlikely(index > last) ||
            !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
                                PAGECACHE_TAG_DIRTY))
                return ndirties;

        for (i = 0; i < pagevec_count(&pvec); i++) {
                struct buffer_head *bh, *head;
                struct page *page = pvec.pages[i];

                lock_page(page);
                if (!page_has_buffers(page))
                        create_empty_buffers(page, i_blocksize(inode), 0);
                unlock_page(page);

                bh = head = page_buffers(page);
                do {
                        if (!buffer_dirty(bh) || buffer_async_write(bh))
                                continue;
                        get_bh(bh);
                        list_add_tail(&bh->b_assoc_buffers, listp);
                        ndirties++;
                        if (unlikely(ndirties >= nlimit)) {
                                pagevec_release(&pvec);
                                cond_resched();
                                return ndirties;
                        }
                } while (bh = bh->b_this_page, bh != head);
        }
        pagevec_release(&pvec);
        cond_resched();
        goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
                                            struct list_head *listp)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct address_space *mapping = &ii->i_btnode_cache;
        struct pagevec pvec;
        struct buffer_head *bh, *head;
        unsigned int i;
        pgoff_t index = 0;

        pagevec_init(&pvec);

        while (pagevec_lookup_tag(&pvec, mapping, &index,
                                        PAGECACHE_TAG_DIRTY)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        bh = head = page_buffers(pvec.pages[i]);
                        do {
                                if (buffer_dirty(bh) &&
                                                !buffer_async_write(bh)) {
                                        get_bh(bh);
                                        list_add_tail(&bh->b_assoc_buffers,
                                                      listp);
                                }
                                bh = bh->b_this_page;
                        } while (bh != head);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}

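/*
 * nilfs_dispose_list - dispose of the inodes collected on @head.  When
 * @force is set, each inode is dropped unconditionally (releasing any
 * buffer head held in i_bh); otherwise inodes still marked dirty are
 * requeued onto the ns_dirty_files list.  References are released with
 * iput() outside ns_inode_lock, in batches of up to SC_N_INODEVEC.
 */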
static void nilfs_dispose_list(struct the_nilfs *nilfs,
                               struct list_head *head, int force)
{
        struct nilfs_inode_info *ii, *n;
        struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
        unsigned int nv = 0;

        while (!list_empty(head)) {
                spin_lock(&nilfs->ns_inode_lock);
                list_for_each_entry_safe(ii, n, head, i_dirty) {
                        list_del_init(&ii->i_dirty);
                        if (force) {
                                if (unlikely(ii->i_bh)) {
                                        brelse(ii->i_bh);
                                        ii->i_bh = NULL;
                                }
                        } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
                                set_bit(NILFS_I_QUEUED, &ii->i_state);
                                list_add_tail(&ii->i_dirty,
                                              &nilfs->ns_dirty_files);
                                continue;
                        }
                        ivec[nv++] = ii;
                        if (nv == SC_N_INODEVEC)
                                break;
                }
                spin_unlock(&nilfs->ns_inode_lock);

                for (pii = ivec; nv > 0; pii++, nv--)
                        iput(&(*pii)->vfs_inode);
        }
}

static void nilfs_iput_work_func(struct work_struct *work)
{
        struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
                                                 sc_iput_work);
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

        nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
                                     struct nilfs_root *root)
{
        int ret = 0;

        if (nilfs_mdt_fetch_dirty(root->ifile))
                ret++;
        if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
                ret++;
        if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
                ret++;
        if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
                ret++;
        return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
        return list_empty(&sci->sc_dirty_files) &&
                !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
                sci->sc_nfreesegs == 0 &&
                (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        int ret = 0;

        if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
                set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

        spin_lock(&nilfs->ns_inode_lock);
        if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
                ret++;
        spin_unlock(&nilfs->ns_inode_lock);
        return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

        nilfs_mdt_clear_dirty(sci->sc_root->ifile);
        nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
        nilfs_mdt_clear_dirty(nilfs->ns_sufile);
        nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        struct buffer_head *bh_cp;
        struct nilfs_checkpoint *raw_cp;
        int err;

        /* XXX: this interface will be changed */
        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
                                          &raw_cp, &bh_cp);
        if (likely(!err)) {
                /*
                 * The following code duplicates logic in cpfile.  However,
                 * it is needed so that the checkpoint is collected even if
                 * it was not newly created.
                 */
                mark_buffer_dirty(bh_cp);
                nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
                nilfs_cpfile_put_checkpoint(
                        nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
        } else
                WARN_ON(err == -EINVAL || err == -ENOENT);

        return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        struct buffer_head *bh_cp;
        struct nilfs_checkpoint *raw_cp;
        int err;

        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
                                          &raw_cp, &bh_cp);
        if (unlikely(err)) {
                WARN_ON(err == -EINVAL || err == -ENOENT);
                goto failed_ibh;
        }
        raw_cp->cp_snapshot_list.ssl_next = 0;
        raw_cp->cp_snapshot_list.ssl_prev = 0;
        raw_cp->cp_inodes_count =
                cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
        raw_cp->cp_blocks_count =
                cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
        raw_cp->cp_nblk_inc =
                cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
        raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
        raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

        if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
                nilfs_checkpoint_clear_minor(raw_cp);
        else
                nilfs_checkpoint_set_minor(raw_cp);

        nilfs_write_inode_common(sci->sc_root->ifile,
                                 &raw_cp->cp_ifile_inode, 1);
        nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
        return 0;

 failed_ibh:
        return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
                                    struct nilfs_inode_info *ii)
{
        struct buffer_head *ibh;
        struct nilfs_inode *raw_inode;

        if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
                ibh = ii->i_bh;
                BUG_ON(!ibh);
                raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
                                                  ibh);
                nilfs_bmap_write(ii->i_bmap, raw_inode);
                nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
        }
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
        struct nilfs_inode_info *ii;

        list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
                nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
                set_bit(NILFS_I_COLLECTED, &ii->i_state);
        }
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
{
        struct buffer_head *bh_sr;
        struct nilfs_super_root *raw_sr;
        unsigned int isz, srsz;

        bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
        raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
        isz = nilfs->ns_inode_size;
        srsz = NILFS_SR_BYTES(isz);

        raw_sr->sr_bytes = cpu_to_le16(srsz);
        raw_sr->sr_nongc_ctime
                = cpu_to_le64(nilfs_doing_gc() ?
                              nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
        raw_sr->sr_flags = 0;

        nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
                                 NILFS_SR_DAT_OFFSET(isz), 1);
        nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
                                 NILFS_SR_CPFILE_OFFSET(isz), 1);
        nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
                                 NILFS_SR_SUFILE_OFFSET(isz), 1);
        memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
        struct nilfs_inode_info *ii;

        list_for_each_entry(ii, head, i_dirty) {
                if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
                        clear_bit(NILFS_I_COLLECTED, &ii->i_state);
        }
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
        struct nilfs_inode_info *ii;

        list_for_each_entry(ii, head, i_dirty) {
                if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
                        continue;

                clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
                set_bit(NILFS_I_UPDATED, &ii->i_state);
        }
}

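/*
 * nilfs_segctor_apply_buffers - run @collect over every buffer on
 * @listp, dropping each list reference as it goes.  If @collect is NULL
 * or returns an error, the remaining buffers are simply released.
 */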
static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
                                       struct inode *inode,
                                       struct list_head *listp,
                                       int (*collect)(struct nilfs_sc_info *,
                                                      struct buffer_head *,
                                                      struct inode *))
{
        struct buffer_head *bh, *n;
        int err = 0;

        if (collect) {
                list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
                        list_del_init(&bh->b_assoc_buffers);
                        err = collect(sci, bh, inode);
                        brelse(bh);
                        if (unlikely(err))
                                goto dispose_buffers;
                }
                return 0;
        }

 dispose_buffers:
        while (!list_empty(listp)) {
                bh = list_first_entry(listp, struct buffer_head,
                                      b_assoc_buffers);
                list_del_init(&bh->b_assoc_buffers);
                brelse(bh);
        }
        return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
        /* Remaining number of blocks within segment buffer */
        return sci->sc_segbuf_nblocks -
                (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

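/*
 * nilfs_segctor_scan_file - collect the dirty blocks of @inode in three
 * passes: data buffers first, then b-tree node buffers, then bmap
 * (intermediate node) buffers.  NILFS_CF_NODE in sc_stage.flags records
 * that the data pass has completed, so a retried construction resumes
 * with the node blocks instead of collecting the data blocks twice.
 */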
static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
                                   struct inode *inode,
                                   const struct nilfs_sc_operations *sc_ops)
{
        LIST_HEAD(data_buffers);
        LIST_HEAD(node_buffers);
        int err;

        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
                size_t n, rest = nilfs_segctor_buffer_rest(sci);

                n = nilfs_lookup_dirty_data_buffers(
                        inode, &data_buffers, rest + 1, 0, LLONG_MAX);
                if (n > rest) {
                        err = nilfs_segctor_apply_buffers(
                                sci, inode, &data_buffers,
                                sc_ops->collect_data);
                        BUG_ON(!err); /* always receive -E2BIG or true error */
                        goto break_or_fail;
                }
        }
        nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
                err = nilfs_segctor_apply_buffers(
                        sci, inode, &data_buffers, sc_ops->collect_data);
                if (unlikely(err)) {
                        /* dispose node list */
                        nilfs_segctor_apply_buffers(
                                sci, inode, &node_buffers, NULL);
                        goto break_or_fail;
                }
                sci->sc_stage.flags |= NILFS_CF_NODE;
        }
        /* Collect node */
        err = nilfs_segctor_apply_buffers(
                sci, inode, &node_buffers, sc_ops->collect_node);
        if (unlikely(err))
                goto break_or_fail;

        nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
        err = nilfs_segctor_apply_buffers(
                sci, inode, &node_buffers, sc_ops->collect_bmap);
        if (unlikely(err))
                goto break_or_fail;

        nilfs_segctor_end_finfo(sci, inode);
        sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
        return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
                                         struct inode *inode)
{
        LIST_HEAD(data_buffers);
        size_t n, rest = nilfs_segctor_buffer_rest(sci);
        int err;

        n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
                                            sci->sc_dsync_start,
                                            sci->sc_dsync_end);

        err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
                                          nilfs_collect_file_data);
        if (!err) {
                nilfs_segctor_end_finfo(sci, inode);
                BUG_ON(n > rest);
                /* always receive -E2BIG or true error if n > rest */
        }
        return err;
}

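/*
 * nilfs_segctor_collect_blocks - drive the dirty block collection state
 * machine.  Each case falls through to the next stage, and sc_stage is
 * preserved across calls, so when a stage exits early (e.g. with -E2BIG
 * from a filled segment), a retry resumes from the interrupted stage.
 */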
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        struct list_head *head;
        struct nilfs_inode_info *ii;
        size_t ndone;
        int err = 0;

        switch (nilfs_sc_cstage_get(sci)) {
        case NILFS_ST_INIT:
                /* Pre-processes */
                sci->sc_stage.flags = 0;

                if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
                        sci->sc_nblk_inc = 0;
                        sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
                        if (mode == SC_LSEG_DSYNC) {
                                nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
                                goto dsync_mode;
                        }
                }

                sci->sc_stage.dirty_file_ptr = NULL;
                sci->sc_stage.gc_inode_ptr = NULL;
                if (mode == SC_FLUSH_DAT) {
                        nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
                        goto dat_stage;
                }
                nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_GC:
                if (nilfs_doing_gc()) {
                        head = &sci->sc_gc_inodes;
                        ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
                                                head, i_dirty);
                        list_for_each_entry_continue(ii, head, i_dirty) {
                                err = nilfs_segctor_scan_file(
                                        sci, &ii->vfs_inode,
                                        &nilfs_sc_file_ops);
                                if (unlikely(err)) {
                                        sci->sc_stage.gc_inode_ptr = list_entry(
                                                ii->i_dirty.prev,
                                                struct nilfs_inode_info,
                                                i_dirty);
                                        goto break_or_fail;
                                }
                                set_bit(NILFS_I_COLLECTED, &ii->i_state);
                        }
                        sci->sc_stage.gc_inode_ptr = NULL;
                }
                nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_FILE:
                head = &sci->sc_dirty_files;
                ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
                                        i_dirty);
                list_for_each_entry_continue(ii, head, i_dirty) {
                        clear_bit(NILFS_I_DIRTY, &ii->i_state);

                        err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
                                                      &nilfs_sc_file_ops);
                        if (unlikely(err)) {
                                sci->sc_stage.dirty_file_ptr =
                                        list_entry(ii->i_dirty.prev,
                                                   struct nilfs_inode_info,
                                                   i_dirty);
                                goto break_or_fail;
                        }
                        /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
                        /* XXX: required ? */
                }
                sci->sc_stage.dirty_file_ptr = NULL;
                if (mode == SC_FLUSH_FILE) {
                        nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                        return 0;
                }
                nilfs_sc_cstage_inc(sci);
                sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
                /* Fall through */
        case NILFS_ST_IFILE:
                err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
                nilfs_sc_cstage_inc(sci);
                /* Creating a checkpoint */
                err = nilfs_segctor_create_checkpoint(sci);
                if (unlikely(err))
                        break;
                /* Fall through */
        case NILFS_ST_CPFILE:
                err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
                nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_SUFILE:
                err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
                                         sci->sc_nfreesegs, &ndone);
                if (unlikely(err)) {
                        nilfs_sufile_cancel_freev(nilfs->ns_sufile,
                                                  sci->sc_freesegs, ndone,
                                                  NULL);
                        break;
                }
                sci->sc_stage.flags |= NILFS_CF_SUFREED;

                err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
                nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_DAT:
 dat_stage:
                err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
                                              &nilfs_sc_dat_ops);
                if (unlikely(err))
                        break;
                if (mode == SC_FLUSH_DAT) {
                        nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                        return 0;
                }
                nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_SR:
                if (mode == SC_LSEG_SR) {
                        /* Appending a super root */
                        err = nilfs_segctor_add_super_root(sci);
                        if (unlikely(err))
                                break;
                }
                /* End of a logical segment */
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
                nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                return 0;
        case NILFS_ST_DSYNC:
 dsync_mode:
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
                ii = sci->sc_dsync_inode;
                if (!test_bit(NILFS_I_BUSY, &ii->i_state))
                        break;

                err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
                if (unlikely(err))
                        break;
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
                nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                return 0;
        case NILFS_ST_DONE:
                return 0;
        default:
                BUG();
        }

 break_or_fail:
        return err;
}

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
                                            struct the_nilfs *nilfs)
{
        struct nilfs_segment_buffer *segbuf, *prev;
        __u64 nextnum;
        int err, alloc = 0;

        segbuf = nilfs_segbuf_new(sci->sc_super);
        if (unlikely(!segbuf))
                return -ENOMEM;

        if (list_empty(&sci->sc_write_logs)) {
                nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
                                 nilfs->ns_pseg_offset, nilfs);
                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
                        nilfs_shift_to_next_segment(nilfs);
                        nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
                }

                segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
                nextnum = nilfs->ns_nextnum;

                if (nilfs->ns_segnum == nilfs->ns_nextnum)
                        /* Start from the head of a new full segment */
                        alloc++;
        } else {
                /* Continue logs */
                prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
                nilfs_segbuf_map_cont(segbuf, prev);
                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
                nextnum = prev->sb_nextnum;

                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
                        nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
                        segbuf->sb_sum.seg_seq++;
                        alloc++;
                }
        }

        err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
        if (err)
                goto failed;

        if (alloc) {
                err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
                if (err)
                        goto failed;
        }
        nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

        BUG_ON(!list_empty(&sci->sc_segbufs));
        list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
        sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
        return 0;

 failed:
        nilfs_segbuf_free(segbuf);
        return err;
}

1342static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
1343                                         struct the_nilfs *nilfs, int nadd)
1344{
1345        struct nilfs_segment_buffer *segbuf, *prev;
1346        struct inode *sufile = nilfs->ns_sufile;
1347        __u64 nextnextnum;
1348        LIST_HEAD(list);
1349        int err, ret, i;
1350
1351        prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
1352        /*
1353         * Since the segment specified with nextnum might be allocated during
1354         * the previous construction, the buffer including its segusage may
1355         * not be dirty.  The following call ensures that the buffer is dirty
1356         * and will pin the buffer on memory until the sufile is written.
1357         */
1358        err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
1359        if (unlikely(err))
1360                return err;
1361
1362        for (i = 0; i < nadd; i++) {
1363                /* extend segment info */
1364                err = -ENOMEM;
1365                segbuf = nilfs_segbuf_new(sci->sc_super);
1366                if (unlikely(!segbuf))
1367                        goto failed;
1368
1369                /* map this buffer to region of segment on-disk */
1370                nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1371                sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1372
1373                /* allocate the next next full segment */
1374                err = nilfs_sufile_alloc(sufile, &nextnextnum);
1375                if (unlikely(err))
1376                        goto failed_segbuf;
1377
1378                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
1379                nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
1380
1381                list_add_tail(&segbuf->sb_list, &list);
1382                prev = segbuf;
1383        }
1384        list_splice_tail(&list, &sci->sc_segbufs);
1385        return 0;
1386
1387 failed_segbuf:
1388        nilfs_segbuf_free(segbuf);
1389 failed:
1390        list_for_each_entry(segbuf, &list, sb_list) {
1391                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1392                WARN_ON(ret); /* never fails */
1393        }
1394        nilfs_destroy_logs(&list);
1395        return err;
1396}
1397
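    /**
     * nilfs_free_incomplete_logs - dispose of segments of unwritten logs
     * @logs: list of segment buffers whose write failed or was aborted
     * @nilfs: nilfs object
     *
     * Releases segments allocated for logs that did not make it to disk,
     * terminates a partial segment appended to an existing full segment,
     * and marks failed extended segments as erroneous in the sufile.
     */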
1398static void nilfs_free_incomplete_logs(struct list_head *logs,
1399                                       struct the_nilfs *nilfs)
1400{
1401        struct nilfs_segment_buffer *segbuf, *prev;
1402        struct inode *sufile = nilfs->ns_sufile;
1403        int ret;
1404
1405        segbuf = NILFS_FIRST_SEGBUF(logs);
1406        if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
1407                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1408                WARN_ON(ret); /* never fails */
1409        }
1410        if (atomic_read(&segbuf->sb_err)) {
1411                /* Case 1: The first segment failed */
1412                if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
1413                        /*
1414                         * Case 1a:  Partial segment appended into an existing
1415                         * segment
1416                         */
1417                        nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1418                                                segbuf->sb_fseg_end);
1419                else /* Case 1b:  New full segment */
1420                        set_nilfs_discontinued(nilfs);
1421        }
1422
1423        prev = segbuf;
1424        list_for_each_entry_continue(segbuf, logs, sb_list) {
1425                if (prev->sb_nextnum != segbuf->sb_nextnum) {
1426                        ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1427                        WARN_ON(ret); /* never fails */
1428                }
1429                if (atomic_read(&segbuf->sb_err) &&
1430                    segbuf->sb_segnum != nilfs->ns_nextnum)
1431                        /* Case 2: extended segment (!= next) failed */
1432                        nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1433                prev = segbuf;
1434        }
1435}
1436
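    /**
     * nilfs_segctor_update_segusage - record segment usage of written logs
     * @sci: segment constructor object
     * @sufile: inode of the segment usage file
     *
     * Updates the number of live blocks and the modification time of each
     * segment used by the current write chain.
     */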
1437static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1438                                          struct inode *sufile)
1439{
1440        struct nilfs_segment_buffer *segbuf;
1441        unsigned long live_blocks;
1442        int ret;
1443
1444        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1445                live_blocks = segbuf->sb_sum.nblocks +
1446                        (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
1447                ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1448                                                     live_blocks,
1449                                                     sci->sc_seg_ctime);
1450                WARN_ON(ret); /* always succeeds because the segusage is dirty */
1451        }
1452}
1453
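    /**
     * nilfs_cancel_segusage - revert segment usage updates of aborted logs
     * @logs: list of segment buffers whose write was aborted
     * @sufile: inode of the segment usage file
     *
     * Restores the usage of the first segment to the block count preceding
     * the aborted partial segment and clears the usage of the remaining
     * segments.
     */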
1454static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
1455{
1456        struct nilfs_segment_buffer *segbuf;
1457        int ret;
1458
1459        segbuf = NILFS_FIRST_SEGBUF(logs);
1460        ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1461                                             segbuf->sb_pseg_start -
1462                                             segbuf->sb_fseg_start, 0);
1463        WARN_ON(ret); /* always succeeds because the segusage is dirty */
1464
1465        list_for_each_entry_continue(segbuf, logs, sb_list) {
1466                ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1467                                                     0, 0);
1468                WARN_ON(ret); /* always succeeds */
1469        }
1470}
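    /**
     * nilfs_segctor_truncate_segments - discard unused trailing segment buffers
     * @sci: segment constructor object
     * @last: last segment buffer to keep
     * @sufile: inode of the segment usage file
     *
     * Frees the segment buffers following @last and returns their
     * preallocated next segments to the sufile.
     */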
1471
1472static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1473                                            struct nilfs_segment_buffer *last,
1474                                            struct inode *sufile)
1475{
1476        struct nilfs_segment_buffer *segbuf = last;
1477        int ret;
1478
1479        list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1480                sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1481                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1482                WARN_ON(ret);
1483        }
1484        nilfs_truncate_logs(&sci->sc_segbufs, last);
1485}
1486
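    /**
     * nilfs_segctor_collect - collect dirty blocks into segment buffers
     * @sci: segment constructor object
     * @nilfs: nilfs object
     * @mode: mode of log forming
     *
     * Runs the dirty block collection loop.  When the current chain of
     * segments fills up (-E2BIG), the chain is extended, doubling the
     * number of segments added per retry up to SC_MAX_SEGDELTA, and the
     * collection is restarted from the saved stage.
     */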
1488static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1489                                 struct the_nilfs *nilfs, int mode)
1490{
1491        struct nilfs_cstage prev_stage = sci->sc_stage;
1492        int err, nadd = 1;
1493
1494        /* Collection retry loop */
1495        for (;;) {
1496                sci->sc_nblk_this_inc = 0;
1497                sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1498
1499                err = nilfs_segctor_reset_segment_buffer(sci);
1500                if (unlikely(err))
1501                        goto failed;
1502
1503                err = nilfs_segctor_collect_blocks(sci, mode);
1504                sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1505                if (!err)
1506                        break;
1507
1508                if (unlikely(err != -E2BIG))
1509                        goto failed;
1510
1511                /* The current segment is filled up */
1512                if (mode != SC_LSEG_SR ||
1513                    nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
1514                        break;
1515
1516                nilfs_clear_logs(&sci->sc_segbufs);
1517
1518                if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1519                        err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1520                                                        sci->sc_freesegs,
1521                                                        sci->sc_nfreesegs,
1522                                                        NULL);
1523                        WARN_ON(err); /* does not happen */
1524                        sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
1525                }
1526
1527                err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1528                if (unlikely(err))
1529                        return err;
1530
1531                nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1532                sci->sc_stage = prev_stage;
1533        }
1534        nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1535        return 0;
1536
1537 failed:
1538        return err;
1539}
1540
1541static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1542                                      struct buffer_head *new_bh)
1543{
1544        BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1545
1546        list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1547        /* The caller must release old_bh */
1548}
1549
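    /**
     * nilfs_segctor_update_payload_blocknr - assign block addresses to payload
     * @sci: segment constructor object
     * @segbuf: segment buffer to process
     * @mode: mode of log forming
     *
     * Walks the payload buffers of @segbuf, assigns the on-disk block
     * address of each block through its bmap, and writes the resulting
     * binfo entries into the segment summary.
     */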
1550static int
1551nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1552                                     struct nilfs_segment_buffer *segbuf,
1553                                     int mode)
1554{
1555        struct inode *inode = NULL;
1556        sector_t blocknr;
1557        unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1558        unsigned long nblocks = 0, ndatablk = 0;
1559        const struct nilfs_sc_operations *sc_op = NULL;
1560        struct nilfs_segsum_pointer ssp;
1561        struct nilfs_finfo *finfo = NULL;
1562        union nilfs_binfo binfo;
1563        struct buffer_head *bh, *bh_org;
1564        ino_t ino = 0;
1565        int err = 0;
1566
1567        if (!nfinfo)
1568                goto out;
1569
1570        blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1571        ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1572        ssp.offset = sizeof(struct nilfs_segment_summary);
1573
1574        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1575                if (bh == segbuf->sb_super_root)
1576                        break;
1577                if (!finfo) {
1578                        finfo = nilfs_segctor_map_segsum_entry(
1579                                sci, &ssp, sizeof(*finfo));
1580                        ino = le64_to_cpu(finfo->fi_ino);
1581                        nblocks = le32_to_cpu(finfo->fi_nblocks);
1582                        ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1583
1584                        inode = bh->b_page->mapping->host;
1585
1586                        if (mode == SC_LSEG_DSYNC)
1587                                sc_op = &nilfs_sc_dsync_ops;
1588                        else if (ino == NILFS_DAT_INO)
1589                                sc_op = &nilfs_sc_dat_ops;
1590                        else /* file blocks */
1591                                sc_op = &nilfs_sc_file_ops;
1592                }
1593                bh_org = bh;
1594                get_bh(bh_org);
1595                err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1596                                        &binfo);
1597                if (bh != bh_org)
1598                        nilfs_list_replace_buffer(bh_org, bh);
1599                brelse(bh_org);
1600                if (unlikely(err))
1601                        goto failed_bmap;
1602
1603                if (ndatablk > 0)
1604                        sc_op->write_data_binfo(sci, &ssp, &binfo);
1605                else
1606                        sc_op->write_node_binfo(sci, &ssp, &binfo);
1607
1608                blocknr++;
1609                if (--nblocks == 0) {
1610                        finfo = NULL;
1611                        if (--nfinfo == 0)
1612                                break;
1613                } else if (ndatablk > 0)
1614                        ndatablk--;
1615        }
1616 out:
1617        return 0;
1618
1619 failed_bmap:
1620        return err;
1621}
1622
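    /**
     * nilfs_segctor_assign - finalize block addresses and segment summaries
     * @sci: segment constructor object
     * @mode: mode of log forming
     */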
1623static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1624{
1625        struct nilfs_segment_buffer *segbuf;
1626        int err;
1627
1628        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1629                err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1630                if (unlikely(err))
1631                        return err;
1632                nilfs_segbuf_fill_in_segsum(segbuf);
1633        }
1634        return 0;
1635}
1636
1637static void nilfs_begin_page_io(struct page *page)
1638{
1639        if (!page || PageWriteback(page))
1640                /*
1641                 * For split b-tree node pages, this function may be called
1642                 * twice.  The second and later calls are ignored by this check.
1643                 */
1644                return;
1645
1646        lock_page(page);
1647        clear_page_dirty_for_io(page);
1648        set_page_writeback(page);
1649        unlock_page(page);
1650}
1651
1652static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1653{
1654        struct nilfs_segment_buffer *segbuf;
1655        struct page *bd_page = NULL, *fs_page = NULL;
1656
1657        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1658                struct buffer_head *bh;
1659
1660                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1661                                    b_assoc_buffers) {
1662                        if (bh->b_page != bd_page) {
1663                                if (bd_page) {
1664                                        lock_page(bd_page);
1665                                        clear_page_dirty_for_io(bd_page);
1666                                        set_page_writeback(bd_page);
1667                                        unlock_page(bd_page);
1668                                }
1669                                bd_page = bh->b_page;
1670                        }
1671                }
1672
1673                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1674                                    b_assoc_buffers) {
1675                        set_buffer_async_write(bh);
1676                        if (bh == segbuf->sb_super_root) {
1677                                if (bd_page && bh->b_page != bd_page) {
1678                                        lock_page(bd_page);
1679                                        clear_page_dirty_for_io(bd_page);
1680                                        set_page_writeback(bd_page);
1681                                        unlock_page(bd_page);
1682                                }
1683                                bd_page = bh->b_page;
1684                                break;
1685                        }
1686                        if (bh->b_page != fs_page) {
1687                                nilfs_begin_page_io(fs_page);
1688                                fs_page = bh->b_page;
1689                        }
1690                }
1691        }
1692        if (bd_page) {
1693                lock_page(bd_page);
1694                clear_page_dirty_for_io(bd_page);
1695                set_page_writeback(bd_page);
1696                unlock_page(bd_page);
1697        }
1698        nilfs_begin_page_io(fs_page);
1699}
1700
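    /**
     * nilfs_segctor_write - submit logs and move them to the write queue
     * @sci: segment constructor object
     * @nilfs: nilfs object
     */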
1701static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1702                               struct the_nilfs *nilfs)
1703{
1704        int ret;
1705
1706        ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1707        list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1708        return ret;
1709}
1710
1711static void nilfs_end_page_io(struct page *page, int err)
1712{
1713        if (!page)
1714                return;
1715
1716        if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1717                /*
1718                 * For b-tree node pages, this function may be called two or
1719                 * more times because their buffers may be split across segments.
1720                 */
1721                if (PageDirty(page)) {
1722                        /*
1723                         * For pages holding split b-tree node buffers, dirty
1724                         * flag on the buffers may be cleared discretely.
1725                         * In that case, the page is once redirtied for
1726                         * remaining buffers, and it must be cancelled if
1727                         * all the buffers get cleaned later.
1728                         */
1729                        lock_page(page);
1730                        if (nilfs_page_buffers_clean(page))
1731                                __nilfs_clear_page_dirty(page);
1732                        unlock_page(page);
1733                }
1734                return;
1735        }
1736
1737        if (!err) {
1738                if (!nilfs_page_buffers_clean(page))
1739                        __set_page_dirty_nobuffers(page);
1740                ClearPageError(page);
1741        } else {
1742                __set_page_dirty_nobuffers(page);
1743                SetPageError(page);
1744        }
1745
1746        end_page_writeback(page);
1747}
1748
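    /**
     * nilfs_abort_logs - end page writeback of logs that failed to be written
     * @logs: list of segment buffers being aborted
     * @err: error code to propagate to page I/O completion
     */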
1749static void nilfs_abort_logs(struct list_head *logs, int err)
1750{
1751        struct nilfs_segment_buffer *segbuf;
1752        struct page *bd_page = NULL, *fs_page = NULL;
1753        struct buffer_head *bh;
1754
1755        if (list_empty(logs))
1756                return;
1757
1758        list_for_each_entry(segbuf, logs, sb_list) {
1759                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1760                                    b_assoc_buffers) {
1761                        if (bh->b_page != bd_page) {
1762                                if (bd_page)
1763                                        end_page_writeback(bd_page);
1764                                bd_page = bh->b_page;
1765                        }
1766                }
1767
1768                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1769                                    b_assoc_buffers) {
1770                        clear_buffer_async_write(bh);
1771                        if (bh == segbuf->sb_super_root) {
1772                                if (bd_page && bh->b_page != bd_page)
1773                                        end_page_writeback(bd_page);
1774                                bd_page = bh->b_page;
1775                                break;
1776                        }
1778                        if (bh->b_page != fs_page) {
1779                                nilfs_end_page_io(fs_page, err);
1780                                fs_page = bh->b_page;
1781                        }
1782                }
1783        }
1784        if (bd_page)
1785                end_page_writeback(bd_page);
1786
1787        nilfs_end_page_io(fs_page, err);
1788}
1789
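    /**
     * nilfs_segctor_abort_construction - abort an in-progress construction
     * @sci: segment constructor object
     * @nilfs: nilfs object
     * @err: error code to propagate
     *
     * Waits for in-flight log writes, aborts the remaining logs, reverts
     * the segment usage updates, and releases incompletely written
     * segments.
     */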
1790static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1791                                             struct the_nilfs *nilfs, int err)
1792{
1793        LIST_HEAD(logs);
1794        int ret;
1795
1796        list_splice_tail_init(&sci->sc_write_logs, &logs);
1797        ret = nilfs_wait_on_logs(&logs);
1798        nilfs_abort_logs(&logs, ret ? : err);
1799
1800        list_splice_tail_init(&sci->sc_segbufs, &logs);
1801        nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1802        nilfs_free_incomplete_logs(&logs, nilfs);
1803
1804        if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1805                ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1806                                                sci->sc_freesegs,
1807                                                sci->sc_nfreesegs,
1808                                                NULL);
1809                WARN_ON(ret); /* does not happen */
1810        }
1811
1812        nilfs_destroy_logs(&logs);
1813}
1814
1815static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1816                                   struct nilfs_segment_buffer *segbuf)
1817{
1818        nilfs->ns_segnum = segbuf->sb_segnum;
1819        nilfs->ns_nextnum = segbuf->sb_nextnum;
1820        nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1821                + segbuf->sb_sum.nblocks;
1822        nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1823        nilfs->ns_ctime = segbuf->sb_sum.ctime;
1824}
1825
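    /**
     * nilfs_segctor_complete_write - finalize state after successful log writes
     * @sci: segment constructor object
     *
     * Cleans up buffer and page states of the written logs, advances the
     * log cursor to the next write position, and, if a super root was
     * written, records the new last segment and clears the dirty flags.
     */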
1826static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1827{
1828        struct nilfs_segment_buffer *segbuf;
1829        struct page *bd_page = NULL, *fs_page = NULL;
1830        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1831        int update_sr = false;
1832
1833        list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
1834                struct buffer_head *bh;
1835
1836                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1837                                    b_assoc_buffers) {
1838                        set_buffer_uptodate(bh);
1839                        clear_buffer_dirty(bh);
1840                        if (bh->b_page != bd_page) {
1841                                if (bd_page)
1842                                        end_page_writeback(bd_page);
1843                                bd_page = bh->b_page;
1844                        }
1845                }
1846                /*
1847                 * We assume that buffers belonging to the same page are
1848                 * contiguous in the buffer list.
1849                 * Under this assumption, the last buffer of each page is
1850                 * identifiable by a discontinuity of bh->b_page
1851                 * (page != fs_page).
1852                 *
1853                 * For B-tree node blocks, however, this assumption is not
1854                 * guaranteed.  The cleanup code for B-tree node pages needs
1855                 * special care.
1856                 */
1857                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1858                                    b_assoc_buffers) {
1859                        const unsigned long set_bits = BIT(BH_Uptodate);
1860                        const unsigned long clear_bits =
1861                                (BIT(BH_Dirty) | BIT(BH_Async_Write) |
1862                                 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
1863                                 BIT(BH_NILFS_Redirected));
1864
1865                        set_mask_bits(&bh->b_state, clear_bits, set_bits);
1866                        if (bh == segbuf->sb_super_root) {
1867                                if (bd_page && bh->b_page != bd_page)
1868                                        end_page_writeback(bd_page);
1869                                bd_page = bh->b_page;
1870                                update_sr = true;
1871                                break;
1872                        }
1874                        if (bh->b_page != fs_page) {
1875                                nilfs_end_page_io(fs_page, 0);
1876                                fs_page = bh->b_page;
1877                        }
1878                }
1879
1880                if (!nilfs_segbuf_simplex(segbuf)) {
1881                        if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1882                                set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1883                                sci->sc_lseg_stime = jiffies;
1884                        }
1885                        if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1886                                clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1887                }
1888        }
1889        /*
1890         * Since pages may span multiple segment buffers, the end of the
1891         * last page must be checked outside the loop.
1892         */
1893        if (bd_page)
1894                end_page_writeback(bd_page);
1895
1896        nilfs_end_page_io(fs_page, 0);
1897
1898        nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1899
1900        if (nilfs_doing_gc())
1901                nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1902        else
1903                nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1904
1905        sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1906
1907        segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1908        nilfs_set_next_segment(nilfs, segbuf);
1909
1910        if (update_sr) {
1911                nilfs->ns_flushed_device = 0;
1912                nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
1913                                       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
1914
1915                clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1916                clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1917                set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1918                nilfs_segctor_clear_metadata_dirty(sci);
1919        } else
1920                clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1921}
1922
1923static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1924{
1925        int ret;
1926
1927        ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1928        if (!ret) {
1929                nilfs_segctor_complete_write(sci);
1930                nilfs_destroy_logs(&sci->sc_write_logs);
1931        }
1932        return ret;
1933}
1934
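    /**
     * nilfs_segctor_collect_dirty_files - gather dirty inodes for writing
     * @sci: segment constructor object
     * @nilfs: nilfs object
     *
     * Ensures that every dirty inode has its ifile inode block loaded and
     * marked dirty, then moves the inode from the ns_dirty_files list to
     * the constructor's sc_dirty_files list.
     */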
1935static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1936                                             struct the_nilfs *nilfs)
1937{
1938        struct nilfs_inode_info *ii, *n;
1939        struct inode *ifile = sci->sc_root->ifile;
1940
1941        spin_lock(&nilfs->ns_inode_lock);
1942 retry:
1943        list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
1944                if (!ii->i_bh) {
1945                        struct buffer_head *ibh;
1946                        int err;
1947
1948                        spin_unlock(&nilfs->ns_inode_lock);
1949                        err = nilfs_ifile_get_inode_block(
1950                                ifile, ii->vfs_inode.i_ino, &ibh);
1951                        if (unlikely(err)) {
1952                                nilfs_msg(sci->sc_super, KERN_WARNING,
1953                                          "log writer: error %d getting inode block (ino=%lu)",
1954                                          err, ii->vfs_inode.i_ino);
1955                                return err;
1956                        }
1957                        spin_lock(&nilfs->ns_inode_lock);
1958                        if (likely(!ii->i_bh))
1959                                ii->i_bh = ibh;
1960                        else
1961                                brelse(ibh);
1962                        goto retry;
1963                }
1964
1965                /* Always redirty the buffer to avoid a race condition */
1966                mark_buffer_dirty(ii->i_bh);
1967                nilfs_mdt_mark_dirty(ifile);
1968
1969                clear_bit(NILFS_I_QUEUED, &ii->i_state);
1970                set_bit(NILFS_I_BUSY, &ii->i_state);
1971                list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
1972        }
1973        spin_unlock(&nilfs->ns_inode_lock);
1974
1975        return 0;
1976}
1977
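    /**
     * nilfs_segctor_drop_written_files - detach inodes whose writeback finished
     * @sci: segment constructor object
     * @nilfs: nilfs object
     *
     * Releases written-out inodes from the sc_dirty_files list, deferring
     * iput() to a workqueue where a direct call could deadlock.
     */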
1978static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1979                                             struct the_nilfs *nilfs)
1980{
1981        struct nilfs_inode_info *ii, *n;
1982        int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
1983        int defer_iput = false;
1984
1985        spin_lock(&nilfs->ns_inode_lock);
1986        list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
1987                if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
1988                    test_bit(NILFS_I_DIRTY, &ii->i_state))
1989                        continue;
1990
1991                clear_bit(NILFS_I_BUSY, &ii->i_state);
1992                brelse(ii->i_bh);
1993                ii->i_bh = NULL;
1994                list_del_init(&ii->i_dirty);
1995                if (!ii->vfs_inode.i_nlink || during_mount) {
1996                        /*
1997                         * Defer calling iput() to avoid deadlocks if
1998                         * i_nlink == 0 or mount is not yet finished.
1999                         */
2000                        list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
2001                        defer_iput = true;
2002                } else {
2003                        spin_unlock(&nilfs->ns_inode_lock);
2004                        iput(&ii->vfs_inode);
2005                        spin_lock(&nilfs->ns_inode_lock);
2006                }
2007        }
2008        spin_unlock(&nilfs->ns_inode_lock);
2009
2010        if (defer_iput)
2011                schedule_work(&sci->sc_iput_work);
2012}
2013
2014/*
2015 * Main procedure of segment constructor
2016 */
2017static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2018{
2019        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2020        int err;
2021
2022        nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
2023        sci->sc_cno = nilfs->ns_cno;
2024
2025        err = nilfs_segctor_collect_dirty_files(sci, nilfs);
2026        if (unlikely(err))
2027                goto out;
2028
2029        if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
2030                set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2031
2032        if (nilfs_segctor_clean(sci))
2033                goto out;
2034
2035        do {
2036                sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2037
2038                err = nilfs_segctor_begin_construction(sci, nilfs);
2039                if (unlikely(err))
2040                        goto out;
2041
2042                /* Update time stamp */
2043                sci->sc_seg_ctime = ktime_get_real_seconds();
2044
2045                err = nilfs_segctor_collect(sci, nilfs, mode);
2046                if (unlikely(err))
2047                        goto failed;
2048
2049                /* Avoid empty segment */
2050                if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
2051                    nilfs_segbuf_empty(sci->sc_curseg)) {
2052                        nilfs_segctor_abort_construction(sci, nilfs, 1);
2053                        goto out;
2054                }
2055
2056                err = nilfs_segctor_assign(sci, mode);
2057                if (unlikely(err))
2058                        goto failed;
2059
2060                if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2061                        nilfs_segctor_fill_in_file_bmap(sci);
2062
2063                if (mode == SC_LSEG_SR &&
2064                    nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
2065                        err = nilfs_segctor_fill_in_checkpoint(sci);
2066                        if (unlikely(err))
2067                                goto failed_to_write;
2068
2069                        nilfs_segctor_fill_in_super_root(sci, nilfs);
2070                }
2071                nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2072
2073                /* Write partial segments */
2074                nilfs_segctor_prepare_write(sci);
2075
2076                nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2077                                            nilfs->ns_crc_seed);
2078
2079                err = nilfs_segctor_write(sci, nilfs);
2080                if (unlikely(err))
2081                        goto failed_to_write;
2082
2083                if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
2084                    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
2085                        /*
2086                         * At this point, we avoid double buffering
2087                         * for blocksize < pagesize because the page dirty
2088                         * flag is turned off during the write and dirty
2089                         * buffers are not properly collected for
2090                         * pages crossing segment boundaries.
2091                         */
2092                        err = nilfs_segctor_wait(sci);
2093                        if (err)
2094                                goto failed_to_write;
2095                }
2096        } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
2097
2098 out:
2099        nilfs_segctor_drop_written_files(sci, nilfs);
2100        return err;
2101
2102 failed_to_write:
2103        if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2104                nilfs_redirty_inodes(&sci->sc_dirty_files);
2105
2106 failed:
2107        if (nilfs_doing_gc())
2108                nilfs_redirty_inodes(&sci->sc_gc_inodes);
2109        nilfs_segctor_abort_construction(sci, nilfs, err);
2110        goto out;
2111}
2112
2113/**
2114 * nilfs_segctor_start_timer - set timer of background write
2115 * @sci: nilfs_sc_info
2116 *
2117 * If the timer has already been set, the new request is ignored.
2118 * This function MUST be called while holding the segment semaphore.
2120 */
2121static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2122{
2123        spin_lock(&sci->sc_state_lock);
2124        if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2125                sci->sc_timer.expires = jiffies + sci->sc_interval;
2126                add_timer(&sci->sc_timer);
2127                sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2128        }
2129        spin_unlock(&sci->sc_state_lock);
2130}
2131
2132static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2133{
2134        spin_lock(&sci->sc_state_lock);
2135        if (!(sci->sc_flush_request & BIT(bn))) {
2136                unsigned long prev_req = sci->sc_flush_request;
2137
2138                sci->sc_flush_request |= BIT(bn);
2139                if (!prev_req)
2140                        wake_up(&sci->sc_wait_daemon);
2141        }
2142        spin_unlock(&sci->sc_state_lock);
2143}
2144
2145/**
2146 * nilfs_flush_segment - trigger a segment construction for resource control
2147 * @sb: super block
2148 * @ino: inode number of the file to be flushed out.
2149 */
2150void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2151{
2152        struct the_nilfs *nilfs = sb->s_fs_info;
2153        struct nilfs_sc_info *sci = nilfs->ns_writer;
2154
2155        if (!sci || nilfs_doing_construction())
2156                return;
2157        nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2158                                        /* assign bit 0 to data files */
2159}
2160
2161struct nilfs_segctor_wait_request {
2162        wait_queue_entry_t      wq;
2163        __u32           seq;
2164        int             err;
2165        atomic_t        done;
2166};
2167
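    /**
     * nilfs_segctor_sync - queue a log-write request and wait for completion
     * @sci: segment constructor object
     *
     * Return Value: On success, 0 is returned; %-ERESTARTSYS is returned
     * if the wait was interrupted by a signal.
     */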
2168static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2169{
2170        struct nilfs_segctor_wait_request wait_req;
2171        int err = 0;
2172
2173        spin_lock(&sci->sc_state_lock);
2174        init_wait(&wait_req.wq);
2175        wait_req.err = 0;
2176        atomic_set(&wait_req.done, 0);
2177        wait_req.seq = ++sci->sc_seq_request;
2178        spin_unlock(&sci->sc_state_lock);
2179
2180        init_waitqueue_entry(&wait_req.wq, current);
2181        add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2182        set_current_state(TASK_INTERRUPTIBLE);
2183        wake_up(&sci->sc_wait_daemon);
2184
2185        for (;;) {
2186                if (atomic_read(&wait_req.done)) {
2187                        err = wait_req.err;
2188                        break;
2189                }
2190                if (!signal_pending(current)) {
2191                        schedule();
2192                        continue;
2193                }
2194                err = -ERESTARTSYS;
2195                break;
2196        }
2197        finish_wait(&sci->sc_wait_request, &wait_req.wq);
2198        return err;
2199}
2200
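    /**
     * nilfs_segctor_wakeup - wake up waiters whose requests have completed
     * @sci: segment constructor object
     * @err: error code to be passed to the waiters
     */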
2201static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2202{
2203        struct nilfs_segctor_wait_request *wrq, *n;
2204        unsigned long flags;
2205
2206        spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2207        list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
2208                if (!atomic_read(&wrq->done) &&
2209                    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2210                        wrq->err = err;
2211                        atomic_set(&wrq->done, 1);
2212                }
2213                if (atomic_read(&wrq->done)) {
2214                        wrq->wq.func(&wrq->wq,
2215                                     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2216                                     0, NULL);
2217                }
2218        }
2219        spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2220}
2221
2222/**
2223 * nilfs_construct_segment - construct a logical segment
2224 * @sb: super block
2225 *
2226 * Return Value: On success, 0 is returned. On error, one of the following
2227 * negative error codes is returned.
2228 *
2229 * %-EROFS - Read only filesystem.
2230 *
2231 * %-EIO - I/O error
2232 *
2233 * %-ENOSPC - No space left on device (only in a panic state).
2234 *
2235 * %-ERESTARTSYS - Interrupted.
2236 *
2237 * %-ENOMEM - Insufficient memory available.
2238 */
2239int nilfs_construct_segment(struct super_block *sb)
2240{
2241        struct the_nilfs *nilfs = sb->s_fs_info;
2242        struct nilfs_sc_info *sci = nilfs->ns_writer;
2243        struct nilfs_transaction_info *ti;
2244        int err;
2245
2246        if (!sci)
2247                return -EROFS;
2248
2249        /* A call inside transactions causes a deadlock. */
2250        BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2251
2252        err = nilfs_segctor_sync(sci);
2253        return err;
2254}
2255
2256/**
2257 * nilfs_construct_dsync_segment - construct a data-only logical segment
2258 * @sb: super block
2259 * @inode: inode whose data blocks should be written out
2260 * @start: start byte offset
2261 * @end: end byte offset (inclusive)
2262 *
2263 * Return Value: On success, 0 is returned. On error, one of the following
2264 * negative error codes is returned.
2265 *
2266 * %-EROFS - Read only filesystem.
2267 *
2268 * %-EIO - I/O error
2269 *
2270 * %-ENOSPC - No space left on device (only in a panic state).
2271 *
2272 * %-ERESTARTSYS - Interrupted.
2273 *
2274 * %-ENOMEM - Insufficient memory available.
2275 */
2276int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2277                                  loff_t start, loff_t end)
2278{
2279        struct the_nilfs *nilfs = sb->s_fs_info;
2280        struct nilfs_sc_info *sci = nilfs->ns_writer;
2281        struct nilfs_inode_info *ii;
2282        struct nilfs_transaction_info ti;
2283        int err = 0;
2284
2285        if (!sci)
2286                return -EROFS;
2287
2288        nilfs_transaction_lock(sb, &ti, 0);
2289
2290        ii = NILFS_I(inode);
2291        if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
2292            nilfs_test_opt(nilfs, STRICT_ORDER) ||
2293            test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2294            nilfs_discontinued(nilfs)) {
2295                nilfs_transaction_unlock(sb);
2296                err = nilfs_segctor_sync(sci);
2297                return err;
2298        }
2299
2300        spin_lock(&nilfs->ns_inode_lock);
2301        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2302            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2303                spin_unlock(&nilfs->ns_inode_lock);
2304                nilfs_transaction_unlock(sb);
2305                return 0;
2306        }
2307        spin_unlock(&nilfs->ns_inode_lock);
2308        sci->sc_dsync_inode = ii;
2309        sci->sc_dsync_start = start;
2310        sci->sc_dsync_end = end;
2311
2312        err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2313        if (!err)
2314                nilfs->ns_flushed_device = 0;
2315
2316        nilfs_transaction_unlock(sb);
2317        return err;
2318}
2319
2320#define FLUSH_FILE_BIT  (0x1) /* data file only */
2321#define FLUSH_DAT_BIT   BIT(NILFS_DAT_INO) /* DAT only */
2322
2323/**
2324 * nilfs_segctor_accept - record accepted sequence count of log-write requests
2325 * @sci: segment constructor object
2326 */
2327static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2328{
2329        spin_lock(&sci->sc_state_lock);
2330        sci->sc_seq_accepted = sci->sc_seq_request;
2331        spin_unlock(&sci->sc_state_lock);
2332        del_timer_sync(&sci->sc_timer);
2333}
2334
2335/**
2336 * nilfs_segctor_notify - notify the result of request to caller threads
2337 * @sci: segment constructor object
2338 * @mode: mode of log forming
2339 * @err: error code to be notified
2340 */
2341static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2342{
2343        /* Clear requests (even when the construction failed) */
2344        spin_lock(&sci->sc_state_lock);
2345
2346        if (mode == SC_LSEG_SR) {
2347                sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2348                sci->sc_seq_done = sci->sc_seq_accepted;
2349                nilfs_segctor_wakeup(sci, err);
2350                sci->sc_flush_request = 0;
2351        } else {
2352                if (mode == SC_FLUSH_FILE)
2353                        sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2354                else if (mode == SC_FLUSH_DAT)
2355                        sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2356
2357                /* re-enable timer if checkpoint creation was not done */
2358                if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2359                    time_before(jiffies, sci->sc_timer.expires))
2360                        add_timer(&sci->sc_timer);
2361        }
2362        spin_unlock(&sci->sc_state_lock);
2363}
2364
2365/**
2366 * nilfs_segctor_construct - form logs and write them to disk
2367 * @sci: segment constructor object
2368 * @mode: mode of log forming
2369 */
2370static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2371{
2372        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2373        struct nilfs_super_block **sbp;
2374        int err = 0;
2375
2376        nilfs_segctor_accept(sci);
2377
2378        if (nilfs_discontinued(nilfs))
2379                mode = SC_LSEG_SR;
2380        if (!nilfs_segctor_confirm(sci))
2381                err = nilfs_segctor_do_construct(sci, mode);
2382
2383        if (likely(!err)) {
2384                if (mode != SC_FLUSH_DAT)
2385                        atomic_set(&nilfs->ns_ndirtyblks, 0);
2386                if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2387                    nilfs_discontinued(nilfs)) {
2388                        down_write(&nilfs->ns_sem);
2389                        err = -EIO;
2390                        sbp = nilfs_prepare_super(sci->sc_super,
2391                                                  nilfs_sb_will_flip(nilfs));
2392                        if (likely(sbp)) {
2393                                nilfs_set_log_cursor(sbp[0], nilfs);
2394                                err = nilfs_commit_super(sci->sc_super,
2395                                                         NILFS_SB_COMMIT);
2396                        }
2397                        up_write(&nilfs->ns_sem);
2398                }
2399        }
2400
2401        nilfs_segctor_notify(sci, mode, err);
2402        return err;
2403}
2404
2405static void nilfs_construction_timeout(struct timer_list *t)
2406{
2407        struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
2408
2409        wake_up_process(sci->sc_timer_task);
2410}
2411
2412static void
2413nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2414{
2415        struct nilfs_inode_info *ii, *n;
2416
2417        list_for_each_entry_safe(ii, n, head, i_dirty) {
2418                if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2419                        continue;
2420                list_del_init(&ii->i_dirty);
2421                truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2422                nilfs_btnode_cache_clear(&ii->i_btnode_cache);
2423                iput(&ii->vfs_inode);
2424        }
2425}
2426
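    /**
     * nilfs_clean_segments - write logs for GC and free selected segments
     * @sb: super block
     * @argv: argument vector passed from the cleaner daemon through ioctl
     * @kbufs: array of kernel buffers holding the GC arguments
     *
     * Constructs logs that relocate the live blocks of the segments chosen
     * by the cleaner, retrying on failure, and then frees (and optionally
     * discards) those segments.
     */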
2427int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2428                         void **kbufs)
2429{
2430        struct the_nilfs *nilfs = sb->s_fs_info;
2431        struct nilfs_sc_info *sci = nilfs->ns_writer;
2432        struct nilfs_transaction_info ti;
2433        int err;
2434
2435        if (unlikely(!sci))
2436                return -EROFS;
2437
2438        nilfs_transaction_lock(sb, &ti, 1);
2439
2440        err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2441        if (unlikely(err))
2442                goto out_unlock;
2443
2444        err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2445        if (unlikely(err)) {
2446                nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
2447                goto out_unlock;
2448        }
2449
2450        sci->sc_freesegs = kbufs[4];
2451        sci->sc_nfreesegs = argv[4].v_nmembs;
2452        list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2453
2454        for (;;) {
2455                err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2456                nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2457
2458                if (likely(!err))
2459                        break;
2460
2461                nilfs_msg(sb, KERN_WARNING, "error %d cleaning segments", err);
2462                set_current_state(TASK_INTERRUPTIBLE);
2463                schedule_timeout(sci->sc_interval);
2464        }
2465        if (nilfs_test_opt(nilfs, DISCARD)) {
2466                int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2467                                                 sci->sc_nfreesegs);
2468                if (ret) {
2469                        nilfs_msg(sb, KERN_WARNING,
2470                                  "error %d on discard request, turning discards off for the device",
2471                                  ret);
2472                        nilfs_clear_opt(nilfs, DISCARD);
2473                }
2474        }
2475
2476 out_unlock:
2477        sci->sc_freesegs = NULL;
2478        sci->sc_nfreesegs = 0;
2479        nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2480        nilfs_transaction_unlock(sb);
2481        return err;
2482}
2483
2484static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2485{
2486        struct nilfs_transaction_info ti;
2487
2488        nilfs_transaction_lock(sci->sc_super, &ti, 0);
2489        nilfs_segctor_construct(sci, mode);
2490
2491        /*
2492         * An unclosed segment should be retried.  We do this using sc_timer.
2493         * When sc_timer expires, it invokes a complete construction, which
2494         * closes the current logical segment.
2495         */
2496        if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2497                nilfs_segctor_start_timer(sci);
2498
2499        nilfs_transaction_unlock(sci->sc_super);
2500}
2501
2502static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2503{
2504        int mode = 0;
2505
2506        spin_lock(&sci->sc_state_lock);
2507        mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2508                SC_FLUSH_DAT : SC_FLUSH_FILE;
2509        spin_unlock(&sci->sc_state_lock);
2510
2511        if (mode) {
2512                nilfs_segctor_do_construct(sci, mode);
2513
2514                spin_lock(&sci->sc_state_lock);
2515                sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2516                        ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2517                spin_unlock(&sci->sc_state_lock);
2518        }
2519        clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2520}
2521
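    /**
     * nilfs_segctor_flush_mode - determine the construction mode for a flush
     * @sci: segment constructor object
     *
     * Picks SC_FLUSH_FILE or SC_FLUSH_DAT for a simple flush while a full
     * checkpoint is not yet due; otherwise falls back to SC_LSEG_SR to
     * close the current logical segment.
     */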
2522static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2523{
2524        if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2525            time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2526                if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2527                        return SC_FLUSH_FILE;
2528                else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2529                        return SC_FLUSH_DAT;
2530        }
2531        return SC_LSEG_SR;
2532}
2533
2534/**
2535 * nilfs_segctor_thread - main loop of the segment constructor thread.
2536 * @arg: pointer to a struct nilfs_sc_info.
2537 *
2538 * nilfs_segctor_thread() initializes a timer and serves as a daemon
2539 * to execute segment constructions.
2540 */
2541static int nilfs_segctor_thread(void *arg)
2542{
2543        struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2544        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2545        int timeout = 0;
2546
2547        sci->sc_timer_task = current;
2548
2549        /* start sync. */
2550        sci->sc_task = current;
2551        wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2552        nilfs_msg(sci->sc_super, KERN_INFO,
2553                  "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
2554                  sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2555
2556        spin_lock(&sci->sc_state_lock);
2557 loop:
2558        for (;;) {
2559                int mode;
2560
2561                if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2562                        goto end_thread;
2563
2564                if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2565                        mode = SC_LSEG_SR;
2566                else if (sci->sc_flush_request)
2567                        mode = nilfs_segctor_flush_mode(sci);
2568                else
2569                        break;
2570
2571                spin_unlock(&sci->sc_state_lock);
2572                nilfs_segctor_thread_construct(sci, mode);
2573                spin_lock(&sci->sc_state_lock);
2574                timeout = 0;
2575        }
2576
2578        if (freezing(current)) {
2579                spin_unlock(&sci->sc_state_lock);
2580                try_to_freeze();
2581                spin_lock(&sci->sc_state_lock);
2582        } else {
2583                DEFINE_WAIT(wait);
2584                int should_sleep = 1;
2585
2586                prepare_to_wait(&sci->sc_wait_daemon, &wait,
2587                                TASK_INTERRUPTIBLE);
2588
2589                if (sci->sc_seq_request != sci->sc_seq_done)
2590                        should_sleep = 0;
2591                else if (sci->sc_flush_request)
2592                        should_sleep = 0;
2593                else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2594                        should_sleep = time_before(jiffies,
2595                                        sci->sc_timer.expires);
2596
2597                if (should_sleep) {
2598                        spin_unlock(&sci->sc_state_lock);
2599                        schedule();
2600                        spin_lock(&sci->sc_state_lock);
2601                }
2602                finish_wait(&sci->sc_wait_daemon, &wait);
2603                timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2604                           time_after_eq(jiffies, sci->sc_timer.expires));
2605
2606                if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2607                        set_nilfs_discontinued(nilfs);
2608        }
2609        goto loop;
2610
2611 end_thread:
2612        spin_unlock(&sci->sc_state_lock);
2613
2614        /* end sync. */
2615        sci->sc_task = NULL;
2616        wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2617        return 0;
2618}
2619
2620static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2621{
2622        struct task_struct *t;
2623
2624        t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2625        if (IS_ERR(t)) {
2626                int err = PTR_ERR(t);
2627
2628                nilfs_msg(sci->sc_super, KERN_ERR,
2629                          "error %d creating segctord thread", err);
2630                return err;
2631        }
2632        wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2633        return 0;
2634}
2635
2636static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2637        __acquires(&sci->sc_state_lock)
2638        __releases(&sci->sc_state_lock)
2639{
2640        sci->sc_state |= NILFS_SEGCTOR_QUIT;
2641
2642        while (sci->sc_task) {
2643                wake_up(&sci->sc_wait_daemon);
2644                spin_unlock(&sci->sc_state_lock);
2645                wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2646                spin_lock(&sci->sc_state_lock);
2647        }
2648}
2649
2650/*
2651 * Setup & clean-up functions
2652 */
2653static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2654                                               struct nilfs_root *root)
2655{
2656        struct the_nilfs *nilfs = sb->s_fs_info;
2657        struct nilfs_sc_info *sci;
2658
2659        sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2660        if (!sci)
2661                return NULL;
2662
2663        sci->sc_super = sb;
2664
2665        nilfs_get_root(root);
2666        sci->sc_root = root;
2667
2668        init_waitqueue_head(&sci->sc_wait_request);
2669        init_waitqueue_head(&sci->sc_wait_daemon);
2670        init_waitqueue_head(&sci->sc_wait_task);
2671        spin_lock_init(&sci->sc_state_lock);
2672        INIT_LIST_HEAD(&sci->sc_dirty_files);
2673        INIT_LIST_HEAD(&sci->sc_segbufs);
2674        INIT_LIST_HEAD(&sci->sc_write_logs);
2675        INIT_LIST_HEAD(&sci->sc_gc_inodes);
2676        INIT_LIST_HEAD(&sci->sc_iput_queue);
2677        INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
2678        timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
2679
2680        sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2681        sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2682        sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2683
2684        if (nilfs->ns_interval)
2685                sci->sc_interval = HZ * nilfs->ns_interval;
2686        if (nilfs->ns_watermark)
2687                sci->sc_watermark = nilfs->ns_watermark;
2688        return sci;
2689}
2690
2691static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2692{
2693        int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2694
2695        /*
2696         * The segctord thread was stopped and its timer was removed,
2697         * but some pending tasks may remain.
2698         */
2699        do {
2700                struct nilfs_transaction_info ti;
2701
2702                nilfs_transaction_lock(sci->sc_super, &ti, 0);
2703                ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2704                nilfs_transaction_unlock(sci->sc_super);
2705
2706                flush_work(&sci->sc_iput_work);
2707
2708        } while (ret && retrycount-- > 0);
2709}
2710
2711/**
2712 * nilfs_segctor_destroy - destroy the segment constructor.
2713 * @sci: nilfs_sc_info
2714 *
2715 * nilfs_segctor_destroy() kills the segctord thread and frees
2716 * the nilfs_sc_info struct.
2717 * Caller must hold the segment semaphore.
2718 */
2719static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2720{
2721        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2722        int flag;
2723
2724        up_write(&nilfs->ns_segctor_sem);
2725
2726        spin_lock(&sci->sc_state_lock);
2727        nilfs_segctor_kill_thread(sci);
2728        flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2729                || sci->sc_seq_request != sci->sc_seq_done);
2730        spin_unlock(&sci->sc_state_lock);
2731
2732        if (flush_work(&sci->sc_iput_work))
2733                flag = true;
2734
2735        if (flag || !nilfs_segctor_confirm(sci))
2736                nilfs_segctor_write_out(sci);
2737
2738        if (!list_empty(&sci->sc_dirty_files)) {
2739                nilfs_msg(sci->sc_super, KERN_WARNING,
2740                          "disposed unprocessed dirty file(s) when stopping log writer");
2741                nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2742        }
2743
2744        if (!list_empty(&sci->sc_iput_queue)) {
2745                nilfs_msg(sci->sc_super, KERN_WARNING,
2746                          "disposed unprocessed inode(s) in iput queue when stopping log writer");
2747                nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
2748        }
2749
2750        WARN_ON(!list_empty(&sci->sc_segbufs));
2751        WARN_ON(!list_empty(&sci->sc_write_logs));
2752
2753        nilfs_put_root(sci->sc_root);
2754
2755        down_write(&nilfs->ns_segctor_sem);
2756
2757        del_timer_sync(&sci->sc_timer);
2758        kfree(sci);
2759}
2760
2761/**
2762 * nilfs_attach_log_writer - attach log writer
2763 * @sb: super block instance
2764 * @root: root object of the current filesystem tree
2765 *
2766 * This allocates a log writer object, initializes it, and starts the
2767 * log writer.
2768 *
2769 * Return Value: On success, 0 is returned. On error, one of the following
2770 * negative error codes is returned.
2771 *
2772 * %-ENOMEM - Insufficient memory available.
2773 */
2774int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
2775{
2776        struct the_nilfs *nilfs = sb->s_fs_info;
2777        int err;
2778
2779        if (nilfs->ns_writer) {
2780                /*
2781                 * This happens if the filesystem was remounted
2782                 * read/write after nilfs_error degenerated it into a
2783                 * read-only mount.
2784                 */
2785                nilfs_detach_log_writer(sb);
2786        }
2787
2788        nilfs->ns_writer = nilfs_segctor_new(sb, root);
2789        if (!nilfs->ns_writer)
2790                return -ENOMEM;
2791
2792        err = nilfs_segctor_start_thread(nilfs->ns_writer);
2793        if (err) {
2794                kfree(nilfs->ns_writer);
2795                nilfs->ns_writer = NULL;
2796        }
2797        return err;
2798}
2799
2800/**
2801 * nilfs_detach_log_writer - destroy log writer
2802 * @sb: super block instance
2803 *
2804 * This kills the log writer daemon, frees the log writer object, and
2805 * disposes of the list of dirty files.
2806 */
2807void nilfs_detach_log_writer(struct super_block *sb)
2808{
2809        struct the_nilfs *nilfs = sb->s_fs_info;
2810        LIST_HEAD(garbage_list);
2811
2812        down_write(&nilfs->ns_segctor_sem);
2813        if (nilfs->ns_writer) {
2814                nilfs_segctor_destroy(nilfs->ns_writer);
2815                nilfs->ns_writer = NULL;
2816        }
2817
2818        /* Forcibly dispose of the list of dirty files */
2819        spin_lock(&nilfs->ns_inode_lock);
2820        if (!list_empty(&nilfs->ns_dirty_files)) {
2821                list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
2822                nilfs_msg(sb, KERN_WARNING,
2823                          "disposed unprocessed dirty file(s) when detaching log writer");
2824        }
2825        spin_unlock(&nilfs->ns_inode_lock);
2826        up_write(&nilfs->ns_segctor_sem);
2827
2828        nilfs_dispose_list(nilfs, &garbage_list, 1);
2829}
2830