linux/fs/nilfs2/inode.c
   1/*
   2 * inode.c - NILFS inode operations.
   3 *
   4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * Written by Ryusuke Konishi.
  17 *
  18 */
  19
  20#include <linux/buffer_head.h>
  21#include <linux/gfp.h>
  22#include <linux/mpage.h>
  23#include <linux/pagemap.h>
  24#include <linux/writeback.h>
  25#include <linux/uio.h>
  26#include "nilfs.h"
  27#include "btnode.h"
  28#include "segment.h"
  29#include "page.h"
  30#include "mdt.h"
  31#include "cpfile.h"
  32#include "ifile.h"
  33
  34/**
  35 * struct nilfs_iget_args - arguments used during comparison between inodes
  36 * @ino: inode number
  37 * @cno: checkpoint number
  38 * @root: pointer to NILFS root object (mounted checkpoint)
  39 * @for_gc: inode for GC flag
  40 */
  41struct nilfs_iget_args {
  42        u64 ino;
  43        __u64 cno;
  44        struct nilfs_root *root;
  45        int for_gc;
  46};
  47
  48static int nilfs_iget_test(struct inode *inode, void *opaque);
  49
  50void nilfs_inode_add_blocks(struct inode *inode, int n)
  51{
  52        struct nilfs_root *root = NILFS_I(inode)->i_root;
  53
  54        inode_add_bytes(inode, i_blocksize(inode) * n);
  55        if (root)
  56                atomic64_add(n, &root->blocks_count);
  57}
  58
  59void nilfs_inode_sub_blocks(struct inode *inode, int n)
  60{
  61        struct nilfs_root *root = NILFS_I(inode)->i_root;
  62
  63        inode_sub_bytes(inode, i_blocksize(inode) * n);
  64        if (root)
  65                atomic64_sub(n, &root->blocks_count);
  66}
  67
  68/**
  69 * nilfs_get_block() - get a file block on the filesystem (callback function)
  70 * @inode: inode struct of the target file
  71 * @blkoff: file block number
  72 * @bh_result: buffer head to be mapped on
  73 * @create: indicate whether to allocate the block if it has not been
  74 *      allocated yet.
  75 *
  76 * This function does not issue the actual read request for the specified
  77 * data block; that is done by the VFS.
  78 */
  79int nilfs_get_block(struct inode *inode, sector_t blkoff,
  80                    struct buffer_head *bh_result, int create)
  81{
  82        struct nilfs_inode_info *ii = NILFS_I(inode);
  83        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
  84        __u64 blknum = 0;
  85        int err = 0, ret;
  86        unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;
  87
  88        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
  89        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
  90        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
  91        if (ret >= 0) { /* found */
  92                map_bh(bh_result, inode->i_sb, blknum);
  93                if (ret > 0)
  94                        bh_result->b_size = (ret << inode->i_blkbits);
  95                goto out;
  96        }
  97        /* data block was not found */
  98        if (ret == -ENOENT && create) {
  99                struct nilfs_transaction_info ti;
 100
 101                bh_result->b_blocknr = 0;
 102                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
 103                if (unlikely(err))
 104                        goto out;
 105                err = nilfs_bmap_insert(ii->i_bmap, blkoff,
 106                                        (unsigned long)bh_result);
 107                if (unlikely(err != 0)) {
 108                        if (err == -EEXIST) {
 109                                /*
 110                                 * The get_block() function could be called
 111                                 * from multiple callers for an inode.
 112                                 * However, the page containing this block
 113                                 * must be locked in that case.
 114                                 */
 115                                nilfs_msg(inode->i_sb, KERN_WARNING,
 116                                          "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
 117                                          __func__, inode->i_ino,
 118                                          (unsigned long long)blkoff);
 119                                err = 0;
 120                        }
 121                        nilfs_transaction_abort(inode->i_sb);
 122                        goto out;
 123                }
 124                nilfs_mark_inode_dirty_sync(inode);
 125                nilfs_transaction_commit(inode->i_sb); /* never fails */
 126                /* Error handling should be detailed */
 127                set_buffer_new(bh_result);
 128                set_buffer_delay(bh_result);
 129                map_bh(bh_result, inode->i_sb, 0);
 130                /* Disk block number must be changed to proper value */
 131
 132        } else if (ret == -ENOENT) {
 133                /*
 134                 * Not found is not an error (e.g. a hole); we must return
 135                 * without the mapped state flag.
 136                 */
 137                ;
 138        } else {
 139                err = ret;
 140        }
 141
 142 out:
 143        return err;
 144}
 145
 146/**
 147 * nilfs_readpage() - implement the readpage() method of nilfs_aops
 148 * address_space_operations.
 149 * @file: file struct of the file to be read
 150 * @page: the page to be read
 151 */
 152static int nilfs_readpage(struct file *file, struct page *page)
 153{
 154        return mpage_readpage(page, nilfs_get_block);
 155}
 156
 157/**
 158 * nilfs_readpages() - implement the readpages() method of nilfs_aops
 159 * address_space_operations.
 160 * @file: file struct of the file to be read
 161 * @mapping: address_space struct used for reading multiple pages
 162 * @pages: the pages to be read
 163 * @nr_pages: number of pages to be read
 164 */
 165static int nilfs_readpages(struct file *file, struct address_space *mapping,
 166                           struct list_head *pages, unsigned int nr_pages)
 167{
 168        return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
 169}
 170
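/*
 * nilfs_writepages() - writepages() method of nilfs_aops.
 *
 * Dirty data is normally written out by the log writer, so this only
 * forces a data-sync segment construction for the requested range when
 * the caller uses WB_SYNC_ALL; otherwise it returns without doing
 * anything.  On a filesystem that has been remounted read-only the
 * dirty pages are discarded and -EROFS is returned.
 */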
 171static int nilfs_writepages(struct address_space *mapping,
 172                            struct writeback_control *wbc)
 173{
 174        struct inode *inode = mapping->host;
 175        int err = 0;
 176
 177        if (sb_rdonly(inode->i_sb)) {
 178                nilfs_clear_dirty_pages(mapping, false);
 179                return -EROFS;
 180        }
 181
 182        if (wbc->sync_mode == WB_SYNC_ALL)
 183                err = nilfs_construct_dsync_segment(inode->i_sb, inode,
 184                                                    wbc->range_start,
 185                                                    wbc->range_end);
 186        return err;
 187}
 188
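/*
 * nilfs_writepage() - writepage() method of nilfs_aops.
 *
 * Individual pages are never written out here; the page is redirtied
 * and unlocked, and the actual writeout is left to the log writer.
 * For WB_SYNC_ALL a segment construction is forced, and for writeback
 * triggered by memory reclaim the segment daemon is asked to flush the
 * inode via nilfs_flush_segment().
 */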
 189static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
 190{
 191        struct inode *inode = page->mapping->host;
 192        int err;
 193
 194        if (sb_rdonly(inode->i_sb)) {
 195                /*
 196                 * This means the filesystem was remounted read-only
 197                 * because of an error or metadata corruption, but we
 198                 * still have dirty pages being flushed in the background.
 199                 * So, here we simply discard this dirty page.
 200                 */
 201                nilfs_clear_dirty_page(page, false);
 202                unlock_page(page);
 203                return -EROFS;
 204        }
 205
 206        redirty_page_for_writepage(wbc, page);
 207        unlock_page(page);
 208
 209        if (wbc->sync_mode == WB_SYNC_ALL) {
 210                err = nilfs_construct_segment(inode->i_sb);
 211                if (unlikely(err))
 212                        return err;
 213        } else if (wbc->for_reclaim)
 214                nilfs_flush_segment(inode->i_sb, inode->i_ino);
 215
 216        return 0;
 217}
 218
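/*
 * nilfs_set_page_dirty() - set_page_dirty() method of nilfs_aops.
 *
 * Besides marking the page dirty with __set_page_dirty_nobuffers(),
 * any clean, mapped buffers on the page are marked dirty (hole blocks
 * are skipped), and the number of newly dirtied blocks is accounted to
 * the inode through nilfs_set_file_dirty() so that it gets queued for
 * the next segment construction.
 */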
 219static int nilfs_set_page_dirty(struct page *page)
 220{
 221        struct inode *inode = page->mapping->host;
 222        int ret = __set_page_dirty_nobuffers(page);
 223
 224        if (page_has_buffers(page)) {
 225                unsigned int nr_dirty = 0;
 226                struct buffer_head *bh, *head;
 227
 228                /*
 229                 * The page is locked by the caller, and no other thread
 230                 * concurrently marks its buffers dirty, since buffers are
 231                 * only dirtied through routines in fs/buffer.c whose
 232                 * call sites of mark_buffer_dirty() are protected by
 233                 * the page lock.
 234                 */
 235                bh = head = page_buffers(page);
 236                do {
 237                        /* Do not mark hole blocks dirty */
 238                        if (buffer_dirty(bh) || !buffer_mapped(bh))
 239                                continue;
 240
 241                        set_buffer_dirty(bh);
 242                        nr_dirty++;
 243                } while (bh = bh->b_this_page, bh != head);
 244
 245                if (nr_dirty)
 246                        nilfs_set_file_dirty(inode, nr_dirty);
 247        } else if (ret) {
 248                unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
 249
 250                nilfs_set_file_dirty(inode, nr_dirty);
 251        }
 252        return ret;
 253}
 254
 255void nilfs_write_failed(struct address_space *mapping, loff_t to)
 256{
 257        struct inode *inode = mapping->host;
 258
 259        if (to > inode->i_size) {
 260                truncate_pagecache(inode, inode->i_size);
 261                nilfs_truncate(inode);
 262        }
 263}
 264
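/*
 * nilfs_write_begin()/nilfs_write_end() bracket each buffered write in
 * a NILFS transaction: write_begin opens the transaction and prepares
 * the page with block_write_begin(), and write_end counts the buffers
 * newly dirtied by the copy, completes the write with
 * generic_write_end(), and commits the transaction.  If write_begin
 * fails, the transaction is aborted and blocks instantiated beyond
 * i_size are trimmed by nilfs_write_failed().
 */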
 265static int nilfs_write_begin(struct file *file, struct address_space *mapping,
 266                             loff_t pos, unsigned len, unsigned flags,
 267                             struct page **pagep, void **fsdata)
 268
 269{
 270        struct inode *inode = mapping->host;
 271        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
 272
 273        if (unlikely(err))
 274                return err;
 275
 276        err = block_write_begin(mapping, pos, len, flags, pagep,
 277                                nilfs_get_block);
 278        if (unlikely(err)) {
 279                nilfs_write_failed(mapping, pos + len);
 280                nilfs_transaction_abort(inode->i_sb);
 281        }
 282        return err;
 283}
 284
 285static int nilfs_write_end(struct file *file, struct address_space *mapping,
 286                           loff_t pos, unsigned len, unsigned copied,
 287                           struct page *page, void *fsdata)
 288{
 289        struct inode *inode = mapping->host;
 290        unsigned int start = pos & (PAGE_SIZE - 1);
 291        unsigned int nr_dirty;
 292        int err;
 293
 294        nr_dirty = nilfs_page_count_clean_buffers(page, start,
 295                                                  start + copied);
 296        copied = generic_write_end(file, mapping, pos, len, copied, page,
 297                                   fsdata);
 298        nilfs_set_file_dirty(inode, nr_dirty);
 299        err = nilfs_transaction_commit(inode->i_sb);
 300        return err ? : copied;
 301}
 302
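/*
 * Direct I/O is only supported for reads.  Returning 0 for a write
 * makes the generic write path fall back to buffered writing, which is
 * then handled by the log writer as usual.
 */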
 303static ssize_t
 304nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 305{
 306        struct inode *inode = file_inode(iocb->ki_filp);
 307
 308        if (iov_iter_rw(iter) == WRITE)
 309                return 0;
 310
 311        /* Needs synchronization with the cleaner */
 312        return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
 313}
 314
 315const struct address_space_operations nilfs_aops = {
 316        .writepage              = nilfs_writepage,
 317        .readpage               = nilfs_readpage,
 318        .writepages             = nilfs_writepages,
 319        .set_page_dirty         = nilfs_set_page_dirty,
 320        .readpages              = nilfs_readpages,
 321        .write_begin            = nilfs_write_begin,
 322        .write_end              = nilfs_write_end,
 323        /* .releasepage         = nilfs_releasepage, */
 324        .invalidatepage         = block_invalidatepage,
 325        .direct_IO              = nilfs_direct_IO,
 326        .is_partially_uptodate  = block_is_partially_uptodate,
 327};
 328
 329static int nilfs_insert_inode_locked(struct inode *inode,
 330                                     struct nilfs_root *root,
 331                                     unsigned long ino)
 332{
 333        struct nilfs_iget_args args = {
 334                .ino = ino, .root = root, .cno = 0, .for_gc = 0
 335        };
 336
 337        return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
 338}
 339
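/*
 * nilfs_new_inode() - create a new inode in the directory @dir.
 *
 * A fresh inode number and its block in the ifile are allocated with
 * nilfs_ifile_create_inode(); the new inode then inherits the
 * inheritable flags of @dir, receives the next generation number, and
 * is inserted into the inode hash while still locked (I_NEW), so the
 * caller is responsible for unlocking it once the remaining setup is
 * done.  On failure, a partially created on-disk inode is disposed of
 * through nilfs_evict_inode().
 */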
 340struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 341{
 342        struct super_block *sb = dir->i_sb;
 343        struct the_nilfs *nilfs = sb->s_fs_info;
 344        struct inode *inode;
 345        struct nilfs_inode_info *ii;
 346        struct nilfs_root *root;
 347        int err = -ENOMEM;
 348        ino_t ino;
 349
 350        inode = new_inode(sb);
 351        if (unlikely(!inode))
 352                goto failed;
 353
 354        mapping_set_gfp_mask(inode->i_mapping,
 355                           mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 356
 357        root = NILFS_I(dir)->i_root;
 358        ii = NILFS_I(inode);
 359        ii->i_state = BIT(NILFS_I_NEW);
 360        ii->i_root = root;
 361
 362        err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
 363        if (unlikely(err))
 364                goto failed_ifile_create_inode;
 365        /* the reference count of i_bh is inherited from nilfs_mdt_read_block() */
 366
 367        atomic64_inc(&root->inodes_count);
 368        inode_init_owner(inode, dir, mode);
 369        inode->i_ino = ino;
 370        inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 371
 372        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
 373                err = nilfs_bmap_read(ii->i_bmap, NULL);
 374                if (err < 0)
 375                        goto failed_after_creation;
 376
 377                set_bit(NILFS_I_BMAP, &ii->i_state);
 378                /* No lock is needed; iget() ensures it. */
 379        }
 380
 381        ii->i_flags = nilfs_mask_flags(
 382                mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);
 383
 384        /* ii->i_file_acl = 0; */
 385        /* ii->i_dir_acl = 0; */
 386        ii->i_dir_start_lookup = 0;
 387        nilfs_set_inode_flags(inode);
 388        spin_lock(&nilfs->ns_next_gen_lock);
 389        inode->i_generation = nilfs->ns_next_generation++;
 390        spin_unlock(&nilfs->ns_next_gen_lock);
 391        if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
 392                err = -EIO;
 393                goto failed_after_creation;
 394        }
 395
 396        err = nilfs_init_acl(inode, dir);
 397        if (unlikely(err))
 398                /*
 399                 * Never occurs.  When supporting nilfs_init_acl(), proper
 400                 * cancellation of the above jobs should be considered.
 401                 */
 402                goto failed_after_creation;
 403
 404        return inode;
 405
 406 failed_after_creation:
 407        clear_nlink(inode);
 408        unlock_new_inode(inode);
 409        iput(inode);  /*
 410                       * raw_inode will be deleted through
 411                       * nilfs_evict_inode().
 412                       */
 413        goto failed;
 414
 415 failed_ifile_create_inode:
 416        make_bad_inode(inode);
 417        iput(inode);
 418 failed:
 419        return ERR_PTR(err);
 420}
 421
 422void nilfs_set_inode_flags(struct inode *inode)
 423{
 424        unsigned int flags = NILFS_I(inode)->i_flags;
 425        unsigned int new_fl = 0;
 426
 427        if (flags & FS_SYNC_FL)
 428                new_fl |= S_SYNC;
 429        if (flags & FS_APPEND_FL)
 430                new_fl |= S_APPEND;
 431        if (flags & FS_IMMUTABLE_FL)
 432                new_fl |= S_IMMUTABLE;
 433        if (flags & FS_NOATIME_FL)
 434                new_fl |= S_NOATIME;
 435        if (flags & FS_DIRSYNC_FL)
 436                new_fl |= S_DIRSYNC;
 437        inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
 438                        S_NOATIME | S_DIRSYNC);
 439}
 440
 441int nilfs_read_inode_common(struct inode *inode,
 442                            struct nilfs_inode *raw_inode)
 443{
 444        struct nilfs_inode_info *ii = NILFS_I(inode);
 445        int err;
 446
 447        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
 448        i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
 449        i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
 450        set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 451        inode->i_size = le64_to_cpu(raw_inode->i_size);
 452        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
 453        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
 454        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
 455        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 456        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
 457        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 458        if (inode->i_nlink == 0)
 459                return -ESTALE; /* this inode is deleted */
 460
 461        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
 462        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
 463#if 0
 464        ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
 465        ii->i_dir_acl = S_ISREG(inode->i_mode) ?
 466                0 : le32_to_cpu(raw_inode->i_dir_acl);
 467#endif
 468        ii->i_dir_start_lookup = 0;
 469        inode->i_generation = le32_to_cpu(raw_inode->i_generation);
 470
 471        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 472            S_ISLNK(inode->i_mode)) {
 473                err = nilfs_bmap_read(ii->i_bmap, raw_inode);
 474                if (err < 0)
 475                        return err;
 476                set_bit(NILFS_I_BMAP, &ii->i_state);
 477                /* No lock is needed; iget() ensures it. */
 478        }
 479        return 0;
 480}
 481
 482static int __nilfs_read_inode(struct super_block *sb,
 483                              struct nilfs_root *root, unsigned long ino,
 484                              struct inode *inode)
 485{
 486        struct the_nilfs *nilfs = sb->s_fs_info;
 487        struct buffer_head *bh;
 488        struct nilfs_inode *raw_inode;
 489        int err;
 490
 491        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 492        err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
 493        if (unlikely(err))
 494                goto bad_inode;
 495
 496        raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);
 497
 498        err = nilfs_read_inode_common(inode, raw_inode);
 499        if (err)
 500                goto failed_unmap;
 501
 502        if (S_ISREG(inode->i_mode)) {
 503                inode->i_op = &nilfs_file_inode_operations;
 504                inode->i_fop = &nilfs_file_operations;
 505                inode->i_mapping->a_ops = &nilfs_aops;
 506        } else if (S_ISDIR(inode->i_mode)) {
 507                inode->i_op = &nilfs_dir_inode_operations;
 508                inode->i_fop = &nilfs_dir_operations;
 509                inode->i_mapping->a_ops = &nilfs_aops;
 510        } else if (S_ISLNK(inode->i_mode)) {
 511                inode->i_op = &nilfs_symlink_inode_operations;
 512                inode_nohighmem(inode);
 513                inode->i_mapping->a_ops = &nilfs_aops;
 514        } else {
 515                inode->i_op = &nilfs_special_inode_operations;
 516                init_special_inode(
 517                        inode, inode->i_mode,
 518                        huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
 519        }
 520        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 521        brelse(bh);
 522        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 523        nilfs_set_inode_flags(inode);
 524        mapping_set_gfp_mask(inode->i_mapping,
 525                           mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 526        return 0;
 527
 528 failed_unmap:
 529        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 530        brelse(bh);
 531
 532 bad_inode:
 533        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 534        return err;
 535}
 536
 537static int nilfs_iget_test(struct inode *inode, void *opaque)
 538{
 539        struct nilfs_iget_args *args = opaque;
 540        struct nilfs_inode_info *ii;
 541
 542        if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
 543                return 0;
 544
 545        ii = NILFS_I(inode);
 546        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
 547                return !args->for_gc;
 548
 549        return args->for_gc && args->cno == ii->i_cno;
 550}
 551
 552static int nilfs_iget_set(struct inode *inode, void *opaque)
 553{
 554        struct nilfs_iget_args *args = opaque;
 555
 556        inode->i_ino = args->ino;
 557        if (args->for_gc) {
 558                NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
 559                NILFS_I(inode)->i_cno = args->cno;
 560                NILFS_I(inode)->i_root = NULL;
 561        } else {
 562                if (args->root && args->ino == NILFS_ROOT_INO)
 563                        nilfs_get_root(args->root);
 564                NILFS_I(inode)->i_root = args->root;
 565        }
 566        return 0;
 567}
 568
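/*
 * Inodes are hashed by inode number but matched with nilfs_iget_test(),
 * so the effective lookup key is (ino, root): the same inode number may
 * be cached once per mounted checkpoint, and GC inodes are additionally
 * distinguished by their checkpoint number (cno).
 */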
 569struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
 570                            unsigned long ino)
 571{
 572        struct nilfs_iget_args args = {
 573                .ino = ino, .root = root, .cno = 0, .for_gc = 0
 574        };
 575
 576        return ilookup5(sb, ino, nilfs_iget_test, &args);
 577}
 578
 579struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
 580                                unsigned long ino)
 581{
 582        struct nilfs_iget_args args = {
 583                .ino = ino, .root = root, .cno = 0, .for_gc = 0
 584        };
 585
 586        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 587}
 588
 589struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
 590                         unsigned long ino)
 591{
 592        struct inode *inode;
 593        int err;
 594
 595        inode = nilfs_iget_locked(sb, root, ino);
 596        if (unlikely(!inode))
 597                return ERR_PTR(-ENOMEM);
 598        if (!(inode->i_state & I_NEW))
 599                return inode;
 600
 601        err = __nilfs_read_inode(sb, root, ino, inode);
 602        if (unlikely(err)) {
 603                iget_failed(inode);
 604                return ERR_PTR(err);
 605        }
 606        unlock_new_inode(inode);
 607        return inode;
 608}
 609
 610struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
 611                                __u64 cno)
 612{
 613        struct nilfs_iget_args args = {
 614                .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
 615        };
 616        struct inode *inode;
 617        int err;
 618
 619        inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 620        if (unlikely(!inode))
 621                return ERR_PTR(-ENOMEM);
 622        if (!(inode->i_state & I_NEW))
 623                return inode;
 624
 625        err = nilfs_init_gcinode(inode);
 626        if (unlikely(err)) {
 627                iget_failed(inode);
 628                return ERR_PTR(err);
 629        }
 630        unlock_new_inode(inode);
 631        return inode;
 632}
 633
 634void nilfs_write_inode_common(struct inode *inode,
 635                              struct nilfs_inode *raw_inode, int has_bmap)
 636{
 637        struct nilfs_inode_info *ii = NILFS_I(inode);
 638
 639        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
 640        raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
 641        raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
 642        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 643        raw_inode->i_size = cpu_to_le64(inode->i_size);
 644        raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
 645        raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
 646        raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 647        raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
 648        raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
 649
 650        raw_inode->i_flags = cpu_to_le32(ii->i_flags);
 651        raw_inode->i_generation = cpu_to_le32(inode->i_generation);
 652
 653        if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
 654                struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 655
 656                /* zero-fill unused portion in the case of super root block */
 657                raw_inode->i_xattr = 0;
 658                raw_inode->i_pad = 0;
 659                memset((void *)raw_inode + sizeof(*raw_inode), 0,
 660                       nilfs->ns_inode_size - sizeof(*raw_inode));
 661        }
 662
 663        if (has_bmap)
 664                nilfs_bmap_write(ii->i_bmap, raw_inode);
 665        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
 666                raw_inode->i_device_code =
 667                        cpu_to_le64(huge_encode_dev(inode->i_rdev));
 668        /*
 669         * When extending the inode, nilfs->ns_inode_size should be
 670         * checked before substituting any appended fields.
 671         */
 672}
 673
 674void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
 675{
 676        ino_t ino = inode->i_ino;
 677        struct nilfs_inode_info *ii = NILFS_I(inode);
 678        struct inode *ifile = ii->i_root->ifile;
 679        struct nilfs_inode *raw_inode;
 680
 681        raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
 682
 683        if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
 684                memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
 685        if (flags & I_DIRTY_DATASYNC)
 686                set_bit(NILFS_I_INODE_SYNC, &ii->i_state);
 687
 688        nilfs_write_inode_common(inode, raw_inode, 0);
 689                /*
 690                 * XXX: calling with has_bmap = 0 is a workaround to avoid
 691                 * a deadlock on the bmap.  This delays the update of i_bmap
 692                 * until just before writing.
 693                 */
 694
 695        nilfs_ifile_unmap_inode(ifile, ino, ibh);
 696}
 697
 698#define NILFS_MAX_TRUNCATE_BLOCKS       16384  /* 64MB for 4KB block */
 699
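/*
 * nilfs_truncate_bmap() - truncate the block mapping of an inode down
 * to block offset @from.
 *
 * The bmap is shortened in steps of at most NILFS_MAX_TRUNCATE_BLOCKS
 * blocks, with nilfs_relax_pressure_in_lock() called between steps,
 * presumably so the segment constructor can reclaim space while a huge
 * file is being truncated.  A single -ENOMEM from nilfs_bmap_truncate()
 * is retried once after relaxing the pressure.
 */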
 700static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
 701                                unsigned long from)
 702{
 703        __u64 b;
 704        int ret;
 705
 706        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 707                return;
 708repeat:
 709        ret = nilfs_bmap_last_key(ii->i_bmap, &b);
 710        if (ret == -ENOENT)
 711                return;
 712        else if (ret < 0)
 713                goto failed;
 714
 715        if (b < from)
 716                return;
 717
 718        b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
 719        ret = nilfs_bmap_truncate(ii->i_bmap, b);
 720        nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
 721        if (!ret || (ret == -ENOMEM &&
 722                     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
 723                goto repeat;
 724
 725failed:
 726        nilfs_msg(ii->vfs_inode.i_sb, KERN_WARNING,
 727                  "error %d truncating bmap (ino=%lu)", ret,
 728                  ii->vfs_inode.i_ino);
 729}
 730
 731void nilfs_truncate(struct inode *inode)
 732{
 733        unsigned long blkoff;
 734        unsigned int blocksize;
 735        struct nilfs_transaction_info ti;
 736        struct super_block *sb = inode->i_sb;
 737        struct nilfs_inode_info *ii = NILFS_I(inode);
 738
 739        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 740                return;
 741        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
 742                return;
 743
 744        blocksize = sb->s_blocksize;
 745        blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
 746        nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 747
 748        block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
 749
 750        nilfs_truncate_bmap(ii, blkoff);
 751
 752        inode->i_mtime = inode->i_ctime = current_time(inode);
 753        if (IS_SYNC(inode))
 754                nilfs_set_transaction_flag(NILFS_TI_SYNC);
 755
 756        nilfs_mark_inode_dirty(inode);
 757        nilfs_set_file_dirty(inode, 0);
 758        nilfs_transaction_commit(sb);
 759        /*
 760         * May construct a logical segment and may fail in sync mode.
 761         * But truncate has no return value.
 762         */
 763}
 764
 765static void nilfs_clear_inode(struct inode *inode)
 766{
 767        struct nilfs_inode_info *ii = NILFS_I(inode);
 768
 769        /*
 770         * Free the resources allocated in nilfs_read_inode() here.
 771         */
 772        BUG_ON(!list_empty(&ii->i_dirty));
 773        brelse(ii->i_bh);
 774        ii->i_bh = NULL;
 775
 776        if (nilfs_is_metadata_file_inode(inode))
 777                nilfs_mdt_clear(inode);
 778
 779        if (test_bit(NILFS_I_BMAP, &ii->i_state))
 780                nilfs_bmap_clear(ii->i_bmap);
 781
 782        nilfs_btnode_cache_clear(&ii->i_btnode_cache);
 783
 784        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
 785                nilfs_put_root(ii->i_root);
 786}
 787
 788void nilfs_evict_inode(struct inode *inode)
 789{
 790        struct nilfs_transaction_info ti;
 791        struct super_block *sb = inode->i_sb;
 792        struct nilfs_inode_info *ii = NILFS_I(inode);
 793        int ret;
 794
 795        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
 796                truncate_inode_pages_final(&inode->i_data);
 797                clear_inode(inode);
 798                nilfs_clear_inode(inode);
 799                return;
 800        }
 801        nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 802
 803        truncate_inode_pages_final(&inode->i_data);
 804
 805        /* TODO: some of the following operations may fail.  */
 806        nilfs_truncate_bmap(ii, 0);
 807        nilfs_mark_inode_dirty(inode);
 808        clear_inode(inode);
 809
 810        ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
 811        if (!ret)
 812                atomic64_dec(&ii->i_root->inodes_count);
 813
 814        nilfs_clear_inode(inode);
 815
 816        if (IS_SYNC(inode))
 817                nilfs_set_transaction_flag(NILFS_TI_SYNC);
 818        nilfs_transaction_commit(sb);
 819        /*
 820         * May construct a logical segment and may fail in sync mode.
 821         * But delete_inode has no return value.
 822         */
 823}
 824
 825int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
 826{
 827        struct nilfs_transaction_info ti;
 828        struct inode *inode = d_inode(dentry);
 829        struct super_block *sb = inode->i_sb;
 830        int err;
 831
 832        err = setattr_prepare(dentry, iattr);
 833        if (err)
 834                return err;
 835
 836        err = nilfs_transaction_begin(sb, &ti, 0);
 837        if (unlikely(err))
 838                return err;
 839
 840        if ((iattr->ia_valid & ATTR_SIZE) &&
 841            iattr->ia_size != i_size_read(inode)) {
 842                inode_dio_wait(inode);
 843                truncate_setsize(inode, iattr->ia_size);
 844                nilfs_truncate(inode);
 845        }
 846
 847        setattr_copy(inode, iattr);
 848        mark_inode_dirty(inode);
 849
 850        if (iattr->ia_valid & ATTR_MODE) {
 851                err = nilfs_acl_chmod(inode);
 852                if (unlikely(err))
 853                        goto out_err;
 854        }
 855
 856        return nilfs_transaction_commit(sb);
 857
 858out_err:
 859        nilfs_transaction_abort(sb);
 860        return err;
 861}
 862
 863int nilfs_permission(struct inode *inode, int mask)
 864{
 865        struct nilfs_root *root = NILFS_I(inode)->i_root;
 866
 867        if ((mask & MAY_WRITE) && root &&
 868            root->cno != NILFS_CPTREE_CURRENT_CNO)
 869                return -EROFS; /* snapshot is not writable */
 870
 871        return generic_permission(inode, mask);
 872}
 873
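/*
 * nilfs_load_inode_block() - get the buffer head of the on-disk inode.
 *
 * The buffer head is cached in ii->i_bh.  Because ns_inode_lock must be
 * dropped while reading the block from the ifile, the cache is
 * re-checked after re-acquiring the lock; if another task installed
 * i_bh in the meantime, the freshly read buffer is released and the
 * cached one is returned.  A reference is taken for the caller in
 * either case.
 */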
 874int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
 875{
 876        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 877        struct nilfs_inode_info *ii = NILFS_I(inode);
 878        int err;
 879
 880        spin_lock(&nilfs->ns_inode_lock);
 881        if (ii->i_bh == NULL) {
 882                spin_unlock(&nilfs->ns_inode_lock);
 883                err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
 884                                                  inode->i_ino, pbh);
 885                if (unlikely(err))
 886                        return err;
 887                spin_lock(&nilfs->ns_inode_lock);
 888                if (ii->i_bh == NULL)
 889                        ii->i_bh = *pbh;
 890                else {
 891                        brelse(*pbh);
 892                        *pbh = ii->i_bh;
 893                }
 894        } else
 895                *pbh = ii->i_bh;
 896
 897        get_bh(*pbh);
 898        spin_unlock(&nilfs->ns_inode_lock);
 899        return 0;
 900}
 901
 902int nilfs_inode_dirty(struct inode *inode)
 903{
 904        struct nilfs_inode_info *ii = NILFS_I(inode);
 905        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 906        int ret = 0;
 907
 908        if (!list_empty(&ii->i_dirty)) {
 909                spin_lock(&nilfs->ns_inode_lock);
 910                ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
 911                        test_bit(NILFS_I_BUSY, &ii->i_state);
 912                spin_unlock(&nilfs->ns_inode_lock);
 913        }
 914        return ret;
 915}
 916
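/*
 * nilfs_set_file_dirty() - account dirty blocks and queue the inode.
 *
 * @nr_dirty blocks are added to the filesystem-wide dirty block count.
 * The first time the inode becomes dirty it is also moved onto the
 * ns_dirty_files list, holding an extra reference obtained with
 * igrab(), and marked NILFS_I_QUEUED so that the segment constructor
 * can pick it up; if the inode is already being freed, -EINVAL is
 * returned instead.
 */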
 917int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
 918{
 919        struct nilfs_inode_info *ii = NILFS_I(inode);
 920        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 921
 922        atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);
 923
 924        if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
 925                return 0;
 926
 927        spin_lock(&nilfs->ns_inode_lock);
 928        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
 929            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
 930                /*
 931                 * Because this routine may race with nilfs_dispose_list(),
 932                 * we have to check NILFS_I_QUEUED here, too.
 933                 */
 934                if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
 935                        /*
 936                         * This will happen when somebody is freeing
 937                         * this inode.
 938                         */
 939                        nilfs_msg(inode->i_sb, KERN_WARNING,
 940                                  "cannot set file dirty (ino=%lu): the file is being freed",
 941                                  inode->i_ino);
 942                        spin_unlock(&nilfs->ns_inode_lock);
 943                        return -EINVAL; /*
 944                                         * NILFS_I_DIRTY may remain for
 945                                         * freeing inode.
 946                                         */
 947                }
 948                list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
 949                set_bit(NILFS_I_QUEUED, &ii->i_state);
 950        }
 951        spin_unlock(&nilfs->ns_inode_lock);
 952        return 0;
 953}
 954
 955int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
 956{
 957        struct buffer_head *ibh;
 958        int err;
 959
 960        err = nilfs_load_inode_block(inode, &ibh);
 961        if (unlikely(err)) {
 962                nilfs_msg(inode->i_sb, KERN_WARNING,
 963                          "cannot mark inode dirty (ino=%lu): error %d loading inode block",
 964                          inode->i_ino, err);
 965                return err;
 966        }
 967        nilfs_update_inode(inode, ibh, flags);
 968        mark_buffer_dirty(ibh);
 969        nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
 970        brelse(ibh);
 971        return 0;
 972}
 973
 974/**
 975 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 976 * @inode: inode of the file to be registered.
 977 *
 978 * nilfs_dirty_inode() loads an inode block containing the specified
 979 * @inode and copies data from the in-core inode to the corresponding
 980 * entry in the inode block. This operation is excluded from the segment
 981 * construction. This function can be called both as a single operation
 982 * and as a part of indivisible file operations.
 983 */
 984void nilfs_dirty_inode(struct inode *inode, int flags)
 985{
 986        struct nilfs_transaction_info ti;
 987        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 988
 989        if (is_bad_inode(inode)) {
 990                nilfs_msg(inode->i_sb, KERN_WARNING,
 991                          "tried to mark bad_inode dirty. ignored.");
 992                dump_stack();
 993                return;
 994        }
 995        if (mdi) {
 996                nilfs_mdt_mark_dirty(inode);
 997                return;
 998        }
 999        nilfs_transaction_begin(inode->i_sb, &ti, 0);
1000        __nilfs_mark_inode_dirty(inode, flags);
1001        nilfs_transaction_commit(inode->i_sb); /* never fails */
1002}
1003
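/*
 * nilfs_fiemap() - fiemap() method for NILFS2.
 *
 * Extents are built by walking the bmap with nilfs_bmap_lookup_contig()
 * and merging contiguous runs of blocks.  Blocks that are dirty in the
 * page cache but not yet assigned a disk address, as reported by
 * nilfs_find_uncommitted_extent(), are returned as delayed-allocation
 * extents with a physical address of zero.
 */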
1004int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1005                 __u64 start, __u64 len)
1006{
1007        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
1008        __u64 logical = 0, phys = 0, size = 0;
1009        __u32 flags = 0;
1010        loff_t isize;
1011        sector_t blkoff, end_blkoff;
1012        sector_t delalloc_blkoff;
1013        unsigned long delalloc_blklen;
1014        unsigned int blkbits = inode->i_blkbits;
1015        int ret, n;
1016
1017        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
1018        if (ret)
1019                return ret;
1020
1021        inode_lock(inode);
1022
1023        isize = i_size_read(inode);
1024
1025        blkoff = start >> blkbits;
1026        end_blkoff = (start + len - 1) >> blkbits;
1027
1028        delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
1029                                                        &delalloc_blkoff);
1030
1031        do {
1032                __u64 blkphy;
1033                unsigned int maxblocks;
1034
1035                if (delalloc_blklen && blkoff == delalloc_blkoff) {
1036                        if (size) {
1037                                /* End of the current extent */
1038                                ret = fiemap_fill_next_extent(
1039                                        fieinfo, logical, phys, size, flags);
1040                                if (ret)
1041                                        break;
1042                        }
1043                        if (blkoff > end_blkoff)
1044                                break;
1045
1046                        flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
1047                        logical = blkoff << blkbits;
1048                        phys = 0;
1049                        size = delalloc_blklen << blkbits;
1050
1051                        blkoff = delalloc_blkoff + delalloc_blklen;
1052                        delalloc_blklen = nilfs_find_uncommitted_extent(
1053                                inode, blkoff, &delalloc_blkoff);
1054                        continue;
1055                }
1056
1057                /*
1058                 * Limit the number of blocks that we look up so as
1059                 * not to get into the next delayed allocation extent.
1060                 */
1061                maxblocks = INT_MAX;
1062                if (delalloc_blklen)
1063                        maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
1064                                          maxblocks);
1065                blkphy = 0;
1066
1067                down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1068                n = nilfs_bmap_lookup_contig(
1069                        NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
1070                up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1071
1072                if (n < 0) {
1073                        int past_eof;
1074
1075                        if (unlikely(n != -ENOENT))
1076                                break; /* error */
1077
1078                        /* HOLE */
1079                        blkoff++;
1080                        past_eof = ((blkoff << blkbits) >= isize);
1081
1082                        if (size) {
1083                                /* End of the current extent */
1084
1085                                if (past_eof)
1086                                        flags |= FIEMAP_EXTENT_LAST;
1087
1088                                ret = fiemap_fill_next_extent(
1089                                        fieinfo, logical, phys, size, flags);
1090                                if (ret)
1091                                        break;
1092                                size = 0;
1093                        }
1094                        if (blkoff > end_blkoff || past_eof)
1095                                break;
1096                } else {
1097                        if (size) {
1098                                if (phys && blkphy << blkbits == phys + size) {
1099                                        /* The current extent goes on */
1100                                        size += n << blkbits;
1101                                } else {
1102                                        /* Terminate the current extent */
1103                                        ret = fiemap_fill_next_extent(
1104                                                fieinfo, logical, phys, size,
1105                                                flags);
1106                                        if (ret || blkoff > end_blkoff)
1107                                                break;
1108
1109                                        /* Start another extent */
1110                                        flags = FIEMAP_EXTENT_MERGED;
1111                                        logical = blkoff << blkbits;
1112                                        phys = blkphy << blkbits;
1113                                        size = n << blkbits;
1114                                }
1115                        } else {
1116                                /* Start a new extent */
1117                                flags = FIEMAP_EXTENT_MERGED;
1118                                logical = blkoff << blkbits;
1119                                phys = blkphy << blkbits;
1120                                size = n << blkbits;
1121                        }
1122                        blkoff += n;
1123                }
1124                cond_resched();
1125        } while (true);
1126
1127        /* If ret is 1 then we just hit the end of the extent array */
1128        if (ret == 1)
1129                ret = 0;
1130
1131        inode_unlock(inode);
1132        return ret;
1133}
1134