linux/fs/nilfs2/inode.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * inode.c - NILFS inode operations.
   4 *
   5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
   6 *
   7 * Written by Ryusuke Konishi.
   8 *
   9 */
  10
  11#include <linux/buffer_head.h>
  12#include <linux/gfp.h>
  13#include <linux/mpage.h>
  14#include <linux/pagemap.h>
  15#include <linux/writeback.h>
  16#include <linux/uio.h>
  17#include <linux/fiemap.h>
  18#include "nilfs.h"
  19#include "btnode.h"
  20#include "segment.h"
  21#include "page.h"
  22#include "mdt.h"
  23#include "cpfile.h"
  24#include "ifile.h"
  25
  26/**
  27 * struct nilfs_iget_args - arguments used during comparison between inodes
  28 * @ino: inode number
  29 * @cno: checkpoint number
  30 * @root: pointer to NILFS root object (mounted checkpoint)
  31 * @for_gc: inode for GC flag
  32 */
  33struct nilfs_iget_args {
  34        u64 ino;
  35        __u64 cno;
  36        struct nilfs_root *root;
  37        int for_gc;
  38};
  39
  40static int nilfs_iget_test(struct inode *inode, void *opaque);
  41
  42void nilfs_inode_add_blocks(struct inode *inode, int n)
  43{
  44        struct nilfs_root *root = NILFS_I(inode)->i_root;
  45
  46        inode_add_bytes(inode, i_blocksize(inode) * n);
  47        if (root)
  48                atomic64_add(n, &root->blocks_count);
  49}
  50
  51void nilfs_inode_sub_blocks(struct inode *inode, int n)
  52{
  53        struct nilfs_root *root = NILFS_I(inode)->i_root;
  54
  55        inode_sub_bytes(inode, i_blocksize(inode) * n);
  56        if (root)
  57                atomic64_sub(n, &root->blocks_count);
  58}
  59
  60/**
  61 * nilfs_get_block() - get a file block on the filesystem (callback function)
  62 * @inode: inode struct of the target file
  63 * @blkoff: file block number
  64 * @bh_result: buffer head to be mapped on
  65 * @create: indicate whether to allocate the block if it has not been
  66 *      allocated yet
  67 *
  68 * This function does not issue the actual read request for the specified
  69 * data block; that is done by the VFS.
  70 */
  71int nilfs_get_block(struct inode *inode, sector_t blkoff,
  72                    struct buffer_head *bh_result, int create)
  73{
  74        struct nilfs_inode_info *ii = NILFS_I(inode);
  75        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
  76        __u64 blknum = 0;
  77        int err = 0, ret;
  78        unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;
  79
  80        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
  81        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
  82        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
  83        if (ret >= 0) { /* found */
  84                map_bh(bh_result, inode->i_sb, blknum);
  85                if (ret > 0)
  86                        bh_result->b_size = (ret << inode->i_blkbits);
  87                goto out;
  88        }
  89        /* data block was not found */
  90        if (ret == -ENOENT && create) {
  91                struct nilfs_transaction_info ti;
  92
  93                bh_result->b_blocknr = 0;
  94                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
  95                if (unlikely(err))
  96                        goto out;
  97                err = nilfs_bmap_insert(ii->i_bmap, blkoff,
  98                                        (unsigned long)bh_result);
  99                if (unlikely(err != 0)) {
 100                        if (err == -EEXIST) {
 101                                /*
 102                                 * The get_block() function could be called
 103                                 * by multiple callers for the same inode.
 104                                 * However, the page containing this block
 105                                 * must be locked in this case.
 106                                 */
 107                                nilfs_warn(inode->i_sb,
 108                                           "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
 109                                           __func__, inode->i_ino,
 110                                           (unsigned long long)blkoff);
 111                                err = 0;
 112                        }
 113                        nilfs_transaction_abort(inode->i_sb);
 114                        goto out;
 115                }
 116                nilfs_mark_inode_dirty_sync(inode);
 117                nilfs_transaction_commit(inode->i_sb); /* never fails */
 118                /* Error handling should be detailed */
 119                set_buffer_new(bh_result);
 120                set_buffer_delay(bh_result);
 121                map_bh(bh_result, inode->i_sb, 0);
 122                /* Disk block number must be changed to proper value */
 123
 124        } else if (ret == -ENOENT) {
 125                /*
 126                 * "Not found" is not an error (e.g. a hole); we must
 127                 * return without the mapped state flag.
 128                 */
 129                ;
 130        } else {
 131                err = ret;
 132        }
 133
 134 out:
 135        return err;
 136}
 137
 138/**
 139 * nilfs_readpage() - implement the readpage() method of the nilfs_aops
 140 * address_space_operations.
 141 * @file: file struct of the file to be read
 142 * @page: the page to be read
 143 */
 144static int nilfs_readpage(struct file *file, struct page *page)
 145{
 146        return mpage_readpage(page, nilfs_get_block);
 147}
 148
 149static void nilfs_readahead(struct readahead_control *rac)
 150{
 151        mpage_readahead(rac, nilfs_get_block);
 152}
 153
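/*
 * nilfs_writepages() - writepages callback of nilfs_aops.  On a read-only
 * filesystem the dirty pages are simply discarded and -EROFS is returned.
 * For data-integrity writeback (WB_SYNC_ALL) a dsync segment covering the
 * requested range is constructed; otherwise writeback is left to the
 * segment constructor.
 */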
 154static int nilfs_writepages(struct address_space *mapping,
 155                            struct writeback_control *wbc)
 156{
 157        struct inode *inode = mapping->host;
 158        int err = 0;
 159
 160        if (sb_rdonly(inode->i_sb)) {
 161                nilfs_clear_dirty_pages(mapping, false);
 162                return -EROFS;
 163        }
 164
 165        if (wbc->sync_mode == WB_SYNC_ALL)
 166                err = nilfs_construct_dsync_segment(inode->i_sb, inode,
 167                                                    wbc->range_start,
 168                                                    wbc->range_end);
 169        return err;
 170}
 171
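/*
 * nilfs_writepage() - writepage callback of nilfs_aops.  The page is not
 * written here directly; it is redirtied and writeback is deferred to the
 * log writer: a full segment construction for WB_SYNC_ALL, or a flush
 * request when called for reclaim.  On a read-only filesystem the dirty
 * page is discarded instead.
 */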
 172static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
 173{
 174        struct inode *inode = page->mapping->host;
 175        int err;
 176
 177        if (sb_rdonly(inode->i_sb)) {
 178                /*
 179                 * This means the filesystem was remounted read-only
 180                 * because of an error or metadata corruption, but there
 181                 * are still dirty pages being flushed in the background.
 182                 * So, here we simply discard this dirty page.
 183                 */
 184                nilfs_clear_dirty_page(page, false);
 185                unlock_page(page);
 186                return -EROFS;
 187        }
 188
 189        redirty_page_for_writepage(wbc, page);
 190        unlock_page(page);
 191
 192        if (wbc->sync_mode == WB_SYNC_ALL) {
 193                err = nilfs_construct_segment(inode->i_sb);
 194                if (unlikely(err))
 195                        return err;
 196        } else if (wbc->for_reclaim)
 197                nilfs_flush_segment(inode->i_sb, inode->i_ino);
 198
 199        return 0;
 200}
 201
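/*
 * nilfs_set_page_dirty() - set_page_dirty callback of nilfs_aops.  Besides
 * marking the page dirty, the newly dirtied buffers are counted and
 * registered with nilfs_set_file_dirty() so that the log writer can write
 * them out later.
 */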
 202static int nilfs_set_page_dirty(struct page *page)
 203{
 204        struct inode *inode = page->mapping->host;
 205        int ret = __set_page_dirty_nobuffers(page);
 206
 207        if (page_has_buffers(page)) {
 208                unsigned int nr_dirty = 0;
 209                struct buffer_head *bh, *head;
 210
 211                /*
 212                 * This page is locked by the callers, and no other thread
 213                 * concurrently marks its buffers dirty, since the buffers
 214                 * are only dirtied through routines in fs/buffer.c whose
 215                 * call sites of mark_buffer_dirty() are protected by the
 216                 * page lock.
 217                 */
 218                bh = head = page_buffers(page);
 219                do {
 220                        /* Do not mark hole blocks dirty */
 221                        if (buffer_dirty(bh) || !buffer_mapped(bh))
 222                                continue;
 223
 224                        set_buffer_dirty(bh);
 225                        nr_dirty++;
 226                } while (bh = bh->b_this_page, bh != head);
 227
 228                if (nr_dirty)
 229                        nilfs_set_file_dirty(inode, nr_dirty);
 230        } else if (ret) {
 231                unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
 232
 233                nilfs_set_file_dirty(inode, nr_dirty);
 234        }
 235        return ret;
 236}
 237
 238void nilfs_write_failed(struct address_space *mapping, loff_t to)
 239{
 240        struct inode *inode = mapping->host;
 241
 242        if (to > inode->i_size) {
 243                truncate_pagecache(inode, inode->i_size);
 244                nilfs_truncate(inode);
 245        }
 246}
 247
 248static int nilfs_write_begin(struct file *file, struct address_space *mapping,
 249                             loff_t pos, unsigned len, unsigned flags,
 250                             struct page **pagep, void **fsdata)
 251
 252{
 253        struct inode *inode = mapping->host;
 254        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
 255
 256        if (unlikely(err))
 257                return err;
 258
 259        err = block_write_begin(mapping, pos, len, flags, pagep,
 260                                nilfs_get_block);
 261        if (unlikely(err)) {
 262                nilfs_write_failed(mapping, pos + len);
 263                nilfs_transaction_abort(inode->i_sb);
 264        }
 265        return err;
 266}
 267
 268static int nilfs_write_end(struct file *file, struct address_space *mapping,
 269                           loff_t pos, unsigned len, unsigned copied,
 270                           struct page *page, void *fsdata)
 271{
 272        struct inode *inode = mapping->host;
 273        unsigned int start = pos & (PAGE_SIZE - 1);
 274        unsigned int nr_dirty;
 275        int err;
 276
 277        nr_dirty = nilfs_page_count_clean_buffers(page, start,
 278                                                  start + copied);
 279        copied = generic_write_end(file, mapping, pos, len, copied, page,
 280                                   fsdata);
 281        nilfs_set_file_dirty(inode, nr_dirty);
 282        err = nilfs_transaction_commit(inode->i_sb);
 283        return err ? : copied;
 284}
 285
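/*
 * nilfs_direct_IO() - direct_IO callback of nilfs_aops.  Direct writes fall
 * back to buffered I/O (by returning 0); direct reads are passed to
 * blockdev_direct_IO() using nilfs_get_block().
 */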
 286static ssize_t
 287nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 288{
 289        struct inode *inode = file_inode(iocb->ki_filp);
 290
 291        if (iov_iter_rw(iter) == WRITE)
 292                return 0;
 293
 294        /* Needs synchronization with the cleaner */
 295        return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
 296}
 297
 298const struct address_space_operations nilfs_aops = {
 299        .writepage              = nilfs_writepage,
 300        .readpage               = nilfs_readpage,
 301        .writepages             = nilfs_writepages,
 302        .set_page_dirty         = nilfs_set_page_dirty,
 303        .readahead              = nilfs_readahead,
 304        .write_begin            = nilfs_write_begin,
 305        .write_end              = nilfs_write_end,
 306        /* .releasepage         = nilfs_releasepage, */
 307        .invalidatepage         = block_invalidatepage,
 308        .direct_IO              = nilfs_direct_IO,
 309        .is_partially_uptodate  = block_is_partially_uptodate,
 310};
 311
 312static int nilfs_insert_inode_locked(struct inode *inode,
 313                                     struct nilfs_root *root,
 314                                     unsigned long ino)
 315{
 316        struct nilfs_iget_args args = {
 317                .ino = ino, .root = root, .cno = 0, .for_gc = 0
 318        };
 319
 320        return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
 321}
 322
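/*
 * nilfs_new_inode() - allocate a new inode under the same nilfs_root
 * (checkpoint) as @dir: create its ifile entry, set up owner, timestamps,
 * bmap and inherited flags, and hash the inode.  Returns the new inode,
 * which is still marked I_NEW, or an ERR_PTR value on failure.
 */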
 323struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 324{
 325        struct super_block *sb = dir->i_sb;
 326        struct the_nilfs *nilfs = sb->s_fs_info;
 327        struct inode *inode;
 328        struct nilfs_inode_info *ii;
 329        struct nilfs_root *root;
 330        int err = -ENOMEM;
 331        ino_t ino;
 332
 333        inode = new_inode(sb);
 334        if (unlikely(!inode))
 335                goto failed;
 336
 337        mapping_set_gfp_mask(inode->i_mapping,
 338                           mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 339
 340        root = NILFS_I(dir)->i_root;
 341        ii = NILFS_I(inode);
 342        ii->i_state = BIT(NILFS_I_NEW);
 343        ii->i_root = root;
 344
 345        err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
 346        if (unlikely(err))
 347                goto failed_ifile_create_inode;
 348        /* reference count of i_bh inherits from nilfs_mdt_read_block() */
 349
 350        atomic64_inc(&root->inodes_count);
 351        inode_init_owner(&init_user_ns, inode, dir, mode);
 352        inode->i_ino = ino;
 353        inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 354
 355        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
 356                err = nilfs_bmap_read(ii->i_bmap, NULL);
 357                if (err < 0)
 358                        goto failed_after_creation;
 359
 360                set_bit(NILFS_I_BMAP, &ii->i_state);
 361                /* No lock is needed; iget() ensures it. */
 362        }
 363
 364        ii->i_flags = nilfs_mask_flags(
 365                mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);
 366
 367        /* ii->i_file_acl = 0; */
 368        /* ii->i_dir_acl = 0; */
 369        ii->i_dir_start_lookup = 0;
 370        nilfs_set_inode_flags(inode);
 371        spin_lock(&nilfs->ns_next_gen_lock);
 372        inode->i_generation = nilfs->ns_next_generation++;
 373        spin_unlock(&nilfs->ns_next_gen_lock);
 374        if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
 375                err = -EIO;
 376                goto failed_after_creation;
 377        }
 378
 379        err = nilfs_init_acl(inode, dir);
 380        if (unlikely(err))
 381                /*
 382                 * Never occurs.  When nilfs_init_acl() is supported,
 383                 * proper cancellation of the above jobs should be considered.
 384                 */
 385                goto failed_after_creation;
 386
 387        return inode;
 388
 389 failed_after_creation:
 390        clear_nlink(inode);
 391        if (inode->i_state & I_NEW)
 392                unlock_new_inode(inode);
 393        iput(inode);  /*
 394                       * raw_inode will be deleted through
 395                       * nilfs_evict_inode().
 396                       */
 397        goto failed;
 398
 399 failed_ifile_create_inode:
 400        make_bad_inode(inode);
 401        iput(inode);
 402 failed:
 403        return ERR_PTR(err);
 404}
 405
 406void nilfs_set_inode_flags(struct inode *inode)
 407{
 408        unsigned int flags = NILFS_I(inode)->i_flags;
 409        unsigned int new_fl = 0;
 410
 411        if (flags & FS_SYNC_FL)
 412                new_fl |= S_SYNC;
 413        if (flags & FS_APPEND_FL)
 414                new_fl |= S_APPEND;
 415        if (flags & FS_IMMUTABLE_FL)
 416                new_fl |= S_IMMUTABLE;
 417        if (flags & FS_NOATIME_FL)
 418                new_fl |= S_NOATIME;
 419        if (flags & FS_DIRSYNC_FL)
 420                new_fl |= S_DIRSYNC;
 421        inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
 422                        S_NOATIME | S_DIRSYNC);
 423}
 424
 425int nilfs_read_inode_common(struct inode *inode,
 426                            struct nilfs_inode *raw_inode)
 427{
 428        struct nilfs_inode_info *ii = NILFS_I(inode);
 429        int err;
 430
 431        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
 432        i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
 433        i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
 434        set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 435        inode->i_size = le64_to_cpu(raw_inode->i_size);
 436        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
 437        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
 438        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
 439        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 440        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
 441        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 442        if (inode->i_nlink == 0)
 443                return -ESTALE; /* this inode is deleted */
 444
 445        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
 446        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
 447#if 0
 448        ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
 449        ii->i_dir_acl = S_ISREG(inode->i_mode) ?
 450                0 : le32_to_cpu(raw_inode->i_dir_acl);
 451#endif
 452        ii->i_dir_start_lookup = 0;
 453        inode->i_generation = le32_to_cpu(raw_inode->i_generation);
 454
 455        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 456            S_ISLNK(inode->i_mode)) {
 457                err = nilfs_bmap_read(ii->i_bmap, raw_inode);
 458                if (err < 0)
 459                        return err;
 460                set_bit(NILFS_I_BMAP, &ii->i_state);
 461                /* No lock is needed; iget() ensures it. */
 462        }
 463        return 0;
 464}
 465
 466static int __nilfs_read_inode(struct super_block *sb,
 467                              struct nilfs_root *root, unsigned long ino,
 468                              struct inode *inode)
 469{
 470        struct the_nilfs *nilfs = sb->s_fs_info;
 471        struct buffer_head *bh;
 472        struct nilfs_inode *raw_inode;
 473        int err;
 474
 475        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 476        err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
 477        if (unlikely(err))
 478                goto bad_inode;
 479
 480        raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);
 481
 482        err = nilfs_read_inode_common(inode, raw_inode);
 483        if (err)
 484                goto failed_unmap;
 485
 486        if (S_ISREG(inode->i_mode)) {
 487                inode->i_op = &nilfs_file_inode_operations;
 488                inode->i_fop = &nilfs_file_operations;
 489                inode->i_mapping->a_ops = &nilfs_aops;
 490        } else if (S_ISDIR(inode->i_mode)) {
 491                inode->i_op = &nilfs_dir_inode_operations;
 492                inode->i_fop = &nilfs_dir_operations;
 493                inode->i_mapping->a_ops = &nilfs_aops;
 494        } else if (S_ISLNK(inode->i_mode)) {
 495                inode->i_op = &nilfs_symlink_inode_operations;
 496                inode_nohighmem(inode);
 497                inode->i_mapping->a_ops = &nilfs_aops;
 498        } else {
 499                inode->i_op = &nilfs_special_inode_operations;
 500                init_special_inode(
 501                        inode, inode->i_mode,
 502                        huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
 503        }
 504        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 505        brelse(bh);
 506        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 507        nilfs_set_inode_flags(inode);
 508        mapping_set_gfp_mask(inode->i_mapping,
 509                           mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 510        return 0;
 511
 512 failed_unmap:
 513        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 514        brelse(bh);
 515
 516 bad_inode:
 517        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 518        return err;
 519}
 520
 521static int nilfs_iget_test(struct inode *inode, void *opaque)
 522{
 523        struct nilfs_iget_args *args = opaque;
 524        struct nilfs_inode_info *ii;
 525
 526        if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
 527                return 0;
 528
 529        ii = NILFS_I(inode);
 530        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
 531                return !args->for_gc;
 532
 533        return args->for_gc && args->cno == ii->i_cno;
 534}
 535
 536static int nilfs_iget_set(struct inode *inode, void *opaque)
 537{
 538        struct nilfs_iget_args *args = opaque;
 539
 540        inode->i_ino = args->ino;
 541        if (args->for_gc) {
 542                NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
 543                NILFS_I(inode)->i_cno = args->cno;
 544                NILFS_I(inode)->i_root = NULL;
 545        } else {
 546                if (args->root && args->ino == NILFS_ROOT_INO)
 547                        nilfs_get_root(args->root);
 548                NILFS_I(inode)->i_root = args->root;
 549        }
 550        return 0;
 551}
 552
 553struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
 554                            unsigned long ino)
 555{
 556        struct nilfs_iget_args args = {
 557                .ino = ino, .root = root, .cno = 0, .for_gc = 0
 558        };
 559
 560        return ilookup5(sb, ino, nilfs_iget_test, &args);
 561}
 562
 563struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
 564                                unsigned long ino)
 565{
 566        struct nilfs_iget_args args = {
 567                .ino = ino, .root = root, .cno = 0, .for_gc = 0
 568        };
 569
 570        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 571}
 572
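/*
 * nilfs_iget() - look up or read an inode of the checkpoint tree given by
 * @root.  If the inode is not cached, it is read in from the ifile via
 * __nilfs_read_inode().  Returns the inode or an ERR_PTR value on failure.
 */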
 573struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
 574                         unsigned long ino)
 575{
 576        struct inode *inode;
 577        int err;
 578
 579        inode = nilfs_iget_locked(sb, root, ino);
 580        if (unlikely(!inode))
 581                return ERR_PTR(-ENOMEM);
 582        if (!(inode->i_state & I_NEW))
 583                return inode;
 584
 585        err = __nilfs_read_inode(sb, root, ino, inode);
 586        if (unlikely(err)) {
 587                iget_failed(inode);
 588                return ERR_PTR(err);
 589        }
 590        unlock_new_inode(inode);
 591        return inode;
 592}
 593
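/*
 * nilfs_iget_for_gc() - obtain an inode for garbage collection.  GC inodes
 * cache blocks of the checkpoint @cno being collected; they are looked up
 * by (ino, cno), carry the NILFS_I_GCINODE flag, and have no nilfs_root
 * attached.
 */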
 594struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
 595                                __u64 cno)
 596{
 597        struct nilfs_iget_args args = {
 598                .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
 599        };
 600        struct inode *inode;
 601        int err;
 602
 603        inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 604        if (unlikely(!inode))
 605                return ERR_PTR(-ENOMEM);
 606        if (!(inode->i_state & I_NEW))
 607                return inode;
 608
 609        err = nilfs_init_gcinode(inode);
 610        if (unlikely(err)) {
 611                iget_failed(inode);
 612                return ERR_PTR(err);
 613        }
 614        unlock_new_inode(inode);
 615        return inode;
 616}
 617
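/*
 * nilfs_write_inode_common() - copy the in-core inode fields into the
 * on-disk inode @raw_inode.  When @has_bmap is set the bmap is written out
 * as well; otherwise the device number is stored for device special files.
 */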
 618void nilfs_write_inode_common(struct inode *inode,
 619                              struct nilfs_inode *raw_inode, int has_bmap)
 620{
 621        struct nilfs_inode_info *ii = NILFS_I(inode);
 622
 623        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
 624        raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
 625        raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
 626        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 627        raw_inode->i_size = cpu_to_le64(inode->i_size);
 628        raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
 629        raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
 630        raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 631        raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
 632        raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
 633
 634        raw_inode->i_flags = cpu_to_le32(ii->i_flags);
 635        raw_inode->i_generation = cpu_to_le32(inode->i_generation);
 636
 637        if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
 638                struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 639
 640                /* zero-fill unused portion in the case of super root block */
 641                raw_inode->i_xattr = 0;
 642                raw_inode->i_pad = 0;
 643                memset((void *)raw_inode + sizeof(*raw_inode), 0,
 644                       nilfs->ns_inode_size - sizeof(*raw_inode));
 645        }
 646
 647        if (has_bmap)
 648                nilfs_bmap_write(ii->i_bmap, raw_inode);
 649        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
 650                raw_inode->i_device_code =
 651                        cpu_to_le64(huge_encode_dev(inode->i_rdev));
 652        /*
 653         * When extending the on-disk inode format, nilfs->ns_inode_size
 654         * should be checked before filling in the appended fields.
 655         */
 656}
 657
 658void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
 659{
 660        ino_t ino = inode->i_ino;
 661        struct nilfs_inode_info *ii = NILFS_I(inode);
 662        struct inode *ifile = ii->i_root->ifile;
 663        struct nilfs_inode *raw_inode;
 664
 665        raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
 666
 667        if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
 668                memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
 669        if (flags & I_DIRTY_DATASYNC)
 670                set_bit(NILFS_I_INODE_SYNC, &ii->i_state);
 671
 672        nilfs_write_inode_common(inode, raw_inode, 0);
 673                /*
 674                 * XXX: calling with has_bmap = 0 is a workaround to avoid
 675                 * a bmap deadlock.  It delays the update of i_bmap until
 676                 * just before writing.
 677                 */
 678
 679        nilfs_ifile_unmap_inode(ifile, ino, ibh);
 680}
 681
 682#define NILFS_MAX_TRUNCATE_BLOCKS       16384  /* 64MB for 4KB block */
 683
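/*
 * nilfs_truncate_bmap() - truncate the bmap of @ii down to block offset
 * @from, removing at most NILFS_MAX_TRUNCATE_BLOCKS blocks per step so
 * that memory pressure can be relieved between the steps.
 */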
 684static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
 685                                unsigned long from)
 686{
 687        __u64 b;
 688        int ret;
 689
 690        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 691                return;
 692repeat:
 693        ret = nilfs_bmap_last_key(ii->i_bmap, &b);
 694        if (ret == -ENOENT)
 695                return;
 696        else if (ret < 0)
 697                goto failed;
 698
 699        if (b < from)
 700                return;
 701
 702        b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
 703        ret = nilfs_bmap_truncate(ii->i_bmap, b);
 704        nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
 705        if (!ret || (ret == -ENOMEM &&
 706                     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
 707                goto repeat;
 708
 709failed:
 710        nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
 711                   ret, ii->vfs_inode.i_ino);
 712}
 713
 714void nilfs_truncate(struct inode *inode)
 715{
 716        unsigned long blkoff;
 717        unsigned int blocksize;
 718        struct nilfs_transaction_info ti;
 719        struct super_block *sb = inode->i_sb;
 720        struct nilfs_inode_info *ii = NILFS_I(inode);
 721
 722        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 723                return;
 724        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
 725                return;
 726
 727        blocksize = sb->s_blocksize;
 728        blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
 729        nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 730
 731        block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
 732
 733        nilfs_truncate_bmap(ii, blkoff);
 734
 735        inode->i_mtime = inode->i_ctime = current_time(inode);
 736        if (IS_SYNC(inode))
 737                nilfs_set_transaction_flag(NILFS_TI_SYNC);
 738
 739        nilfs_mark_inode_dirty(inode);
 740        nilfs_set_file_dirty(inode, 0);
 741        nilfs_transaction_commit(sb);
 742        /*
 743         * May construct a logical segment and may fail in sync mode.
 744         * But truncate has no return value.
 745         */
 746}
 747
 748static void nilfs_clear_inode(struct inode *inode)
 749{
 750        struct nilfs_inode_info *ii = NILFS_I(inode);
 751
 752        /*
 753         * Free the resources allocated in nilfs_read_inode() here.
 754         */
 755        BUG_ON(!list_empty(&ii->i_dirty));
 756        brelse(ii->i_bh);
 757        ii->i_bh = NULL;
 758
 759        if (nilfs_is_metadata_file_inode(inode))
 760                nilfs_mdt_clear(inode);
 761
 762        if (test_bit(NILFS_I_BMAP, &ii->i_state))
 763                nilfs_bmap_clear(ii->i_bmap);
 764
 765        nilfs_btnode_cache_clear(&ii->i_btnode_cache);
 766
 767        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
 768                nilfs_put_root(ii->i_root);
 769}
 770
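/*
 * nilfs_evict_inode() - evict_inode callback.  If the inode still has
 * links, has no nilfs_root attached, or is a bad inode, only its page
 * cache and in-core state are released.  Otherwise the bmap is truncated
 * and the ifile entry is deleted within a transaction.
 */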
 771void nilfs_evict_inode(struct inode *inode)
 772{
 773        struct nilfs_transaction_info ti;
 774        struct super_block *sb = inode->i_sb;
 775        struct nilfs_inode_info *ii = NILFS_I(inode);
 776        int ret;
 777
 778        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
 779                truncate_inode_pages_final(&inode->i_data);
 780                clear_inode(inode);
 781                nilfs_clear_inode(inode);
 782                return;
 783        }
 784        nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 785
 786        truncate_inode_pages_final(&inode->i_data);
 787
 788        /* TODO: some of the following operations may fail.  */
 789        nilfs_truncate_bmap(ii, 0);
 790        nilfs_mark_inode_dirty(inode);
 791        clear_inode(inode);
 792
 793        ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
 794        if (!ret)
 795                atomic64_dec(&ii->i_root->inodes_count);
 796
 797        nilfs_clear_inode(inode);
 798
 799        if (IS_SYNC(inode))
 800                nilfs_set_transaction_flag(NILFS_TI_SYNC);
 801        nilfs_transaction_commit(sb);
 802        /*
 803         * May construct a logical segment and may fail in sync mode.
 804         * But delete_inode has no return value.
 805         */
 806}
 807
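/*
 * nilfs_setattr() - setattr callback.  Attribute changes, including size
 * changes (which truncate the file), are applied within a NILFS
 * transaction.
 */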
 808int nilfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 809                  struct iattr *iattr)
 810{
 811        struct nilfs_transaction_info ti;
 812        struct inode *inode = d_inode(dentry);
 813        struct super_block *sb = inode->i_sb;
 814        int err;
 815
 816        err = setattr_prepare(&init_user_ns, dentry, iattr);
 817        if (err)
 818                return err;
 819
 820        err = nilfs_transaction_begin(sb, &ti, 0);
 821        if (unlikely(err))
 822                return err;
 823
 824        if ((iattr->ia_valid & ATTR_SIZE) &&
 825            iattr->ia_size != i_size_read(inode)) {
 826                inode_dio_wait(inode);
 827                truncate_setsize(inode, iattr->ia_size);
 828                nilfs_truncate(inode);
 829        }
 830
 831        setattr_copy(&init_user_ns, inode, iattr);
 832        mark_inode_dirty(inode);
 833
 834        if (iattr->ia_valid & ATTR_MODE) {
 835                err = nilfs_acl_chmod(inode);
 836                if (unlikely(err))
 837                        goto out_err;
 838        }
 839
 840        return nilfs_transaction_commit(sb);
 841
 842out_err:
 843        nilfs_transaction_abort(sb);
 844        return err;
 845}
 846
 847int nilfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
 848                     int mask)
 849{
 850        struct nilfs_root *root = NILFS_I(inode)->i_root;
 851
 852        if ((mask & MAY_WRITE) && root &&
 853            root->cno != NILFS_CPTREE_CURRENT_CNO)
 854                return -EROFS; /* snapshot is not writable */
 855
 856        return generic_permission(&init_user_ns, inode, mask);
 857}
 858
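/*
 * nilfs_load_inode_block() - get the buffer head of the ifile block that
 * contains the on-disk inode, caching it in ii->i_bh.  The returned buffer
 * head holds an extra reference which the caller must drop with brelse().
 */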
 859int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
 860{
 861        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 862        struct nilfs_inode_info *ii = NILFS_I(inode);
 863        int err;
 864
 865        spin_lock(&nilfs->ns_inode_lock);
 866        if (ii->i_bh == NULL) {
 867                spin_unlock(&nilfs->ns_inode_lock);
 868                err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
 869                                                  inode->i_ino, pbh);
 870                if (unlikely(err))
 871                        return err;
 872                spin_lock(&nilfs->ns_inode_lock);
 873                if (ii->i_bh == NULL)
 874                        ii->i_bh = *pbh;
 875                else {
 876                        brelse(*pbh);
 877                        *pbh = ii->i_bh;
 878                }
 879        } else
 880                *pbh = ii->i_bh;
 881
 882        get_bh(*pbh);
 883        spin_unlock(&nilfs->ns_inode_lock);
 884        return 0;
 885}
 886
 887int nilfs_inode_dirty(struct inode *inode)
 888{
 889        struct nilfs_inode_info *ii = NILFS_I(inode);
 890        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 891        int ret = 0;
 892
 893        if (!list_empty(&ii->i_dirty)) {
 894                spin_lock(&nilfs->ns_inode_lock);
 895                ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
 896                        test_bit(NILFS_I_BUSY, &ii->i_state);
 897                spin_unlock(&nilfs->ns_inode_lock);
 898        }
 899        return ret;
 900}
 901
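/*
 * nilfs_set_file_dirty() - account @nr_dirty newly dirtied blocks and put
 * the inode on the ns_dirty_files list so that the segment constructor
 * will pick it up.  Returns -EINVAL if the inode is being freed.
 */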
 902int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
 903{
 904        struct nilfs_inode_info *ii = NILFS_I(inode);
 905        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 906
 907        atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);
 908
 909        if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
 910                return 0;
 911
 912        spin_lock(&nilfs->ns_inode_lock);
 913        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
 914            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
 915                /*
 916                 * Because this routine may race with nilfs_dispose_list(),
 917                 * we have to check NILFS_I_QUEUED here, too.
 918                 */
 919                if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
 920                        /*
 921                         * This will happen when somebody is freeing
 922                         * this inode.
 923                         */
 924                        nilfs_warn(inode->i_sb,
 925                                   "cannot set file dirty (ino=%lu): the file is being freed",
 926                                   inode->i_ino);
 927                        spin_unlock(&nilfs->ns_inode_lock);
 928                        return -EINVAL; /*
 929                                         * NILFS_I_DIRTY may remain for
 930                                         * freeing inode.
 931                                         */
 932                }
 933                list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
 934                set_bit(NILFS_I_QUEUED, &ii->i_state);
 935        }
 936        spin_unlock(&nilfs->ns_inode_lock);
 937        return 0;
 938}
 939
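/*
 * __nilfs_mark_inode_dirty() - write the in-core inode into its ifile
 * block and mark that block and the ifile dirty, so that the change is
 * included in the next segment construction.
 */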
 940int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
 941{
 942        struct buffer_head *ibh;
 943        int err;
 944
 945        err = nilfs_load_inode_block(inode, &ibh);
 946        if (unlikely(err)) {
 947                nilfs_warn(inode->i_sb,
 948                           "cannot mark inode dirty (ino=%lu): error %d loading inode block",
 949                           inode->i_ino, err);
 950                return err;
 951        }
 952        nilfs_update_inode(inode, ibh, flags);
 953        mark_buffer_dirty(ibh);
 954        nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
 955        brelse(ibh);
 956        return 0;
 957}
 958
 959/**
 960 * nilfs_dirty_inode - reflect changes in the given inode to its inode block.
 961 * @inode: inode of the file to be registered.
 962 *
 963 * nilfs_dirty_inode() loads an inode block containing the specified
 964 * @inode and copies the in-core inode data into the corresponding inode
 965 * entry in that block. This operation is excluded from the segment
 966 * construction. This function can be called both as a single operation
 967 * and as a part of indivisible file operations.
 968 */
 969void nilfs_dirty_inode(struct inode *inode, int flags)
 970{
 971        struct nilfs_transaction_info ti;
 972        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 973
 974        if (is_bad_inode(inode)) {
 975                nilfs_warn(inode->i_sb,
 976                           "tried to mark bad_inode dirty. ignored.");
 977                dump_stack();
 978                return;
 979        }
 980        if (mdi) {
 981                nilfs_mdt_mark_dirty(inode);
 982                return;
 983        }
 984        nilfs_transaction_begin(inode->i_sb, &ti, 0);
 985        __nilfs_mark_inode_dirty(inode, flags);
 986        nilfs_transaction_commit(inode->i_sb); /* never fails */
 987}
 988
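/*
 * nilfs_fiemap() - fiemap callback.  Extents are built by walking the bmap
 * with nilfs_bmap_lookup_contig() and merged with uncommitted
 * (delayed-allocation) extents found in the page cache.
 */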
 989int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 990                 __u64 start, __u64 len)
 991{
 992        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 993        __u64 logical = 0, phys = 0, size = 0;
 994        __u32 flags = 0;
 995        loff_t isize;
 996        sector_t blkoff, end_blkoff;
 997        sector_t delalloc_blkoff;
 998        unsigned long delalloc_blklen;
 999        unsigned int blkbits = inode->i_blkbits;
1000        int ret, n;
1001
1002        ret = fiemap_prep(inode, fieinfo, start, &len, 0);
1003        if (ret)
1004                return ret;
1005
1006        inode_lock(inode);
1007
1008        isize = i_size_read(inode);
1009
1010        blkoff = start >> blkbits;
1011        end_blkoff = (start + len - 1) >> blkbits;
1012
1013        delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
1014                                                        &delalloc_blkoff);
1015
1016        do {
1017                __u64 blkphy;
1018                unsigned int maxblocks;
1019
1020                if (delalloc_blklen && blkoff == delalloc_blkoff) {
1021                        if (size) {
1022                                /* End of the current extent */
1023                                ret = fiemap_fill_next_extent(
1024                                        fieinfo, logical, phys, size, flags);
1025                                if (ret)
1026                                        break;
1027                        }
1028                        if (blkoff > end_blkoff)
1029                                break;
1030
1031                        flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
1032                        logical = blkoff << blkbits;
1033                        phys = 0;
1034                        size = delalloc_blklen << blkbits;
1035
1036                        blkoff = delalloc_blkoff + delalloc_blklen;
1037                        delalloc_blklen = nilfs_find_uncommitted_extent(
1038                                inode, blkoff, &delalloc_blkoff);
1039                        continue;
1040                }
1041
1042                /*
1043                 * Limit the number of blocks that we look up so as
1044                 * not to get into the next delayed allocation extent.
1045                 */
1046                maxblocks = INT_MAX;
1047                if (delalloc_blklen)
1048                        maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
1049                                          maxblocks);
1050                blkphy = 0;
1051
1052                down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1053                n = nilfs_bmap_lookup_contig(
1054                        NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
1055                up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1056
1057                if (n < 0) {
1058                        int past_eof;
1059
1060                        if (unlikely(n != -ENOENT))
1061                                break; /* error */
1062
1063                        /* HOLE */
1064                        blkoff++;
1065                        past_eof = ((blkoff << blkbits) >= isize);
1066
1067                        if (size) {
1068                                /* End of the current extent */
1069
1070                                if (past_eof)
1071                                        flags |= FIEMAP_EXTENT_LAST;
1072
1073                                ret = fiemap_fill_next_extent(
1074                                        fieinfo, logical, phys, size, flags);
1075                                if (ret)
1076                                        break;
1077                                size = 0;
1078                        }
1079                        if (blkoff > end_blkoff || past_eof)
1080                                break;
1081                } else {
1082                        if (size) {
1083                                if (phys && blkphy << blkbits == phys + size) {
1084                                        /* The current extent goes on */
1085                                        size += n << blkbits;
1086                                } else {
1087                                        /* Terminate the current extent */
1088                                        ret = fiemap_fill_next_extent(
1089                                                fieinfo, logical, phys, size,
1090                                                flags);
1091                                        if (ret || blkoff > end_blkoff)
1092                                                break;
1093
1094                                        /* Start another extent */
1095                                        flags = FIEMAP_EXTENT_MERGED;
1096                                        logical = blkoff << blkbits;
1097                                        phys = blkphy << blkbits;
1098                                        size = n << blkbits;
1099                                }
1100                        } else {
1101                                /* Start a new extent */
1102                                flags = FIEMAP_EXTENT_MERGED;
1103                                logical = blkoff << blkbits;
1104                                phys = blkphy << blkbits;
1105                                size = n << blkbits;
1106                        }
1107                        blkoff += n;
1108                }
1109                cond_resched();
1110        } while (true);
1111
1112        /* If ret is 1 then we just hit the end of the extent array */
1113        if (ret == 1)
1114                ret = 0;
1115
1116        inode_unlock(inode);
1117        return ret;
1118}
1119