/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
        struct f2fs_node *rn;
        __le32 *addr_array;
        struct page *node_page = dn->node_page;
        unsigned int ofs_in_node = dn->ofs_in_node;

        f2fs_wait_on_page_writeback(node_page, NODE, false);

        rn = F2FS_NODE(node_page);

        /* Get physical address of data block */
        addr_array = blkaddr_in_node(rn);
        addr_array[ofs_in_node] = cpu_to_le32(new_addr);
        set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

        if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
                return -EPERM;
        if (!inc_valid_block_count(sbi, dn->inode, 1))
                return -ENOSPC;

        trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

        __set_data_blkaddr(dn, NEW_ADDR);
        dn->data_blkaddr = NEW_ADDR;
        sync_inode_page(dn);
        return 0;
}

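/*
 * Illustrative sketch (not part of the original file): callers pair the
 * dnode lookup with the block reservation under the fs-level lock, as
 * f2fs_write_begin() does below:
 *
 *      ilock = mutex_lock_op(sbi);
 *      set_new_dnode(&dn, inode, NULL, NULL, 0);
 *      err = get_dnode_of_data(&dn, index, ALLOC_NODE);
 *      if (!err) {
 *              if (dn.data_blkaddr == NULL_ADDR)
 *                      err = reserve_new_block(&dn);
 *              f2fs_put_dnode(&dn);
 *      }
 *      mutex_unlock_op(sbi, ilock);
 */
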
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
                                        struct buffer_head *bh_result)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);
#ifdef CONFIG_F2FS_STAT_FS
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
#endif
        pgoff_t start_fofs, end_fofs;
        block_t start_blkaddr;

        read_lock(&fi->ext.ext_lock);
        if (fi->ext.len == 0) {
                read_unlock(&fi->ext.ext_lock);
                return 0;
        }

#ifdef CONFIG_F2FS_STAT_FS
        sbi->total_hit_ext++;
#endif
        start_fofs = fi->ext.fofs;
        end_fofs = fi->ext.fofs + fi->ext.len - 1;
        start_blkaddr = fi->ext.blk_addr;

        if (pgofs >= start_fofs && pgofs <= end_fofs) {
                unsigned int blkbits = inode->i_sb->s_blocksize_bits;
                size_t count;

                clear_buffer_new(bh_result);
                map_bh(bh_result, inode->i_sb,
                                start_blkaddr + pgofs - start_fofs);
                count = end_fofs - pgofs + 1;
                if (count < (UINT_MAX >> blkbits))
                        bh_result->b_size = (count << blkbits);
                else
                        bh_result->b_size = UINT_MAX;

#ifdef CONFIG_F2FS_STAT_FS
                sbi->read_hit_ext++;
#endif
                read_unlock(&fi->ext.ext_lock);
                return 1;
        }
        read_unlock(&fi->ext.ext_lock);
        return 0;
}

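/*
 * Worked example (illustrative, not in the original file): with a cached
 * extent {fofs = 8, blk_addr = 1000, len = 4}, a lookup of pgofs = 10
 * hits because 8 <= 10 <= 11; the buffer head is mapped to block
 * 1000 + (10 - 8) = 1002 and b_size covers the remaining
 * (11 - 10 + 1) = 2 blocks of the extent.
 */
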
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
        struct f2fs_inode_info *fi = F2FS_I(dn->inode);
        pgoff_t fofs, start_fofs, end_fofs;
        block_t start_blkaddr, end_blkaddr;

        BUG_ON(blk_addr == NEW_ADDR);
        fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
                                                        dn->ofs_in_node;

        /* Update the page address in the parent node */
        __set_data_blkaddr(dn, blk_addr);

        write_lock(&fi->ext.ext_lock);

        start_fofs = fi->ext.fofs;
        end_fofs = fi->ext.fofs + fi->ext.len - 1;
        start_blkaddr = fi->ext.blk_addr;
        end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

        /* Drop and initialize the matched extent */
        if (fi->ext.len == 1 && fofs == start_fofs)
                fi->ext.len = 0;

        /* Initial extent */
        if (fi->ext.len == 0) {
                if (blk_addr != NULL_ADDR) {
                        fi->ext.fofs = fofs;
                        fi->ext.blk_addr = blk_addr;
                        fi->ext.len = 1;
                }
                goto end_update;
        }

        /* Front merge */
        if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
                fi->ext.fofs--;
                fi->ext.blk_addr--;
                fi->ext.len++;
                goto end_update;
        }

        /* Back merge */
        if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
                fi->ext.len++;
                goto end_update;
        }

        /* Split the existing extent */
        if (fi->ext.len > 1 &&
                fofs >= start_fofs && fofs <= end_fofs) {
                if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
                        fi->ext.len = fofs - start_fofs;
                } else {
                        fi->ext.fofs = fofs + 1;
                        fi->ext.blk_addr = start_blkaddr +
                                        fofs - start_fofs + 1;
                        fi->ext.len -= fofs - start_fofs + 1;
                }
                goto end_update;
        }
        write_unlock(&fi->ext.ext_lock);
        return;

end_update:
        write_unlock(&fi->ext.ext_lock);
        sync_inode_page(dn);
}

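/*
 * Worked example (illustrative, not in the original file): for a cached
 * extent {fofs = 8, blk_addr = 1000, len = 4} covering file blocks 8..11
 * at disk blocks 1000..1003:
 *
 *   - writing file block 7 to disk block 999 front-merges to {7, 999, 5};
 *   - writing file block 12 to disk block 1004 back-merges to {8, 1000, 5};
 *   - writing file block 9 to a non-contiguous disk block splits the
 *     extent, keeping the larger remainder: here {10, 1002, 2}.
 */
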
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
        int err;

        page = find_get_page(mapping, index);
        if (page && PageUptodate(page))
                return page;
        f2fs_put_page(page, 0);

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err)
                return ERR_PTR(err);
        f2fs_put_dnode(&dn);

        if (dn.data_blkaddr == NULL_ADDR)
                return ERR_PTR(-ENOENT);

        /* After fallocate(), there is no cached page, but its blkaddr is NEW_ADDR */
        if (dn.data_blkaddr == NEW_ADDR)
                return ERR_PTR(-EINVAL);

        page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
        if (!page)
                return ERR_PTR(-ENOMEM);

        if (PageUptodate(page)) {
                unlock_page(page);
                return page;
        }

        err = f2fs_readpage(sbi, page, dn.data_blkaddr,
                                        sync ? READ_SYNC : READA);
        if (sync) {
                wait_on_page_locked(page);
                if (!PageUptodate(page)) {
                        f2fs_put_page(page, 0);
                        return ERR_PTR(-EIO);
                }
        }
        return page;
}

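/*
 * Illustrative sketch (not part of the original file): a synchronous
 * caller, e.g. in the dir.c lookup path, treats an error return as a
 * missing page and drops the unlocked reference when done:
 *
 *      page = find_data_page(dir, index, true);
 *      if (IS_ERR(page))
 *              return NULL;
 *      ...
 *      f2fs_put_page(page, 0);
 */
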
/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
        int err;

repeat:
        page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
        if (!page)
                return ERR_PTR(-ENOMEM);

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err) {
                f2fs_put_page(page, 1);
                return ERR_PTR(err);
        }
        f2fs_put_dnode(&dn);

        if (dn.data_blkaddr == NULL_ADDR) {
                f2fs_put_page(page, 1);
                return ERR_PTR(-ENOENT);
        }

        if (PageUptodate(page))
                return page;

        /*
         * A new dentry page is allocated but not able to be written, since its
         * new inode page couldn't be allocated due to -ENOSPC.
         * In such a case, its blkaddr remains NEW_ADDR.
         * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
         */
        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
                return page;
        }

        err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
        if (err)
                return ERR_PTR(err);

        lock_page(page);
        if (!PageUptodate(page)) {
                f2fs_put_page(page, 1);
                return ERR_PTR(-EIO);
        }
        if (page->mapping != mapping) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
        return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a mutex by calling mutex_lock_op()
 * and mutex_unlock_op().
 * Note that npage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
                struct page *npage, pgoff_t index, bool new_i_size)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct dnode_of_data dn;
        int err;

        set_new_dnode(&dn, inode, npage, npage, 0);
        err = get_dnode_of_data(&dn, index, ALLOC_NODE);
        if (err)
                return ERR_PTR(err);

        if (dn.data_blkaddr == NULL_ADDR) {
                if (reserve_new_block(&dn)) {
                        if (!npage)
                                f2fs_put_dnode(&dn);
                        return ERR_PTR(-ENOSPC);
                }
        }
        if (!npage)
                f2fs_put_dnode(&dn);
repeat:
        page = grab_cache_page(mapping, index);
        if (!page)
                return ERR_PTR(-ENOMEM);

        if (PageUptodate(page))
                return page;

        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
        } else {
                err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
                if (err)
                        return ERR_PTR(err);
                lock_page(page);
                if (!PageUptodate(page)) {
                        f2fs_put_page(page, 1);
                        return ERR_PTR(-EIO);
                }
                if (page->mapping != mapping) {
                        f2fs_put_page(page, 1);
                        goto repeat;
                }
        }

        if (new_i_size &&
                i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
                i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
                /* Only the directory inode sets new_i_size */
                set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
                mark_inode_dirty_sync(inode);
        }
        return page;
}

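/*
 * Illustrative sketch (not part of the original file): a dir.c-style
 * caller grabs a zeroed dentry block and extends i_size in one call,
 * releasing the locked page when done:
 *
 *      dentry_page = get_new_data_page(dir, NULL, block, true);
 *      if (IS_ERR(dentry_page))
 *              return PTR_ERR(dentry_page);
 *      dentry_blk = kmap(dentry_page);
 *      ...
 *      kunmap(dentry_page);
 *      f2fs_put_page(dentry_page, 1);
 */
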
static void read_end_io(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (uptodate) {
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                unlock_page(page);
        } while (bvec >= bio->bi_io_vec);
        bio_put(bio);
}

/*
 * Fill the locked page with data located at the given block address.
 * The page is unlocked by the read completion handler.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
                                        block_t blk_addr, int type)
{
        struct block_device *bdev = sbi->sb->s_bdev;
        struct bio *bio;

        trace_f2fs_readpage(page, blk_addr, type);

        down_read(&sbi->bio_sem);

        /* Allocate a new bio */
        bio = f2fs_bio_alloc(bdev, 1);

        /* Initialize the bio */
        bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
        bio->bi_end_io = read_end_io;

        if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
                bio_put(bio);
                up_read(&sbi->bio_sem);
                f2fs_put_page(page, 1);
                return -EFAULT;
        }

        submit_bio(type, bio);
        up_read(&sbi->bio_sem);
        return 0;
}

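/*
 * Illustrative sketch (not part of the original file): a synchronous
 * submitter waits for read_end_io() to unlock the page and then checks
 * the result, as get_lock_data_page() does above:
 *
 *      err = f2fs_readpage(sbi, page, blk_addr, READ_SYNC);
 *      if (err)
 *              return ERR_PTR(err);
 *      lock_page(page);
 *      if (!PageUptodate(page)) {
 *              f2fs_put_page(page, 1);
 *              return ERR_PTR(-EIO);
 *      }
 */
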
/*
 * This function should be used by the data read flow only, since it does
 * not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit the VFS readahead
 * mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
{
        unsigned int blkbits = inode->i_sb->s_blocksize_bits;
        unsigned maxblocks = bh_result->b_size >> blkbits;
        struct dnode_of_data dn;
        pgoff_t pgofs;
        int err;

        /* Get the page offset from the block offset (iblock) */
        pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

        if (check_extent_cache(inode, pgofs, bh_result)) {
                trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
                return 0;
        }

        /* When reading holes, we need its node page */
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
        if (err) {
                trace_f2fs_get_data_block(inode, iblock, bh_result, err);
                return (err == -ENOENT) ? 0 : err;
        }

        /* It does not support data allocation */
        BUG_ON(create);

        if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
                int i;
                unsigned int end_offset;

                end_offset = IS_INODE(dn.node_page) ?
                                ADDRS_PER_INODE(F2FS_I(inode)) :
                                ADDRS_PER_BLOCK;

                clear_buffer_new(bh_result);

                /* Give more consecutive addresses for the readahead */
                for (i = 0; i < end_offset - dn.ofs_in_node; i++)
                        if (((datablock_addr(dn.node_page,
                                                        dn.ofs_in_node + i))
                                != (dn.data_blkaddr + i)) || maxblocks == i)
                                break;
                map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
                bh_result->b_size = (i << blkbits);
        }
        f2fs_put_dnode(&dn);
        trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
        return 0;
}

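/*
 * Worked example (illustrative, not in the original file): with 4KB
 * blocks, if the node page maps file offsets 10..13 to consecutive disk
 * blocks 5000..5003 and the caller asked for up to 8 blocks
 * (b_size = 32768), the loop above stops at i = 4 and sets
 * b_size = 4 << 12 = 16384, so mpage_readpages() can build a single bio
 * covering all four blocks.
 */
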
static int f2fs_read_data_page(struct file *file, struct page *page)
{
        return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
                        struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

int do_write_data_page(struct page *page)
{
        struct inode *inode = page->mapping->host;
        block_t old_blk_addr, new_blk_addr;
        struct dnode_of_data dn;
        int err = 0;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
        if (err)
                return err;

        old_blk_addr = dn.data_blkaddr;

        /* This page is already truncated */
        if (old_blk_addr == NULL_ADDR)
                goto out_writepage;

        set_page_writeback(page);

        /*
         * If the current allocation needs SSR, it is better to do in-place
         * writes for the updated data.
         */
        if (unlikely(old_blk_addr != NEW_ADDR &&
                        !is_cold_data(page) &&
                        need_inplace_update(inode))) {
                rewrite_data_page(F2FS_SB(inode->i_sb), page,
                                                old_blk_addr);
        } else {
                write_data_page(inode, page, &dn,
                                old_blk_addr, &new_blk_addr);
                update_extent_cache(new_blk_addr, &dn);
        }
out_writepage:
        f2fs_put_dnode(&dn);
        return err;
}

static int f2fs_write_data_page(struct page *page,
                                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = ((unsigned long long) i_size)
                                                        >> PAGE_CACHE_SHIFT;
        unsigned offset;
        bool need_balance_fs = false;
        int err = 0;

        if (page->index < end_index)
                goto write;

        /*
         * If this page is fully beyond the file size, it does not have to
         * be written to disk.
         */
        offset = i_size & (PAGE_CACHE_SIZE - 1);
        if ((page->index >= end_index + 1) || !offset) {
                if (S_ISDIR(inode->i_mode)) {
                        dec_page_count(sbi, F2FS_DIRTY_DENTS);
                        inode_dec_dirty_dents(inode);
                }
                goto out;
        }

        zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
        if (sbi->por_doing) {
                err = AOP_WRITEPAGE_ACTIVATE;
                goto redirty_out;
        }

        /* Dentry blocks are controlled by checkpoint */
        if (S_ISDIR(inode->i_mode)) {
                dec_page_count(sbi, F2FS_DIRTY_DENTS);
                inode_dec_dirty_dents(inode);
                err = do_write_data_page(page);
        } else {
                int ilock = mutex_lock_op(sbi);
                err = do_write_data_page(page);
                mutex_unlock_op(sbi, ilock);
                need_balance_fs = true;
        }
        if (err == -ENOENT)
                goto out;
        else if (err)
                goto redirty_out;

        if (wbc->for_reclaim)
                f2fs_submit_bio(sbi, DATA, true);

        clear_cold_data(page);
out:
        unlock_page(page);
        if (need_balance_fs)
                f2fs_balance_fs(sbi);
        return 0;

redirty_out:
        wbc->pages_skipped++;
        set_page_dirty(page);
        return err;
}

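/*
 * Worked example (illustrative, not in the original file): with 4KB pages
 * and i_size = 10000, end_index = 2 and offset = 10000 & 4095 = 1808.
 * Pages 0 and 1 are written whole, page 2 is zeroed from byte 1808 to the
 * end of the page before being written, and pages 3 and beyond are
 * skipped entirely.
 */
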
#define MAX_DESIRED_PAGES_WP    4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
                        void *data)
{
        struct address_space *mapping = data;
        int ret = mapping->a_ops->writepage(page, wbc);
        mapping_set_error(mapping, ret);
        return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        bool locked = false;
        int ret;
        long excess_nrtw = 0, desired_nrtw;

        /* deal with chardevs and other special files */
        if (!mapping->a_ops->writepage)
                return 0;

        if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
                desired_nrtw = MAX_DESIRED_PAGES_WP;
                excess_nrtw = desired_nrtw - wbc->nr_to_write;
                wbc->nr_to_write = desired_nrtw;
        }

        if (!S_ISDIR(inode->i_mode)) {
                mutex_lock(&sbi->writepages);
                locked = true;
        }
        ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
        if (locked)
                mutex_unlock(&sbi->writepages);
        f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

        remove_dirty_dir_inode(inode);

        wbc->nr_to_write -= excess_nrtw;
        return ret;
}

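/*
 * Worked example (illustrative, not in the original file): if the flusher
 * passes wbc->nr_to_write = 1024, it is raised to MAX_DESIRED_PAGES_WP
 * (4096) with excess_nrtw = 3072; after write_cache_pages() the excess is
 * subtracted back, so the caller's original quota accounting still holds.
 */
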
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct page *page;
        pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
        struct dnode_of_data dn;
        int err = 0;
        int ilock;

        f2fs_balance_fs(sbi);
repeat:
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
        *pagep = page;

        ilock = mutex_lock_op(sbi);

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, index, ALLOC_NODE);
        if (err)
                goto err;

        if (dn.data_blkaddr == NULL_ADDR)
                err = reserve_new_block(&dn);

        f2fs_put_dnode(&dn);
        if (err)
                goto err;

        mutex_unlock_op(sbi, ilock);

        if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
                return 0;

        if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
                unsigned start = pos & (PAGE_CACHE_SIZE - 1);
                unsigned end = start + len;

                /* Reading beyond i_size is simple: memset to zero */
                zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
                goto out;
        }

        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        } else {
                err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
                if (err)
                        return err;
                lock_page(page);
                if (!PageUptodate(page)) {
                        f2fs_put_page(page, 1);
                        return -EIO;
                }
                if (page->mapping != mapping) {
                        f2fs_put_page(page, 1);
                        goto repeat;
                }
        }
out:
        SetPageUptodate(page);
        clear_cold_data(page);
        return 0;

err:
        mutex_unlock_op(sbi, ilock);
        f2fs_put_page(page, 1);
        return err;
}

static int f2fs_write_end(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        struct inode *inode = page->mapping->host;

        SetPageUptodate(page);
        set_page_dirty(page);

        if (pos + copied > i_size_read(inode)) {
                i_size_write(inode, pos + copied);
                mark_inode_dirty(inode);
                update_inode_page(inode);
        }

        unlock_page(page);
        page_cache_release(page);
        return copied;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
                const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;

        if (rw == WRITE)
                return 0;

        /* Needs synchronization with the cleaner */
        return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
                                                  get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
                                      unsigned int length)
{
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
                dec_page_count(sbi, F2FS_DIRTY_DENTS);
                inode_dec_dirty_dents(inode);
        }
        ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
        ClearPagePrivate(page);
        return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;

        SetPageUptodate(page);
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                set_dirty_dir_page(inode, page);
                return 1;
        }
        return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
        .readpage       = f2fs_read_data_page,
        .readpages      = f2fs_read_data_pages,
        .writepage      = f2fs_write_data_page,
        .writepages     = f2fs_write_data_pages,
        .write_begin    = f2fs_write_begin,
        .write_end      = f2fs_write_end,
        .set_page_dirty = f2fs_set_data_page_dirty,
        .invalidatepage = f2fs_invalidate_data_page,
        .releasepage    = f2fs_release_data_page,
        .direct_IO      = f2fs_direct_IO,
        .bmap           = f2fs_bmap,
};
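
/*
 * Illustrative note (not part of the original file): these operations take
 * effect once inode setup points the data mapping at them, which f2fs does
 * for regular files and directories roughly as:
 *
 *      inode->i_mapping->a_ops = &f2fs_dblock_aops;
 */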