/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	wait_on_page_writeback(node_page);

	rn = (struct f2fs_node *)page_address(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

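/*
 * Usage sketch (this mirrors the pattern used by f2fs_write_begin()
 * below, not an additional API): the caller looks up or allocates the
 * dnode first, then reserves a block only if none is mapped yet.
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
 *	if (!err && dn.data_blkaddr == NULL_ADDR)
 *		err = reserve_new_block(&dn);
 *	f2fs_put_dnode(&dn);
 */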
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

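/*
 * The extent cache holds a single extent per inode: 'len' consecutive
 * file offsets starting at 'fofs', mapped to consecutive block addresses
 * starting at 'blk_addr'. On a hit this maps the buffer_head over as many
 * of those blocks as fit and returns 1; on a miss it returns 0 and the
 * caller falls back to a node-page lookup.
 */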
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	sbi->total_hit_ext++;
	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		sbi->read_hit_ext++;
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

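/*
 * Worked example of the split case below (illustrative numbers): with a
 * cached extent {fofs = 10, blk_addr = 100, len = 5} covering offsets
 * 10..14, rewriting offset 11 to a non-contiguous address drops that
 * offset and keeps the larger remaining piece, so the extent becomes
 * {fofs = 12, blk_addr = 102, len = 3}, i.e. offsets 12..14.
 */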
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	BUG_ON(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
	return;
}

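/*
 * Look up the data page at the given index without returning it locked.
 * The page is returned with a reference held (drop it with
 * f2fs_put_page(page, 0)); with sync == false the read is issued as
 * readahead, so the page may not be uptodate yet when this returns.
 */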
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block preallocated by fallocate() is NEW_ADDR and has no data yet */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If this function tries to access a hole, it returns an error, because
 * the callers (functions in dir.c and GC) need to know whether the page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	BUG_ON(dn.data_blkaddr == NEW_ADDR);
	BUG_ON(dn.data_blkaddr == NULL_ADDR);

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * The caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a mutex by calling
 * mutex_lock_op() and mutex_unlock_op().
 */
struct page *get_new_data_page(struct inode *inode, pgoff_t index,
						bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	f2fs_put_dnode(&dn);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		mark_inode_dirty_sync(inode);
	}
	return page;
}

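/*
 * Completion callback for read bios: mark each page uptodate on success,
 * or clear it and flag an error, then unlock the page and release the
 * bio together with its private data.
 */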
static void read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	kfree(bio->bi_private);
	bio_put(bio);
}

/*
 * Fill the locked page with data at the given block address.
 * The page is returned unlocked.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		kfree(bio->bi_private);
		bio_put(bio);
		up_read(&sbi->bio_sem);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);
	return 0;
}

/*
 * This function should be used by the data read flow only, since it
 * does not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit the VFS
 * readahead mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* This path does not support data allocation */
	BUG_ON(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for readahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
							dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

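/*
 * ->readpage and ->readpages are thin wrappers around the generic mpage
 * helpers with get_data_block_ro() as the block-mapping callback, so
 * contiguous extents get merged into large read bios.
 */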
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

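/*
 * Write one dirty data page either in place or out of place: when the
 * current allocation needs SSR and the data is not cold, the page is
 * rewritten at its old block address; otherwise write_data_page()
 * allocates a new block and the extent cache is updated to point at it.
 */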
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (old_blk_addr != NEW_ADDR && !is_cold_data(page) &&
				need_inplace_update(inode)) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

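/*
 * ->writepage policy: pages entirely beyond i_size are skipped, the page
 * straddling i_size is zeroed past EOF before being written, and pages
 * are redirtied (AOP_WRITEPAGE_ACTIVATE) while recovery (por_doing) is
 * in progress.
 */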
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		int ilock = mutex_lock_op(sbi);
		err = do_write_data_page(page);
		mutex_unlock_op(sbi, ilock);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

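/*
 * To build larger, better-batched bios, small writeback requests are
 * raised to MAX_DESIRED_PAGES_WP and the excess is subtracted back
 * afterwards: e.g. a request for 16 pages runs with nr_to_write = 4096
 * and excess_nrtw = 4080, so the caller still sees its own accounting.
 */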
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

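/*
 * ->write_begin grabs the page and, under the per-sb operation mutex,
 * makes sure a block is reserved for it; for partial writes it then
 * either zeroes the regions the copy will not touch (when the page lies
 * beyond i_size) or reads the old block in first.
 */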
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;
	int ilock;

	/* for nobh_write_end */
	*fsdata = NULL;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ilock = mutex_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	mutex_unlock_op(sbi, ilock);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	mutex_unlock_op(sbi, ilock);
	f2fs_put_page(page, 1);
	return err;
}

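/*
 * Direct writes are not supported here: returning 0 makes the VFS fall
 * back to buffered writes. Direct reads go through the generic blockdev
 * path with get_data_block_ro() as the mapping callback.
 */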
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						  get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= nobh_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};