linux/fs/ocfs2/aops.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/mm.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
#include "dir.h"
#include "namei.h"
#include "sysfile.h"

static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
                                   struct buffer_head *bh_result, int create)
{
        int err = -EIO;
        int status;
        struct ocfs2_dinode *fe = NULL;
        struct buffer_head *bh = NULL;
        struct buffer_head *buffer_cache_bh = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        void *kaddr;

        trace_ocfs2_symlink_get_block(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        (unsigned long long)iblock, bh_result, create);

        BUG_ON(ocfs2_inode_is_fast_symlink(inode));

        if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
                mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
                     (unsigned long long)iblock);
                goto bail;
        }

        status = ocfs2_read_inode_block(inode, &bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }
        fe = (struct ocfs2_dinode *) bh->b_data;

        if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
                                                    le32_to_cpu(fe->i_clusters))) {
                err = -ENOMEM;
                mlog(ML_ERROR, "block offset is outside the allocated size: "
                     "%llu\n", (unsigned long long)iblock);
                goto bail;
        }

        /* We don't use the page cache to create symlink data, so if
         * need be, copy it over from the buffer cache. */
        if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
                u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
                            iblock;
                buffer_cache_bh = sb_getblk(osb->sb, blkno);
                if (!buffer_cache_bh) {
                        err = -ENOMEM;
                        mlog(ML_ERROR, "couldn't getblock for symlink!\n");
                        goto bail;
                }

                /* we haven't locked out transactions, so a commit
                 * could've happened. Since we've got a reference on
                 * the bh, even if it commits while we're doing the
                 * copy, the data is still good. */
                if (buffer_jbd(buffer_cache_bh)
                    && ocfs2_inode_is_new(inode)) {
                        kaddr = kmap_atomic(bh_result->b_page);
                        if (!kaddr) {
                                mlog(ML_ERROR, "couldn't kmap!\n");
                                goto bail;
                        }
                        memcpy(kaddr + (bh_result->b_size * iblock),
                               buffer_cache_bh->b_data,
                               bh_result->b_size);
                        kunmap_atomic(kaddr);
                        set_buffer_uptodate(bh_result);
                }
                brelse(buffer_cache_bh);
        }

        map_bh(bh_result, inode->i_sb,
               le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

        err = 0;

bail:
        brelse(bh);

        return err;
}

static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
                    struct buffer_head *bh_result, int create)
{
        int ret = 0;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        down_read(&oi->ip_alloc_sem);
        ret = ocfs2_get_block(inode, iblock, bh_result, create);
        up_read(&oi->ip_alloc_sem);

        return ret;
}

int ocfs2_get_block(struct inode *inode, sector_t iblock,
                    struct buffer_head *bh_result, int create)
{
        int err = 0;
        unsigned int ext_flags;
        u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
        u64 p_blkno, count, past_eof;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
                              (unsigned long long)iblock, bh_result, create);

        if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
                mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
                     inode, inode->i_ino);

        if (S_ISLNK(inode->i_mode)) {
                /* this always does I/O for some reason. */
                err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
                goto bail;
        }

        err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
                                          &ext_flags);
        if (err) {
                mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
                     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
                     (unsigned long long)p_blkno);
                goto bail;
        }

        if (max_blocks < count)
                count = max_blocks;

        /*
         * ocfs2 never allocates in this function - the only time we
         * need to use BH_New is when we're extending i_size on a file
         * system which doesn't support holes, in which case BH_New
         * allows __block_write_begin() to zero.
         *
         * If we see this on a sparse file system, then a truncate has
         * raced us and removed the cluster. In this case, we clear
         * the buffers dirty and uptodate bits and let the buffer code
         * ignore it as a hole.
         */
        if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
                clear_buffer_dirty(bh_result);
                clear_buffer_uptodate(bh_result);
                goto bail;
        }

        /* Treat the unwritten extent as a hole for zeroing purposes. */
        if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
                map_bh(bh_result, inode->i_sb, p_blkno);

        bh_result->b_size = count << inode->i_blkbits;

        if (!ocfs2_sparse_alloc(osb)) {
                if (p_blkno == 0) {
                        err = -EIO;
                        mlog(ML_ERROR,
                             "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
                             (unsigned long long)iblock,
                             (unsigned long long)p_blkno,
                             (unsigned long long)OCFS2_I(inode)->ip_blkno);
                        mlog(ML_ERROR, "Size %llu, clusters %u\n",
                             (unsigned long long)i_size_read(inode),
                             OCFS2_I(inode)->ip_clusters);
                        dump_stack();
                        goto bail;
                }
        }

        past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

        trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
                                  (unsigned long long)past_eof);
        if (create && (iblock >= past_eof))
                set_buffer_new(bh_result);

bail:
        if (err < 0)
                err = -EIO;

        return err;
}

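/*
 * Illustrative sketch (not part of the original file): a caller that
 * wants the physical block behind byte offset 'off' of an inode could
 * use ocfs2_get_block() roughly like this; the variable names here
 * exist only for the example:
 *
 *      struct buffer_head bh = { .b_size = i_blocksize(inode) };
 *      sector_t iblock = off >> inode->i_blkbits;
 *
 *      if (!ocfs2_get_block(inode, iblock, &bh, 0) && buffer_mapped(&bh))
 *              pr_debug("byte %llu -> block %llu\n",
 *                       (unsigned long long)off,
 *                       (unsigned long long)bh.b_blocknr);
 *
 * On return, bh.b_size is trimmed to the contiguous extent actually
 * mapped (never grown past what the caller asked for), which is how
 * the multi-block mapping is communicated back.
 */
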
int ocfs2_read_inline_data(struct inode *inode, struct page *page,
                           struct buffer_head *di_bh)
{
        void *kaddr;
        loff_t size;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

        if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
                ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
                            (unsigned long long)OCFS2_I(inode)->ip_blkno);
                return -EROFS;
        }

        size = i_size_read(inode);

        if (size > PAGE_SIZE ||
            size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
                ocfs2_error(inode->i_sb,
                            "Inode %llu with inline data has a bad size: %Lu\n",
                            (unsigned long long)OCFS2_I(inode)->ip_blkno,
                            (unsigned long long)size);
                return -EROFS;
        }

        kaddr = kmap_atomic(page);
        if (size)
                memcpy(kaddr, di->id2.i_data.id_data, size);
        /* Clear the remaining part of the page */
        memset(kaddr + size, 0, PAGE_SIZE - size);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);

        SetPageUptodate(page);

        return 0;
}

static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
        int ret;
        struct buffer_head *di_bh = NULL;

        BUG_ON(!PageLocked(page));
        BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

        ret = ocfs2_read_inode_block(inode, &di_bh);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
        unlock_page(page);

        brelse(di_bh);
        return ret;
}

static int ocfs2_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        loff_t start = (loff_t)page->index << PAGE_SHIFT;
        int ret, unlock = 1;

        trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
                             (page ? page->index : 0));

        ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
        if (ret != 0) {
                if (ret == AOP_TRUNCATED_PAGE)
                        unlock = 0;
                mlog_errno(ret);
                goto out;
        }

        if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
                /*
                 * Unlock the page and cycle ip_alloc_sem so that we don't
                 * busyloop waiting for ip_alloc_sem to unlock
                 */
                ret = AOP_TRUNCATED_PAGE;
                unlock_page(page);
                unlock = 0;
                down_read(&oi->ip_alloc_sem);
                up_read(&oi->ip_alloc_sem);
                goto out_inode_unlock;
        }

        /*
         * i_size might have just been updated as we grabbed the meta lock.  We
         * might now be discovering a truncate that hit on another node.
         * block_read_full_page->get_block freaks out if it is asked to read
         * beyond the end of a file, so we check here.  Callers
         * (generic_file_read, vm_ops->fault) are clever enough to check i_size
         * and notice that the page they just read isn't needed.
         *
         * XXX sys_readahead() seems to get that wrong?
         */
        if (start >= i_size_read(inode)) {
                zero_user(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                ret = 0;
                goto out_alloc;
        }

        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
                ret = ocfs2_readpage_inline(inode, page);
        else
                ret = block_read_full_page(page, ocfs2_get_block);
        unlock = 0;

out_alloc:
        up_read(&oi->ip_alloc_sem);
out_inode_unlock:
        ocfs2_inode_unlock(inode, 0);
out:
        if (unlock)
                unlock_page(page);
        return ret;
}

/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        int ret, err = -EIO;
        struct inode *inode = mapping->host;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        loff_t start;
        struct page *last;

        /*
         * Use the nonblocking flag for the dlm code to avoid page
         * lock inversion, but don't bother with retrying.
         */
        ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
        if (ret)
                return err;

        if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
                ocfs2_inode_unlock(inode, 0);
                return err;
        }

        /*
         * Don't bother with inline-data. There isn't anything
         * to read-ahead in that case anyway...
         */
        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
                goto out_unlock;

        /*
         * Check whether a remote node truncated this file - we just
         * drop out in that case as it's not worth handling here.
         */
        last = lru_to_page(pages);
        start = (loff_t)last->index << PAGE_SHIFT;
        if (start >= i_size_read(inode))
                goto out_unlock;

        err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
        up_read(&oi->ip_alloc_sem);
        ocfs2_inode_unlock(inode, 0);

        return err;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * during block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        trace_ocfs2_writepage(
                (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
                page->index);

        return block_write_full_page(page, ocfs2_get_block, wbc);
}

/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(  handle_t *handle,
                        struct buffer_head *head,
                        unsigned from,
                        unsigned to,
                        int *partial,
                        int (*fn)(      handle_t *handle,
                                        struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (   bh = head, block_start = 0;
                ret == 0 && (bh != head || !block_start);
                block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}
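
/*
 * Usage sketch (illustrative): callers typically hand walk_page_buffers()
 * a small helper matching the fn() signature above, e.g. a hypothetical:
 *
 *      static int mark_dirty(handle_t *handle, struct buffer_head *bh)
 *      {
 *              mark_buffer_dirty(bh);
 *              return 0;
 *      }
 *
 *      ret = walk_page_buffers(handle, page_buffers(page), from, to,
 *                              &partial, mark_dirty);
 *
 * If 'partial' comes back set, some buffer outside [from, to) was not
 * uptodate, so the caller must not mark the whole page uptodate.
 */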

static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
        sector_t status;
        u64 p_blkno = 0;
        int err = 0;
        struct inode *inode = mapping->host;

        trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
                         (unsigned long long)block);

        /*
         * The swap code (ab-)uses ->bmap to get a block mapping and then
         * bypasses the file system for actual I/O.  We really can't allow
         * that on refcounted inodes, so we have to skip out here.  And yes,
         * 0 is the magic code for a bmap error..
         */
        if (ocfs2_is_refcount_inode(inode))
                return 0;

        /* We don't need to lock journal system files, since they aren't
         * accessed concurrently from multiple nodes.
         */
        if (!INODE_JOURNAL(inode)) {
                err = ocfs2_inode_lock(inode, NULL, 0);
                if (err) {
                        if (err != -ENOENT)
                                mlog_errno(err);
                        goto bail;
                }
                down_read(&OCFS2_I(inode)->ip_alloc_sem);
        }

        if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
                err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
                                                  NULL);

        if (!INODE_JOURNAL(inode)) {
                up_read(&OCFS2_I(inode)->ip_alloc_sem);
                ocfs2_inode_unlock(inode, 0);
        }

        if (err) {
                mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
                     (unsigned long long)block);
                mlog_errno(err);
                goto bail;
        }

bail:
        status = err ? 0 : p_blkno;

        return status;
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
        if (!page_has_buffers(page))
                return 0;
        return try_to_free_buffers(page);
}

static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
                                            u32 cpos,
                                            unsigned int *start,
                                            unsigned int *end)
{
        unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;

        if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
                unsigned int cpp;

                cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);

                cluster_start = cpos % cpp;
                cluster_start = cluster_start << osb->s_clustersize_bits;

                cluster_end = cluster_start + osb->s_clustersize;
        }

        BUG_ON(cluster_start > PAGE_SIZE);
        BUG_ON(cluster_end > PAGE_SIZE);

        if (start)
                *start = cluster_start;
        if (end)
                *end = cluster_end;
}
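
/*
 * Worked example (illustrative): with 4K pages (PAGE_SHIFT == 12) and
 * 1K clusters (s_clustersize_bits == 10), cpp == 1 << (12 - 10) == 4
 * clusters per page. For cpos == 6:
 *
 *      cluster_start = (6 % 4) << 10 == 2048
 *      cluster_end   = 2048 + 1024  == 3072
 *
 * i.e. cluster 6 covers bytes 2048..3071 of its page. When the cluster
 * size is >= the page size, the boundaries are simply the whole page,
 * [0, PAGE_SIZE).
 */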

/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
                                     struct ocfs2_super *osb, u32 cpos,
                                     unsigned from, unsigned to)
{
        void *kaddr;
        unsigned int cluster_start, cluster_end;

        ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

        kaddr = kmap_atomic(page);

        if (from || to) {
                if (from > cluster_start)
                        memset(kaddr + cluster_start, 0, from - cluster_start);
                if (to < cluster_end)
                        memset(kaddr + to, 0, cluster_end - to);
        } else {
                memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
        }

        kunmap_atomic(kaddr);
}
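
/*
 * Example (illustrative): for the 1K cluster above covering bytes
 * 2048..3071 of its page, a call with from == 2100 and to == 2600
 * zeroes bytes 2048..2099 and 2600..3071, leaving the caller's
 * 2100..2599 region untouched; from == to == 0 zeroes all of
 * 2048..3071.
 */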

/*
 * Nonsparse file systems fully allocate before we get to the write
 * code. This prevents ocfs2_write() from tagging the write as an
 * allocating one, which means ocfs2_map_page_blocks() might try to
 * read-in the blocks at the tail of our file. Avoid reading them by
 * testing i_size against each block offset.
 */
static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
                                 unsigned int block_start)
{
        u64 offset = page_offset(page) + block_start;

        if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
                return 1;

        if (i_size_read(inode) > offset)
                return 1;

        return 0;
}

/*
 * Some of this taken from __block_write_begin(). We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
                          struct inode *inode, unsigned int from,
                          unsigned int to, int new)
{
        int ret = 0;
        struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
        unsigned int block_end, block_start;
        unsigned int bsize = i_blocksize(inode);

        if (!page_has_buffers(page))
                create_empty_buffers(page, bsize, 0);

        head = page_buffers(page);
        for (bh = head, block_start = 0; bh != head || !block_start;
             bh = bh->b_this_page, block_start += bsize) {
                block_end = block_start + bsize;

                clear_buffer_new(bh);

                /*
                 * Ignore blocks outside of our i/o range -
                 * they may belong to unallocated clusters.
                 */
                if (block_start >= to || block_end <= from) {
                        if (PageUptodate(page))
                                set_buffer_uptodate(bh);
                        continue;
                }

                /*
                 * For an allocating write with cluster size >= page
                 * size, we always write the entire page.
                 */
                if (new)
                        set_buffer_new(bh);

                if (!buffer_mapped(bh)) {
                        map_bh(bh, inode->i_sb, *p_blkno);
                        clean_bdev_bh_alias(bh);
                }

                if (PageUptodate(page)) {
                        if (!buffer_uptodate(bh))
                                set_buffer_uptodate(bh);
                } else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
                           !buffer_new(bh) &&
                           ocfs2_should_read_blk(inode, page, block_start) &&
                           (block_start < from || block_end > to)) {
                        ll_rw_block(REQ_OP_READ, 0, 1, &bh);
                        *wait_bh++ = bh;
                }

                *p_blkno = *p_blkno + 1;
        }

        /*
         * If we issued read requests - let them complete.
         */
        while(wait_bh > wait) {
                wait_on_buffer(*--wait_bh);
                if (!buffer_uptodate(*wait_bh))
                        ret = -EIO;
        }

        if (ret == 0 || !new)
                return ret;

        /*
         * If we get -EIO above, zero out any newly allocated blocks
         * to avoid exposing stale data.
         */
        bh = head;
        block_start = 0;
        do {
                block_end = block_start + bsize;
                if (block_end <= from)
                        goto next_bh;
                if (block_start >= to)
                        break;

                zero_user(page, block_start, bh->b_size);
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);

next_bh:
                block_start = block_end;
                bh = bh->b_this_page;
        } while (bh != head);

        return ret;
}

#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES    1
#else
#define OCFS2_MAX_CTXT_PAGES    (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
#endif

#define OCFS2_MAX_CLUSTERS_PER_PAGE     (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)

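/*
 * For scale (illustrative, assuming the usual ocfs2 limits of a 4K
 * minimum and 1M maximum cluster size): with 4K pages this works out
 * to OCFS2_MAX_CTXT_PAGES == 256 pages per write context and at most
 * one cluster per page; with 64K pages and 4K clusters, a single page
 * instead spans up to 16 clusters.
 */
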
struct ocfs2_unwritten_extent {
        struct list_head        ue_node;
        struct list_head        ue_ip_node;
        u32                     ue_cpos;
        u32                     ue_phys;
};

/*
 * Describe the state of a single cluster to be written to.
 */
struct ocfs2_write_cluster_desc {
        u32             c_cpos;
        u32             c_phys;
        /*
         * Give this a unique field because c_phys eventually gets
         * filled.
         */
        unsigned        c_new;
        unsigned        c_clear_unwritten;
        unsigned        c_needs_zero;
};

struct ocfs2_write_ctxt {
        /* Logical cluster position / len of write */
        u32                             w_cpos;
        u32                             w_clen;

        /* First cluster allocated in a nonsparse extend */
        u32                             w_first_new_cpos;

        /* Type of caller. Must be one of buffer, mmap, direct.  */
        ocfs2_write_type_t              w_type;

        struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

        /*
         * This is true if page_size > cluster_size.
         *
         * It triggers a set of special cases during write which might
         * have to deal with allocating writes to partial pages.
         */
        unsigned int                    w_large_pages;

        /*
         * Pages involved in this write.
         *
         * w_target_page is the page being written to by the user.
         *
         * w_pages is an array of pages which always contains
         * w_target_page, and in the case of an allocating write with
         * page_size < cluster size, it will contain zero'd and mapped
         * pages adjacent to w_target_page which need to be written
         * out so that future reads from that region will get
         * zeros.
         */
        unsigned int                    w_num_pages;
        struct page                     *w_pages[OCFS2_MAX_CTXT_PAGES];
        struct page                     *w_target_page;

        /*
         * w_target_locked is used in the page_mkwrite() path to indicate
         * that w_target_page should not be unlocked in
         * ocfs2_write_end_nolock().
         */
        unsigned int                    w_target_locked:1;

        /*
         * ocfs2_write_end() uses this to know what the real range to
         * write in the target should be.
         */
        unsigned int                    w_target_from;
        unsigned int                    w_target_to;

        /*
         * We could use journal_current_handle() but this is cleaner,
         * IMHO -Mark
         */
        handle_t                        *w_handle;

        struct buffer_head              *w_di_bh;

        struct ocfs2_cached_dealloc_ctxt w_dealloc;

        struct list_head                w_unwritten_list;
        unsigned int                    w_unwritten_count;
};

void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
        int i;

        for(i = 0; i < num_pages; i++) {
                if (pages[i]) {
                        unlock_page(pages[i]);
                        mark_page_accessed(pages[i]);
                        put_page(pages[i]);
                }
        }
}

static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
{
        int i;

        /*
         * w_target_locked is only set to true in the page_mkwrite() case.
         * The intent is to allow us to lock the target page from write_begin()
         * to write_end(). The caller must hold a ref on w_target_page.
         */
        if (wc->w_target_locked) {
                BUG_ON(!wc->w_target_page);
                for (i = 0; i < wc->w_num_pages; i++) {
                        if (wc->w_target_page == wc->w_pages[i]) {
                                wc->w_pages[i] = NULL;
                                break;
                        }
                }
                mark_page_accessed(wc->w_target_page);
                put_page(wc->w_target_page);
        }
        ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
}

static void ocfs2_free_unwritten_list(struct inode *inode,
                                 struct list_head *head)
{
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL;

        list_for_each_entry_safe(ue, tmp, head, ue_node) {
                list_del(&ue->ue_node);
                spin_lock(&oi->ip_lock);
                list_del(&ue->ue_ip_node);
                spin_unlock(&oi->ip_lock);
                kfree(ue);
        }
}

static void ocfs2_free_write_ctxt(struct inode *inode,
                                  struct ocfs2_write_ctxt *wc)
{
        ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
        ocfs2_unlock_pages(wc);
        brelse(wc->w_di_bh);
        kfree(wc);
}

static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
                                  struct ocfs2_super *osb, loff_t pos,
                                  unsigned len, ocfs2_write_type_t type,
                                  struct buffer_head *di_bh)
{
        u32 cend;
        struct ocfs2_write_ctxt *wc;

        wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
        if (!wc)
                return -ENOMEM;

        wc->w_cpos = pos >> osb->s_clustersize_bits;
        wc->w_first_new_cpos = UINT_MAX;
        cend = (pos + len - 1) >> osb->s_clustersize_bits;
        wc->w_clen = cend - wc->w_cpos + 1;
        get_bh(di_bh);
        wc->w_di_bh = di_bh;
        wc->w_type = type;

        if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
                wc->w_large_pages = 1;
        else
                wc->w_large_pages = 0;

        ocfs2_init_dealloc_ctxt(&wc->w_dealloc);
        INIT_LIST_HEAD(&wc->w_unwritten_list);

        *wcp = wc;

        return 0;
}
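
/*
 * Worked example (illustrative): with 4K clusters
 * (s_clustersize_bits == 12), a write of len == 10000 bytes at
 * pos == 5000 yields:
 *
 *      w_cpos = 5000 >> 12               == 1
 *      cend   = (5000 + 10000 - 1) >> 12 == 3
 *      w_clen = 3 - 1 + 1                == 3
 *
 * i.e. three cluster descriptors (for clusters 1, 2 and 3) will be
 * populated for this write.
 */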

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
        unsigned int block_start, block_end;
        struct buffer_head *head, *bh;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                return;

        bh = head = page_buffers(page);
        block_start = 0;
        do {
                block_end = block_start + bh->b_size;

                if (buffer_new(bh)) {
                        if (block_end > from && block_start < to) {
                                if (!PageUptodate(page)) {
                                        unsigned start, end;

                                        start = max(from, block_start);
                                        end = min(to, block_end);

                                        zero_user_segment(page, start, end);
                                        set_buffer_uptodate(bh);
                                }

                                clear_buffer_new(bh);
                                mark_buffer_dirty(bh);
                        }
                }

                block_start = block_end;
                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Only called when we have a failure during an allocating write, in
 * order to write zeros to the newly allocated region.
 */
static void ocfs2_write_failure(struct inode *inode,
                                struct ocfs2_write_ctxt *wc,
                                loff_t user_pos, unsigned user_len)
{
        int i;
        unsigned from = user_pos & (PAGE_SIZE - 1),
                to = user_pos + user_len;
        struct page *tmppage;

        if (wc->w_target_page)
                ocfs2_zero_new_buffers(wc->w_target_page, from, to);

        for(i = 0; i < wc->w_num_pages; i++) {
                tmppage = wc->w_pages[i];

                if (tmppage && page_has_buffers(tmppage)) {
                        if (ocfs2_should_order_data(inode))
                                ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
                                                           user_pos, user_len);

                        block_commit_write(tmppage, from, to);
                }
        }
}

static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
                                        struct ocfs2_write_ctxt *wc,
                                        struct page *page, u32 cpos,
                                        loff_t user_pos, unsigned user_len,
                                        int new)
{
        int ret;
        unsigned int map_from = 0, map_to = 0;
        unsigned int cluster_start, cluster_end;
        unsigned int user_data_from = 0, user_data_to = 0;

        ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
                                        &cluster_start, &cluster_end);

        /* treat the write as new if a hole/lseek spanned across
         * the page boundary.
         */
        new = new | ((i_size_read(inode) <= page_offset(page)) &&
                        (page_offset(page) <= user_pos));

        if (page == wc->w_target_page) {
                map_from = user_pos & (PAGE_SIZE - 1);
                map_to = map_from + user_len;

                if (new)
                        ret = ocfs2_map_page_blocks(page, p_blkno, inode,
                                                    cluster_start, cluster_end,
                                                    new);
                else
                        ret = ocfs2_map_page_blocks(page, p_blkno, inode,
                                                    map_from, map_to, new);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                user_data_from = map_from;
                user_data_to = map_to;
                if (new) {
                        map_from = cluster_start;
                        map_to = cluster_end;
                }
        } else {
                /*
                 * If we haven't allocated the new page yet, we
                 * shouldn't be writing it out without copying user
                 * data. This is likely a math error from the caller.
                 */
                BUG_ON(!new);

                map_from = cluster_start;
                map_to = cluster_end;

                ret = ocfs2_map_page_blocks(page, p_blkno, inode,
                                            cluster_start, cluster_end, new);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * Parts of newly allocated pages need to be zero'd.
         *
         * Above, we have also rewritten 'to' and 'from' - as far as
         * the rest of the function is concerned, the entire cluster
         * range inside of a page needs to be written.
         *
         * We can skip this if the page is up to date - it's already
         * been zero'd from being read in as a hole.
         */
        if (new && !PageUptodate(page))
                ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
                                         cpos, user_data_from, user_data_to);

        flush_dcache_page(page);

out:
        return ret;
}

/*
 * This function will only grab one cluster's worth of pages.
 */
static int ocfs2_grab_pages_for_write(struct address_space *mapping,
                                      struct ocfs2_write_ctxt *wc,
                                      u32 cpos, loff_t user_pos,
                                      unsigned user_len, int new,
                                      struct page *mmap_page)
{
        int ret = 0, i;
        unsigned long start, target_index, end_index, index;
        struct inode *inode = mapping->host;
        loff_t last_byte;

        target_index = user_pos >> PAGE_SHIFT;

        /*
         * Figure out how many pages we'll be manipulating here. For
         * a non-allocating write, we just change the one
         * page. Otherwise, we'll need a whole cluster's worth.  If we're
         * writing past i_size, we only need enough pages to cover the
         * last page of the write.
         */
        if (new) {
                wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
                start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
                /*
                 * We need the index *past* the last page we could possibly
                 * touch.  This is the page past the end of the write or
                 * i_size, whichever is greater.
                 */
                last_byte = max(user_pos + user_len, i_size_read(inode));
                BUG_ON(last_byte < 1);
                end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
                if ((start + wc->w_num_pages) > end_index)
                        wc->w_num_pages = end_index - start;
        } else {
                wc->w_num_pages = 1;
                start = target_index;
        }
        end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;

        for(i = 0; i < wc->w_num_pages; i++) {
                index = start + i;

                if (index >= target_index && index <= end_index &&
                    wc->w_type == OCFS2_WRITE_MMAP) {
                        /*
                         * ocfs2_pagemkwrite() is a little different
                         * and wants us to directly use the page
                         * passed in.
                         */
                        lock_page(mmap_page);

                        /* Exit and let the caller retry */
                        if (mmap_page->mapping != mapping) {
                                WARN_ON(mmap_page->mapping);
                                unlock_page(mmap_page);
                                ret = -EAGAIN;
                                goto out;
                        }

                        get_page(mmap_page);
                        wc->w_pages[i] = mmap_page;
                        wc->w_target_locked = true;
                } else if (index >= target_index && index <= end_index &&
                           wc->w_type == OCFS2_WRITE_DIRECT) {
                        /* Direct write has no mapping page. */
                        wc->w_pages[i] = NULL;
                        continue;
                } else {
                        wc->w_pages[i] = find_or_create_page(mapping, index,
                                                             GFP_NOFS);
                        if (!wc->w_pages[i]) {
                                ret = -ENOMEM;
                                mlog_errno(ret);
                                goto out;
                        }
                }
                wait_for_stable_page(wc->w_pages[i]);

                if (index == target_index)
                        wc->w_target_page = wc->w_pages[i];
        }
out:
        if (ret)
                wc->w_target_locked = false;
        return ret;
}
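
/*
 * Example of the clamping above (illustrative, assuming 4K pages and
 * 16K clusters): an allocating write starts out wanting the full
 * cluster, w_num_pages == 4. If the write lies entirely past i_size
 * and only touches the first page of the cluster, last_byte falls in
 * that page, end_index == start + 1, and w_num_pages is trimmed to 1
 * so pages past the end of the write are never locked or zeroed.
 */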

/*
 * Prepare a single cluster of the file to be written to.
 */
static int ocfs2_write_cluster(struct address_space *mapping,
                               u32 *phys, unsigned int new,
                               unsigned int clear_unwritten,
                               unsigned int should_zero,
                               struct ocfs2_alloc_context *data_ac,
                               struct ocfs2_alloc_context *meta_ac,
                               struct ocfs2_write_ctxt *wc, u32 cpos,
                               loff_t user_pos, unsigned user_len)
{
        int ret, i;
        u64 p_blkno;
        struct inode *inode = mapping->host;
        struct ocfs2_extent_tree et;
        int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);

        if (new) {
                u32 tmp_pos;

                /*
                 * This is safe to call with the page locks - it won't take
                 * any additional semaphores or cluster locks.
                 */
                tmp_pos = cpos;
                ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
                                           &tmp_pos, 1, !clear_unwritten,
                                           wc->w_di_bh, wc->w_handle,
                                           data_ac, meta_ac, NULL);
                /*
                 * This shouldn't happen because we must have already
                 * calculated the correct meta data allocation required. The
                 * internal tree allocation code should know how to increase
                 * transaction credits itself.
                 *
                 * If need be, we could handle -EAGAIN for a
                 * RESTART_TRANS here.
                 */
                mlog_bug_on_msg(ret == -EAGAIN,
                                "Inode %llu: EAGAIN return during allocation.\n",
                                (unsigned long long)OCFS2_I(inode)->ip_blkno);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }
        } else if (clear_unwritten) {
                ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
                                              wc->w_di_bh);
                ret = ocfs2_mark_extent_written(inode, &et,
                                                wc->w_handle, cpos, 1, *phys,
                                                meta_ac, &wc->w_dealloc);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * The only reason this should fail is due to an inability to
         * find the extent added.
         */
        ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL);
        if (ret < 0) {
                mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
                            "at logical cluster %u",
                            (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
                goto out;
        }

        BUG_ON(*phys == 0);

        p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys);
        if (!should_zero)
                p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);

        for(i = 0; i < wc->w_num_pages; i++) {
                int tmpret;

                /* This is the direct io target page. */
                if (wc->w_pages[i] == NULL) {
                        p_blkno++;
                        continue;
                }

                tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
                                                      wc->w_pages[i], cpos,
                                                      user_pos, user_len,
                                                      should_zero);
                if (tmpret) {
                        mlog_errno(tmpret);
                        if (ret == 0)
                                ret = tmpret;
                }
        }

        /*
         * We only have cleanup to do in the case of an allocating write.
         */
        if (ret && new)
                ocfs2_write_failure(inode, wc, user_pos, user_len);

out:

        return ret;
}

static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
                                       struct ocfs2_alloc_context *data_ac,
                                       struct ocfs2_alloc_context *meta_ac,
                                       struct ocfs2_write_ctxt *wc,
                                       loff_t pos, unsigned len)
{
        int ret, i;
        loff_t cluster_off;
        unsigned int local_len = len;
        struct ocfs2_write_cluster_desc *desc;
        struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);

        for (i = 0; i < wc->w_clen; i++) {
                desc = &wc->w_desc[i];

                /*
                 * We have to make sure that the total write passed in
                 * doesn't extend past a single cluster.
                 */
                local_len = len;
                cluster_off = pos & (osb->s_clustersize - 1);
                if ((cluster_off + local_len) > osb->s_clustersize)
                        local_len = osb->s_clustersize - cluster_off;

                ret = ocfs2_write_cluster(mapping, &desc->c_phys,
                                          desc->c_new,
                                          desc->c_clear_unwritten,
                                          desc->c_needs_zero,
                                          data_ac, meta_ac,
                                          wc, desc->c_cpos, pos, local_len);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                len -= local_len;
                pos += local_len;
        }

        ret = 0;
out:
        return ret;
}
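
/*
 * Illustrative split (assuming 4K clusters): the 10000-byte write at
 * pos == 5000 from the earlier example reaches ocfs2_write_cluster()
 * in three pieces:
 *
 *      pos  5000, len 3192   (tail of cluster 1)
 *      pos  8192, len 4096   (all of cluster 2)
 *      pos 12288, len 2712   (head of cluster 3)
 *
 * one call per descriptor, matching w_clen == 3.
 */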

/*
 * ocfs2_write_end() wants to know which parts of the target page it
 * should complete the write on. It's easiest to compute them ahead of
 * time when a more complete view of the write is available.
 */
static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
                                        struct ocfs2_write_ctxt *wc,
                                        loff_t pos, unsigned len, int alloc)
{
        struct ocfs2_write_cluster_desc *desc;

        wc->w_target_from = pos & (PAGE_SIZE - 1);
        wc->w_target_to = wc->w_target_from + len;

        if (alloc == 0)
                return;

        /*
         * Allocating write - we may have different boundaries based
         * on page size and cluster size.
         *
         * NOTE: We can no longer compute one value from the other as
         * the actual write length and user provided length may be
         * different.
         */

        if (wc->w_large_pages) {
                /*
                 * We only care about the 1st and last cluster within
                 * our range and whether they should be zero'd or not. Either
                 * value may be extended out to the start/end of a
                 * newly allocated cluster.
                 */
                desc = &wc->w_desc[0];
                if (desc->c_needs_zero)
                        ocfs2_figure_cluster_boundaries(osb,
                                                        desc->c_cpos,
                                                        &wc->w_target_from,
                                                        NULL);

                desc = &wc->w_desc[wc->w_clen - 1];
                if (desc->c_needs_zero)
                        ocfs2_figure_cluster_boundaries(osb,
                                                        desc->c_cpos,
                                                        NULL,
                                                        &wc->w_target_to);
        } else {
                wc->w_target_from = 0;
                wc->w_target_to = PAGE_SIZE;
        }
}

/*
 * Check if this extent is marked UNWRITTEN by direct io. If so, we don't
 * need to do the zeroing work, and must not clear UNWRITTEN, since it will
 * be cleared by the direct io procedure.
 * If this is a new extent that was allocated by direct io, we should mark
 * it in the ip_unwritten_list.
 */
static int ocfs2_unwritten_check(struct inode *inode,
                                 struct ocfs2_write_ctxt *wc,
                                 struct ocfs2_write_cluster_desc *desc)
{
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_unwritten_extent *ue = NULL, *new = NULL;
        int ret = 0;

        if (!desc->c_needs_zero)
                return 0;

retry:
        spin_lock(&oi->ip_lock);
        /* No need to zero, no matter whether this is a buffered or direct
         * write: whoever zeroes the cluster does so, and will clear the
         * unwritten flag after all of the cluster's io has finished. */
        list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) {
                if (desc->c_cpos == ue->ue_cpos) {
                        BUG_ON(desc->c_new);
                        desc->c_needs_zero = 0;
                        desc->c_clear_unwritten = 0;
                        goto unlock;
                }
        }

        if (wc->w_type != OCFS2_WRITE_DIRECT)
                goto unlock;

        if (new == NULL) {
                spin_unlock(&oi->ip_lock);
                new = kmalloc(sizeof(struct ocfs2_unwritten_extent),
                             GFP_NOFS);
                if (new == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }
                goto retry;
        }
        /* This direct write will do the zeroing. */
        new->ue_cpos = desc->c_cpos;
        new->ue_phys = desc->c_phys;
        desc->c_clear_unwritten = 0;
        list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list);
        list_add_tail(&new->ue_node, &wc->w_unwritten_list);
        wc->w_unwritten_count++;
        new = NULL;
unlock:
        spin_unlock(&oi->ip_lock);
out:
        kfree(new);
        return ret;
}

/*
 * Populate each single-cluster write descriptor in the write context
 * with information about the i/o to be done.
 *
 * Returns the number of clusters that will have to be allocated, as
 * well as a worst case estimate of the number of extent records that
 * would have to be created during a write to an unwritten region.
 */
static int ocfs2_populate_write_desc(struct inode *inode,
                                     struct ocfs2_write_ctxt *wc,
                                     unsigned int *clusters_to_alloc,
                                     unsigned int *extents_to_split)
{
        int ret;
        struct ocfs2_write_cluster_desc *desc;
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;
        u32 phys = 0;
        int i;

        *clusters_to_alloc = 0;
        *extents_to_split = 0;

        for (i = 0; i < wc->w_clen; i++) {
                desc = &wc->w_desc[i];
                desc->c_cpos = wc->w_cpos + i;

                if (num_clusters == 0) {
                        /*
                         * Need to look up the next extent record.
                         */
                        ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
                                                 &num_clusters, &ext_flags);
                        if (ret) {
                                mlog_errno(ret);
                                goto out;
                        }

                        /* We should have already CoWed the refcounted extent. */
1425                        BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
1426
1427                        /*
1428                         * Assume worst case - that we're writing in
1429                         * the middle of the extent.
1430                         *
1431                         * We can assume that the write proceeds from
1432                         * left to right, in which case the extent
1433                         * insert code is smart enough to coalesce the
1434                         * next splits into the previous records created.
1435                         */
1436                        if (ext_flags & OCFS2_EXT_UNWRITTEN)
1437                                *extents_to_split = *extents_to_split + 2;
1438                } else if (phys) {
1439                        /*
1440                         * Only increment phys if it doesn't describe
1441                         * a hole.
1442                         */
1443                        phys++;
1444                }
1445
1446                /*
1447                 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
1448                 * file that got extended.  w_first_new_cpos tells us
1449                 * where the newly allocated clusters are so we can
1450                 * zero them.
1451                 */
1452                if (desc->c_cpos >= wc->w_first_new_cpos) {
1453                        BUG_ON(phys == 0);
1454                        desc->c_needs_zero = 1;
1455                }
1456
1457                desc->c_phys = phys;
1458                if (phys == 0) {
1459                        desc->c_new = 1;
1460                        desc->c_needs_zero = 1;
1461                        desc->c_clear_unwritten = 1;
1462                        *clusters_to_alloc = *clusters_to_alloc + 1;
1463                }
1464
1465                if (ext_flags & OCFS2_EXT_UNWRITTEN) {
1466                        desc->c_clear_unwritten = 1;
1467                        desc->c_needs_zero = 1;
1468                }
1469
1470                ret = ocfs2_unwritten_check(inode, wc, desc);
1471                if (ret) {
1472                        mlog_errno(ret);
1473                        goto out;
1474                }
1475
1476                num_clusters--;
1477        }
1478
1479        ret = 0;
1480out:
1481        return ret;
1482}
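
/*
 * Illustrative sketch, not part of this file: a minimal userspace replay
 * of the worst-case accounting above. Writing into an unwritten extent
 * may split one record into up to three (unwritten left / written middle /
 * unwritten right), i.e. up to two extra records, which is why each
 * unwritten extent encountered adds 2 to *extents_to_split, while each
 * unmapped cluster (phys == 0) adds 1 to *clusters_to_alloc. The toy
 * per-cluster map is hypothetical and only roughly mirrors the
 * per-extent-record logic above.
 */
#if 0   /* example only */
#include <stdio.h>

enum toy_state { HOLE, WRITTEN, UNWRITTEN };

/* One entry per cluster of a toy file. */
static const enum toy_state toy_map[6] = {
        WRITTEN, UNWRITTEN, UNWRITTEN, HOLE, HOLE, WRITTEN,
};

int main(void)
{
        unsigned int to_alloc = 0, to_split = 0;
        enum toy_state prev = WRITTEN;
        int c;

        for (c = 0; c < 6; c++) {
                if (toy_map[c] == HOLE)
                        to_alloc++;             /* hole: must allocate */
                else if (toy_map[c] == UNWRITTEN && prev != UNWRITTEN)
                        to_split += 2;          /* worst case 3-way split */
                prev = toy_map[c];
        }
        /* Prints clusters_to_alloc=2 extents_to_split=2. */
        printf("clusters_to_alloc=%u extents_to_split=%u\n",
               to_alloc, to_split);
        return 0;
}
#endif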
1483
1484static int ocfs2_write_begin_inline(struct address_space *mapping,
1485                                    struct inode *inode,
1486                                    struct ocfs2_write_ctxt *wc)
1487{
1488        int ret;
1489        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1490        struct page *page;
1491        handle_t *handle;
1492        struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1493
1494        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1495        if (IS_ERR(handle)) {
1496                ret = PTR_ERR(handle);
1497                mlog_errno(ret);
1498                goto out;
1499        }
1500
1501        page = find_or_create_page(mapping, 0, GFP_NOFS);
1502        if (!page) {
1503                ocfs2_commit_trans(osb, handle);
1504                ret = -ENOMEM;
1505                mlog_errno(ret);
1506                goto out;
1507        }
1508        /*
1509         * If we don't set w_num_pages then this page won't get unlocked
1510         * and freed on cleanup of the write context.
1511         */
1512        wc->w_pages[0] = wc->w_target_page = page;
1513        wc->w_num_pages = 1;
1514
1515        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
1516                                      OCFS2_JOURNAL_ACCESS_WRITE);
1517        if (ret) {
1518                ocfs2_commit_trans(osb, handle);
1519
1520                mlog_errno(ret);
1521                goto out;
1522        }
1523
1524        if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
1525                ocfs2_set_inode_data_inline(inode, di);
1526
1527        if (!PageUptodate(page)) {
1528                ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
1529                if (ret) {
1530                        ocfs2_commit_trans(osb, handle);
1531
1532                        goto out;
1533                }
1534        }
1535
1536        wc->w_handle = handle;
1537out:
1538        return ret;
1539}
1540
1541int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
1542{
1543        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1544
1545        if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
1546                return 1;
1547        return 0;
1548}
1549
1550static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
1551                                          struct inode *inode, loff_t pos,
1552                                          unsigned len, struct page *mmap_page,
1553                                          struct ocfs2_write_ctxt *wc)
1554{
1555        int ret, written = 0;
1556        loff_t end = pos + len;
1557        struct ocfs2_inode_info *oi = OCFS2_I(inode);
1558        struct ocfs2_dinode *di = NULL;
1559
1560        trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
1561                                             len, (unsigned long long)pos,
1562                                             oi->ip_dyn_features);
1563
1564        /*
1565         * Handle inodes which already have inline data first.
1566         */
1567        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1568                if (mmap_page == NULL &&
1569                    ocfs2_size_fits_inline_data(wc->w_di_bh, end))
1570                        goto do_inline_write;
1571
1572                /*
1573                 * The write won't fit - we have to give this inode an
1574                 * extent list now.
1575                 */
1576                ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
1577                if (ret)
1578                        mlog_errno(ret);
1579                goto out;
1580        }
1581
1582        /*
1583         * Check whether the inode can accept inline data.
1584         */
1585        if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
1586                return 0;
1587
1588        /*
1589         * Check whether the write can fit.
1590         */
1591        di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1592        if (mmap_page ||
1593            end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
1594                return 0;
1595
1596do_inline_write:
1597        ret = ocfs2_write_begin_inline(mapping, inode, wc);
1598        if (ret) {
1599                mlog_errno(ret);
1600                goto out;
1601        }
1602
1603        /*
1604         * This signals to the caller that the data can be written
1605         * inline.
1606         */
1607        written = 1;
1608out:
1609        return written ? written : ret;
1610}
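
/*
 * Illustrative sketch, not part of this file: the inline-write decision
 * above reduced to a pure predicate. "max_inline" stands in for both the
 * id_count check in ocfs2_size_fits_inline_data() and the
 * ocfs2_max_inline_data_with_xattr() capacity check, and all toy_* names
 * are hypothetical. Writes via mmap (mmap_page != NULL) never go inline.
 */
#if 0   /* example only */
static int toy_can_write_inline(int has_inline_fl, int is_mmap_write,
                                unsigned long long write_end,
                                unsigned long long max_inline,
                                unsigned int clusters,
                                unsigned long long i_size)
{
        if (is_mmap_write)
                return 0;                       /* mmap writes never inline */
        if (has_inline_fl)                      /* already inline: size check */
                return write_end <= max_inline;
        if (clusters != 0 || i_size != 0)
                return 0;                       /* data already exists */
        return write_end <= max_inline;         /* empty file: may go inline */
}
#endif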
1611
1612/*
1613 * This function only does anything for file systems which can't
1614 * handle sparse files.
1615 *
1616 * What we want to do here is fill in any hole between the current end
1617 * of allocation and the end of our write. That way the rest of the
1618 * write path can treat it as a non-allocating write, which has no
1619 * special case code for sparse/nonsparse files.
1620 */
1621static int ocfs2_expand_nonsparse_inode(struct inode *inode,
1622                                        struct buffer_head *di_bh,
1623                                        loff_t pos, unsigned len,
1624                                        struct ocfs2_write_ctxt *wc)
1625{
1626        int ret;
1627        loff_t newsize = pos + len;
1628
1629        BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
1630
1631        if (newsize <= i_size_read(inode))
1632                return 0;
1633
1634        ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
1635        if (ret)
1636                mlog_errno(ret);
1637
1638        /* There is no wc if this is called from the direct I/O path. */
1639        if (wc)
1640                wc->w_first_new_cpos =
1641                        ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
1642
1643        return ret;
1644}
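
/*
 * Illustrative sketch, not part of this file: the w_first_new_cpos
 * arithmetic above with hypothetical numbers. With 32K clusters, a 40K
 * file occupies clusters 0 and 1, so rounding the current i_size up to
 * whole clusters yields cluster 2 - the first cluster beyond the old
 * EOF, which ocfs2_extend_no_holes() newly allocated and which therefore
 * needs zeroing.
 */
#if 0   /* example only */
#include <stdio.h>

static unsigned long long clusters_for_bytes(unsigned long long bytes,
                                             unsigned int cl_bits)
{
        /* Round the byte count up to whole clusters. */
        return (bytes + (1ULL << cl_bits) - 1) >> cl_bits;
}

int main(void)
{
        unsigned int cl_bits = 15;              /* 32K clusters */
        unsigned long long i_size = 40 * 1024;  /* current i_size */

        /* Prints w_first_new_cpos = 2. */
        printf("w_first_new_cpos = %llu\n",
               clusters_for_bytes(i_size, cl_bits));
        return 0;
}
#endif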
1645
1646static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
1647                           loff_t pos)
1648{
1649        int ret = 0;
1650
1651        BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
1652        if (pos > i_size_read(inode))
1653                ret = ocfs2_zero_extend(inode, di_bh, pos);
1654
1655        return ret;
1656}
1657
1658int ocfs2_write_begin_nolock(struct address_space *mapping,
1659                             loff_t pos, unsigned len, ocfs2_write_type_t type,
1660                             struct page **pagep, void **fsdata,
1661                             struct buffer_head *di_bh, struct page *mmap_page)
1662{
1663        int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
1664        unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
1665        struct ocfs2_write_ctxt *wc;
1666        struct inode *inode = mapping->host;
1667        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1668        struct ocfs2_dinode *di;
1669        struct ocfs2_alloc_context *data_ac = NULL;
1670        struct ocfs2_alloc_context *meta_ac = NULL;
1671        handle_t *handle;
1672        struct ocfs2_extent_tree et;
1673        int try_free = 1, ret1;
1674
1675try_again:
1676        ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh);
1677        if (ret) {
1678                mlog_errno(ret);
1679                return ret;
1680        }
1681
1682        if (ocfs2_supports_inline_data(osb)) {
1683                ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
1684                                                     mmap_page, wc);
1685                if (ret == 1) {
1686                        ret = 0;
1687                        goto success;
1688                }
1689                if (ret < 0) {
1690                        mlog_errno(ret);
1691                        goto out;
1692                }
1693        }
1694
1695        /* Direct I/O changes i_size late, so do not zero the tail here. */
1696        if (type != OCFS2_WRITE_DIRECT) {
1697                if (ocfs2_sparse_alloc(osb))
1698                        ret = ocfs2_zero_tail(inode, di_bh, pos);
1699                else
1700                        ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
1701                                                           len, wc);
1702                if (ret) {
1703                        mlog_errno(ret);
1704                        goto out;
1705                }
1706        }
1707
1708        ret = ocfs2_check_range_for_refcount(inode, pos, len);
1709        if (ret < 0) {
1710                mlog_errno(ret);
1711                goto out;
1712        } else if (ret == 1) {
1713                clusters_need = wc->w_clen;
1714                ret = ocfs2_refcount_cow(inode, di_bh,
1715                                         wc->w_cpos, wc->w_clen, UINT_MAX);
1716                if (ret) {
1717                        mlog_errno(ret);
1718                        goto out;
1719                }
1720        }
1721
1722        ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
1723                                        &extents_to_split);
1724        if (ret) {
1725                mlog_errno(ret);
1726                goto out;
1727        }
1728        clusters_need += clusters_to_alloc;
1729
1730        di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1731
1732        trace_ocfs2_write_begin_nolock(
1733                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
1734                        (long long)i_size_read(inode),
1735                        le32_to_cpu(di->i_clusters),
1736                        pos, len, type, mmap_page,
1737                        clusters_to_alloc, extents_to_split);
1738
1739        /*
1740         * We set w_target_from, w_target_to here so that
1741         * ocfs2_write_end() knows which range in the target page to
1742         * write out. An allocation requires that we write the entire
1743         * cluster range.
1744         */
1745        if (clusters_to_alloc || extents_to_split) {
1746                /*
1747                 * XXX: We are stretching the limits of
1748                 * ocfs2_lock_allocators(). It greatly over-estimates
1749                 * the work to be done.
1750                 */
1751                ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
1752                                              wc->w_di_bh);
1753                ret = ocfs2_lock_allocators(inode, &et,
1754                                            clusters_to_alloc, extents_to_split,
1755                                            &data_ac, &meta_ac);
1756                if (ret) {
1757                        mlog_errno(ret);
1758                        goto out;
1759                }
1760
1761                if (data_ac)
1762                        data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
1763
1764                credits = ocfs2_calc_extend_credits(inode->i_sb,
1765                                                    &di->id2.i_list);
1766        } else if (type == OCFS2_WRITE_DIRECT)
1767                /* A direct write need not start a transaction if no extents are allocated. */
1768                goto success;
1769
1770        /*
1771         * We have to zero sparse allocated clusters, unwritten extent clusters,
1772         * and non-sparse clusters we just extended.  For non-sparse writes,
1773         * we know zeros will only be needed in the first and/or last cluster.
1774         */
1775        if (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
1776                           wc->w_desc[wc->w_clen - 1].c_needs_zero))
1777                cluster_of_pages = 1;
1778        else
1779                cluster_of_pages = 0;
1780
1781        ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
1782
1783        handle = ocfs2_start_trans(osb, credits);
1784        if (IS_ERR(handle)) {
1785                ret = PTR_ERR(handle);
1786                mlog_errno(ret);
1787                goto out;
1788        }
1789
1790        wc->w_handle = handle;
1791
1792        if (clusters_to_alloc) {
1793                ret = dquot_alloc_space_nodirty(inode,
1794                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
1795                if (ret)
1796                        goto out_commit;
1797        }
1798
1799        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
1800                                      OCFS2_JOURNAL_ACCESS_WRITE);
1801        if (ret) {
1802                mlog_errno(ret);
1803                goto out_quota;
1804        }
1805
1806        /*
1807         * Fill our page array first. That way we've grabbed enough so
1808         * that we can zero and flush if we error after adding the
1809         * extent.
1810         */
1811        ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
1812                                         cluster_of_pages, mmap_page);
1813        if (ret && ret != -EAGAIN) {
1814                mlog_errno(ret);
1815                goto out_quota;
1816        }
1817
1818        /*
1819         * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
1820         * the target page. In this case, we exit with no error and no target
1821         * page. This will trigger the caller, page_mkwrite(), to retry
1822         * the operation.
1823         */
1824        if (ret == -EAGAIN) {
1825                BUG_ON(wc->w_target_page);
1826                ret = 0;
1827                goto out_quota;
1828        }
1829
1830        ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
1831                                          len);
1832        if (ret) {
1833                mlog_errno(ret);
1834                goto out_quota;
1835        }
1836
1837        if (data_ac)
1838                ocfs2_free_alloc_context(data_ac);
1839        if (meta_ac)
1840                ocfs2_free_alloc_context(meta_ac);
1841
1842success:
1843        if (pagep)
1844                *pagep = wc->w_target_page;
1845        *fsdata = wc;
1846        return 0;
1847out_quota:
1848        if (clusters_to_alloc)
1849                dquot_free_space(inode,
1850                          ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
1851out_commit:
1852        ocfs2_commit_trans(osb, handle);
1853
1854out:
1855        /*
1856         * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
1857         * even on errors here such as ENOSPC and ENOMEM. So we need to
1858         * unlock the target page manually to prevent deadlocks when
1859         * retrying on ENOSPC, or when returning non-VM_FAULT_LOCKED
1860         * to VM code.
1861         */
1862        if (wc->w_target_locked)
1863                unlock_page(mmap_page);
1864
1865        ocfs2_free_write_ctxt(inode, wc);
1866
1867        if (data_ac) {
1868                ocfs2_free_alloc_context(data_ac);
1869                data_ac = NULL;
1870        }
1871        if (meta_ac) {
1872                ocfs2_free_alloc_context(meta_ac);
1873                meta_ac = NULL;
1874        }
1875
1876        if (ret == -ENOSPC && try_free) {
1877                /*
1878                 * Try to free clusters held in the truncate log so that we
1879                 * have enough to allocate.
1880                 */
1881                try_free = 0;
1882
1883                ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
1884                if (ret1 == 1)
1885                        goto try_again;
1886
1887                if (ret1 < 0)
1888                        mlog_errno(ret1);
1889        }
1890
1891        return ret;
1892}
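
/*
 * Illustrative sketch, not part of this file: the single-shot ENOSPC
 * recovery used above, as a standalone pattern. On the first ENOSPC the
 * truncate log is flushed to free clusters and the write is retried
 * once; a second ENOSPC is returned to the caller. The toy_* helpers are
 * hypothetical stand-ins for the write path and
 * ocfs2_try_to_free_truncate_log().
 */
#if 0   /* example only */
#include <errno.h>

static int toy_do_write(unsigned int clusters_need);            /* hypothetical */
static int toy_free_truncate_log(unsigned int clusters_need);   /* hypothetical */

static int toy_write_with_retry(unsigned int clusters_need)
{
        int try_free = 1, ret, ret1;

retry:
        ret = toy_do_write(clusters_need);
        if (ret == -ENOSPC && try_free) {
                try_free = 0;                   /* at most one retry */
                ret1 = toy_free_truncate_log(clusters_need);
                if (ret1 == 1)                  /* clusters were freed */
                        goto retry;
        }
        return ret;
}
#endif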
1893
1894static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
1895                             loff_t pos, unsigned len, unsigned flags,
1896                             struct page **pagep, void **fsdata)
1897{
1898        int ret;
1899        struct buffer_head *di_bh = NULL;
1900        struct inode *inode = mapping->host;
1901
1902        ret = ocfs2_inode_lock(inode, &di_bh, 1);
1903        if (ret) {
1904                mlog_errno(ret);
1905                return ret;
1906        }
1907
1908        /*
1909         * Take alloc sem here to prevent concurrent lookups. That way
1910         * the mapping, zeroing and tree manipulation within
1911         * ocfs2_write() will be safe against ->readpage(). This
1912         * should also serve to lock out allocation from a shared
1913         * writeable region.
1914         */
1915        down_write(&OCFS2_I(inode)->ip_alloc_sem);
1916
1917        ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
1918                                       pagep, fsdata, di_bh, NULL);
1919        if (ret) {
1920                mlog_errno(ret);
1921                goto out_fail;
1922        }
1923
1924        brelse(di_bh);
1925
1926        return 0;
1927
1928out_fail:
1929        up_write(&OCFS2_I(inode)->ip_alloc_sem);
1930
1931        brelse(di_bh);
1932        ocfs2_inode_unlock(inode, 1);
1933
1934        return ret;
1935}
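
/*
 * Illustrative sketch, not part of this file: the asymmetric locking
 * contract above. On success, ocfs2_write_begin() returns with the
 * cluster lock and ip_alloc_sem still held, and ocfs2_write_end() drops
 * them; only the failure path unlocks here. All toy_* names are
 * hypothetical.
 */
#if 0   /* example only */
static void toy_cluster_lock(void);     /* ~ ocfs2_inode_lock() */
static void toy_cluster_unlock(void);
static void toy_alloc_sem_down(void);   /* ~ down_write(ip_alloc_sem) */
static void toy_alloc_sem_up(void);
static int toy_begin(void);
static int toy_end(void);               /* unlocks on the success path */

static int toy_write(void)
{
        int ret;

        toy_cluster_lock();
        toy_alloc_sem_down();
        ret = toy_begin();
        if (ret) {                      /* failure: unlock here */
                toy_alloc_sem_up();
                toy_cluster_unlock();
                return ret;
        }
        /* ... copy user data into the pages ... */
        return toy_end();               /* success: end drops both locks */
}
#endif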
1936
1937static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
1938                                   unsigned len, unsigned *copied,
1939                                   struct ocfs2_dinode *di,
1940                                   struct ocfs2_write_ctxt *wc)
1941{
1942        void *kaddr;
1943
1944        if (unlikely(*copied < len)) {
1945                if (!PageUptodate(wc->w_target_page)) {
1946                        *copied = 0;
1947                        return;
1948                }
1949        }
1950
1951        kaddr = kmap_atomic(wc->w_target_page);
1952        memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
1953        kunmap_atomic(kaddr);
1954
1955        trace_ocfs2_write_end_inline(
1956             (unsigned long long)OCFS2_I(inode)->ip_blkno,
1957             (unsigned long long)pos, *copied,
1958             le16_to_cpu(di->id2.i_data.id_count),
1959             le16_to_cpu(di->i_dyn_features));
1960}
1961
1962int ocfs2_write_end_nolock(struct address_space *mapping,
1963                           loff_t pos, unsigned len, unsigned copied, void *fsdata)
1964{
1965        int i, ret;
1966        unsigned from, to, start = pos & (PAGE_SIZE - 1);
1967        struct inode *inode = mapping->host;
1968        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1969        struct ocfs2_write_ctxt *wc = fsdata;
1970        struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1971        handle_t *handle = wc->w_handle;
1972        struct page *tmppage;
1973
1974        BUG_ON(!list_empty(&wc->w_unwritten_list));
1975
1976        if (handle) {
1977                ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
1978                                wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE);
1979                if (ret) {
1980                        copied = ret;
1981                        mlog_errno(ret);
1982                        goto out;
1983                }
1984        }
1985
1986        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1987                ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
1988                goto out_write_size;
1989        }
1990
1991        if (unlikely(copied < len) && wc->w_target_page) {
1992                if (!PageUptodate(wc->w_target_page))
1993                        copied = 0;
1994
1995                ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
1996                                       start+len);
1997        }
1998        if (wc->w_target_page)
1999                flush_dcache_page(wc->w_target_page);
2000
2001        for (i = 0; i < wc->w_num_pages; i++) {
2002                tmppage = wc->w_pages[i];
2003
2004                /* This is the direct I/O target page. */
2005                if (tmppage == NULL)
2006                        continue;
2007
2008                if (tmppage == wc->w_target_page) {
2009                        from = wc->w_target_from;
2010                        to = wc->w_target_to;
2011
2012                        BUG_ON(from > PAGE_SIZE ||
2013                               to > PAGE_SIZE ||
2014                               to < from);
2015                } else {
2016                        /*
2017                         * Pages adjacent to the target (if any) imply
2018                         * a hole-filling write in which case we want
2019                         * to flush their entire range.
2020                         */
2021                        from = 0;
2022                        to = PAGE_SIZE;
2023                }
2024
2025                if (page_has_buffers(tmppage)) {
2026                        if (handle && ocfs2_should_order_data(inode)) {
2027                                loff_t start_byte =
2028                                        ((loff_t)tmppage->index << PAGE_SHIFT) +
2029                                        from;
2030                                loff_t length = to - from;
2031                                ocfs2_jbd2_inode_add_write(handle, inode,
2032                                                           start_byte, length);
2033                        }
2034                        block_commit_write(tmppage, from, to);
2035                }
2036        }
2037
2038out_write_size:
2039        /* Direct I/O does not update i_size here. */
2040        if (wc->w_type != OCFS2_WRITE_DIRECT) {
2041                pos += copied;
2042                if (pos > i_size_read(inode)) {
2043                        i_size_write(inode, pos);
2044                        mark_inode_dirty(inode);
2045                }
2046                inode->i_blocks = ocfs2_inode_sector_count(inode);
2047                di->i_size = cpu_to_le64((u64)i_size_read(inode));
2048                inode->i_mtime = inode->i_ctime = current_time(inode);
2049                di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
2050                di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
2051                if (handle)
2052                        ocfs2_update_inode_fsync_trans(handle, inode, 1);
2053        }
2054        if (handle)
2055                ocfs2_journal_dirty(handle, wc->w_di_bh);
2056
2057out:
2058        /* Unlock pages before running deallocs, since dealloc needs to take
2059         * the j_trans_barrier lock; otherwise we can deadlock, because the
2060         * journal commit thread holds that lock and will ask for the page
2061         * lock when flushing data. Keep this here to preserve the unlock order.
2062         */
2063        ocfs2_unlock_pages(wc);
2064
2065        if (handle)
2066                ocfs2_commit_trans(osb, handle);
2067
2068        ocfs2_run_deallocs(osb, &wc->w_dealloc);
2069
2070        brelse(wc->w_di_bh);
2071        kfree(wc);
2072
2073        return copied;
2074}
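
/*
 * Illustrative sketch, not part of this file: how the flush range per
 * page is chosen above. Only the target page is trimmed to the byte
 * range actually written; neighbouring pages grabbed for hole filling
 * are flushed in full. A 4096-byte page size is assumed.
 */
#if 0   /* example only */
static void toy_flush_range(int is_target_page, unsigned int target_from,
                            unsigned int target_to,
                            unsigned int *from, unsigned int *to)
{
        if (is_target_page) {
                *from = target_from;    /* e.g. pos & (PAGE_SIZE - 1) */
                *to = target_to;
        } else {
                *from = 0;              /* hole-filling neighbour: */
                *to = 4096;             /* flush the whole page */
        }
}
#endif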
2075
2076static int ocfs2_write_end(struct file *file, struct address_space *mapping,
2077                           loff_t pos, unsigned len, unsigned copied,
2078                           struct page *page, void *fsdata)
2079{
2080        int ret;
2081        struct inode *inode = mapping->host;
2082
2083        ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata);
2084
2085        up_write(&OCFS2_I(inode)->ip_alloc_sem);
2086        ocfs2_inode_unlock(inode, 1);
2087
2088        return ret;
2089}
2090
2091struct ocfs2_dio_write_ctxt {
2092        struct list_head        dw_zero_list;
2093        unsigned                dw_zero_count;
2094        int                     dw_orphaned;
2095        pid_t                   dw_writer_pid;
2096};
2097
2098static struct ocfs2_dio_write_ctxt *
2099ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc)
2100{
2101        struct ocfs2_dio_write_ctxt *dwc = NULL;
2102
2103        if (bh->b_private)
2104                return bh->b_private;
2105
2106        dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS);
2107        if (dwc == NULL)
2108                return NULL;
2109        INIT_LIST_HEAD(&dwc->dw_zero_list);
2110        dwc->dw_zero_count = 0;
2111        dwc->dw_orphaned = 0;
2112        dwc->dw_writer_pid = task_pid_nr(current);
2113        bh->b_private = dwc;
2114        *alloc = 1;
2115
2116        return dwc;
2117}
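
/*
 * Illustrative sketch, not part of this file: the lazy get-or-create
 * pattern above, detached from buffer_heads. The first call allocates
 * the context and caches it in the host object; later calls for the
 * same I/O reuse it, and *created tells the caller whether this was the
 * first get_block of the write. The toy_* types are hypothetical.
 */
#if 0   /* example only */
#include <stdlib.h>

struct toy_ctx { int placeholder; };
struct toy_host { void *priv; };

static struct toy_ctx *toy_get_ctx(struct toy_host *h, int *created)
{
        if (h->priv)
                return h->priv;         /* later call: reuse cached ctx */

        h->priv = calloc(1, sizeof(struct toy_ctx));
        if (h->priv)
                *created = 1;           /* first call for this I/O */
        return h->priv;
}
#endif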
2118
2119static void ocfs2_dio_free_write_ctx(struct inode *inode,
2120                                     struct ocfs2_dio_write_ctxt *dwc)
2121{
2122        ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list);
2123        kfree(dwc);
2124}
2125
2126/*
2127 * TODO: Make this into a generic get_blocks function.
2128 *
2129 * From do_direct_io in direct-io.c:
2130 *  "So what we do is to permit the ->get_blocks function to populate
2131 *   bh.b_size with the size of IO which is permitted at this offset and
2132 *   this i_blkbits."
2133 *
2134 * This function is called directly from get_more_blocks in direct-io.c.
2135 *
2136 * called like this: dio->get_blocks(dio->inode, fs_startblk,
2137 *                                      fs_count, map_bh, dio->rw == WRITE);
2138 */
2139static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
2140                               struct buffer_head *bh_result, int create)
2141{
2142        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2143        struct ocfs2_inode_info *oi = OCFS2_I(inode);
2144        struct ocfs2_write_ctxt *wc;
2145        struct ocfs2_write_cluster_desc *desc = NULL;
2146        struct ocfs2_dio_write_ctxt *dwc = NULL;
2147        struct buffer_head *di_bh = NULL;
2148        u64 p_blkno;
2149        unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
2150        loff_t pos = iblock << i_blkbits;
2151        sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
2152        unsigned len, total_len = bh_result->b_size;
2153        int ret = 0, first_get_block = 0;
2154
2155        len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
2156        len = min(total_len, len);
2157
2158        /*
2159         * bh_result->b_size is computed in get_more_blocks() from the
2160         * write's "pos" and "end"; we may need to map twice to return
2161         * different buffer states:
2162         * 1. area within the file size: do not set NEW;
2163         * 2. area beyond the file size: set NEW.
2163         *
2164         *                 iblock    endblk
2165         * |--------|---------|---------|---------
2166         * |<-------area in file------->|
2167         */
2168
2169        if ((iblock <= endblk) &&
2170            ((iblock + ((len - 1) >> i_blkbits)) > endblk))
2171                len = (endblk - iblock + 1) << i_blkbits;
2172
2173        mlog(0, "get block of %lu at %llu:%u req %u\n",
2174                        inode->i_ino, pos, len, total_len);
2175
2176        /*
2177         * ocfs2_dio_end_io_write() may need to change the file size, or to
2178         * add the inode to the orphan dir, so we cannot take the fast path
2179         * when the file size will change.
2180         */
2181        if (pos + total_len <= i_size_read(inode)) {
2182
2183                /* This is the fast path for rewrites. */
2184                ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
2185                if (buffer_mapped(bh_result) &&
2186                    !buffer_new(bh_result) &&
2187                    ret == 0)
2188                        goto out;
2189
2190                /* Clear state set by ocfs2_get_block. */
2191                bh_result->b_state = 0;
2192        }
2193
2194        dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block);
2195        if (unlikely(dwc == NULL)) {
2196                ret = -ENOMEM;
2197                mlog_errno(ret);
2198                goto out;
2199        }
2200
2201        if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) >
2202            ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) &&
2203            !dwc->dw_orphaned) {
2204                /*
2205                 * When we are going to allocate extents beyond the file size,
2206                 * add the inode to the orphan dir, so that we can reclaim that
2207                 * space if the system crashes during the write.
2208                 */
2209                ret = ocfs2_add_inode_to_orphan(osb, inode);
2210                if (ret < 0) {
2211                        mlog_errno(ret);
2212                        goto out;
2213                }
2214                dwc->dw_orphaned = 1;
2215        }
2216
2217        ret = ocfs2_inode_lock(inode, &di_bh, 1);
2218        if (ret) {
2219                mlog_errno(ret);
2220                goto out;
2221        }
2222
2223        down_write(&oi->ip_alloc_sem);
2224
2225        if (first_get_block) {
2226                if (ocfs2_sparse_alloc(osb))
2227                        ret = ocfs2_zero_tail(inode, di_bh, pos);
2228                else
2229                        ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
2230                                                           total_len, NULL);
2231                if (ret < 0) {
2232                        mlog_errno(ret);
2233                        goto unlock;
2234                }
2235        }
2236
2237        ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len,
2238                                       OCFS2_WRITE_DIRECT, NULL,
2239                                       (void **)&wc, di_bh, NULL);
2240        if (ret) {
2241                mlog_errno(ret);
2242                goto unlock;
2243        }
2244
2245        desc = &wc->w_desc[0];
2246
2247        p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys);
2248        BUG_ON(p_blkno == 0);
2249        p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1);
2250
2251        map_bh(bh_result, inode->i_sb, p_blkno);
2252        bh_result->b_size = len;
2253        if (desc->c_needs_zero)
2254                set_buffer_new(bh_result);
2255
2256        if (iblock > endblk)
2257                set_buffer_new(bh_result);
2258
2259        /* The end_io handler may sleep, which must not happen in irq context,
2260         * so defer completion to the dio work queue. */
2261        set_buffer_defer_completion(bh_result);
2262
2263        if (!list_empty(&wc->w_unwritten_list)) {
2264                struct ocfs2_unwritten_extent *ue = NULL;
2265
2266                ue = list_first_entry(&wc->w_unwritten_list,
2267                                      struct ocfs2_unwritten_extent,
2268                                      ue_node);
2269                BUG_ON(ue->ue_cpos != desc->c_cpos);
2270                /* The physical address may still be 0; fill it in now. */
2271                ue->ue_phys = desc->c_phys;
2272
2273                list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list);
2274                dwc->dw_zero_count += wc->w_unwritten_count;
2275        }
2276
2277        ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc);
2278        BUG_ON(ret != len);
2279        ret = 0;
2280unlock:
2281        up_write(&oi->ip_alloc_sem);
2282        ocfs2_inode_unlock(inode, 1);
2283        brelse(di_bh);
2284out:
2285        if (ret < 0)
2286                ret = -EIO;
2287        return ret;
2288}
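
/*
 * Illustrative sketch, not part of this file: the cluster-to-block
 * mapping computed above for bh_result. With 4K blocks and 32K clusters
 * there are 8 blocks per cluster, so the disk block is the physical
 * cluster's first block plus the logical block's offset within its
 * cluster. The numbers are hypothetical.
 */
#if 0   /* example only */
#include <stdio.h>

int main(void)
{
        unsigned int bpc_bits = 3;              /* 8 blocks per cluster */
        unsigned long long c_phys = 100;        /* physical cluster number */
        unsigned long long iblock = 21;         /* logical block in file */
        unsigned long long p_blkno;

        p_blkno = (c_phys << bpc_bits) +
                  (iblock & ((1ULL << bpc_bits) - 1));
        /* Cluster 100 starts at block 800; 21 % 8 == 5 -> prints 805. */
        printf("p_blkno = %llu\n", p_blkno);
        return 0;
}
#endif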
2289
2290static int ocfs2_dio_end_io_write(struct inode *inode,
2291                                  struct ocfs2_dio_write_ctxt *dwc,
2292                                  loff_t offset,
2293                                  ssize_t bytes)
2294{
2295        struct ocfs2_cached_dealloc_ctxt dealloc;
2296        struct ocfs2_extent_tree et;
2297        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2298        struct ocfs2_inode_info *oi = OCFS2_I(inode);
2299        struct ocfs2_unwritten_extent *ue = NULL;
2300        struct buffer_head *di_bh = NULL;
2301        struct ocfs2_dinode *di;
2302        struct ocfs2_alloc_context *data_ac = NULL;
2303        struct ocfs2_alloc_context *meta_ac = NULL;
2304        handle_t *handle = NULL;
2305        loff_t end = offset + bytes;
2306        int ret = 0, credits = 0, locked = 0;
2307
2308        ocfs2_init_dealloc_ctxt(&dealloc);
2309
2310        /* Here we clear unwritten extents, delete the orphan entry and change
2311         * i_size. If none of these is needed, we can skip all this. */
2312        if (list_empty(&dwc->dw_zero_list) &&
2313            end <= i_size_read(inode) &&
2314            !dwc->dw_orphaned)
2315                goto out;
2316
2317        /* ocfs2_file_write_iter() will have taken i_mutex, so we need not lock
2318         * if we are in that context. */
2319        if (dwc->dw_writer_pid != task_pid_nr(current)) {
2320                inode_lock(inode);
2321                locked = 1;
2322        }
2323
2324        ret = ocfs2_inode_lock(inode, &di_bh, 1);
2325        if (ret < 0) {
2326                mlog_errno(ret);
2327                goto out;
2328        }
2329
2330        down_write(&oi->ip_alloc_sem);
2331
2332        /* Delete the orphan entry before acquiring i_mutex. */
2333        if (dwc->dw_orphaned) {
2334                BUG_ON(dwc->dw_writer_pid != task_pid_nr(current));
2335
2336                end = end > i_size_read(inode) ? end : 0;
2337
2338                ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
2339                                !!end, end);
2340                if (ret < 0)
2341                        mlog_errno(ret);
2342        }
2343
2344        di = (struct ocfs2_dinode *)di_bh->b_data;
2345
2346        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
2347
2348        /* Attach the dealloc context to the extent tree, since we may reuse
2349         * extents which were already unlinked from the current extent tree
2350         * due to extent rotation and merging.
2351         */
2352        et.et_dealloc = &dealloc;
2353
2354        ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2,
2355                                    &data_ac, &meta_ac);
2356        if (ret) {
2357                mlog_errno(ret);
2358                goto unlock;
2359        }
2360
2361        credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list);
2362
2363        handle = ocfs2_start_trans(osb, credits);
2364        if (IS_ERR(handle)) {
2365                ret = PTR_ERR(handle);
2366                mlog_errno(ret);
2367                goto unlock;
2368        }
2369        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
2370                                      OCFS2_JOURNAL_ACCESS_WRITE);
2371        if (ret) {
2372                mlog_errno(ret);
2373                goto commit;
2374        }
2375
2376        list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
2377                ret = ocfs2_mark_extent_written(inode, &et, handle,
2378                                                ue->ue_cpos, 1,
2379                                                ue->ue_phys,
2380                                                meta_ac, &dealloc);
2381                if (ret < 0) {
2382                        mlog_errno(ret);
2383                        break;
2384                }
2385        }
2386
2387        if (end > i_size_read(inode)) {
2388                ret = ocfs2_set_inode_size(handle, inode, di_bh, end);
2389                if (ret < 0)
2390                        mlog_errno(ret);
2391        }
2392commit:
2393        ocfs2_commit_trans(osb, handle);
2394unlock:
2395        up_write(&oi->ip_alloc_sem);
2396        ocfs2_inode_unlock(inode, 1);
2397        brelse(di_bh);
2398out:
2399        if (data_ac)
2400                ocfs2_free_alloc_context(data_ac);
2401        if (meta_ac)
2402                ocfs2_free_alloc_context(meta_ac);
2403        ocfs2_run_deallocs(osb, &dealloc);
2404        if (locked)
2405                inode_unlock(inode);
2406        ocfs2_dio_free_write_ctx(inode, dwc);
2407
2408        return ret;
2409}
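
/*
 * Illustrative sketch, not part of this file: the early-out test at the
 * top of ocfs2_dio_end_io_write() as a pure predicate. End-io work is
 * only needed when unwritten extents must be marked written, i_size must
 * grow, or the inode was orphaned for an append.
 */
#if 0   /* example only */
static int toy_dio_end_io_has_work(int zero_list_empty,
                                   unsigned long long end,
                                   unsigned long long i_size,
                                   int orphaned)
{
        return !zero_list_empty || end > i_size || orphaned;
}
#endif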
2410
2411/*
2412 * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
2413 * particularly interested in the aio/dio case.  We use the rw_lock DLM lock
2414 * to protect io on one node from truncation on another.
2415 */
2416static int ocfs2_dio_end_io(struct kiocb *iocb,
2417                            loff_t offset,
2418                            ssize_t bytes,
2419                            void *private)
2420{
2421        struct inode *inode = file_inode(iocb->ki_filp);
2422        int level;
2423        int ret = 0;
2424
2425        /* this io's submitter should not have unlocked this before we could */
2426        BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
2427
2428        if (bytes <= 0)
2429                mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
2430                                 (long long)bytes);
2431        if (private) {
2432                if (bytes > 0)
2433                        ret = ocfs2_dio_end_io_write(inode, private, offset,
2434                                                     bytes);
2435                else
2436                        ocfs2_dio_free_write_ctx(inode, private);
2437        }
2438
2439        ocfs2_iocb_clear_rw_locked(iocb);
2440
2441        level = ocfs2_iocb_rw_locked_level(iocb);
2442        ocfs2_rw_unlock(inode, level);
2443        return ret;
2444}
2445
2446static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2447{
2448        struct file *file = iocb->ki_filp;
2449        struct inode *inode = file->f_mapping->host;
2450        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2451        get_block_t *get_block;
2452
2453        /*
2454         * Fall back to buffered I/O if we see an inode without
2455         * extents, i.e. one holding inline data.
2456         */
2457        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2458                return 0;
2459
2460        /* Fall back to buffered I/O if we do not support append dio. */
2461        if (iocb->ki_pos + iter->count > i_size_read(inode) &&
2462            !ocfs2_supports_append_dio(osb))
2463                return 0;
2464
2465        if (iov_iter_rw(iter) == READ)
2466                get_block = ocfs2_lock_get_block;
2467        else
2468                get_block = ocfs2_dio_wr_get_block;
2469
2470        return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
2471                                    iter, get_block,
2472                                    ocfs2_dio_end_io, NULL, 0);
2473}
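
/*
 * Illustrative sketch, not part of this file: by convention, a
 * ->direct_IO that returns 0 makes the generic write path fall back to
 * buffered I/O, which is how the inline-data and missing append-dio
 * cases above are served. A caller-side view with hypothetical toy_*
 * names:
 */
#if 0   /* example only */
struct toy_file;

static long toy_direct_write(struct toy_file *f, const void *buf,
                             unsigned long len);        /* hypothetical */
static long toy_buffered_write(struct toy_file *f, const void *buf,
                               unsigned long len);      /* hypothetical */

static long toy_write(struct toy_file *f, const void *buf,
                      unsigned long len)
{
        long written = toy_direct_write(f, buf, len);

        if (written == 0)               /* fs declined direct I/O */
                written = toy_buffered_write(f, buf, len);
        return written;
}
#endif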
2474
2475const struct address_space_operations ocfs2_aops = {
2476        .readpage               = ocfs2_readpage,
2477        .readpages              = ocfs2_readpages,
2478        .writepage              = ocfs2_writepage,
2479        .write_begin            = ocfs2_write_begin,
2480        .write_end              = ocfs2_write_end,
2481        .bmap                   = ocfs2_bmap,
2482        .direct_IO              = ocfs2_direct_IO,
2483        .invalidatepage         = block_invalidatepage,
2484        .releasepage            = ocfs2_releasepage,
2485        .migratepage            = buffer_migrate_page,
2486        .is_partially_uptodate  = block_is_partially_uptodate,
2487        .error_remove_page      = generic_error_remove_page,
2488};
2489