linux/fs/xfs/linux-2.6/xfs_file.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/dcache.h>
#include <linux/falloc.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
        struct xfs_inode        *ip,
        int                     type)
{
        if (type & XFS_IOLOCK_EXCL)
                mutex_lock(&VFS_I(ip)->i_mutex);
        xfs_ilock(ip, type);
}

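/*
 * Drop the inode lock(s) and, if we took the iolock exclusively, release
 * i_mutex as well.  The unlock order is the reverse of xfs_rw_ilock().
 */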
static inline void
xfs_rw_iunlock(
        struct xfs_inode        *ip,
        int                     type)
{
        xfs_iunlock(ip, type);
        if (type & XFS_IOLOCK_EXCL)
                mutex_unlock(&VFS_I(ip)->i_mutex);
}

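/*
 * Downgrade an exclusive iolock to shared.  i_mutex is only needed while
 * we hold the iolock exclusively, so it can be dropped here as well.
 */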
static inline void
xfs_rw_ilock_demote(
        struct xfs_inode        *ip,
        int                     type)
{
        xfs_ilock_demote(ip, type);
        if (type & XFS_IOLOCK_EXCL)
                mutex_unlock(&VFS_I(ip)->i_mutex);
}

/*
 *      xfs_iozero
 *
 *      xfs_iozero clears the specified range of the buffer supplied,
 *      and marks all the affected blocks as valid and modified.  If
 *      an affected block is not allocated, it will be allocated.  If
 *      an affected block is not completely overwritten, and is not
 *      valid before the operation, it will be read from disk before
 *      being partially zeroed.
 */
STATIC int
xfs_iozero(
        struct xfs_inode        *ip,    /* inode                        */
        loff_t                  pos,    /* offset in file               */
        size_t                  count)  /* size of data to zero         */
{
        struct page             *page;
        struct address_space    *mapping;
        int                     status;

        mapping = VFS_I(ip)->i_mapping;
        do {
                unsigned offset, bytes;
                void *fsdata;

                offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = pagecache_write_begin(NULL, mapping, pos, bytes,
                                        AOP_FLAG_UNINTERRUPTIBLE,
                                        &page, &fsdata);
                if (status)
                        break;

                zero_user(page, offset, bytes);

                status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
                                        page, fsdata);
                WARN_ON(status <= 0); /* can't return less than zero! */
                pos += bytes;
                count -= bytes;
                status = 0;
        } while (count);

        return (-status);
}

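/*
 * Push everything this file needs for fsync/fdatasync to stable storage:
 * wait for outstanding ioends, make sure the inode core is either logged
 * or the log is forced up to its last LSN, and flush the device write
 * caches if the log write didn't already do so.
 */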
STATIC int
xfs_file_fsync(
        struct file             *file,
        int                     datasync)
{
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_trans        *tp;
        int                     error = 0;
        int                     log_flushed = 0;

        trace_xfs_file_fsync(ip);

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -XFS_ERROR(EIO);

        xfs_iflags_clear(ip, XFS_ITRUNCATED);

        xfs_ioend_wait(ip);

        /*
         * We always need to make sure that the required inode state is safe on
         * disk.  The inode might be clean but we still might need to force the
         * log because of committed transactions that haven't hit the disk yet.
         * Likewise, there could be unflushed non-transactional changes to the
         * inode core that have to go to disk and this requires us to issue
         * a synchronous transaction to capture these changes correctly.
         *
         * This code relies on the assumption that if the i_update_core field
         * of the inode is clear and the inode is unpinned then it is clean
         * and no action is required.
         */
        xfs_ilock(ip, XFS_ILOCK_SHARED);

        /*
         * First check if the VFS inode is marked dirty.  All the dirtying
         * of non-transactional updates now goes through mark_inode_dirty*,
         * which allows us to distinguish between pure timestamp updates
         * and i_size updates which need to be caught for fdatasync.
         * After that also check for the dirty state in the XFS inode, which
         * might get cleared when the inode gets written out via the AIL
         * or xfs_iflush_cluster.
         */
        if (((inode->i_state & I_DIRTY_DATASYNC) ||
            ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
            ip->i_update_core) {
                /*
                 * Kick off a transaction to log the inode core to get the
                 * updates.  The sync transaction will also force the log.
                 */
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
                tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
                error = xfs_trans_reserve(tp, 0,
                                XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
                if (error) {
                        xfs_trans_cancel(tp, 0);
                        return -error;
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);

                /*
                 * Note - it's possible that we might have pushed ourselves out
                 * of the way during trans_reserve which would flush the inode.
                 * But there's no guarantee that the inode buffer has actually
                 * gone out yet (it's delwri).  Plus the buffer could be pinned
                 * anyway if it's part of an inode in another recent
                 * transaction.  So we play it safe and fire off the
                 * transaction anyway.
                 */
                xfs_trans_ijoin(tp, ip);
                xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
                xfs_trans_set_sync(tp);
                error = _xfs_trans_commit(tp, 0, &log_flushed);

                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        } else {
                /*
                 * Timestamps/size haven't changed since last inode flush or
                 * inode transaction commit.  That means either nothing got
                 * written or a transaction committed which caught the updates.
                 * If the latter happened and the transaction hasn't hit the
                 * disk yet, the inode will still be pinned.  If it is,
                 * force the log.
                 */
                if (xfs_ipincount(ip)) {
                        error = _xfs_log_force_lsn(ip->i_mount,
                                        ip->i_itemp->ili_last_lsn,
                                        XFS_LOG_SYNC, &log_flushed);
                }
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
        }

        if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
                /*
                 * If the log write didn't issue an ordered tag we need
                 * to flush the disk cache for the data device now.
                 */
                if (!log_flushed)
                        xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp);

                /*
                 * If this inode is on the RT dev we need to flush that
                 * cache as well.
                 */
                if (XFS_IS_REALTIME_INODE(ip))
                        xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
        }

        return -error;
}

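/*
 * Read from a file through the generic page cache path.  Direct IO reads
 * take the iolock exclusively just long enough to flush and invalidate any
 * cached pages over the range, then demote to shared for the actual read.
 */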
STATIC ssize_t
xfs_file_aio_read(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos)
{
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        size_t                  size = 0;
        ssize_t                 ret = 0;
        int                     ioflags = 0;
        xfs_fsize_t             n;
        unsigned long           seg;

        XFS_STATS_INC(xs_read_calls);

        BUG_ON(iocb->ki_pos != pos);

        if (unlikely(file->f_flags & O_DIRECT))
                ioflags |= IO_ISDIRECT;
        if (file->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        /* START copy & waste from filemap.c */
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                size += iv->iov_len;
                if (unlikely((ssize_t)(size|iv->iov_len) < 0))
                        return -XFS_ERROR(EINVAL);
        }
        /* END copy & waste from filemap.c */

        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(ip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((iocb->ki_pos & target->bt_smask) ||
                    (size & target->bt_smask)) {
                        if (iocb->ki_pos == ip->i_size)
                                return 0;
                        return -XFS_ERROR(EINVAL);
                }
        }

        n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
        if (n <= 0 || size == 0)
                return 0;

        if (n < size)
                size = n;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

                if (inode->i_mapping->nrpages) {
                        ret = -xfs_flushinval_pages(ip,
                                        (iocb->ki_pos & PAGE_CACHE_MASK),
                                        -1, FI_REMAPF_LOCKED);
                        if (ret) {
                                xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
                                return ret;
                        }
                }
                xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
        } else
                xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

        trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

        ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

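/*
 * Splice data from a file into a pipe.  Only the shared iolock is needed
 * here because the data is read through the page cache.
 */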
STATIC ssize_t
xfs_file_splice_read(
        struct file             *infilp,
        loff_t                  *ppos,
        struct pipe_inode_info  *pipe,
        size_t                  count,
        unsigned int            flags)
{
        struct xfs_inode        *ip = XFS_I(infilp->f_mapping->host);
        int                     ioflags = 0;
        ssize_t                 ret;

        XFS_STATS_INC(xs_read_calls);

        if (infilp->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

        trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

        ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

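/*
 * Bring the in-core file size in line with what a write actually did.  If
 * the write failed part way through, pull the file position back to the
 * current EOF; if it extended the file, update the XFS in-core size.
 */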
STATIC void
xfs_aio_write_isize_update(
        struct inode    *inode,
        loff_t          *ppos,
        ssize_t         bytes_written)
{
        struct xfs_inode        *ip = XFS_I(inode);
        xfs_fsize_t             isize = i_size_read(inode);

        if (bytes_written > 0)
                XFS_STATS_ADD(xs_write_bytes, bytes_written);

        if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
                                        *ppos > isize))
                *ppos = isize;

        if (*ppos > ip->i_size) {
                xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
                if (*ppos > ip->i_size)
                        ip->i_size = *ppos;
                xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
        }
}

/*
 * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
 * part of the I/O may have been written to disk before the error occurred.  In
 * this case the on-disk file size may have been adjusted beyond the in-memory
 * file size and now needs to be truncated back.
 */
STATIC void
xfs_aio_write_newsize_update(
        struct xfs_inode        *ip)
{
        if (ip->i_new_size) {
                xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
                ip->i_new_size = 0;
                if (ip->i_d.di_size > ip->i_size)
                        ip->i_d.di_size = ip->i_size;
                xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
        }
}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself. This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode. The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
        struct pipe_inode_info  *pipe,
        struct file             *outfilp,
        loff_t                  *ppos,
        size_t                  count,
        unsigned int            flags)
{
        struct inode            *inode = outfilp->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        xfs_fsize_t             new_size;
        int                     ioflags = 0;
        ssize_t                 ret;

        XFS_STATS_INC(xs_write_calls);

        if (outfilp->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        new_size = *ppos + count;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (new_size > ip->i_size)
                ip->i_new_size = new_size;
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

        ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);

        xfs_aio_write_isize_update(inode, ppos, ret);
        xfs_aio_write_newsize_update(ip);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int                              /* error (positive) */
xfs_zero_last_block(
        xfs_inode_t     *ip,
        xfs_fsize_t     offset,
        xfs_fsize_t     isize)
{
        xfs_fileoff_t   last_fsb;
        xfs_mount_t     *mp = ip->i_mount;
        int             nimaps;
        int             zero_offset;
        int             zero_len;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        zero_offset = XFS_B_FSB_OFFSET(mp, isize);
        if (zero_offset == 0) {
                /*
                 * There are no extra bytes in the last block on disk to
                 * zero, so return.
                 */
                return 0;
        }

        last_fsb = XFS_B_TO_FSBT(mp, isize);
        nimaps = 1;
        error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
                          &nimaps, NULL);
        if (error) {
                return error;
        }
        ASSERT(nimaps > 0);
        /*
         * If the block underlying isize is just a hole, then there
         * is nothing to zero.
         */
        if (imap.br_startblock == HOLESTARTBLOCK) {
                return 0;
        }
        /*
         * Zero the part of the last block beyond the EOF, and write it
         * out sync.  We need to drop the ilock while we do this so we
         * don't deadlock when the buffer cache calls back to us.
         */
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
        error = xfs_iozero(ip, isize, zero_len);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        ASSERT(error >= 0);
        return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes in the
 * range are left alone as holes.
 */

int                                     /* error (positive) */
xfs_zero_eof(
        xfs_inode_t     *ip,
        xfs_off_t       offset,         /* starting I/O offset */
        xfs_fsize_t     isize)          /* current inode size */
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   start_zero_fsb;
        xfs_fileoff_t   end_zero_fsb;
        xfs_fileoff_t   zero_count_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_fileoff_t   zero_off;
        xfs_fsize_t     zero_len;
        int             nimaps;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
        ASSERT(offset > isize);

        /*
         * First handle zeroing the block on which isize resides.
         * We only zero a part of that block so it is handled specially.
         */
        error = xfs_zero_last_block(ip, offset, isize);
        if (error) {
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
                return error;
        }

        /*
         * Calculate the range between the new size and the old
         * where blocks needing to be zeroed may exist.  To get the
         * block where the last byte in the file currently resides,
         * we need to subtract one from the size and truncate back
         * to a block boundary.  We subtract 1 in case the size is
         * exactly on a block boundary.
         */
        last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
        start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
        ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
        if (last_fsb == end_zero_fsb) {
                /*
                 * The size was only incremented on its last block.
                 * We took care of that above, so just return.
                 */
                return 0;
        }

        ASSERT(start_zero_fsb <= end_zero_fsb);
        while (start_zero_fsb <= end_zero_fsb) {
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
                error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
                                  0, NULL, 0, &imap, &nimaps, NULL);
                if (error) {
                        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
                        return error;
                }
                ASSERT(nimaps > 0);

                if (imap.br_state == XFS_EXT_UNWRITTEN ||
                    imap.br_startblock == HOLESTARTBLOCK) {
                        /*
                         * Holes and unwritten extents already read back
                         * as zeroes, so there is nothing to zero here.
                         * Skip ahead to the next extent in the range.
                         */
                        start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }

                /*
                 * There are blocks we need to zero.
                 * Drop the inode lock while we're doing the I/O.
                 * We'll still have the iolock to protect us.
                 */
                xfs_iunlock(ip, XFS_ILOCK_EXCL);

                zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
                zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

                if ((zero_off + zero_len) > offset)
                        zero_len = offset - zero_off;

                error = xfs_iozero(ip, zero_off, zero_len);
                if (error) {
                        goto out_lock;
                }

                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

                xfs_ilock(ip, XFS_ILOCK_EXCL);
        }

        return 0;

out_lock:
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        ASSERT(error >= 0);
        return error;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Returns with iolock held according to @iolock.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
        struct file             *file,
        loff_t                  *pos,
        size_t                  *count,
        int                     *iolock)
{
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        xfs_fsize_t             new_size;
        int                     error = 0;

        error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
        if (error) {
                xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
                *iolock = 0;
                return error;
        }

        new_size = *pos + *count;
        if (new_size > ip->i_size)
                ip->i_new_size = new_size;

        if (likely(!(file->f_mode & FMODE_NOCMTIME)))
                file_update_time(file);

        /*
         * If the offset is beyond the size of the file, we need to zero any
         * blocks that fall between the existing EOF and the start of this
         * write.
         */
        if (*pos > ip->i_size)
                error = -xfs_zero_eof(ip, *pos, ip->i_size);

        xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
        if (error)
                return error;

        /*
         * If we're writing the file then make sure to clear the setuid and
         * setgid bits if the process is not being run by root.  This keeps
         * people from modifying setuid and setgid binaries.
         */
        return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. xfs_ioend_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos,
        size_t                  ocount,
        int                     *iolock)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 ret = 0;
        size_t                  count = ocount;
        int                     unaligned_io = 0;
        struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
                                        mp->m_rtdev_targp : mp->m_ddev_targp;

        *iolock = 0;
        if ((pos & target->bt_smask) || (count & target->bt_smask))
                return -XFS_ERROR(EINVAL);

        if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
                unaligned_io = 1;

        if (unaligned_io || mapping->nrpages || pos > ip->i_size)
                *iolock = XFS_IOLOCK_EXCL;
        else
                *iolock = XFS_IOLOCK_SHARED;
        xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);

        ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
        if (ret)
                return ret;

        if (mapping->nrpages) {
                WARN_ON(*iolock != XFS_IOLOCK_EXCL);
                ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
                                                        FI_REMAPF_LOCKED);
                if (ret)
                        return ret;
        }

        /*
         * If we are doing unaligned IO, wait for all other IO to drain,
         * otherwise demote the lock if we had to flush cached pages.
         */
        if (unaligned_io)
                xfs_ioend_wait(ip);
        else if (*iolock == XFS_IOLOCK_EXCL) {
                xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
                *iolock = XFS_IOLOCK_SHARED;
        }

        trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
        ret = generic_file_direct_write(iocb, iovp,
                        &nr_segs, pos, &iocb->ki_pos, count, ocount);

        /* No fallback to buffered IO on errors for XFS. */
        ASSERT(ret < 0 || ret == count);
        return ret;
}

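/*
 * Handle buffered writes through the page cache.  This always takes
 * IOLOCK_EXCL; on ENOSPC we flush the dirty pages of the inode once to
 * free up delalloc space and retry the write a single time before
 * giving up.
 */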
STATIC ssize_t
xfs_file_buffered_aio_write(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos,
        size_t                  ocount,
        int                     *iolock)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
        int                     enospc = 0;
        size_t                  count = ocount;

        *iolock = XFS_IOLOCK_EXCL;
        xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);

        ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
        if (ret)
                return ret;

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

write_retry:
        trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
        ret = generic_file_buffered_write(iocb, iovp, nr_segs,
                        pos, &iocb->ki_pos, count, ret);
        /*
         * If we just got an ENOSPC, flush the inode now we aren't holding
         * any page locks and retry *once*.
         */
        if (ret == -ENOSPC && !enospc) {
                ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
                if (ret)
                        return ret;
                enospc = 1;
                goto write_retry;
        }
        current->backing_dev_info = NULL;
        return ret;
}

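/*
 * Top-level write entry point: split O_DIRECT writes from buffered ones,
 * update the in-core inode size afterwards, and handle O_DSYNC/O_SYNC
 * semantics by flushing the written range and calling fsync on it.
 */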
STATIC ssize_t
xfs_file_aio_write(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
        int                     iolock;
        size_t                  ocount = 0;

        XFS_STATS_INC(xs_write_calls);

        BUG_ON(iocb->ki_pos != pos);

        ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
        if (ret)
                return ret;

        if (ocount == 0)
                return 0;

        xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        if (unlikely(file->f_flags & O_DIRECT))
                ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
                                                ocount, &iolock);
        else
                ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
                                                ocount, &iolock);

        xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);

        if (ret <= 0)
                goto out_unlock;

        /* Handle various SYNC-type writes */
        if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
                loff_t end = pos + ret - 1;
                int error, error2;

                xfs_rw_iunlock(ip, iolock);
                error = filemap_write_and_wait_range(mapping, pos, end);
                xfs_rw_ilock(ip, iolock);

                error2 = -xfs_file_fsync(file,
                                         (file->f_flags & __O_SYNC) ? 0 : 1);
                if (error)
                        ret = error;
                else if (error2)
                        ret = error2;
        }

out_unlock:
        xfs_aio_write_newsize_update(ip);
        xfs_rw_iunlock(ip, iolock);
        return ret;
}

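/*
 * Preallocate (or, for FALLOC_FL_PUNCH_HOLE, free) blocks for the given
 * range without writing any data, growing the file size if required.
 */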
STATIC long
xfs_file_fallocate(
        struct file     *file,
        int             mode,
        loff_t          offset,
        loff_t          len)
{
        struct inode    *inode = file->f_path.dentry->d_inode;
        long            error;
        loff_t          new_size = 0;
        xfs_flock64_t   bf;
        xfs_inode_t     *ip = XFS_I(inode);
        int             cmd = XFS_IOC_RESVSP;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        bf.l_whence = 0;
        bf.l_start = offset;
        bf.l_len = len;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        if (mode & FALLOC_FL_PUNCH_HOLE)
                cmd = XFS_IOC_UNRESVSP;

        /* check the new inode size is valid before allocating */
        if (!(mode & FALLOC_FL_KEEP_SIZE) &&
            offset + len > i_size_read(inode)) {
                new_size = offset + len;
                error = inode_newsize_ok(inode, new_size);
                if (error)
                        goto out_unlock;
        }

        error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
        if (error)
                goto out_unlock;

        /* Change file size if needed */
        if (new_size) {
                struct iattr iattr;

                iattr.ia_valid = ATTR_SIZE;
                iattr.ia_size = new_size;
                error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
        }

out_unlock:
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return error;
}

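/*
 * Reject opens of large files without O_LARGEFILE and any opens on a
 * filesystem that has been shut down.
 */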
STATIC int
xfs_file_open(
        struct inode    *inode,
        struct file     *file)
{
        if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
                return -EFBIG;
        if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
                return -EIO;
        return 0;
}

STATIC int
xfs_dir_open(
        struct inode    *inode,
        struct file     *file)
{
        struct xfs_inode *ip = XFS_I(inode);
        int             mode;
        int             error;

        error = xfs_file_open(inode, file);
        if (error)
                return error;

        /*
         * If there are any blocks, read-ahead block 0 as we're almost
         * certain to have the next operation be a read there.
         */
        mode = xfs_ilock_map_shared(ip);
        if (ip->i_d.di_nextents > 0)
                xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
        xfs_iunlock(ip, mode);
        return 0;
}

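/*
 * The last reference to this file is gone: let XFS clean up, e.g. trim
 * speculative preallocation beyond EOF.
 */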
STATIC int
xfs_file_release(
        struct inode    *inode,
        struct file     *filp)
{
        return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
        struct file     *filp,
        void            *dirent,
        filldir_t       filldir)
{
        struct inode    *inode = filp->f_path.dentry->d_inode;
        xfs_inode_t     *ip = XFS_I(inode);
        int             error;
        size_t          bufsize;

        /*
         * The Linux API doesn't pass the total size of the buffer we read
         * into down to the filesystem.  With the filldir concept it's not
         * needed for correct information, but the XFS dir2 leaf code wants
         * an estimate of the buffer size to calculate its readahead window
         * and size the buffers used for mapping to physical blocks.
         *
         * Try to give it an estimate that's good enough, maybe at some
         * point we can change the ->readdir prototype to include the
         * buffer size.  For now we use the current glibc buffer size.
         */
        bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

        error = xfs_readdir(ip, dirent, bufsize,
                                (xfs_off_t *)&filp->f_pos, filldir);
        if (error)
                return -error;
        return 0;
}

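/*
 * Set up a memory mapping of the file using our vm_operations so that
 * page faults and write faults go through XFS.
 */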
STATIC int
xfs_file_mmap(
        struct file     *filp,
        struct vm_area_struct *vma)
{
        vma->vm_ops = &xfs_file_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;

        file_accessed(filp);
        return 0;
}

/*
 * mmap()d file has taken write protection fault and is being made
 * writable. We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
        struct vm_area_struct   *vma,
        struct vm_fault         *vmf)
{
        return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

const struct file_operations xfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = xfs_file_aio_read,
        .aio_write      = xfs_file_aio_write,
        .splice_read    = xfs_file_splice_read,
        .splice_write   = xfs_file_splice_write,
        .unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = xfs_file_compat_ioctl,
#endif
        .mmap           = xfs_file_mmap,
        .open           = xfs_file_open,
        .release        = xfs_file_release,
        .fsync          = xfs_file_fsync,
        .fallocate      = xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
        .open           = xfs_dir_open,
        .read           = generic_read_dir,
        .readdir        = xfs_file_readdir,
        .llseek         = generic_file_llseek,
        .unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = xfs_file_compat_ioctl,
#endif
        .fsync          = xfs_file_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = xfs_vm_page_mkwrite,
};