linux/fs/xfs/linux-2.6/xfs_lrw.c
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"

#include <linux/capability.h>
#include <linux/writeback.h>


#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
        int                     tag,
        xfs_inode_t             *ip,
        void                    *data,
        size_t                  segs,
        loff_t                  offset,
        int                     ioflags)
{
        if (ip->i_rwtrace == NULL)
                return;
        ktrace_enter(ip->i_rwtrace,
                (void *)(unsigned long)tag,
                (void *)ip,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)data,
                (void *)((unsigned long)segs),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)ioflags),
                (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
                (void *)((unsigned long)current_pid()),
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL);
}

void
xfs_inval_cached_trace(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
        xfs_off_t       len,
        xfs_off_t       first,
        xfs_off_t       last)
{

        if (ip->i_rwtrace == NULL)
                return;
        ktrace_enter(ip->i_rwtrace,
                (void *)(__psint_t)XFS_INVAL_CACHED,
                (void *)ip,
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)((len >> 32) & 0xffffffff)),
                (void *)((unsigned long)(len & 0xffffffff)),
                (void *)((unsigned long)((first >> 32) & 0xffffffff)),
                (void *)((unsigned long)(first & 0xffffffff)),
                (void *)((unsigned long)((last >> 32) & 0xffffffff)),
                (void *)((unsigned long)(last & 0xffffffff)),
                (void *)((unsigned long)current_pid()),
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL);
}
#endif

/*
 *      xfs_iozero
 *
 *      xfs_iozero clears the specified range of the buffer supplied,
 *      and marks all the affected blocks as valid and modified.  If
 *      an affected block is not allocated, it will be allocated.  If
 *      an affected block is not completely overwritten, and is not
 *      valid before the operation, it will be read from disk before
 *      being partially zeroed.
 */
STATIC int
xfs_iozero(
        struct xfs_inode        *ip,    /* inode                        */
        loff_t                  pos,    /* offset in file               */
        size_t                  count)  /* size of data to zero         */
{
        struct page             *page;
        struct address_space    *mapping;
        int                     status;

        mapping = VFS_I(ip)->i_mapping;
        do {
                unsigned offset, bytes;
                void *fsdata;

                offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = pagecache_write_begin(NULL, mapping, pos, bytes,
                                        AOP_FLAG_UNINTERRUPTIBLE,
                                        &page, &fsdata);
                if (status)
                        break;

                zero_user(page, offset, bytes);

                status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
                                        page, fsdata);
                WARN_ON(status <= 0); /* can't return less than zero! */
                pos += bytes;
                count -= bytes;
                status = 0;
        } while (count);

        return (-status);
}
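
/*
 * Added note (illustrative, not part of the original file): the real
 * caller of xfs_iozero() in this file is xfs_zero_last_block() below,
 * which zeroes the bytes of the last block that lie beyond EOF before
 * the file size is extended:
 *
 *      zero_len = mp->m_sb.sb_blocksize - zero_offset;
 *      if (isize + zero_len > offset)
 *              zero_len = offset - isize;
 *      error = xfs_iozero(ip, isize, zero_len);
 */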

ssize_t                 /* bytes read, or (-) error */
xfs_read(
        xfs_inode_t             *ip,
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned int            segs,
        loff_t                  *offset,
        int                     ioflags)
{
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        xfs_mount_t             *mp = ip->i_mount;
        size_t                  size = 0;
        ssize_t                 ret = 0;
        xfs_fsize_t             n;
        unsigned long           seg;


        XFS_STATS_INC(xs_read_calls);

        /* START copy & waste from filemap.c */
        for (seg = 0; seg < segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                size += iv->iov_len;
                if (unlikely((ssize_t)(size|iv->iov_len) < 0))
                        return XFS_ERROR(-EINVAL);
        }
        /* END copy & waste from filemap.c */

        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(ip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((*offset & target->bt_smask) ||
                    (size & target->bt_smask)) {
                        if (*offset == ip->i_size) {
                                return (0);
                        }
                        return -XFS_ERROR(EINVAL);
                }
        }

        n = XFS_MAXIOFFSET(mp) - *offset;
        if ((n <= 0) || (size == 0))
                return 0;

        if (n < size)
                size = n;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (unlikely(ioflags & IO_ISDIRECT))
                mutex_lock(&inode->i_mutex);
        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
                int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
                int iolock = XFS_IOLOCK_SHARED;

                ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size,
                                        dmflags, &iolock);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        if (unlikely(ioflags & IO_ISDIRECT))
                                mutex_unlock(&inode->i_mutex);
                        return ret;
                }
        }

        if (unlikely(ioflags & IO_ISDIRECT)) {
                if (inode->i_mapping->nrpages)
                        ret = -xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
                                                    -1, FI_REMAPF_LOCKED);
                mutex_unlock(&inode->i_mutex);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return ret;
                }
        }

        xfs_rw_enter_trace(XFS_READ_ENTER, ip,
                                (void *)iovp, segs, *offset, ioflags);

        iocb->ki_pos = *offset;
        ret = generic_file_aio_read(iocb, iovp, segs, *offset);
        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}
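
/*
 * Worked example (added note, not in the original file): on a data
 * device with 512-byte sectors, target->bt_smask is 0x1ff.  A direct
 * read at *offset == 513 has (513 & 0x1ff) == 1, so the alignment
 * check in xfs_read() above rejects it with EINVAL, unless the offset
 * sits exactly at EOF, in which case 0 (end of file) is returned.
 * The 512-byte sector size is an assumption for the example.
 */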

ssize_t
xfs_splice_read(
        xfs_inode_t             *ip,
        struct file             *infilp,
        loff_t                  *ppos,
        struct pipe_inode_info  *pipe,
        size_t                  count,
        int                     flags,
        int                     ioflags)
{
        xfs_mount_t             *mp = ip->i_mount;
        ssize_t                 ret;

        XFS_STATS_INC(xs_read_calls);
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
                int iolock = XFS_IOLOCK_SHARED;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
                                        FILP_DELAY_FLAG(infilp), &iolock);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return -error;
                }
        }
        xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip,
                           pipe, count, *ppos, ioflags);
        ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

ssize_t
xfs_splice_write(
        xfs_inode_t             *ip,
        struct pipe_inode_info  *pipe,
        struct file             *outfilp,
        loff_t                  *ppos,
        size_t                  count,
        int                     flags,
        int                     ioflags)
{
        xfs_mount_t             *mp = ip->i_mount;
        ssize_t                 ret;
        struct inode            *inode = outfilp->f_mapping->host;
        xfs_fsize_t             isize, new_size;

        XFS_STATS_INC(xs_write_calls);
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
                int iolock = XFS_IOLOCK_EXCL;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
                                        FILP_DELAY_FLAG(outfilp), &iolock);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return -error;
                }
        }

        new_size = *ppos + count;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (new_size > ip->i_size)
                ip->i_new_size = new_size;
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip,
                           pipe, count, *ppos, ioflags);
        ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_write_bytes, ret);

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
                *ppos = isize;

        if (*ppos > ip->i_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                if (*ppos > ip->i_size)
                        ip->i_size = *ppos;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }

        if (ip->i_new_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                ip->i_new_size = 0;
                if (ip->i_d.di_size > ip->i_size)
                        ip->i_d.di_size = ip->i_size;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int                              /* error (positive) */
xfs_zero_last_block(
        xfs_inode_t     *ip,
        xfs_fsize_t     offset,
        xfs_fsize_t     isize)
{
        xfs_fileoff_t   last_fsb;
        xfs_mount_t     *mp = ip->i_mount;
        int             nimaps;
        int             zero_offset;
        int             zero_len;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        zero_offset = XFS_B_FSB_OFFSET(mp, isize);
        if (zero_offset == 0) {
                /*
                 * There are no extra bytes in the last block on disk to
                 * zero, so return.
                 */
                return 0;
        }

        last_fsb = XFS_B_TO_FSBT(mp, isize);
        nimaps = 1;
        error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
                          &nimaps, NULL, NULL);
        if (error) {
                return error;
        }
        ASSERT(nimaps > 0);
        /*
         * If the block underlying isize is just a hole, then there
         * is nothing to zero.
         */
        if (imap.br_startblock == HOLESTARTBLOCK) {
                return 0;
        }
        /*
         * Zero the part of the last block beyond the EOF, and write it
         * out sync.  We need to drop the ilock while we do this so we
         * don't deadlock when the buffer cache calls back to us.
         */
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
        error = xfs_iozero(ip, isize, zero_len);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        ASSERT(error >= 0);
        return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes and
 * unwritten extents in the range are skipped and left as they are.
 */

int                                     /* error (positive) */
xfs_zero_eof(
        xfs_inode_t     *ip,
        xfs_off_t       offset,         /* starting I/O offset */
        xfs_fsize_t     isize)          /* current inode size */
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   start_zero_fsb;
        xfs_fileoff_t   end_zero_fsb;
        xfs_fileoff_t   zero_count_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_fileoff_t   zero_off;
        xfs_fsize_t     zero_len;
        int             nimaps;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
        ASSERT(offset > isize);

        /*
         * First handle zeroing the block on which isize resides.
         * We only zero a part of that block so it is handled specially.
         */
        error = xfs_zero_last_block(ip, offset, isize);
        if (error) {
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
                return error;
        }

        /*
         * Calculate the range between the new size and the old
         * where blocks needing to be zeroed may exist.  To get the
         * block where the last byte in the file currently resides,
         * we need to subtract one from the size and truncate back
         * to a block boundary.  We subtract 1 in case the size is
         * exactly on a block boundary.
         */
        last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
        start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
        ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
        if (last_fsb == end_zero_fsb) {
                /*
                 * The size was only incremented on its last block.
                 * We took care of that above, so just return.
                 */
                return 0;
        }

        ASSERT(start_zero_fsb <= end_zero_fsb);
        while (start_zero_fsb <= end_zero_fsb) {
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
                error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
                                  0, NULL, 0, &imap, &nimaps, NULL, NULL);
                if (error) {
                        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
                        return error;
                }
                ASSERT(nimaps > 0);

                if (imap.br_state == XFS_EXT_UNWRITTEN ||
                    imap.br_startblock == HOLESTARTBLOCK) {
                        /*
                         * This extent is a hole or unwritten, so there
                         * is nothing to zero here; skip past it.
                         */
                        start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }

                /*
                 * There are blocks we need to zero.
                 * Drop the inode lock while we're doing the I/O.
                 * We'll still have the iolock to protect us.
                 */
                xfs_iunlock(ip, XFS_ILOCK_EXCL);

                zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
                zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

                if ((zero_off + zero_len) > offset)
                        zero_len = offset - zero_off;

                error = xfs_iozero(ip, zero_off, zero_len);
                if (error) {
                        goto out_lock;
                }

                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

                xfs_ilock(ip, XFS_ILOCK_EXCL);
        }

        return 0;

out_lock:
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        ASSERT(error >= 0);
        return error;
}
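
/*
 * Added note (illustrative, not part of the original file): the caller
 * of xfs_zero_eof() is xfs_write() below.  When a write starts beyond
 * the current EOF it zeroes the gap first, holding XFS_ILOCK_EXCL and
 * the iolock exactly as the ASSERTs above require:
 *
 *      if (pos > xip->i_size) {
 *              error = xfs_zero_eof(xip, pos, xip->i_size);
 *              if (error) {
 *                      xfs_iunlock(xip, XFS_ILOCK_EXCL);
 *                      goto out_unlock_internal;
 *              }
 *      }
 */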

ssize_t                         /* bytes written, or (-) error */
xfs_write(
        struct xfs_inode        *xip,
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned int            nsegs,
        loff_t                  *offset,
        int                     ioflags)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        unsigned long           segs = nsegs;
        xfs_mount_t             *mp;
        ssize_t                 ret = 0, error = 0;
        xfs_fsize_t             isize, new_size;
        int                     iolock;
        int                     eventsent = 0;
        size_t                  ocount = 0, count;
        loff_t                  pos;
        int                     need_i_mutex;

        XFS_STATS_INC(xs_write_calls);

        error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
        if (error)
                return error;

        count = ocount;
        pos = *offset;

        if (count == 0)
                return 0;

        mp = xip->i_mount;

        xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

relock:
        if (ioflags & IO_ISDIRECT) {
                iolock = XFS_IOLOCK_SHARED;
                need_i_mutex = 0;
        } else {
                iolock = XFS_IOLOCK_EXCL;
                need_i_mutex = 1;
                mutex_lock(&inode->i_mutex);
        }

        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

start:
        error = -generic_write_checks(file, &pos, &count,
                                        S_ISBLK(inode->i_mode));
        if (error) {
                xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                goto out_unlock_mutex;
        }

        if ((DM_EVENT_ENABLED(xip, DM_EVENT_WRITE) &&
            !(ioflags & IO_INVIS) && !eventsent)) {
                int             dmflags = FILP_DELAY_FLAG(file);

                if (need_i_mutex)
                        dmflags |= DM_FLAGS_IMUX;

                xfs_iunlock(xip, XFS_ILOCK_EXCL);
                error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip,
                                      pos, count, dmflags, &iolock);
                if (error) {
                        goto out_unlock_internal;
                }
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                eventsent = 1;

                /*
                 * The iolock was dropped and reacquired in XFS_SEND_DATA
                 * so we have to recheck the size when appending.
                 * We will only "goto start;" once, since having sent the
                 * event prevents another call to XFS_SEND_DATA, which is
                 * what allows the size to change in the first place.
                 */
                if ((file->f_flags & O_APPEND) && pos != xip->i_size)
                        goto start;
        }

        if (ioflags & IO_ISDIRECT) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(xip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;

                if ((pos & target->bt_smask) || (count & target->bt_smask)) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        return XFS_ERROR(-EINVAL);
                }

                if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        iolock = XFS_IOLOCK_EXCL;
                        need_i_mutex = 1;
                        mutex_lock(&inode->i_mutex);
                        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
                        goto start;
                }
        }

        new_size = pos + count;
        if (new_size > xip->i_size)
                xip->i_new_size = new_size;

        if (likely(!(ioflags & IO_INVIS)))
                file_update_time(file);

        /*
         * If the offset is beyond the size of the file, we have a couple
         * of things to do. First, if there is already space allocated
         * we need to either create holes or zero the disk or ...
         *
         * If there is a page where the previous size lands, we need
         * to zero it out up to the new size.
         */

        if (pos > xip->i_size) {
                error = xfs_zero_eof(xip, pos, xip->i_size);
                if (error) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL);
                        goto out_unlock_internal;
                }
        }
        xfs_iunlock(xip, XFS_ILOCK_EXCL);

        /*
         * If we're writing the file then make sure to clear the
         * setuid and setgid bits if the process is not being run
         * by root.  This keeps people from modifying setuid and
         * setgid binaries.
         */

        if (((xip->i_d.di_mode & S_ISUID) ||
            ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
                (S_ISGID | S_IXGRP))) &&
             !capable(CAP_FSETID)) {
                error = xfs_write_clear_setuid(xip);
                if (likely(!error))
                        error = -file_remove_suid(file);
                if (unlikely(error)) {
                        goto out_unlock_internal;
                }
        }

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        if ((ioflags & IO_ISDIRECT)) {
                if (mapping->nrpages) {
                        WARN_ON(need_i_mutex == 0);
                        xfs_inval_cached_trace(xip, pos, -1,
                                        (pos & PAGE_CACHE_MASK), -1);
                        error = xfs_flushinval_pages(xip,
                                        (pos & PAGE_CACHE_MASK),
                                        -1, FI_REMAPF_LOCKED);
                        if (error)
                                goto out_unlock_internal;
                }

                if (need_i_mutex) {
                        /* demote the lock now the cached pages are gone */
                        xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
                        mutex_unlock(&inode->i_mutex);

                        iolock = XFS_IOLOCK_SHARED;
                        need_i_mutex = 0;
                }

                xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_direct_write(iocb, iovp,
                                &segs, pos, offset, count, ocount);

                /*
                 * direct-io write to a hole: fall through to buffered I/O
                 * for completing the rest of the request.
                 */
                if (ret >= 0 && ret != count) {
                        XFS_STATS_ADD(xs_write_bytes, ret);

                        pos += ret;
                        count -= ret;

                        ioflags &= ~IO_ISDIRECT;
                        xfs_iunlock(xip, iolock);
                        goto relock;
                }
        } else {
                int enospc = 0;
                ssize_t ret2 = 0;

write_retry:
                xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
                                *offset, ioflags);
                ret2 = generic_file_buffered_write(iocb, iovp, segs,
                                pos, offset, count, ret);
                /*
                 * if we just got an ENOSPC, flush the inode now we
                 * aren't holding any page locks and retry *once*
                 */
                if (ret2 == -ENOSPC && !enospc) {
                        error = xfs_flush_pages(xip, 0, -1, 0, FI_NONE);
                        if (error)
                                goto out_unlock_internal;
                        enospc = 1;
                        goto write_retry;
                }
                ret = ret2;
        }

        current->backing_dev_info = NULL;

        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
                *offset = isize;

        if (*offset > xip->i_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                if (*offset > xip->i_size)
                        xip->i_size = *offset;
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }

        if (ret == -ENOSPC &&
            DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
                xfs_iunlock(xip, iolock);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);
                error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip,
                                DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL,
                                0, 0, 0); /* Delay flag intentionally unused */
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_ilock(xip, iolock);
                if (error)
                        goto out_unlock_internal;
                goto start;
        }

        error = -ret;
        if (ret <= 0)
                goto out_unlock_internal;

        XFS_STATS_ADD(xs_write_bytes, ret);

        /* Handle various SYNC-type writes */
        if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
                loff_t end = pos + ret - 1;
                int error2;

                xfs_iunlock(xip, iolock);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);

                error2 = filemap_write_and_wait_range(mapping, pos, end);
                if (!error)
                        error = error2;
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_ilock(xip, iolock);

                error2 = xfs_fsync(xip);
                if (!error)
                        error = error2;
        }

 out_unlock_internal:
        if (xip->i_new_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                xip->i_new_size = 0;
                /*
                 * If this was a direct or synchronous I/O that failed (such
                 * as ENOSPC) then part of the I/O may have been written to
                 * disk before the error occurred.  In this case the on-disk
                 * file size may have been adjusted beyond the in-memory file
                 * size and now needs to be truncated back.
                 */
                if (xip->i_d.di_size > xip->i_size)
                        xip->i_d.di_size = xip->i_size;
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(xip, iolock);
 out_unlock_mutex:
        if (need_i_mutex)
                mutex_unlock(&inode->i_mutex);
        return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers get this
 * attached as their b_bdstrat callback function.  This is so that we
 * can catch a buffer after prematurely unpinning it to forcibly shut
 * down the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
        if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
                xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
                /*
                 * Metadata write that didn't get logged but
                 * written delayed anyway. These aren't associated
                 * with a transaction, and can be ignored.
                 */
                if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
                    (XFS_BUF_ISREAD(bp)) == 0)
                        return (xfs_bioerror_relse(bp));
                else
                        return (xfs_bioerror(bp));
        }

        xfs_buf_iorequest(bp);
        return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp)
{
        ASSERT(mp);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_iorequest(bp);
                return;
        }

        xfs_buftrace("XFSBDSTRAT IOERROR", bp);
        xfs_bioerror_relse(bp);
}
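
/*
 * Usage sketch (added note, not in the original file): callers submit
 * buffers through the shutdown-aware wrapper above rather than calling
 * xfs_buf_iorequest() directly, then wait for I/O completion.  The
 * xfs_buf_iowait() helper named here is an assumption for illustration:
 *
 *      xfsbdstrat(mp, bp);
 *      error = xfs_buf_iowait(bp);
 */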

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
        xfs_mount_t             *mp,
        char                    *message)
{
        if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
            xfs_readonly_buftarg(mp->m_logdev_targp) ||
            (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
                cmn_err(CE_NOTE,
                        "XFS: %s required on read-only device.", message);
                cmn_err(CE_NOTE,
                        "XFS: write access unavailable, cannot proceed.");
                return EROFS;
        }
        return 0;
}
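
/*
 * Usage sketch (added note, not in the original file): a typical caller
 * checks for a read-only device before starting an operation that must
 * write, and propagates the positive EROFS returned above.  The "growfs"
 * operation name is illustrative:
 *
 *      error = xfs_dev_is_read_only(mp, "growfs");
 *      if (error)
 *              return error;
 */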