linux/fs/ext4/file.c
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/aio.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

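/*
 * Wait for any direct IO in flight against unwritten extents on this
 * inode to drain, i.e. until EXT4_I(inode)->i_unwritten drops to zero.
 */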
void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
                   unsigned long nr_segs, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;
        size_t count = iov_length(iov, nr_segs);
        loff_t final_size = pos + count;

        if (pos >= i_size_read(inode))
                return 0;

        if ((pos & blockmask) || (final_size & blockmask))
                return 1;

        return 0;
}

static ssize_t
ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
                    unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct blk_plug plug;
        int unaligned_aio = 0;
        ssize_t ret;
        int overwrite = 0;
        size_t length = iov_length(iov, nr_segs);

        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb))
                unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);

        /* Unaligned direct AIO must be serialized; see comment above */
        if (unaligned_aio) {
                mutex_lock(ext4_aio_mutex(inode));
                ext4_unwritten_wait(inode);
        }

        BUG_ON(iocb->ki_pos != pos);

        mutex_lock(&inode->i_mutex);
        blk_start_plug(&plug);

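        /*
         * Stash the overwrite flag where the direct IO path can find it:
         * ext4's DIO code reads iocb->private to tell whether this write
         * is a pure overwrite of already initialized blocks, which allows
         * it to use lighter locking.
         */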
        iocb->private = &overwrite;

        /* check whether we do a DIO overwrite or not */
        if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
            !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
                struct ext4_map_blocks map;
                unsigned int blkbits = inode->i_blkbits;
                int err, len;

                map.m_lblk = pos >> blkbits;
                map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
                        - map.m_lblk;
                len = map.m_len;

                err = ext4_map_blocks(NULL, inode, &map, 0);
                /*
                 * 'err == len' means that all of the blocks have been
                 * preallocated, whether or not they are initialized.  To
                 * exclude uninitialized extents, we also need to check
                 * m_flags.  There are two conditions that indicate an
                 * initialized extent: 1) if we hit the extent cache, the
                 * EXT4_MAP_MAPPED flag is returned; 2) if we do a real
                 * lookup, no flags are returned.  So we should check for
                 * both conditions.
                 */
                if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
                        overwrite = 1;
        }

        ret = __generic_file_aio_write(iocb, iov, nr_segs);
        mutex_unlock(&inode->i_mutex);

        if (ret > 0) {
                ssize_t err;

                err = generic_write_sync(file, iocb->ki_pos - ret, ret);
                if (err < 0)
                        ret = err;
        }
        blk_finish_plug(&plug);

        if (unaligned_aio)
                mutex_unlock(ext4_aio_mutex(inode));

        return ret;
}

static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t pos)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
                size_t length = iov_length(iov, nr_segs);

                if ((pos > sbi->s_bitmap_maxbytes ||
                    (pos == sbi->s_bitmap_maxbytes && length > 0)))
                        return -EFBIG;

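                /* Shorten the iovec so the write stops exactly at the
                 * bitmap-file size limit rather than failing outright. */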
                if (pos + length > sbi->s_bitmap_maxbytes) {
                        nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
                                              sbi->s_bitmap_maxbytes - pos);
                }
        }

        if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
                ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
        else
                ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

        return ret;
}

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
        .remap_pages    = generic_file_remap_pages,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct address_space *mapping = file->f_mapping;

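        /* Faulting pages in requires ->readpage; refuse mmap without it. */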
        if (!mapping->a_ops->readpage)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &ext4_file_vm_ops;
        return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct path path;
        char buf[64], *cp;

        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
                sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
                /*
                 * Sample where the filesystem has been mounted and
                 * store it in the superblock for sysadmin convenience
                 * when trying to sort through large numbers of block
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
                path.mnt = mnt;
                path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
                if (!IS_ERR(cp)) {
                        handle_t *handle;
                        int err;

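                        /* One journal credit is enough here: only the
                         * superblock buffer is modified. */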
                        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err) {
                                ext4_journal_stop(handle);
                                return err;
                        }
                        strlcpy(sbi->s_es->s_last_mounted, cp,
                                sizeof(sbi->s_es->s_last_mounted));
                        ext4_handle_dirty_super(handle, sb);
                        ext4_journal_stop(handle);
                }
        }
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                int ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }
        return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can handle
 * SEEK_DATA/SEEK_HOLE for both block-mapped and extent-mapped files in the
 * same function.  Once the extent status tree is fully implemented, it will
 * track all extent status for a file and we can use it directly to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is any data in the range
 * [startoff, endoff]: if the range contains an unwritten extent, that
 * extent counts as data or as a hole depending on whether the page cache
 * holds data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     struct ext4_map_blocks *map,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

        index = startoff >> PAGE_CACHE_SHIFT;
        end = endoff >> PAGE_CACHE_SHIFT;

        pagevec_init(&pvec, 0);
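        /*
         * Scan the page cache in PAGEVEC_SIZE batches.  The buffer heads
         * of each page tell us whether the unwritten range is backed by
         * cached data (uptodate or unwritten buffers) or is effectively
         * a hole.
         */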
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0) {
                        if (whence == SEEK_DATA)
                                break;

                        BUG_ON(whence != SEEK_HOLE);
                        /*
                         * If this is the first pass through the loop, or
                         * the offset has not yet reached the end offset,
                         * there is a hole at this offset.
                         */
                        if (lastoff == startoff || lastoff < endoff)
                                found = 1;
                        break;
                }

                /*
                 * If this is the first pass through the loop and the
                 * offset is smaller than the offset of the first page,
                 * there is a hole at this offset.
                 */
                if (lastoff == startoff && whence == SEEK_HOLE &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = 1;
                        break;
                }

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If the current offset has not yet reached the end
                         * of the given range but the page is already beyond
                         * it, there is a hole at this offset.
                         */
                        if (lastoff < endoff && whence == SEEK_HOLE &&
                            page->index > end) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        lastoff = page_offset(page);
                        bh = head = page_buffers(page);
                        do {
                                if (buffer_uptodate(bh) ||
                                    buffer_unwritten(bh)) {
                                        if (whence == SEEK_DATA)
                                                found = 1;
                                } else {
                                        if (whence == SEEK_HOLE)
                                                found = 1;
                                }
                                if (found) {
                                        *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                        unlock_page(page);
                                        goto out;
                                }
                                lastoff += bh->b_size;
                                bh = bh->b_this_page;
                        } while (bh != head);

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /*
                 * We got fewer pages back than we asked for; the rest of
                 * the range must be a hole.
                 */
                if (nr_pages < num && whence == SEEK_HOLE) {
                        found = 1;
                        *offset = lastoff;
                        break;
                }

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

out:
        pagevec_release(&pvec);
        return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
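                /* NULL handle and no flags: a pure lookup, nothing is
                 * allocated. */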
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        if (last != start)
                                dataoff = (loff_t)last << blkbits;
                        break;
                }

                /*
                 * If there is a delayed extent at this offset, treat it
                 * as data.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        if (last != start)
                                dataoff = (loff_t)last << blkbits;
                        break;
                }

                /*
                 * If there is an unwritten extent at this offset, it
                 * counts as data or as a hole depending on whether the
                 * page cache holds data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                                              &map, &dataoff);
                        if (unwritten)
                                break;
                }

                last++;
                dataoff = (loff_t)last << blkbits;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (dataoff > isize)
                return -ENXIO;

        return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        last += ret;
                        holeoff = (loff_t)last << blkbits;
                        continue;
                }

                /*
                 * If there is a delayed extent at this offset, skip over
                 * the extent.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        last = es.es_lblk + es.es_len;
                        holeoff = (loff_t)last << blkbits;
                        continue;
                }

                /*
                 * If there is an unwritten extent at this offset, it
                 * counts as data or as a hole depending on whether the
                 * page cache holds data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                                              &map, &holeoff);
                        if (!unwritten) {
                                last += ret;
                                holeoff = (loff_t)last << blkbits;
                                continue;
                        }
                }

                /* found a hole */
                break;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (holeoff > isize)
                holeoff = isize;

        return vfs_setpos(file, holeoff, maxsize);
}

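/*
 * Usage sketch (userspace, illustrative only): SEEK_DATA and SEEK_HOLE
 * let a sparse-aware tool walk a file without reading its holes:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);     first offset with data
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  end of that data region
 *
 * Both requests are dispatched through ext4_llseek() below.
 */
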
/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}

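/*
 * .read and .write go through do_sync_read()/do_sync_write(), which
 * wrap the .aio_read/.aio_write entry points below.
 */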
const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = ext4_file_write,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};