linux/fs/block_dev.c
/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/smp_lock.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/kmemleak.h>
#include <asm/uaccess.h>
#include "internal.h"

struct bdev_inode {
        struct block_device bdev;
        struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
        return container_of(inode, struct bdev_inode, vfs_inode);
}

inline struct block_device *I_BDEV(struct inode *inode)
{
        return &BDEV_I(inode)->bdev;
}

EXPORT_SYMBOL(I_BDEV);

static sector_t max_block(struct block_device *bdev)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = i_size_read(bdev->bd_inode);

        if (sz) {
                unsigned int size = block_size(bdev);
                unsigned int sizebits = blksize_bits(size);
                retval = (sz >> sizebits);
        }
        return retval;
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
        if (bdev->bd_inode->i_mapping->nrpages == 0)
                return;
        invalidate_bh_lrus();
        truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}

int set_blocksize(struct block_device *bdev, int size)
{
        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
                return -EINVAL;

        /* Size cannot be smaller than the size supported by the device */
        if (size < bdev_logical_block_size(bdev))
                return -EINVAL;

        /* Don't change the size if it is the same as current */
        if (bdev->bd_block_size != size) {
                sync_blockdev(bdev);
                bdev->bd_block_size = size;
                bdev->bd_inode->i_blkbits = blksize_bits(size);
                kill_bdev(bdev);
        }
        return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
        if (set_blocksize(sb->s_bdev, size))
                return 0;
        /* If we get here, we know size is a power of two
         * and its value is between 512 and PAGE_SIZE */
        sb->s_blocksize = size;
        sb->s_blocksize_bits = blksize_bits(size);
        return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
        int minsize = bdev_logical_block_size(sb->s_bdev);
        if (size < minsize)
                size = minsize;
        return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);

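/*
 * Illustrative usage sketch (kept under #if 0, not built): a filesystem
 * would typically pick its block size from ->fill_super using the two
 * helpers above.  "example_fill_super" and "EXAMPLEFS_BLOCK_SIZE" are
 * hypothetical names, not part of this file.
 */
#if 0
static int example_fill_super(struct super_block *sb, void *data, int silent)
{
        /* Never go below what the device can address... */
        if (!sb_min_blocksize(sb, 512))
                return -EINVAL;
        /* ...then switch to the size the on-disk format wants. */
        if (!sb_set_blocksize(sb, EXAMPLEFS_BLOCK_SIZE))
                return -EINVAL;
        /* sb->s_blocksize and sb->s_blocksize_bits are now consistent. */
        return 0;
}
#endif
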
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        if (iblock >= max_block(I_BDEV(inode))) {
                if (create)
                        return -EIO;

                /*
                 * for reads, we're just trying to fill a partial page.
                 * return a hole, they will have to call get_block again
                 * before they can fill it, and they will get -EIO at that
                 * time
                 */
                return 0;
        }
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        sector_t end_block = max_block(I_BDEV(inode));
        unsigned long max_blocks = bh->b_size >> inode->i_blkbits;

        if ((iblock + max_blocks) > end_block) {
                max_blocks = end_block - iblock;
                if ((long)max_blocks <= 0) {
                        if (create)
                                return -EIO;    /* write fully beyond EOF */
                        /*
                         * It is a read which is fully beyond EOF.  We return
                         * a !buffer_mapped buffer
                         */
                        max_blocks = 0;
                }
        }

        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        bh->b_size = max_blocks << inode->i_blkbits;
        if (max_blocks)
                set_buffer_mapped(bh);
        return 0;
}

static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                        loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;

        return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode),
                                iov, offset, nr_segs, blkdev_get_blocks, NULL);
}

int __sync_blockdev(struct block_device *bdev, int wait)
{
        if (!bdev)
                return 0;
        if (!wait)
                return filemap_flush(bdev->bd_inode->i_mapping);
        return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  This covers filesystem data as well as the underlying
 * block device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        if (sb) {
                int res = sync_filesystem(sb);
                drop_super(sb);
                return res;
        }
        return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

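/*
 * Illustrative sketch (kept under #if 0): the two flavours above differ
 * in scope - sync_blockdev() only writes the bdev pagecache, while
 * fsync_bdev() also syncs a mounted filesystem.  "example_quiesce" is a
 * hypothetical caller.
 */
#if 0
static int example_quiesce(struct block_device *bdev)
{
        /* Flush filesystem data (if any) plus the bdev mapping itself. */
        return fsync_bdev(bdev);
}
#endif
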
/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:       blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process actually unfreezes the frozen filesystem when multiple
 * freeze requests arrive simultaneously.  It counts up in freeze_bdev()
 * and down in thaw_bdev(); when it reaches 0, thaw_bdev() actually
 * unfreezes the filesystem.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;
        int error = 0;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (++bdev->bd_fsfreeze_count > 1) {
                /*
                 * We don't even need to grab a reference - the first call
                 * to freeze_bdev grabs an active reference and only the last
                 * thaw_bdev drops it.
                 */
                sb = get_super(bdev);
                drop_super(sb);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return sb;
        }

        sb = get_active_super(bdev);
        if (!sb)
                goto out;
        if (sb->s_flags & MS_RDONLY) {
                deactivate_locked_super(sb);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return sb;
        }

        sb->s_frozen = SB_FREEZE_WRITE;
        smp_wmb();

        sync_filesystem(sb);

        sb->s_frozen = SB_FREEZE_TRANS;
        smp_wmb();

        sync_blockdev(sb->s_bdev);

        if (sb->s_op->freeze_fs) {
                error = sb->s_op->freeze_fs(sb);
                if (error) {
                        printk(KERN_ERR
                                "VFS: Filesystem freeze failed\n");
                        sb->s_frozen = SB_UNFROZEN;
                        deactivate_locked_super(sb);
                        bdev->bd_fsfreeze_count--;
                        mutex_unlock(&bdev->bd_fsfreeze_mutex);
                        return ERR_PTR(error);
                }
        }
        up_write(&sb->s_umount);

 out:
        sync_blockdev(bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return sb;      /* thaw_bdev releases s->s_umount */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:       blockdevice to unlock
 * @sb:         associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
        int error = -EINVAL;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (!bdev->bd_fsfreeze_count)
                goto out_unlock;

        error = 0;
        if (--bdev->bd_fsfreeze_count > 0)
                goto out_unlock;

        if (!sb)
                goto out_unlock;

        BUG_ON(sb->s_bdev != bdev);
        down_write(&sb->s_umount);
        if (sb->s_flags & MS_RDONLY)
                goto out_deactivate;

        if (sb->s_op->unfreeze_fs) {
                error = sb->s_op->unfreeze_fs(sb);
                if (error) {
                        printk(KERN_ERR
                                "VFS: Filesystem thaw failed\n");
                        sb->s_frozen = SB_FREEZE_TRANS;
                        bdev->bd_fsfreeze_count++;
                        mutex_unlock(&bdev->bd_fsfreeze_mutex);
                        return error;
                }
        }

        sb->s_frozen = SB_UNFROZEN;
        smp_wmb();
        wake_up(&sb->s_wait_unfrozen);

out_deactivate:
        if (sb)
                deactivate_locked_super(sb);
out_unlock:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return 0;
}
EXPORT_SYMBOL(thaw_bdev);

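/*
 * Illustrative sketch (kept under #if 0) of the intended calling pattern
 * for the freeze API, e.g. from a snapshot driver.  "example_snapshot"
 * is hypothetical.
 */
#if 0
static int example_snapshot(struct block_device *bdev)
{
        struct super_block *sb = freeze_bdev(bdev);     /* blocks new writes */

        if (IS_ERR(sb))
                return PTR_ERR(sb);
        /* ... the device now presents a consistent image; copy it out ... */
        return thaw_bdev(bdev, sb);     /* sb may be NULL; thaw_bdev copes */
}
#endif
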
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        *pagep = NULL;
        return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        int ret;
        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        page_cache_release(page);

        return ret;
}

/*
 * private llseek:
 * for a block special file, file->f_path.dentry->d_inode->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int origin)
{
        struct inode *bd_inode = file->f_mapping->host;
        loff_t size;
        loff_t retval;

        mutex_lock(&bd_inode->i_mutex);
        size = i_size_read(bd_inode);

        switch (origin) {
                case 2:
                        offset += size;
                        break;
                case 1:
                        offset += file->f_pos;
        }
        retval = -EINVAL;
        if (offset >= 0 && offset <= size) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                }
                retval = offset;
        }
        mutex_unlock(&bd_inode->i_mutex);
        return retval;
}

/*
 *      Filp is never NULL; the only case when ->fsync() is called with
 *      NULL first argument is nfsd_sync_dir() and that's not a directory.
 */

static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
        return sync_blockdev(I_BDEV(filp->f_mapping->host));
}

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void bdev_destroy_inode(struct inode *inode)
{
        struct bdev_inode *bdi = BDEV_I(inode);

        kmem_cache_free(bdev_cachep, bdi);
}

static void init_once(void *foo)
{
        struct bdev_inode *ei = (struct bdev_inode *) foo;
        struct block_device *bdev = &ei->bdev;

        memset(bdev, 0, sizeof(*bdev));
        mutex_init(&bdev->bd_mutex);
        INIT_LIST_HEAD(&bdev->bd_inodes);
        INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
        INIT_LIST_HEAD(&bdev->bd_holder_list);
#endif
        inode_init_once(&ei->vfs_inode);
        /* Initialize mutex for freeze. */
        mutex_init(&bdev->bd_fsfreeze_mutex);
}

static inline void __bd_forget(struct inode *inode)
{
        list_del_init(&inode->i_devices);
        inode->i_bdev = NULL;
        inode->i_mapping = &inode->i_data;
}

static void bdev_clear_inode(struct inode *inode)
{
        struct block_device *bdev = &BDEV_I(inode)->bdev;
        struct list_head *p;
        spin_lock(&bdev_lock);
        while ((p = bdev->bd_inodes.next) != &bdev->bd_inodes) {
                __bd_forget(list_entry(p, struct inode, i_devices));
        }
        list_del_init(&bdev->bd_list);
        spin_unlock(&bdev_lock);
}

static const struct super_operations bdev_sops = {
        .statfs = simple_statfs,
        .alloc_inode = bdev_alloc_inode,
        .destroy_inode = bdev_destroy_inode,
        .drop_inode = generic_delete_inode,
        .clear_inode = bdev_clear_inode,
};

static int bd_get_sb(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
        return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt);
}

static struct file_system_type bd_type = {
        .name           = "bdev",
        .get_sb         = bd_get_sb,
        .kill_sb        = kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;

void __init bdev_cache_init(void)
{
        int err;
        struct vfsmount *bd_mnt;

        bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
                        0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                                SLAB_MEM_SPREAD|SLAB_PANIC),
                        init_once);
        err = register_filesystem(&bd_type);
        if (err)
                panic("Cannot register bdev pseudo-fs");
        bd_mnt = kern_mount(&bd_type);
        if (IS_ERR(bd_mnt))
                panic("Cannot create bdev pseudo-fs");
        /*
         * This vfsmount structure is only used to obtain the
         * blockdev_superblock, so tell kmemleak not to report it.
         */
        kmemleak_not_leak(bd_mnt);
        blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}

/*
 * Most likely a _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through the icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
        return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
        return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
        BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
        return 0;
}

static LIST_HEAD(all_bdevs);

struct block_device *bdget(dev_t dev)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = iget5_locked(blockdev_superblock, hash(dev),
                        bdev_test, bdev_set, &dev);

        if (!inode)
                return NULL;

        bdev = &BDEV_I(inode)->bdev;

        if (inode->i_state & I_NEW) {
                bdev->bd_contains = NULL;
                bdev->bd_inode = inode;
                bdev->bd_block_size = (1 << inode->i_blkbits);
                bdev->bd_part_count = 0;
                bdev->bd_invalidated = 0;
                inode->i_mode = S_IFBLK;
                inode->i_rdev = dev;
                inode->i_bdev = bdev;
                inode->i_data.a_ops = &def_blk_aops;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                inode->i_data.backing_dev_info = &default_backing_dev_info;
                spin_lock(&bdev_lock);
                list_add(&bdev->bd_list, &all_bdevs);
                spin_unlock(&bdev_lock);
                unlock_new_inode(inode);
        }
        return bdev;
}

EXPORT_SYMBOL(bdget);

/**
 * bdgrab -- Grab a reference to an already referenced block device
 * @bdev:       Block device to grab a reference to.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
        atomic_inc(&bdev->bd_inode->i_count);
        return bdev;
}

long nr_blockdev_pages(void)
{
        struct block_device *bdev;
        long ret = 0;
        spin_lock(&bdev_lock);
        list_for_each_entry(bdev, &all_bdevs, bd_list) {
                ret += bdev->bd_inode->i_mapping->nrpages;
        }
        spin_unlock(&bdev_lock);
        return ret;
}

void bdput(struct block_device *bdev)
{
        iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

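/*
 * Illustrative sketch (kept under #if 0): bdget() and bdput() pair like
 * igrab()/iput() on the bdev pseudo-inode.  "example_peek" is hypothetical.
 */
#if 0
static void example_peek(dev_t dev)
{
        struct block_device *bdev = bdget(dev);

        if (!bdev)
                return;
        /* ... inspect e.g. bdev->bd_inode->i_mapping->nrpages ... */
        bdput(bdev);            /* drops the reference bdget() took */
}
#endif
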
static struct block_device *bd_acquire(struct inode *inode)
{
        struct block_device *bdev;

        spin_lock(&bdev_lock);
        bdev = inode->i_bdev;
        if (bdev) {
                atomic_inc(&bdev->bd_inode->i_count);
                spin_unlock(&bdev_lock);
                return bdev;
        }
        spin_unlock(&bdev_lock);

        bdev = bdget(inode->i_rdev);
        if (bdev) {
                spin_lock(&bdev_lock);
                if (!inode->i_bdev) {
                        /*
                         * We take an additional bd_inode->i_count for the
                         * inode, and it's released in clear_inode() of the
                         * inode.  So we can always access it via ->i_mapping
                         * without igrab().
                         */
                        atomic_inc(&bdev->bd_inode->i_count);
                        inode->i_bdev = bdev;
                        inode->i_mapping = bdev->bd_inode->i_mapping;
                        list_add(&inode->i_devices, &bdev->bd_inodes);
                }
                spin_unlock(&bdev_lock);
        }
        return bdev;
}

/* Call when you free an inode */

void bd_forget(struct inode *inode)
{
        struct block_device *bdev = NULL;

        spin_lock(&bdev_lock);
        if (inode->i_bdev) {
                if (!sb_is_blkdev_sb(inode->i_sb))
                        bdev = inode->i_bdev;
                __bd_forget(inode);
        }
        spin_unlock(&bdev_lock);

        if (bdev)
                iput(bdev->bd_inode);
}

int bd_claim(struct block_device *bdev, void *holder)
{
        int res;
        spin_lock(&bdev_lock);

        /* first decide result */
        if (bdev->bd_holder == holder)
                res = 0;         /* already a holder */
        else if (bdev->bd_holder != NULL)
                res = -EBUSY;    /* held by someone else */
        else if (bdev->bd_contains == bdev)
                res = 0;         /* is a whole device which isn't held */

        else if (bdev->bd_contains->bd_holder == bd_claim)
                res = 0;         /* is a partition of a device that is being partitioned */
        else if (bdev->bd_contains->bd_holder != NULL)
                res = -EBUSY;    /* is a partition of a held device */
        else
                res = 0;         /* is a partition of an un-held device */

        /* now impose change */
        if (res == 0) {
                /* note that for a whole device bd_holders
                 * will be incremented twice, and bd_holder will
                 * be set to bd_claim before being set to holder
                 */
                bdev->bd_contains->bd_holders++;
                bdev->bd_contains->bd_holder = bd_claim;
                bdev->bd_holders++;
                bdev->bd_holder = holder;
        }
        spin_unlock(&bdev_lock);
        return res;
}

EXPORT_SYMBOL(bd_claim);

void bd_release(struct block_device *bdev)
{
        spin_lock(&bdev_lock);
        if (!--bdev->bd_contains->bd_holders)
                bdev->bd_contains->bd_holder = NULL;
        if (!--bdev->bd_holders)
                bdev->bd_holder = NULL;
        spin_unlock(&bdev_lock);
}

EXPORT_SYMBOL(bd_release);

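/*
 * Illustrative sketch (kept under #if 0) of the claim protocol above:
 * @holder is an opaque cookie used only for matching, commonly a
 * struct file or a driver object.  "example_exclusive_use" is hypothetical.
 */
#if 0
static int example_exclusive_use(struct block_device *bdev, void *holder)
{
        int err = bd_claim(bdev, holder);       /* -EBUSY if held by someone else */

        if (err)
                return err;
        /* ... bdev is exclusively ours here ... */
        bd_release(bdev);
        return 0;
}
#endif
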
#ifdef CONFIG_SYSFS
/*
 * Functions for bd_claim_by_kobject / bd_release_from_kobject
 *
 *     If a kobject is passed to bd_claim_by_kobject()
 *     and the kobject has a parent directory,
 *     the following symlinks are created:
 *        o from the kobject to the claimed bdev
 *        o from the "holders" directory of the bdev to the parent of the kobject
 *     bd_release_from_kobject() removes these symlinks.
 *
 *     Example:
 *        If /dev/dm-0 maps to /dev/sda, and the kobject corresponding to
 *        /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
 *           /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *           /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 */

static int add_symlink(struct kobject *from, struct kobject *to)
{
        if (!from || !to)
                return 0;
        return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
        if (!from || !to)
                return;
        sysfs_remove_link(from, kobject_name(to));
}

/*
 * 'struct bd_holder' contains pointers to kobjects symlinked by
 * bd_claim_by_kobject.
 * It's connected to bd_holder_list which is protected by bdev->bd_mutex.
 */
struct bd_holder {
        struct list_head list;  /* chain of holders of the bdev */
        int count;              /* references from the holder */
        struct kobject *sdir;   /* holder object, e.g. "/block/dm-0/slaves" */
        struct kobject *hdev;   /* e.g. "/block/dm-0" */
        struct kobject *hdir;   /* e.g. "/block/sda/holders" */
        struct kobject *sdev;   /* e.g. "/block/sda" */
};

/*
 * Get references to the related kobjects at once.
 * Returns 1 on success, 0 on failure.
 *
 * The caller should call bd_holder_release_dirs() after successful use.
 */
static int bd_holder_grab_dirs(struct block_device *bdev,
                        struct bd_holder *bo)
{
        if (!bdev || !bo)
                return 0;

        bo->sdir = kobject_get(bo->sdir);
        if (!bo->sdir)
                return 0;

        bo->hdev = kobject_get(bo->sdir->parent);
        if (!bo->hdev)
                goto fail_put_sdir;

        bo->sdev = kobject_get(&part_to_dev(bdev->bd_part)->kobj);
        if (!bo->sdev)
                goto fail_put_hdev;

        bo->hdir = kobject_get(bdev->bd_part->holder_dir);
        if (!bo->hdir)
                goto fail_put_sdev;

        return 1;

fail_put_sdev:
        kobject_put(bo->sdev);
fail_put_hdev:
        kobject_put(bo->hdev);
fail_put_sdir:
        kobject_put(bo->sdir);

        return 0;
}

/* Put references of related kobjects at once. */
static void bd_holder_release_dirs(struct bd_holder *bo)
{
        kobject_put(bo->hdir);
        kobject_put(bo->sdev);
        kobject_put(bo->hdev);
        kobject_put(bo->sdir);
}

static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
{
        struct bd_holder *bo;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return NULL;

        bo->count = 1;
        bo->sdir = kobj;

        return bo;
}

static void free_bd_holder(struct bd_holder *bo)
{
        kfree(bo);
}

/**
 * find_bd_holder - find matching struct bd_holder from the block device
 *
 * @bdev:       struct block device to be searched
 * @bo:         target struct bd_holder
 *
 * Returns the matching entry with @bo in @bdev->bd_holder_list.
 * If found, increments the reference count and returns the pointer.
 * If not found, returns NULL.
 */
static struct bd_holder *find_bd_holder(struct block_device *bdev,
                                        struct bd_holder *bo)
{
        struct bd_holder *tmp;

        list_for_each_entry(tmp, &bdev->bd_holder_list, list)
                if (tmp->sdir == bo->sdir) {
                        tmp->count++;
                        return tmp;
                }

        return NULL;
}

/**
 * add_bd_holder - create sysfs symlinks for bd_claim() relationship
 *
 * @bdev:       block device to be bd_claimed
 * @bo:         preallocated and initialized by alloc_bd_holder()
 *
 * Add @bo to @bdev->bd_holder_list and create the symlinks.
 *
 * Returns 0 if the symlinks are created.
 * Returns a negative errno if something fails.
 */
static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
{
        int err;

        if (!bo)
                return -EINVAL;

        if (!bd_holder_grab_dirs(bdev, bo))
                return -EBUSY;

        err = add_symlink(bo->sdir, bo->sdev);
        if (err)
                return err;

        err = add_symlink(bo->hdir, bo->hdev);
        if (err) {
                del_symlink(bo->sdir, bo->sdev);
                return err;
        }

        list_add_tail(&bo->list, &bdev->bd_holder_list);
        return 0;
}

/**
 * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
 *
 * @bdev:       block device to be bd_claimed
 * @kobj:       holder's kobject
 *
 * If there is a matching entry with @kobj in @bdev->bd_holder_list
 * and no other bd_claim() from the same kobject,
 * remove the struct bd_holder from the list and delete its symlinks.
 *
 * Returns a pointer to the struct bd_holder when it's removed from the list
 * and ready to be freed.
 * Returns NULL if no matching claim is found or there is another bd_claim()
 * by the same kobject.
 */
static struct bd_holder *del_bd_holder(struct block_device *bdev,
                                        struct kobject *kobj)
{
        struct bd_holder *bo;

        list_for_each_entry(bo, &bdev->bd_holder_list, list) {
                if (bo->sdir == kobj) {
                        bo->count--;
                        BUG_ON(bo->count < 0);
                        if (!bo->count) {
                                list_del(&bo->list);
                                del_symlink(bo->sdir, bo->sdev);
                                del_symlink(bo->hdir, bo->hdev);
                                bd_holder_release_dirs(bo);
                                return bo;
                        }
                        break;
                }
        }

        return NULL;
}

/**
 * bd_claim_by_kobject - bd_claim() with additional kobject signature
 *
 * @bdev:       block device to be claimed
 * @holder:     holder's signature
 * @kobj:       holder's kobject
 *
 * Do bd_claim() and if it succeeds, create sysfs symlinks between
 * the bdev and the holder's kobject.
 * Use bd_release_from_kobject() when releasing the claimed bdev.
 *
 * Returns 0 on success (same as bd_claim()).
 * Returns errno on failure.
 */
static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
                                struct kobject *kobj)
{
        int err;
        struct bd_holder *bo, *found;

        if (!kobj)
                return -EINVAL;

        bo = alloc_bd_holder(kobj);
        if (!bo)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);

        err = bd_claim(bdev, holder);
        if (err)
                goto fail;

        found = find_bd_holder(bdev, bo);
        if (found)
                goto fail;

        err = add_bd_holder(bdev, bo);
        if (err)
                bd_release(bdev);
        else
                bo = NULL;
fail:
        mutex_unlock(&bdev->bd_mutex);
        free_bd_holder(bo);
        return err;
}

/**
 * bd_release_from_kobject - bd_release() with additional kobject signature
 *
 * @bdev:       block device to be released
 * @kobj:       holder's kobject
 *
 * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
 */
static void bd_release_from_kobject(struct block_device *bdev,
                                        struct kobject *kobj)
{
        if (!kobj)
                return;

        mutex_lock(&bdev->bd_mutex);
        bd_release(bdev);
        free_bd_holder(del_bd_holder(bdev, kobj));
        mutex_unlock(&bdev->bd_mutex);
}

/**
 * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
 *
 * @bdev:       block device to be claimed
 * @holder:     holder's signature
 * @disk:       holder's gendisk
 *
 * Call bd_claim_by_kobject() after getting a reference to @disk->slave_dir.
 */
int bd_claim_by_disk(struct block_device *bdev, void *holder,
                        struct gendisk *disk)
{
        return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
}
EXPORT_SYMBOL_GPL(bd_claim_by_disk);

/**
 * bd_release_from_disk - wrapper function for bd_release_from_kobject()
 *
 * @bdev:       block device to be released
 * @disk:       holder's gendisk
 *
 * Call bd_release_from_kobject() and put @disk->slave_dir.
 */
void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
{
        bd_release_from_kobject(bdev, disk->slave_dir);
        kobject_put(disk->slave_dir);
}
EXPORT_SYMBOL_GPL(bd_release_from_disk);
#endif

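/*
 * Illustrative sketch (kept under #if 0, CONFIG_SYSFS only): a stacking
 * driver in the dm/md mould claims its component device so the
 * "slaves"/"holders" symlinks described above appear.  Names here are
 * hypothetical.
 */
#if 0
static int example_add_component(struct gendisk *upper,
                                 struct block_device *lower, void *ctx)
{
        int err = bd_claim_by_disk(lower, ctx, upper);

        if (err)
                return err;
        /* ... lower is now a claimed component of upper ... */
        bd_release_from_disk(lower, upper);
        return 0;
}
#endif
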
/*
 * Tries to open a block device by device number.  Use it ONLY if you
 * really do not have anything better - i.e. when you are behind a
 * truly sucky interface and all you are given is a device number.  _Never_
 * to be used for internal purposes.  If you ever need it - reconsider
 * your API.
 */
struct block_device *open_by_devnum(dev_t dev, fmode_t mode)
{
        struct block_device *bdev = bdget(dev);
        int err = -ENOMEM;
        if (bdev)
                err = blkdev_get(bdev, mode);
        return err ? ERR_PTR(err) : bdev;
}

EXPORT_SYMBOL(open_by_devnum);

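/*
 * Illustrative sketch (kept under #if 0): per the warning above, a last
 * resort for code that is handed nothing but a dev_t.  "example_open_by_nr"
 * is hypothetical.
 */
#if 0
static int example_open_by_nr(dev_t devt)
{
        struct block_device *bdev = open_by_devnum(devt, FMODE_READ);

        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
        /* ... read-only access to bdev ... */
        blkdev_put(bdev, FMODE_READ);
        return 0;
}
#endif
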
/**
 * flush_disk - invalidates all buffer-cache entries on a disk
 *
 * @bdev:      struct block device to be flushed
 *
 * Invalidates all buffer-cache entries on a disk. It should be called
 * when a disk has been changed -- either by a media change or online
 * resize.
 */
static void flush_disk(struct block_device *bdev)
{
        if (__invalidate_device(bdev)) {
                char name[BDEVNAME_SIZE] = "";

                if (bdev->bd_disk)
                        disk_name(bdev->bd_disk, 0, name);
                printk(KERN_WARNING "VFS: busy inodes on changed media or "
                       "resized disk %s\n", name);
        }

        if (!bdev->bd_disk)
                return;
        if (disk_partitionable(bdev->bd_disk))
                bdev->bd_invalidated = 1;
}

/**
 * check_disk_size_change - checks for disk size change and adjusts bdev size.
 * @disk: struct gendisk to check
 * @bdev: struct bdev to adjust.
 *
 * This routine checks whether the bdev size matches the disk size and
 * adjusts it if it differs.
 */
void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
{
        loff_t disk_size, bdev_size;

        disk_size = (loff_t)get_capacity(disk) << 9;
        bdev_size = i_size_read(bdev->bd_inode);
        if (disk_size != bdev_size) {
                char name[BDEVNAME_SIZE];

                disk_name(disk, 0, name);
                printk(KERN_INFO
                       "%s: detected capacity change from %lld to %lld\n",
                       name, bdev_size, disk_size);
                i_size_write(bdev->bd_inode, disk_size);
                flush_disk(bdev);
        }
}
EXPORT_SYMBOL(check_disk_size_change);

/**
 * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back
 * @disk: struct gendisk to be revalidated
 *
 * This routine is a wrapper for lower-level driver's revalidate_disk
 * call-backs.  It is used to do common pre and post operations needed
 * for all revalidate_disk operations.
 */
int revalidate_disk(struct gendisk *disk)
{
        struct block_device *bdev;
        int ret = 0;

        if (disk->fops->revalidate_disk)
                ret = disk->fops->revalidate_disk(disk);

        bdev = bdget_disk(disk, 0);
        if (!bdev)
                return ret;

        mutex_lock(&bdev->bd_mutex);
        check_disk_size_change(disk, bdev);
        mutex_unlock(&bdev->bd_mutex);
        bdput(bdev);
        return ret;
}
EXPORT_SYMBOL(revalidate_disk);

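/*
 * Illustrative sketch (kept under #if 0): a driver that learns of an
 * online resize updates the gendisk capacity and lets revalidate_disk()
 * propagate it to the bdev inode via check_disk_size_change().
 * "example_resize" is hypothetical.
 */
#if 0
static void example_resize(struct gendisk *disk, sector_t new_sectors)
{
        set_capacity(disk, new_sectors);
        revalidate_disk(disk);
}
#endif
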
/*
 * This routine checks whether a removable medium has been changed,
 * and invalidates all buffer-cache entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
        struct gendisk *disk = bdev->bd_disk;
        const struct block_device_operations *bdops = disk->fops;

        if (!bdops->media_changed)
                return 0;
        if (!bdops->media_changed(bdev->bd_disk))
                return 0;

        flush_disk(bdev);
        if (bdops->revalidate_disk)
                bdops->revalidate_disk(bdev->bd_disk);
        return 1;
}

EXPORT_SYMBOL(check_disk_change);

void bd_set_size(struct block_device *bdev, loff_t size)
{
        unsigned bsize = bdev_logical_block_size(bdev);

        bdev->bd_inode->i_size = size;
        while (bsize < PAGE_CACHE_SIZE) {
                if (size & bsize)
                        break;
                bsize <<= 1;
        }
        bdev->bd_block_size = bsize;
        bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);
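
/*
 * Worked example for the loop above, assuming a 512-byte logical block
 * size: for size == 7168 (14 sectors), bsize grows 512 -> 1024 and then
 * stops because 7168 & 1024 is non-zero, i.e. 1024 is the largest power
 * of two that still divides the size (7168 = 7 * 1024), so i_blkbits
 * becomes 10.
 */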

static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);

/*
 * bd_mutex locking:
 *
 *  mutex_lock(part->bd_mutex)
 *    mutex_lock_nested(whole->bd_mutex, 1)
 */

static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
{
        struct gendisk *disk;
        int ret;
        int partno;
        int perm = 0;

        if (mode & FMODE_READ)
                perm |= MAY_READ;
        if (mode & FMODE_WRITE)
                perm |= MAY_WRITE;
        /*
         * hooks: /n/, see "layering violations".
         */
        ret = devcgroup_inode_permission(bdev->bd_inode, perm);
        if (ret != 0) {
                bdput(bdev);
                return ret;
        }

        lock_kernel();
 restart:

        ret = -ENXIO;
        disk = get_gendisk(bdev->bd_dev, &partno);
        if (!disk)
                goto out_unlock_kernel;

        mutex_lock_nested(&bdev->bd_mutex, for_part);
        if (!bdev->bd_openers) {
                bdev->bd_disk = disk;
                bdev->bd_contains = bdev;
                if (!partno) {
                        struct backing_dev_info *bdi;

                        ret = -ENXIO;
                        bdev->bd_part = disk_get_part(disk, partno);
                        if (!bdev->bd_part)
                                goto out_clear;

                        if (disk->fops->open) {
                                ret = disk->fops->open(bdev, mode);
                                if (ret == -ERESTARTSYS) {
                                        /* Lost a race with 'disk' being
                                         * deleted, try again.
                                         * See md.c
                                         */
                                        disk_put_part(bdev->bd_part);
                                        bdev->bd_part = NULL;
                                        module_put(disk->fops->owner);
                                        put_disk(disk);
                                        bdev->bd_disk = NULL;
                                        mutex_unlock(&bdev->bd_mutex);
                                        goto restart;
                                }
                                if (ret)
                                        goto out_clear;
                        }
                        if (!bdev->bd_openers) {
                                bd_set_size(bdev, (loff_t)get_capacity(disk) << 9);
                                bdi = blk_get_backing_dev_info(bdev);
                                if (bdi == NULL)
                                        bdi = &default_backing_dev_info;
                                bdev->bd_inode->i_data.backing_dev_info = bdi;
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(disk, bdev);
                } else {
                        struct block_device *whole;
                        whole = bdget_disk(disk, 0);
                        ret = -ENOMEM;
                        if (!whole)
                                goto out_clear;
                        BUG_ON(for_part);
                        ret = __blkdev_get(whole, mode, 1);
                        if (ret)
                                goto out_clear;
                        bdev->bd_contains = whole;
                        bdev->bd_inode->i_data.backing_dev_info =
                           whole->bd_inode->i_data.backing_dev_info;
                        bdev->bd_part = disk_get_part(disk, partno);
                        if (!(disk->flags & GENHD_FL_UP) ||
                            !bdev->bd_part || !bdev->bd_part->nr_sects) {
                                ret = -ENXIO;
                                goto out_clear;
                        }
                        bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
                }
        } else {
                module_put(disk->fops->owner);
                put_disk(disk);
                disk = NULL;
                if (bdev->bd_contains == bdev) {
                        if (bdev->bd_disk->fops->open) {
                                ret = bdev->bd_disk->fops->open(bdev, mode);
                                if (ret)
                                        goto out_unlock_bdev;
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(bdev->bd_disk, bdev);
                }
        }
        bdev->bd_openers++;
        if (for_part)
                bdev->bd_part_count++;
        mutex_unlock(&bdev->bd_mutex);
        unlock_kernel();
        return 0;

 out_clear:
        disk_put_part(bdev->bd_part);
        bdev->bd_disk = NULL;
        bdev->bd_part = NULL;
        bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
        if (bdev != bdev->bd_contains)
                __blkdev_put(bdev->bd_contains, mode, 1);
        bdev->bd_contains = NULL;
 out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
 out_unlock_kernel:
        unlock_kernel();

        if (disk)
                module_put(disk->fops->owner);
        put_disk(disk);
        bdput(bdev);

        return ret;
}

int blkdev_get(struct block_device *bdev, fmode_t mode)
{
        return __blkdev_get(bdev, mode, 0);
}
EXPORT_SYMBOL(blkdev_get);

static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;
        int res;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly. Some mkfs
         * binaries need it. We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;

        if (filp->f_flags & O_NDELAY)
                filp->f_mode |= FMODE_NDELAY;
        if (filp->f_flags & O_EXCL)
                filp->f_mode |= FMODE_EXCL;
        if ((filp->f_flags & O_ACCMODE) == 3)
                filp->f_mode |= FMODE_WRITE_IOCTL;

        bdev = bd_acquire(inode);
        if (bdev == NULL)
                return -ENOMEM;

        filp->f_mapping = bdev->bd_inode->i_mapping;

        res = blkdev_get(bdev, filp->f_mode);
        if (res)
                return res;

        if (filp->f_mode & FMODE_EXCL) {
                res = bd_claim(bdev, filp);
                if (res)
                        goto out_blkdev_put;
        }

        return 0;

 out_blkdev_put:
        blkdev_put(bdev, filp->f_mode);
        return res;
}

static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
{
        int ret = 0;
        struct gendisk *disk = bdev->bd_disk;
        struct block_device *victim = NULL;

        mutex_lock_nested(&bdev->bd_mutex, for_part);
        lock_kernel();
        if (for_part)
                bdev->bd_part_count--;

        if (!--bdev->bd_openers) {
                sync_blockdev(bdev);
                kill_bdev(bdev);
        }
        if (bdev->bd_contains == bdev) {
                if (disk->fops->release)
                        ret = disk->fops->release(disk, mode);
        }
        if (!bdev->bd_openers) {
                struct module *owner = disk->fops->owner;

                put_disk(disk);
                module_put(owner);
                disk_put_part(bdev->bd_part);
                bdev->bd_part = NULL;
                bdev->bd_disk = NULL;
                bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
                if (bdev != bdev->bd_contains)
                        victim = bdev->bd_contains;
                bdev->bd_contains = NULL;
        }
        unlock_kernel();
        mutex_unlock(&bdev->bd_mutex);
        bdput(bdev);
        if (victim)
                __blkdev_put(victim, mode, 1);
        return ret;
}

int blkdev_put(struct block_device *bdev, fmode_t mode)
{
        return __blkdev_put(bdev, mode, 0);
}
EXPORT_SYMBOL(blkdev_put);

static int blkdev_close(struct inode *inode, struct file *filp)
{
        struct block_device *bdev = I_BDEV(filp->f_mapping->host);
        if (bdev->bd_holder == filp)
                bd_release(bdev);
        return blkdev_put(bdev, filp->f_mode);
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
        struct block_device *bdev = I_BDEV(file->f_mapping->host);
        fmode_t mode = file->f_mode;

        /*
         * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
         * to update it before every ioctl.
         */
        if (file->f_flags & O_NDELAY)
                mode |= FMODE_NDELAY;
        else
                mode &= ~FMODE_NDELAY;

        return blkdev_ioctl(bdev, mode, cmd, arg);
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
                         unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        ssize_t ret;

        BUG_ON(iocb->ki_pos != pos);

        ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
        if (ret > 0 || ret == -EIOCBQUEUED) {
                ssize_t err;

                err = generic_write_sync(file, pos, ret);
                if (err < 0 && ret > 0)
                        ret = err;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(blkdev_aio_write);

/*
 * Try to release a page associated with a block device when the system
 * is under memory pressure.
 */
static int blkdev_releasepage(struct page *page, gfp_t wait)
{
        struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;

        if (super && super->s_op->bdev_try_to_free_page)
                return super->s_op->bdev_try_to_free_page(super, page, wait);

        return try_to_free_buffers(page);
}

static const struct address_space_operations def_blk_aops = {
        .readpage       = blkdev_readpage,
        .writepage      = blkdev_writepage,
        .sync_page      = block_sync_page,
        .write_begin    = blkdev_write_begin,
        .write_end      = blkdev_write_end,
        .writepages     = generic_writepages,
        .releasepage    = blkdev_releasepage,
        .direct_IO      = blkdev_direct_IO,
};

const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = block_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = blkdev_aio_write,
        .mmap           = generic_file_mmap,
        .fsync          = block_fsync,
        .unlocked_ioctl = block_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
};

int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
        int res;
        mm_segment_t old_fs = get_fs();
        set_fs(KERNEL_DS);
        res = blkdev_ioctl(bdev, 0, cmd, arg);
        set_fs(old_fs);
        return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);

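/*
 * Illustrative sketch (kept under #if 0): in-kernel callers pass kernel
 * pointers as the ioctl argument; the set_fs(KERNEL_DS) dance above is
 * what makes the ioctl's user-copy accept them.  Assumes <linux/hdreg.h>
 * for struct hd_geometry; "example_geometry" is hypothetical.
 */
#if 0
static int example_geometry(struct block_device *bdev, struct hd_geometry *geo)
{
        return ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo);
}
#endif
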
/**
 * lookup_bdev  - lookup a struct block_device by name
 * @pathname:   special file representing the block device
 *
 * Get a reference to the blockdevice at @pathname in the current
 * namespace if possible and return it.  Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *pathname)
{
        struct block_device *bdev;
        struct inode *inode;
        struct path path;
        int error;

        if (!pathname || !*pathname)
                return ERR_PTR(-EINVAL);

        error = kern_path(pathname, LOOKUP_FOLLOW, &path);
        if (error)
                return ERR_PTR(error);

        inode = path.dentry->d_inode;
        error = -ENOTBLK;
        if (!S_ISBLK(inode->i_mode))
                goto fail;
        error = -EACCES;
        if (path.mnt->mnt_flags & MNT_NODEV)
                goto fail;
        error = -ENOMEM;
        bdev = bd_acquire(inode);
        if (!bdev)
                goto fail;
out:
        path_put(&path);
        return bdev;
fail:
        bdev = ERR_PTR(error);
        goto out;
}
EXPORT_SYMBOL(lookup_bdev);

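/*
 * Illustrative sketch (kept under #if 0): resolving a device path the
 * way mount-like code does.  The reference returned by lookup_bdev()
 * must be dropped with bdput().  "example_stat_bdev" is hypothetical.
 */
#if 0
static int example_stat_bdev(const char *path)
{
        struct block_device *bdev = lookup_bdev(path);

        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
        /* ... e.g. look at i_size_read(bdev->bd_inode) ... */
        bdput(bdev);
        return 0;
}
#endif
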
/**
 * open_bdev_exclusive  -  open a block device by name and set it up for use
 *
 * @path:       special file representing the block device
 * @mode:       FMODE_... combination to be used
 * @holder:     owner for exclusion
 *
 * Open the blockdevice described by the special file at @path and claim it
 * for @holder.
 */
struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder)
{
        struct block_device *bdev;
        int error = 0;

        bdev = lookup_bdev(path);
        if (IS_ERR(bdev))
                return bdev;

        error = blkdev_get(bdev, mode);
        if (error)
                return ERR_PTR(error);
        error = -EACCES;
        if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
                goto blkdev_put;
        error = bd_claim(bdev, holder);
        if (error)
                goto blkdev_put;

        return bdev;

blkdev_put:
        blkdev_put(bdev, mode);
        return ERR_PTR(error);
}

EXPORT_SYMBOL(open_bdev_exclusive);

/**
 * close_bdev_exclusive  -  close a blockdevice opened by open_bdev_exclusive()
 *
 * @bdev:       blockdevice to close
 * @mode:       mode, must match that used to open.
 *
 * This is the counterpart to open_bdev_exclusive().
 */
void close_bdev_exclusive(struct block_device *bdev, fmode_t mode)
{
        bd_release(bdev);
        blkdev_put(bdev, mode);
}

EXPORT_SYMBOL(close_bdev_exclusive);

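/*
 * Illustrative sketch (kept under #if 0): the pair above is what
 * mount-time code uses - e.g. get_sb_bdev() opens the device with the
 * file_system_type as @holder.  "example_use_device" is hypothetical.
 */
#if 0
static int example_use_device(const char *path, fmode_t mode, void *holder)
{
        struct block_device *bdev = open_bdev_exclusive(path, mode, holder);

        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
        /* ... exclusive I/O against bdev ... */
        close_bdev_exclusive(bdev, mode);       /* mode must match the open */
        return 0;
}
#endif
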
int __invalidate_device(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        int res = 0;

        if (sb) {
                /*
                 * no need to lock the super, get_super holds the
                 * read mutex so the filesystem cannot go away
                 * under us (->put_super runs with the write lock
                 * held).
                 */
                shrink_dcache_sb(sb);
                res = invalidate_inodes(sb);
                drop_super(sb);
        }
        invalidate_bdev(bdev);
        return res;
}
EXPORT_SYMBOL(__invalidate_device);