linux/block/genhd.c
   1/*
   2 *  gendisk handling
   3 */
   4
   5#include <linux/module.h>
   6#include <linux/fs.h>
   7#include <linux/genhd.h>
   8#include <linux/kdev_t.h>
   9#include <linux/kernel.h>
  10#include <linux/blkdev.h>
  11#include <linux/backing-dev.h>
  12#include <linux/init.h>
  13#include <linux/spinlock.h>
  14#include <linux/proc_fs.h>
  15#include <linux/seq_file.h>
  16#include <linux/slab.h>
  17#include <linux/kmod.h>
  18#include <linux/kobj_map.h>
  19#include <linux/mutex.h>
  20#include <linux/idr.h>
  21#include <linux/log2.h>
  22#include <linux/pm_runtime.h>
  23#include <linux/badblocks.h>
  24
  25#include "blk.h"
  26
  27static DEFINE_MUTEX(block_class_lock);
  28struct kobject *block_depr;
  29
  30/* for extended dynamic devt allocation, currently only one major is used */
  31#define NR_EXT_DEVT             (1 << MINORBITS)
  32
  33/* For extended devt allocation.  ext_devt_lock prevents look up
  34 * results from going away underneath its user.
  35 */
  36static DEFINE_SPINLOCK(ext_devt_lock);
  37static DEFINE_IDR(ext_devt_idr);
  38
  39static const struct device_type disk_type;
  40
  41static void disk_check_events(struct disk_events *ev,
  42                              unsigned int *clearing_ptr);
  43static void disk_alloc_events(struct gendisk *disk);
  44static void disk_add_events(struct gendisk *disk);
  45static void disk_del_events(struct gendisk *disk);
  46static void disk_release_events(struct gendisk *disk);
  47
  48void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
  49{
  50        if (q->mq_ops)
  51                return;
  52
  53        atomic_inc(&part->in_flight[rw]);
  54        if (part->partno)
  55                atomic_inc(&part_to_disk(part)->part0.in_flight[rw]);
  56}
  57
  58void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
  59{
  60        if (q->mq_ops)
  61                return;
  62
  63        atomic_dec(&part->in_flight[rw]);
  64        if (part->partno)
  65                atomic_dec(&part_to_disk(part)->part0.in_flight[rw]);
  66}
  67
  68void part_in_flight(struct request_queue *q, struct hd_struct *part,
  69                    unsigned int inflight[2])
  70{
  71        if (q->mq_ops) {
  72                blk_mq_in_flight(q, part, inflight);
  73                return;
  74        }
  75
  76        inflight[0] = atomic_read(&part->in_flight[0]) +
  77                        atomic_read(&part->in_flight[1]);
  78        if (part->partno) {
  79                part = &part_to_disk(part)->part0;
  80                inflight[1] = atomic_read(&part->in_flight[0]) +
  81                                atomic_read(&part->in_flight[1]);
  82        }
  83}
  84
  85struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
  86{
  87        struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
  88
  89        if (unlikely(partno < 0 || partno >= ptbl->len))
  90                return NULL;
  91        return rcu_dereference(ptbl->part[partno]);
  92}
  93
  94/**
  95 * disk_get_part - get partition
   96 * @disk: disk to look up the partition from
  97 * @partno: partition number
  98 *
  99 * Look for partition @partno from @disk.  If found, increment
 100 * reference count and return it.
 101 *
 102 * CONTEXT:
 103 * Don't care.
 104 *
 105 * RETURNS:
 106 * Pointer to the found partition on success, NULL if not found.
 107 */
 108struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
 109{
 110        struct hd_struct *part;
 111
 112        rcu_read_lock();
 113        part = __disk_get_part(disk, partno);
 114        if (part)
 115                get_device(part_to_dev(part));
 116        rcu_read_unlock();
 117
 118        return part;
 119}
 120EXPORT_SYMBOL_GPL(disk_get_part);
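
/*
 * Illustrative usage sketch (hypothetical caller, assuming "disk" is a valid
 * struct gendisk pointer held by the caller): the reference taken by
 * disk_get_part() is dropped with disk_put_part() from <linux/genhd.h>, and
 * the hd_struct must not be dereferenced after that.
 *
 *	struct hd_struct *part = disk_get_part(disk, 1);
 *
 *	if (part) {
 *		pr_info("partition 1 starts at sector %llu\n",
 *			(unsigned long long)part->start_sect);
 *		disk_put_part(part);
 *	}
 */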
 121
 122/**
 123 * disk_part_iter_init - initialize partition iterator
 124 * @piter: iterator to initialize
 125 * @disk: disk to iterate over
 126 * @flags: DISK_PITER_* flags
 127 *
 128 * Initialize @piter so that it iterates over partitions of @disk.
 129 *
 130 * CONTEXT:
 131 * Don't care.
 132 */
 133void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
 134                          unsigned int flags)
 135{
 136        struct disk_part_tbl *ptbl;
 137
 138        rcu_read_lock();
 139        ptbl = rcu_dereference(disk->part_tbl);
 140
 141        piter->disk = disk;
 142        piter->part = NULL;
 143
 144        if (flags & DISK_PITER_REVERSE)
 145                piter->idx = ptbl->len - 1;
 146        else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
 147                piter->idx = 0;
 148        else
 149                piter->idx = 1;
 150
 151        piter->flags = flags;
 152
 153        rcu_read_unlock();
 154}
 155EXPORT_SYMBOL_GPL(disk_part_iter_init);
 156
 157/**
 158 * disk_part_iter_next - proceed iterator to the next partition and return it
 159 * @piter: iterator of interest
 160 *
 161 * Proceed @piter to the next partition and return it.
 162 *
 163 * CONTEXT:
 164 * Don't care.
 165 */
 166struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
 167{
 168        struct disk_part_tbl *ptbl;
 169        int inc, end;
 170
 171        /* put the last partition */
 172        disk_put_part(piter->part);
 173        piter->part = NULL;
 174
 175        /* get part_tbl */
 176        rcu_read_lock();
 177        ptbl = rcu_dereference(piter->disk->part_tbl);
 178
 179        /* determine iteration parameters */
 180        if (piter->flags & DISK_PITER_REVERSE) {
 181                inc = -1;
 182                if (piter->flags & (DISK_PITER_INCL_PART0 |
 183                                    DISK_PITER_INCL_EMPTY_PART0))
 184                        end = -1;
 185                else
 186                        end = 0;
 187        } else {
 188                inc = 1;
 189                end = ptbl->len;
 190        }
 191
 192        /* iterate to the next partition */
 193        for (; piter->idx != end; piter->idx += inc) {
 194                struct hd_struct *part;
 195
 196                part = rcu_dereference(ptbl->part[piter->idx]);
 197                if (!part)
 198                        continue;
 199                if (!part_nr_sects_read(part) &&
 200                    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
 201                    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
 202                      piter->idx == 0))
 203                        continue;
 204
 205                get_device(part_to_dev(part));
 206                piter->part = part;
 207                piter->idx += inc;
 208                break;
 209        }
 210
 211        rcu_read_unlock();
 212
 213        return piter->part;
 214}
 215EXPORT_SYMBOL_GPL(disk_part_iter_next);
 216
 217/**
 218 * disk_part_iter_exit - finish up partition iteration
 219 * @piter: iter of interest
 220 *
 221 * Called when iteration is over.  Cleans up @piter.
 222 *
 223 * CONTEXT:
 224 * Don't care.
 225 */
 226void disk_part_iter_exit(struct disk_part_iter *piter)
 227{
 228        disk_put_part(piter->part);
 229        piter->part = NULL;
 230}
 231EXPORT_SYMBOL_GPL(disk_part_iter_exit);
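
/*
 * Illustrative usage sketch (hypothetical caller): the three iterator helpers
 * above are normally used together, assuming "disk" is a valid struct gendisk
 * pointer:
 *
 *	struct disk_part_iter piter;
 *	struct hd_struct *part;
 *
 *	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
 *	while ((part = disk_part_iter_next(&piter)))
 *		pr_info("%s: partno %d\n", disk->disk_name, part->partno);
 *	disk_part_iter_exit(&piter);
 *
 * Each _next() call drops the reference on the previously returned partition
 * and takes one on the partition it returns, so no extra put is needed inside
 * the loop body.
 */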
 232
 233static inline int sector_in_part(struct hd_struct *part, sector_t sector)
 234{
 235        return part->start_sect <= sector &&
 236                sector < part->start_sect + part_nr_sects_read(part);
 237}
 238
 239/**
 240 * disk_map_sector_rcu - map sector to partition
 241 * @disk: gendisk of interest
 242 * @sector: sector to map
 243 *
 244 * Find out which partition @sector maps to on @disk.  This is
 245 * primarily used for stats accounting.
 246 *
 247 * CONTEXT:
 248 * RCU read locked.  The returned partition pointer is valid only
 249 * while preemption is disabled.
 250 *
 251 * RETURNS:
 252 * Found partition on success, part0 is returned if no partition matches
 253 */
 254struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
 255{
 256        struct disk_part_tbl *ptbl;
 257        struct hd_struct *part;
 258        int i;
 259
 260        ptbl = rcu_dereference(disk->part_tbl);
 261
 262        part = rcu_dereference(ptbl->last_lookup);
 263        if (part && sector_in_part(part, sector))
 264                return part;
 265
 266        for (i = 1; i < ptbl->len; i++) {
 267                part = rcu_dereference(ptbl->part[i]);
 268
 269                if (part && sector_in_part(part, sector)) {
 270                        rcu_assign_pointer(ptbl->last_lookup, part);
 271                        return part;
 272                }
 273        }
 274        return &disk->part0;
 275}
 276EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
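
/*
 * Illustrative usage sketch (hypothetical caller, with "disk" and "sector"
 * supplied by the caller): per the CONTEXT note above, the lookup must be
 * bracketed by an RCU read-side critical section and the returned pointer is
 * only stable inside it.
 *
 *	struct hd_struct *part;
 *	int partno;
 *
 *	rcu_read_lock();
 *	part = disk_map_sector_rcu(disk, sector);
 *	partno = part->partno;
 *	rcu_read_unlock();
 *
 * partno ends up as 0 (part0) when no partition covers the sector.
 */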
 277
 278/*
 279 * Can be deleted altogether. Later.
 280 *
 281 */
 282#define BLKDEV_MAJOR_HASH_SIZE 255
 283static struct blk_major_name {
 284        struct blk_major_name *next;
 285        int major;
 286        char name[16];
 287} *major_names[BLKDEV_MAJOR_HASH_SIZE];
 288
 289/* index in the above - for now: assume no multimajor ranges */
 290static inline int major_to_index(unsigned major)
 291{
 292        return major % BLKDEV_MAJOR_HASH_SIZE;
 293}
 294
 295#ifdef CONFIG_PROC_FS
 296void blkdev_show(struct seq_file *seqf, off_t offset)
 297{
 298        struct blk_major_name *dp;
 299
 300        mutex_lock(&block_class_lock);
 301        for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
 302                if (dp->major == offset)
 303                        seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
 304        mutex_unlock(&block_class_lock);
 305}
 306#endif /* CONFIG_PROC_FS */
 307
 308/**
 309 * register_blkdev - register a new block device
 310 *
 311 * @major: the requested major device number [1..255]. If @major = 0, try to
 312 *         allocate any unused major number.
 313 * @name: the name of the new block device as a zero terminated string
 314 *
 315 * The @name must be unique within the system.
 316 *
 317 * The return value depends on the @major input parameter:
 318 *
 319 *  - if a major device number was requested in range [1..255] then the
 320 *    function returns zero on success, or a negative error code
 321 *  - if any unused major number was requested with @major = 0 parameter
 322 *    then the return value is the allocated major number in range
 323 *    [1..255] or a negative error code otherwise
 324 */
 325int register_blkdev(unsigned int major, const char *name)
 326{
 327        struct blk_major_name **n, *p;
 328        int index, ret = 0;
 329
 330        mutex_lock(&block_class_lock);
 331
 332        /* temporary */
 333        if (major == 0) {
 334                for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
 335                        if (major_names[index] == NULL)
 336                                break;
 337                }
 338
 339                if (index == 0) {
 340                        printk("register_blkdev: failed to get major for %s\n",
 341                               name);
 342                        ret = -EBUSY;
 343                        goto out;
 344                }
 345                major = index;
 346                ret = major;
 347        }
 348
 349        if (major >= BLKDEV_MAJOR_MAX) {
 350                pr_err("register_blkdev: major requested (%d) is greater than the maximum (%d) for %s\n",
 351                       major, BLKDEV_MAJOR_MAX, name);
 352
 353                ret = -EINVAL;
 354                goto out;
 355        }
 356
 357        p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
 358        if (p == NULL) {
 359                ret = -ENOMEM;
 360                goto out;
 361        }
 362
 363        p->major = major;
 364        strlcpy(p->name, name, sizeof(p->name));
 365        p->next = NULL;
 366        index = major_to_index(major);
 367
 368        for (n = &major_names[index]; *n; n = &(*n)->next) {
 369                if ((*n)->major == major)
 370                        break;
 371        }
 372        if (!*n)
 373                *n = p;
 374        else
 375                ret = -EBUSY;
 376
 377        if (ret < 0) {
 378                printk("register_blkdev: cannot get major %d for %s\n",
 379                       major, name);
 380                kfree(p);
 381        }
 382out:
 383        mutex_unlock(&block_class_lock);
 384        return ret;
 385}
 386
 387EXPORT_SYMBOL(register_blkdev);
 388
 389void unregister_blkdev(unsigned int major, const char *name)
 390{
 391        struct blk_major_name **n;
 392        struct blk_major_name *p = NULL;
 393        int index = major_to_index(major);
 394
 395        mutex_lock(&block_class_lock);
 396        for (n = &major_names[index]; *n; n = &(*n)->next)
 397                if ((*n)->major == major)
 398                        break;
 399        if (!*n || strcmp((*n)->name, name)) {
 400                WARN_ON(1);
 401        } else {
 402                p = *n;
 403                *n = p->next;
 404        }
 405        mutex_unlock(&block_class_lock);
 406        kfree(p);
 407}
 408
 409EXPORT_SYMBOL(unregister_blkdev);
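
/*
 * Illustrative usage sketch (hypothetical module, the name "exampleblk" and
 * the init/exit functions are placeholders): claiming and releasing a
 * dynamically allocated major.
 *
 *	static int example_major;
 *
 *	static int __init example_init(void)
 *	{
 *		example_major = register_blkdev(0, "exampleblk");
 *		if (example_major < 0)
 *			return example_major;
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_blkdev(example_major, "exampleblk");
 *	}
 *
 * With @major == 0 the return value is the allocated major number; requesting
 * a fixed major in [1..255] instead returns 0 on success.
 */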
 410
 411static struct kobj_map *bdev_map;
 412
 413/**
 414 * blk_mangle_minor - scatter minor numbers apart
 415 * @minor: minor number to mangle
 416 *
  417 * Scatter consecutively allocated @minor numbers apart if
  418 * CONFIG_DEBUG_BLOCK_EXT_DEVT is enabled.  Mangling twice gives the original value.
 419 *
 420 * RETURNS:
 421 * Mangled value.
 422 *
 423 * CONTEXT:
 424 * Don't care.
 425 */
 426static int blk_mangle_minor(int minor)
 427{
 428#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
 429        int i;
 430
 431        for (i = 0; i < MINORBITS / 2; i++) {
 432                int low = minor & (1 << i);
 433                int high = minor & (1 << (MINORBITS - 1 - i));
 434                int distance = MINORBITS - 1 - 2 * i;
 435
 436                minor ^= low | high;    /* clear both bits */
 437                low <<= distance;       /* swap the positions */
 438                high >>= distance;
 439                minor |= low | high;    /* and set */
 440        }
 441#endif
 442        return minor;
 443}
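
/*
 * Illustrative worked example: with MINORBITS == 20 the loop above swaps
 * bit i with bit (19 - i) for i = 0..9, i.e. it reverses the 20-bit minor
 * (only when CONFIG_DEBUG_BLOCK_EXT_DEVT is enabled; otherwise the minor is
 * returned unchanged).  For example, in hex:
 *
 *	blk_mangle_minor(0x00001) == 0x80000
 *	blk_mangle_minor(0x80000) == 0x00001
 *
 * which also shows that mangling is its own inverse.
 */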
 444
 445/**
 446 * blk_alloc_devt - allocate a dev_t for a partition
 447 * @part: partition to allocate dev_t for
 448 * @devt: out parameter for resulting dev_t
 449 *
  450 * Allocate a dev_t for a block device partition.
 451 *
 452 * RETURNS:
 453 * 0 on success, allocated dev_t is returned in *@devt.  -errno on
 454 * failure.
 455 *
 456 * CONTEXT:
 457 * Might sleep.
 458 */
 459int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
 460{
 461        struct gendisk *disk = part_to_disk(part);
 462        int idx;
 463
 464        /* in consecutive minor range? */
 465        if (part->partno < disk->minors) {
 466                *devt = MKDEV(disk->major, disk->first_minor + part->partno);
 467                return 0;
 468        }
 469
 470        /* allocate ext devt */
 471        idr_preload(GFP_KERNEL);
 472
 473        spin_lock_bh(&ext_devt_lock);
 474        idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
 475        spin_unlock_bh(&ext_devt_lock);
 476
 477        idr_preload_end();
 478        if (idx < 0)
 479                return idx == -ENOSPC ? -EBUSY : idx;
 480
 481        *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
 482        return 0;
 483}
 484
 485/**
 486 * blk_free_devt - free a dev_t
 487 * @devt: dev_t to free
 488 *
 489 * Free @devt which was allocated using blk_alloc_devt().
 490 *
 491 * CONTEXT:
 492 * Might sleep.
 493 */
 494void blk_free_devt(dev_t devt)
 495{
 496        if (devt == MKDEV(0, 0))
 497                return;
 498
 499        if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
 500                spin_lock_bh(&ext_devt_lock);
 501                idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
 502                spin_unlock_bh(&ext_devt_lock);
 503        }
 504}
 505
 506static char *bdevt_str(dev_t devt, char *buf)
 507{
 508        if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
 509                char tbuf[BDEVT_SIZE];
 510                snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
 511                snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
 512        } else
 513                snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));
 514
 515        return buf;
 516}
 517
 518/*
 519 * Register device numbers dev..(dev+range-1)
 520 * range must be nonzero
 521 * The hash chain is sorted on range, so that subranges can override.
 522 */
 523void blk_register_region(dev_t devt, unsigned long range, struct module *module,
 524                         struct kobject *(*probe)(dev_t, int *, void *),
 525                         int (*lock)(dev_t, void *), void *data)
 526{
 527        kobj_map(bdev_map, devt, range, module, probe, lock, data);
 528}
 529
 530EXPORT_SYMBOL(blk_register_region);
 531
 532void blk_unregister_region(dev_t devt, unsigned long range)
 533{
 534        kobj_unmap(bdev_map, devt, range);
 535}
 536
 537EXPORT_SYMBOL(blk_unregister_region);
 538
 539static struct kobject *exact_match(dev_t devt, int *partno, void *data)
 540{
 541        struct gendisk *p = data;
 542
 543        return &disk_to_dev(p)->kobj;
 544}
 545
 546static int exact_lock(dev_t devt, void *data)
 547{
 548        struct gendisk *p = data;
 549
 550        if (!get_disk(p))
 551                return -1;
 552        return 0;
 553}
 554
 555static void register_disk(struct device *parent, struct gendisk *disk)
 556{
 557        struct device *ddev = disk_to_dev(disk);
 558        struct block_device *bdev;
 559        struct disk_part_iter piter;
 560        struct hd_struct *part;
 561        int err;
 562
 563        ddev->parent = parent;
 564
 565        dev_set_name(ddev, "%s", disk->disk_name);
 566
  567        /* delay uevents until we have scanned the partition table */
 568        dev_set_uevent_suppress(ddev, 1);
 569
 570        if (device_add(ddev))
 571                return;
 572        if (!sysfs_deprecated) {
 573                err = sysfs_create_link(block_depr, &ddev->kobj,
 574                                        kobject_name(&ddev->kobj));
 575                if (err) {
 576                        device_del(ddev);
 577                        return;
 578                }
 579        }
 580
 581        /*
  582         * avoid a probable deadlock caused by allocating memory with
  583         * GFP_KERNEL in the runtime_resume callback of all of its
  584         * ancestor devices
 585         */
 586        pm_runtime_set_memalloc_noio(ddev, true);
 587
 588        disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
 589        disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
 590
 591        if (disk->flags & GENHD_FL_HIDDEN) {
 592                dev_set_uevent_suppress(ddev, 0);
 593                return;
 594        }
 595
 596        /* No minors to use for partitions */
 597        if (!disk_part_scan_enabled(disk))
 598                goto exit;
 599
 600        /* No such device (e.g., media were just removed) */
 601        if (!get_capacity(disk))
 602                goto exit;
 603
 604        bdev = bdget_disk(disk, 0);
 605        if (!bdev)
 606                goto exit;
 607
 608        bdev->bd_invalidated = 1;
 609        err = blkdev_get(bdev, FMODE_READ, NULL);
 610        if (err < 0)
 611                goto exit;
 612        blkdev_put(bdev, FMODE_READ);
 613
 614exit:
 615        /* announce disk after possible partitions are created */
 616        dev_set_uevent_suppress(ddev, 0);
 617        kobject_uevent(&ddev->kobj, KOBJ_ADD);
 618
 619        /* announce possible partitions */
 620        disk_part_iter_init(&piter, disk, 0);
 621        while ((part = disk_part_iter_next(&piter)))
 622                kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
 623        disk_part_iter_exit(&piter);
 624
 625        err = sysfs_create_link(&ddev->kobj,
 626                                &disk->queue->backing_dev_info->dev->kobj,
 627                                "bdi");
 628        WARN_ON(err);
 629}
 630
 631/**
 632 * device_add_disk - add partitioning information to kernel list
 633 * @parent: parent device for the disk
 634 * @disk: per-device partitioning information
 635 *
 636 * This function registers the partitioning information in @disk
 637 * with the kernel.
 638 *
 639 * FIXME: error handling
 640 */
 641void device_add_disk(struct device *parent, struct gendisk *disk)
 642{
 643        dev_t devt;
 644        int retval;
 645
  646        /* minors == 0 indicates that ext devt allocation from part0 is used
  647         * and should be accompanied by the GENHD_FL_EXT_DEVT flag.  Make sure all
 648         * parameters make sense.
 649         */
 650        WARN_ON(disk->minors && !(disk->major || disk->first_minor));
 651        WARN_ON(!disk->minors &&
 652                !(disk->flags & (GENHD_FL_EXT_DEVT | GENHD_FL_HIDDEN)));
 653
 654        disk->flags |= GENHD_FL_UP;
 655
 656        retval = blk_alloc_devt(&disk->part0, &devt);
 657        if (retval) {
 658                WARN_ON(1);
 659                return;
 660        }
 661        disk->major = MAJOR(devt);
 662        disk->first_minor = MINOR(devt);
 663
 664        disk_alloc_events(disk);
 665
 666        if (disk->flags & GENHD_FL_HIDDEN) {
 667                /*
 668                 * Don't let hidden disks show up in /proc/partitions,
 669                 * and don't bother scanning for partitions either.
 670                 */
 671                disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
 672                disk->flags |= GENHD_FL_NO_PART_SCAN;
 673        } else {
 674                int ret;
 675
 676                /* Register BDI before referencing it from bdev */
 677                disk_to_dev(disk)->devt = devt;
 678                ret = bdi_register_owner(disk->queue->backing_dev_info,
 679                                                disk_to_dev(disk));
 680                WARN_ON(ret);
 681                blk_register_region(disk_devt(disk), disk->minors, NULL,
 682                                    exact_match, exact_lock, disk);
 683        }
 684        register_disk(parent, disk);
 685        blk_register_queue(disk);
 686
 687        /*
 688         * Take an extra ref on queue which will be put on disk_release()
 689         * so that it sticks around as long as @disk is there.
 690         */
 691        WARN_ON_ONCE(!blk_get_queue(disk->queue));
 692
 693        disk_add_events(disk);
 694        blk_integrity_add(disk);
 695}
 696EXPORT_SYMBOL(device_add_disk);
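
/*
 * Illustrative usage sketch (hypothetical bio-based driver probe path; the
 * names example_major, example_fops, example_make_request, nr_sectors and
 * parent are placeholders): the usual registration sequence leading up to
 * device_add_disk().
 *
 *	struct request_queue *q;
 *	struct gendisk *disk;
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_make_request(q, example_make_request);
 *
 *	disk = alloc_disk(1);
 *	if (!disk) {
 *		blk_cleanup_queue(q);
 *		return -ENOMEM;
 *	}
 *	disk->major = example_major;
 *	disk->first_minor = 0;
 *	disk->fops = &example_fops;
 *	disk->queue = q;
 *	snprintf(disk->disk_name, sizeof(disk->disk_name), "example0");
 *	set_capacity(disk, nr_sectors);
 *	device_add_disk(parent, disk);
 *
 * Teardown is the mirror image: del_gendisk(), blk_cleanup_queue() and
 * put_disk().
 */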
 697
 698void del_gendisk(struct gendisk *disk)
 699{
 700        struct disk_part_iter piter;
 701        struct hd_struct *part;
 702
 703        blk_integrity_del(disk);
 704        disk_del_events(disk);
 705
 706        /* invalidate stuff */
 707        disk_part_iter_init(&piter, disk,
 708                             DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
 709        while ((part = disk_part_iter_next(&piter))) {
 710                invalidate_partition(disk, part->partno);
 711                bdev_unhash_inode(part_devt(part));
 712                delete_partition(disk, part->partno);
 713        }
 714        disk_part_iter_exit(&piter);
 715
 716        invalidate_partition(disk, 0);
 717        bdev_unhash_inode(disk_devt(disk));
 718        set_capacity(disk, 0);
 719        disk->flags &= ~GENHD_FL_UP;
 720
 721        if (!(disk->flags & GENHD_FL_HIDDEN))
 722                sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
 723        if (disk->queue) {
 724                /*
 725                 * Unregister bdi before releasing device numbers (as they can
 726                 * get reused and we'd get clashes in sysfs).
 727                 */
 728                bdi_unregister(disk->queue->backing_dev_info);
 729                blk_unregister_queue(disk);
 730        } else {
 731                WARN_ON(1);
 732        }
 733
 734        if (!(disk->flags & GENHD_FL_HIDDEN))
 735                blk_unregister_region(disk_devt(disk), disk->minors);
 736
 737        kobject_put(disk->part0.holder_dir);
 738        kobject_put(disk->slave_dir);
 739
 740        part_stat_set_all(&disk->part0, 0);
 741        disk->part0.stamp = 0;
 742        if (!sysfs_deprecated)
 743                sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
 744        pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
 745        device_del(disk_to_dev(disk));
 746}
 747EXPORT_SYMBOL(del_gendisk);
 748
 749/* sysfs access to bad-blocks list. */
 750static ssize_t disk_badblocks_show(struct device *dev,
 751                                        struct device_attribute *attr,
 752                                        char *page)
 753{
 754        struct gendisk *disk = dev_to_disk(dev);
 755
 756        if (!disk->bb)
 757                return sprintf(page, "\n");
 758
 759        return badblocks_show(disk->bb, page, 0);
 760}
 761
 762static ssize_t disk_badblocks_store(struct device *dev,
 763                                        struct device_attribute *attr,
 764                                        const char *page, size_t len)
 765{
 766        struct gendisk *disk = dev_to_disk(dev);
 767
 768        if (!disk->bb)
 769                return -ENXIO;
 770
 771        return badblocks_store(disk->bb, page, len, 0);
 772}
 773
 774/**
 775 * get_gendisk - get partitioning information for a given device
 776 * @devt: device to get partitioning information for
 777 * @partno: returned partition index
 778 *
 779 * This function gets the structure containing partitioning
 780 * information for the given device @devt.
 781 */
 782struct gendisk *get_gendisk(dev_t devt, int *partno)
 783{
 784        struct gendisk *disk = NULL;
 785
 786        if (MAJOR(devt) != BLOCK_EXT_MAJOR) {
 787                struct kobject *kobj;
 788
 789                kobj = kobj_lookup(bdev_map, devt, partno);
 790                if (kobj)
 791                        disk = dev_to_disk(kobj_to_dev(kobj));
 792        } else {
 793                struct hd_struct *part;
 794
 795                spin_lock_bh(&ext_devt_lock);
 796                part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
 797                if (part && get_disk(part_to_disk(part))) {
 798                        *partno = part->partno;
 799                        disk = part_to_disk(part);
 800                }
 801                spin_unlock_bh(&ext_devt_lock);
 802        }
 803
 804        if (disk && unlikely(disk->flags & GENHD_FL_HIDDEN)) {
 805                put_disk(disk);
 806                disk = NULL;
 807        }
 808        return disk;
 809}
 810EXPORT_SYMBOL(get_gendisk);
 811
 812/**
 813 * bdget_disk - do bdget() by gendisk and partition number
 814 * @disk: gendisk of interest
 815 * @partno: partition number
 816 *
 817 * Find partition @partno from @disk, do bdget() on it.
 818 *
 819 * CONTEXT:
 820 * Don't care.
 821 *
 822 * RETURNS:
 823 * Resulting block_device on success, NULL on failure.
 824 */
 825struct block_device *bdget_disk(struct gendisk *disk, int partno)
 826{
 827        struct hd_struct *part;
 828        struct block_device *bdev = NULL;
 829
 830        part = disk_get_part(disk, partno);
 831        if (part)
 832                bdev = bdget(part_devt(part));
 833        disk_put_part(part);
 834
 835        return bdev;
 836}
 837EXPORT_SYMBOL(bdget_disk);
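
/*
 * Illustrative usage sketch (hypothetical caller, assuming "disk" is a live
 * gendisk): the block_device returned by bdget_disk() is released with
 * bdput().  register_disk() above uses the same helper to open the whole-disk
 * device for the initial partition scan.
 *
 *	struct block_device *bdev = bdget_disk(disk, 0);
 *
 *	if (bdev) {
 *		pr_info("whole-disk devt %u:%u\n",
 *			MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
 *		bdput(bdev);
 *	}
 */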
 838
 839/*
 840 * print a full list of all partitions - intended for places where the root
 841 * filesystem can't be mounted and thus to give the victim some idea of what
 842 * went wrong
 843 */
 844void __init printk_all_partitions(void)
 845{
 846        struct class_dev_iter iter;
 847        struct device *dev;
 848
 849        class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 850        while ((dev = class_dev_iter_next(&iter))) {
 851                struct gendisk *disk = dev_to_disk(dev);
 852                struct disk_part_iter piter;
 853                struct hd_struct *part;
 854                char name_buf[BDEVNAME_SIZE];
 855                char devt_buf[BDEVT_SIZE];
 856
 857                /*
 858                 * Don't show empty devices or things that have been
 859                 * suppressed
 860                 */
 861                if (get_capacity(disk) == 0 ||
 862                    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
 863                        continue;
 864
 865                /*
 866                 * Note, unlike /proc/partitions, I am showing the
 867                 * numbers in hex - the same format as the root=
 868                 * option takes.
 869                 */
 870                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 871                while ((part = disk_part_iter_next(&piter))) {
 872                        bool is_part0 = part == &disk->part0;
 873
 874                        printk("%s%s %10llu %s %s", is_part0 ? "" : "  ",
 875                               bdevt_str(part_devt(part), devt_buf),
 876                               (unsigned long long)part_nr_sects_read(part) >> 1
 877                               , disk_name(disk, part->partno, name_buf),
 878                               part->info ? part->info->uuid : "");
 879                        if (is_part0) {
 880                                if (dev->parent && dev->parent->driver)
 881                                        printk(" driver: %s\n",
 882                                              dev->parent->driver->name);
 883                                else
 884                                        printk(" (driver?)\n");
 885                        } else
 886                                printk("\n");
 887                }
 888                disk_part_iter_exit(&piter);
 889        }
 890        class_dev_iter_exit(&iter);
 891}
 892
 893#ifdef CONFIG_PROC_FS
 894/* iterator */
 895static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
 896{
 897        loff_t skip = *pos;
 898        struct class_dev_iter *iter;
 899        struct device *dev;
 900
 901        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
 902        if (!iter)
 903                return ERR_PTR(-ENOMEM);
 904
 905        seqf->private = iter;
 906        class_dev_iter_init(iter, &block_class, NULL, &disk_type);
 907        do {
 908                dev = class_dev_iter_next(iter);
 909                if (!dev)
 910                        return NULL;
 911        } while (skip--);
 912
 913        return dev_to_disk(dev);
 914}
 915
 916static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
 917{
 918        struct device *dev;
 919
 920        (*pos)++;
 921        dev = class_dev_iter_next(seqf->private);
 922        if (dev)
 923                return dev_to_disk(dev);
 924
 925        return NULL;
 926}
 927
 928static void disk_seqf_stop(struct seq_file *seqf, void *v)
 929{
 930        struct class_dev_iter *iter = seqf->private;
 931
 932        /* stop is called even after start failed :-( */
 933        if (iter) {
 934                class_dev_iter_exit(iter);
 935                kfree(iter);
 936                seqf->private = NULL;
 937        }
 938}
 939
 940static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
 941{
 942        void *p;
 943
 944        p = disk_seqf_start(seqf, pos);
 945        if (!IS_ERR_OR_NULL(p) && !*pos)
 946                seq_puts(seqf, "major minor  #blocks  name\n\n");
 947        return p;
 948}
 949
 950static int show_partition(struct seq_file *seqf, void *v)
 951{
 952        struct gendisk *sgp = v;
 953        struct disk_part_iter piter;
 954        struct hd_struct *part;
 955        char buf[BDEVNAME_SIZE];
 956
  957        /* Don't show non-partitionable removable devices or empty devices */
 958        if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
 959                                   (sgp->flags & GENHD_FL_REMOVABLE)))
 960                return 0;
 961        if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
 962                return 0;
 963
 964        /* show the full disk and all non-0 size partitions of it */
 965        disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
 966        while ((part = disk_part_iter_next(&piter)))
 967                seq_printf(seqf, "%4d  %7d %10llu %s\n",
 968                           MAJOR(part_devt(part)), MINOR(part_devt(part)),
 969                           (unsigned long long)part_nr_sects_read(part) >> 1,
 970                           disk_name(sgp, part->partno, buf));
 971        disk_part_iter_exit(&piter);
 972
 973        return 0;
 974}
 975
 976static const struct seq_operations partitions_op = {
 977        .start  = show_partition_start,
 978        .next   = disk_seqf_next,
 979        .stop   = disk_seqf_stop,
 980        .show   = show_partition
 981};
 982
 983static int partitions_open(struct inode *inode, struct file *file)
 984{
 985        return seq_open(file, &partitions_op);
 986}
 987
 988static const struct file_operations proc_partitions_operations = {
 989        .open           = partitions_open,
 990        .read           = seq_read,
 991        .llseek         = seq_lseek,
 992        .release        = seq_release,
 993};
 994#endif
 995
 996
 997static struct kobject *base_probe(dev_t devt, int *partno, void *data)
 998{
 999        if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
1000                /* Make old-style 2.4 aliases work */
1001                request_module("block-major-%d", MAJOR(devt));
1002        return NULL;
1003}
1004
1005static int __init genhd_device_init(void)
1006{
1007        int error;
1008
1009        block_class.dev_kobj = sysfs_dev_block_kobj;
1010        error = class_register(&block_class);
1011        if (unlikely(error))
1012                return error;
1013        bdev_map = kobj_map_init(base_probe, &block_class_lock);
1014        blk_dev_init();
1015
1016        register_blkdev(BLOCK_EXT_MAJOR, "blkext");
1017
1018        /* create top-level block dir */
1019        if (!sysfs_deprecated)
1020                block_depr = kobject_create_and_add("block", NULL);
1021        return 0;
1022}
1023
1024subsys_initcall(genhd_device_init);
1025
1026static ssize_t disk_range_show(struct device *dev,
1027                               struct device_attribute *attr, char *buf)
1028{
1029        struct gendisk *disk = dev_to_disk(dev);
1030
1031        return sprintf(buf, "%d\n", disk->minors);
1032}
1033
1034static ssize_t disk_ext_range_show(struct device *dev,
1035                                   struct device_attribute *attr, char *buf)
1036{
1037        struct gendisk *disk = dev_to_disk(dev);
1038
1039        return sprintf(buf, "%d\n", disk_max_parts(disk));
1040}
1041
1042static ssize_t disk_removable_show(struct device *dev,
1043                                   struct device_attribute *attr, char *buf)
1044{
1045        struct gendisk *disk = dev_to_disk(dev);
1046
1047        return sprintf(buf, "%d\n",
1048                       (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
1049}
1050
1051static ssize_t disk_hidden_show(struct device *dev,
1052                                   struct device_attribute *attr, char *buf)
1053{
1054        struct gendisk *disk = dev_to_disk(dev);
1055
1056        return sprintf(buf, "%d\n",
1057                       (disk->flags & GENHD_FL_HIDDEN ? 1 : 0));
1058}
1059
1060static ssize_t disk_ro_show(struct device *dev,
1061                                   struct device_attribute *attr, char *buf)
1062{
1063        struct gendisk *disk = dev_to_disk(dev);
1064
1065        return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
1066}
1067
1068static ssize_t disk_capability_show(struct device *dev,
1069                                    struct device_attribute *attr, char *buf)
1070{
1071        struct gendisk *disk = dev_to_disk(dev);
1072
1073        return sprintf(buf, "%x\n", disk->flags);
1074}
1075
1076static ssize_t disk_alignment_offset_show(struct device *dev,
1077                                          struct device_attribute *attr,
1078                                          char *buf)
1079{
1080        struct gendisk *disk = dev_to_disk(dev);
1081
1082        return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
1083}
1084
1085static ssize_t disk_discard_alignment_show(struct device *dev,
1086                                           struct device_attribute *attr,
1087                                           char *buf)
1088{
1089        struct gendisk *disk = dev_to_disk(dev);
1090
1091        return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
1092}
1093
1094static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
1095static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
1096static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
1097static DEVICE_ATTR(hidden, S_IRUGO, disk_hidden_show, NULL);
1098static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
1099static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
1100static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
1101static DEVICE_ATTR(discard_alignment, S_IRUGO, disk_discard_alignment_show,
1102                   NULL);
1103static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
1104static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
1105static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
1106static DEVICE_ATTR(badblocks, S_IRUGO | S_IWUSR, disk_badblocks_show,
1107                disk_badblocks_store);
1108#ifdef CONFIG_FAIL_MAKE_REQUEST
1109static struct device_attribute dev_attr_fail =
1110        __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
1111#endif
1112#ifdef CONFIG_FAIL_IO_TIMEOUT
1113static struct device_attribute dev_attr_fail_timeout =
1114        __ATTR(io-timeout-fail,  S_IRUGO|S_IWUSR, part_timeout_show,
1115                part_timeout_store);
1116#endif
1117
1118static struct attribute *disk_attrs[] = {
1119        &dev_attr_range.attr,
1120        &dev_attr_ext_range.attr,
1121        &dev_attr_removable.attr,
1122        &dev_attr_hidden.attr,
1123        &dev_attr_ro.attr,
1124        &dev_attr_size.attr,
1125        &dev_attr_alignment_offset.attr,
1126        &dev_attr_discard_alignment.attr,
1127        &dev_attr_capability.attr,
1128        &dev_attr_stat.attr,
1129        &dev_attr_inflight.attr,
1130        &dev_attr_badblocks.attr,
1131#ifdef CONFIG_FAIL_MAKE_REQUEST
1132        &dev_attr_fail.attr,
1133#endif
1134#ifdef CONFIG_FAIL_IO_TIMEOUT
1135        &dev_attr_fail_timeout.attr,
1136#endif
1137        NULL
1138};
1139
1140static umode_t disk_visible(struct kobject *kobj, struct attribute *a, int n)
1141{
1142        struct device *dev = container_of(kobj, typeof(*dev), kobj);
1143        struct gendisk *disk = dev_to_disk(dev);
1144
1145        if (a == &dev_attr_badblocks.attr && !disk->bb)
1146                return 0;
1147        return a->mode;
1148}
1149
1150static struct attribute_group disk_attr_group = {
1151        .attrs = disk_attrs,
1152        .is_visible = disk_visible,
1153};
1154
1155static const struct attribute_group *disk_attr_groups[] = {
1156        &disk_attr_group,
1157        NULL
1158};
1159
1160/**
1161 * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way
1162 * @disk: disk to replace part_tbl for
1163 * @new_ptbl: new part_tbl to install
1164 *
1165 * Replace disk->part_tbl with @new_ptbl in RCU-safe way.  The
1166 * original ptbl is freed using RCU callback.
1167 *
1168 * LOCKING:
1169 * Matching bd_mutex locked or the caller is the only user of @disk.
1170 */
1171static void disk_replace_part_tbl(struct gendisk *disk,
1172                                  struct disk_part_tbl *new_ptbl)
1173{
1174        struct disk_part_tbl *old_ptbl =
1175                rcu_dereference_protected(disk->part_tbl, 1);
1176
1177        rcu_assign_pointer(disk->part_tbl, new_ptbl);
1178
1179        if (old_ptbl) {
1180                rcu_assign_pointer(old_ptbl->last_lookup, NULL);
1181                kfree_rcu(old_ptbl, rcu_head);
1182        }
1183}
1184
1185/**
1186 * disk_expand_part_tbl - expand disk->part_tbl
1187 * @disk: disk to expand part_tbl for
1188 * @partno: expand such that this partno can fit in
1189 *
1190 * Expand disk->part_tbl such that @partno can fit in.  disk->part_tbl
1191 * uses RCU to allow unlocked dereferencing for stats and other stuff.
1192 *
1193 * LOCKING:
1194 * Matching bd_mutex locked or the caller is the only user of @disk.
1195 * Might sleep.
1196 *
1197 * RETURNS:
1198 * 0 on success, -errno on failure.
1199 */
1200int disk_expand_part_tbl(struct gendisk *disk, int partno)
1201{
1202        struct disk_part_tbl *old_ptbl =
1203                rcu_dereference_protected(disk->part_tbl, 1);
1204        struct disk_part_tbl *new_ptbl;
1205        int len = old_ptbl ? old_ptbl->len : 0;
1206        int i, target;
1207        size_t size;
1208
1209        /*
1210         * check for int overflow, since we can get here from blkpg_ioctl()
1211         * with a user passed 'partno'.
1212         */
1213        target = partno + 1;
1214        if (target < 0)
1215                return -EINVAL;
1216
1217        /* disk_max_parts() is zero during initialization, ignore if so */
1218        if (disk_max_parts(disk) && target > disk_max_parts(disk))
1219                return -EINVAL;
1220
1221        if (target <= len)
1222                return 0;
1223
1224        size = sizeof(*new_ptbl) + target * sizeof(new_ptbl->part[0]);
1225        new_ptbl = kzalloc_node(size, GFP_KERNEL, disk->node_id);
1226        if (!new_ptbl)
1227                return -ENOMEM;
1228
1229        new_ptbl->len = target;
1230
1231        for (i = 0; i < len; i++)
1232                rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
1233
1234        disk_replace_part_tbl(disk, new_ptbl);
1235        return 0;
1236}
1237
1238static void disk_release(struct device *dev)
1239{
1240        struct gendisk *disk = dev_to_disk(dev);
1241
1242        blk_free_devt(dev->devt);
1243        disk_release_events(disk);
1244        kfree(disk->random);
1245        disk_replace_part_tbl(disk, NULL);
1246        hd_free_part(&disk->part0);
1247        if (disk->queue)
1248                blk_put_queue(disk->queue);
1249        kfree(disk);
1250}
1251struct class block_class = {
1252        .name           = "block",
1253};
1254
1255static char *block_devnode(struct device *dev, umode_t *mode,
1256                           kuid_t *uid, kgid_t *gid)
1257{
1258        struct gendisk *disk = dev_to_disk(dev);
1259
1260        if (disk->devnode)
1261                return disk->devnode(disk, mode);
1262        return NULL;
1263}
1264
1265static const struct device_type disk_type = {
1266        .name           = "disk",
1267        .groups         = disk_attr_groups,
1268        .release        = disk_release,
1269        .devnode        = block_devnode,
1270};
1271
1272#ifdef CONFIG_PROC_FS
1273/*
1274 * aggregate disk stat collector.  Uses the same stats that the sysfs
1275 * entries do, above, but makes them available through one seq_file.
1276 *
1277 * The output looks suspiciously like /proc/partitions with a bunch of
1278 * extra fields.
1279 */
1280static int diskstats_show(struct seq_file *seqf, void *v)
1281{
1282        struct gendisk *gp = v;
1283        struct disk_part_iter piter;
1284        struct hd_struct *hd;
1285        char buf[BDEVNAME_SIZE];
1286        unsigned int inflight[2];
1287        int cpu;
1288
1289        /*
1290        if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
1291                seq_puts(seqf,  "major minor name"
1292                                "     rio rmerge rsect ruse wio wmerge "
1293                                "wsect wuse running use aveq"
1294                                "\n\n");
1295        */
1296
1297        disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
1298        while ((hd = disk_part_iter_next(&piter))) {
1299                cpu = part_stat_lock();
1300                part_round_stats(gp->queue, cpu, hd);
1301                part_stat_unlock();
1302                part_in_flight(gp->queue, hd, inflight);
1303                seq_printf(seqf, "%4d %7d %s %lu %lu %lu "
1304                           "%u %lu %lu %lu %u %u %u %u\n",
1305                           MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
1306                           disk_name(gp, hd->partno, buf),
1307                           part_stat_read(hd, ios[READ]),
1308                           part_stat_read(hd, merges[READ]),
1309                           part_stat_read(hd, sectors[READ]),
1310                           jiffies_to_msecs(part_stat_read(hd, ticks[READ])),
1311                           part_stat_read(hd, ios[WRITE]),
1312                           part_stat_read(hd, merges[WRITE]),
1313                           part_stat_read(hd, sectors[WRITE]),
1314                           jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])),
1315                           inflight[0],
1316                           jiffies_to_msecs(part_stat_read(hd, io_ticks)),
1317                           jiffies_to_msecs(part_stat_read(hd, time_in_queue))
1318                        );
1319        }
1320        disk_part_iter_exit(&piter);
1321
1322        return 0;
1323}
1324
1325static const struct seq_operations diskstats_op = {
1326        .start  = disk_seqf_start,
1327        .next   = disk_seqf_next,
1328        .stop   = disk_seqf_stop,
1329        .show   = diskstats_show
1330};
1331
1332static int diskstats_open(struct inode *inode, struct file *file)
1333{
1334        return seq_open(file, &diskstats_op);
1335}
1336
1337static const struct file_operations proc_diskstats_operations = {
1338        .open           = diskstats_open,
1339        .read           = seq_read,
1340        .llseek         = seq_lseek,
1341        .release        = seq_release,
1342};
1343
1344static int __init proc_genhd_init(void)
1345{
1346        proc_create("diskstats", 0, NULL, &proc_diskstats_operations);
1347        proc_create("partitions", 0, NULL, &proc_partitions_operations);
1348        return 0;
1349}
1350module_init(proc_genhd_init);
1351#endif /* CONFIG_PROC_FS */
1352
1353dev_t blk_lookup_devt(const char *name, int partno)
1354{
1355        dev_t devt = MKDEV(0, 0);
1356        struct class_dev_iter iter;
1357        struct device *dev;
1358
1359        class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
1360        while ((dev = class_dev_iter_next(&iter))) {
1361                struct gendisk *disk = dev_to_disk(dev);
1362                struct hd_struct *part;
1363
1364                if (strcmp(dev_name(dev), name))
1365                        continue;
1366
1367                if (partno < disk->minors) {
1368                        /* We need to return the right devno, even
1369                         * if the partition doesn't exist yet.
1370                         */
1371                        devt = MKDEV(MAJOR(dev->devt),
1372                                     MINOR(dev->devt) + partno);
1373                        break;
1374                }
1375                part = disk_get_part(disk, partno);
1376                if (part) {
1377                        devt = part_devt(part);
1378                        disk_put_part(part);
1379                        break;
1380                }
1381                disk_put_part(part);
1382        }
1383        class_dev_iter_exit(&iter);
1384        return devt;
1385}
1386EXPORT_SYMBOL(blk_lookup_devt);
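
/*
 * Illustrative usage sketch (assuming a disk registered under the name "sda"):
 * this helper backs early root= name resolution via name_to_dev_t().
 *
 *	dev_t devt = blk_lookup_devt("sda", 1);
 *
 *	if (devt == MKDEV(0, 0))
 *		pr_warn("no device found for sda, partition 1\n");
 */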
1387
1388struct gendisk *__alloc_disk_node(int minors, int node_id)
1389{
1390        struct gendisk *disk;
1391        struct disk_part_tbl *ptbl;
1392
1393        if (minors > DISK_MAX_PARTS) {
1394                printk(KERN_ERR
1395                        "block: can't allocate more than %d partitions\n",
1396                        DISK_MAX_PARTS);
1397                minors = DISK_MAX_PARTS;
1398        }
1399
1400        disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
1401        if (disk) {
1402                if (!init_part_stats(&disk->part0)) {
1403                        kfree(disk);
1404                        return NULL;
1405                }
1406                disk->node_id = node_id;
1407                if (disk_expand_part_tbl(disk, 0)) {
1408                        free_part_stats(&disk->part0);
1409                        kfree(disk);
1410                        return NULL;
1411                }
1412                ptbl = rcu_dereference_protected(disk->part_tbl, 1);
1413                rcu_assign_pointer(ptbl->part[0], &disk->part0);
1414
1415                /*
1416                 * set_capacity() and get_capacity() currently don't use
1417                 * seqcounter to read/update the part0->nr_sects. Still init
1418                 * the counter as we can read the sectors in IO submission
 1419                 * path using sequence counters.
1420                 *
1421                 * TODO: Ideally set_capacity() and get_capacity() should be
1422                 * converted to make use of bd_mutex and sequence counters.
1423                 */
1424                seqcount_init(&disk->part0.nr_sects_seq);
1425                if (hd_ref_init(&disk->part0)) {
1426                        hd_free_part(&disk->part0);
1427                        kfree(disk);
1428                        return NULL;
1429                }
1430
1431                disk->minors = minors;
1432                rand_initialize_disk(disk);
1433                disk_to_dev(disk)->class = &block_class;
1434                disk_to_dev(disk)->type = &disk_type;
1435                device_initialize(disk_to_dev(disk));
1436        }
1437        return disk;
1438}
1439EXPORT_SYMBOL(__alloc_disk_node);
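
/*
 * Illustrative usage sketch: drivers normally reach this through the
 * alloc_disk()/alloc_disk_node() wrappers in <linux/genhd.h>.  The minors
 * value covers the whole-disk node plus its partitions:
 *
 *	struct gendisk *disk = alloc_disk(16);
 *
 *	if (!disk)
 *		return -ENOMEM;
 *
 * Here minor 0 is the whole disk and minors 1..15 are available for
 * partitions in the classic (non-extended) devt range.
 */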
1440
1441struct kobject *get_disk(struct gendisk *disk)
1442{
1443        struct module *owner;
1444        struct kobject *kobj;
1445
1446        if (!disk->fops)
1447                return NULL;
1448        owner = disk->fops->owner;
1449        if (owner && !try_module_get(owner))
1450                return NULL;
1451        kobj = kobject_get_unless_zero(&disk_to_dev(disk)->kobj);
1452        if (kobj == NULL) {
1453                module_put(owner);
1454                return NULL;
1455        }
1456        return kobj;
1457
1458}
1459
1460EXPORT_SYMBOL(get_disk);
1461
1462void put_disk(struct gendisk *disk)
1463{
1464        if (disk)
1465                kobject_put(&disk_to_dev(disk)->kobj);
1466}
1467
1468EXPORT_SYMBOL(put_disk);
1469
1470static void set_disk_ro_uevent(struct gendisk *gd, int ro)
1471{
1472        char event[] = "DISK_RO=1";
1473        char *envp[] = { event, NULL };
1474
1475        if (!ro)
1476                event[8] = '0';
1477        kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
1478}
1479
1480void set_device_ro(struct block_device *bdev, int flag)
1481{
1482        bdev->bd_part->policy = flag;
1483}
1484
1485EXPORT_SYMBOL(set_device_ro);
1486
1487void set_disk_ro(struct gendisk *disk, int flag)
1488{
1489        struct disk_part_iter piter;
1490        struct hd_struct *part;
1491
1492        if (disk->part0.policy != flag) {
1493                set_disk_ro_uevent(disk, flag);
1494                disk->part0.policy = flag;
1495        }
1496
1497        disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
1498        while ((part = disk_part_iter_next(&piter)))
1499                part->policy = flag;
1500        disk_part_iter_exit(&piter);
1501}
1502
1503EXPORT_SYMBOL(set_disk_ro);
1504
1505int bdev_read_only(struct block_device *bdev)
1506{
1507        if (!bdev)
1508                return 0;
1509        return bdev->bd_part->policy;
1510}
1511
1512EXPORT_SYMBOL(bdev_read_only);
1513
1514int invalidate_partition(struct gendisk *disk, int partno)
1515{
1516        int res = 0;
1517        struct block_device *bdev = bdget_disk(disk, partno);
1518        if (bdev) {
1519                fsync_bdev(bdev);
1520                res = __invalidate_device(bdev, true);
1521                bdput(bdev);
1522        }
1523        return res;
1524}
1525
1526EXPORT_SYMBOL(invalidate_partition);
1527
1528/*
1529 * Disk events - monitor disk events like media change and eject request.
1530 */
1531struct disk_events {
1532        struct list_head        node;           /* all disk_event's */
1533        struct gendisk          *disk;          /* the associated disk */
1534        spinlock_t              lock;
1535
1536        struct mutex            block_mutex;    /* protects blocking */
1537        int                     block;          /* event blocking depth */
1538        unsigned int            pending;        /* events already sent out */
1539        unsigned int            clearing;       /* events being cleared */
1540
1541        long                    poll_msecs;     /* interval, -1 for default */
1542        struct delayed_work     dwork;
1543};
1544
1545static const char *disk_events_strs[] = {
1546        [ilog2(DISK_EVENT_MEDIA_CHANGE)]        = "media_change",
1547        [ilog2(DISK_EVENT_EJECT_REQUEST)]       = "eject_request",
1548};
1549
1550static char *disk_uevents[] = {
1551        [ilog2(DISK_EVENT_MEDIA_CHANGE)]        = "DISK_MEDIA_CHANGE=1",
1552        [ilog2(DISK_EVENT_EJECT_REQUEST)]       = "DISK_EJECT_REQUEST=1",
1553};
1554
1555/* list of all disk_events */
1556static DEFINE_MUTEX(disk_events_mutex);
1557static LIST_HEAD(disk_events);
1558
1559/* disable in-kernel polling by default */
1560static unsigned long disk_events_dfl_poll_msecs;
1561
1562static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
1563{
1564        struct disk_events *ev = disk->ev;
1565        long intv_msecs = 0;
1566
1567        /*
1568         * If device-specific poll interval is set, always use it.  If
1569         * the default is being used, poll iff there are events which
1570         * can't be monitored asynchronously.
1571         */
1572        if (ev->poll_msecs >= 0)
1573                intv_msecs = ev->poll_msecs;
1574        else if (disk->events & ~disk->async_events)
1575                intv_msecs = disk_events_dfl_poll_msecs;
1576
1577        return msecs_to_jiffies(intv_msecs);
1578}
1579
1580/**
1581 * disk_block_events - block and flush disk event checking
1582 * @disk: disk to block events for
1583 *
1584 * On return from this function, it is guaranteed that event checking
1585 * isn't in progress and won't happen until unblocked by
1586 * disk_unblock_events().  Events blocking is counted and the actual
1587 * unblocking happens after the matching number of unblocks are done.
1588 *
1589 * Note that this intentionally does not block event checking from
1590 * disk_clear_events().
1591 *
1592 * CONTEXT:
1593 * Might sleep.
1594 */
1595void disk_block_events(struct gendisk *disk)
1596{
1597        struct disk_events *ev = disk->ev;
1598        unsigned long flags;
1599        bool cancel;
1600
1601        if (!ev)
1602                return;
1603
1604        /*
1605         * Outer mutex ensures that the first blocker completes canceling
1606         * the event work before further blockers are allowed to finish.
1607         */
1608        mutex_lock(&ev->block_mutex);
1609
1610        spin_lock_irqsave(&ev->lock, flags);
1611        cancel = !ev->block++;
1612        spin_unlock_irqrestore(&ev->lock, flags);
1613
1614        if (cancel)
1615                cancel_delayed_work_sync(&disk->ev->dwork);
1616
1617        mutex_unlock(&ev->block_mutex);
1618}
1619
1620static void __disk_unblock_events(struct gendisk *disk, bool check_now)
1621{
1622        struct disk_events *ev = disk->ev;
1623        unsigned long intv;
1624        unsigned long flags;
1625
1626        spin_lock_irqsave(&ev->lock, flags);
1627
1628        if (WARN_ON_ONCE(ev->block <= 0))
1629                goto out_unlock;
1630
1631        if (--ev->block)
1632                goto out_unlock;
1633
1634        intv = disk_events_poll_jiffies(disk);
1635        if (check_now)
1636                queue_delayed_work(system_freezable_power_efficient_wq,
1637                                &ev->dwork, 0);
1638        else if (intv)
1639                queue_delayed_work(system_freezable_power_efficient_wq,
1640                                &ev->dwork, intv);
1641out_unlock:
1642        spin_unlock_irqrestore(&ev->lock, flags);
1643}
1644
1645/**
1646 * disk_unblock_events - unblock disk event checking
1647 * @disk: disk to unblock events for
1648 *
1649 * Undo disk_block_events().  When the block count reaches zero, it
1650 * starts events polling if configured.
1651 *
1652 * CONTEXT:
1653 * Don't care.  Safe to call from irq context.
1654 */
1655void disk_unblock_events(struct gendisk *disk)
1656{
1657        if (disk->ev)
1658                __disk_unblock_events(disk, false);
1659}
1660
1661/**
1662 * disk_flush_events - schedule immediate event checking and flushing
1663 * @disk: disk to check and flush events for
1664 * @mask: events to flush
1665 *
1666 * Schedule immediate event checking on @disk if not blocked.  Events in
1667 * @mask are scheduled to be cleared from the driver.  Note that this
1668 * doesn't clear the events from @disk->ev.
1669 *
1670 * CONTEXT:
1671 * If @mask is non-zero, this must be called with bdev->bd_mutex held.
1672 */
1673void disk_flush_events(struct gendisk *disk, unsigned int mask)
1674{
1675        struct disk_events *ev = disk->ev;
1676
1677        if (!ev)
1678                return;
1679
1680        spin_lock_irq(&ev->lock);
1681        ev->clearing |= mask;
1682        if (!ev->block)
1683                mod_delayed_work(system_freezable_power_efficient_wq,
1684                                &ev->dwork, 0);
1685        spin_unlock_irq(&ev->lock);
1686}
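
/*
 * Usage sketch (editor's illustration): flushing a pending media-change
 * event with bdev->bd_mutex held, as the CONTEXT note above requires
 * for a non-zero @mask.  my_last_close() is a hypothetical caller.
 */
static void my_last_close(struct block_device *bdev)
{
        mutex_lock(&bdev->bd_mutex);
        disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);
        mutex_unlock(&bdev->bd_mutex);
}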
1687
1688/**
1689 * disk_clear_events - synchronously check, clear and return pending events
1690 * @disk: disk to fetch and clear events from
1691 * @mask: mask of events to be fetched and cleared
1692 *
1693 * Disk events are synchronously checked and pending events in @mask
1694 * are cleared and returned.  This ignores the block count.
1695 *
1696 * CONTEXT:
1697 * Might sleep.
1698 */
1699unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
1700{
1701        const struct block_device_operations *bdops = disk->fops;
1702        struct disk_events *ev = disk->ev;
1703        unsigned int pending;
1704        unsigned int clearing = mask;
1705
1706        if (!ev) {
1707                /* for drivers still using the old ->media_changed method */
1708                if ((mask & DISK_EVENT_MEDIA_CHANGE) &&
1709                    bdops->media_changed && bdops->media_changed(disk))
1710                        return DISK_EVENT_MEDIA_CHANGE;
1711                return 0;
1712        }
1713
1714        disk_block_events(disk);
1715
1716        /*
1717         * store the union of mask and ev->clearing on the stack so that the
1718         * race with disk_flush_events does not cause ambiguity (ev->clearing
1719         * can still be modified even if events are blocked).
1720         */
1721        spin_lock_irq(&ev->lock);
1722        clearing |= ev->clearing;
1723        ev->clearing = 0;
1724        spin_unlock_irq(&ev->lock);
1725
1726        disk_check_events(ev, &clearing);
1727        /*
1728         * If ev->clearing is not 0, disk_flush_events() got called in the
1729         * middle of this function, so run the workfn without delay.
1730         */
1731        __disk_unblock_events(disk, ev->clearing ? true : false);
1732
1733        /* then, fetch and clear pending events */
1734        spin_lock_irq(&ev->lock);
1735        pending = ev->pending & mask;
1736        ev->pending &= ~mask;
1737        spin_unlock_irq(&ev->lock);
1738        WARN_ON_ONCE(clearing & mask);
1739
1740        return pending;
1741}
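
/*
 * Usage sketch (editor's illustration, loosely modeled on
 * check_disk_change() in fs/block_dev.c): synchronously consume any
 * pending media-change event and revalidate.  my_revalidate() is a
 * hypothetical driver hook.
 */
static bool my_media_changed(struct gendisk *disk)
{
        unsigned int events;

        events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
                                         DISK_EVENT_EJECT_REQUEST);
        if (!(events & DISK_EVENT_MEDIA_CHANGE))
                return false;

        my_revalidate(disk);                    /* hypothetical */
        return true;
}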
1742
1743/*
1744 * disk_check_events() is split out of this workfn so that
1745 * disk_clear_events() can pass in a different clearing_ptr.
1746 */
1747static void disk_events_workfn(struct work_struct *work)
1748{
1749        struct delayed_work *dwork = to_delayed_work(work);
1750        struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
1751
1752        disk_check_events(ev, &ev->clearing);
1753}
1754
1755static void disk_check_events(struct disk_events *ev,
1756                              unsigned int *clearing_ptr)
1757{
1758        struct gendisk *disk = ev->disk;
1759        char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
1760        unsigned int clearing = *clearing_ptr;
1761        unsigned int events;
1762        unsigned long intv;
1763        int nr_events = 0, i;
1764
1765        /* check events */
1766        events = disk->fops->check_events(disk, clearing);
1767
1768        /* accumulate pending events and schedule next poll if necessary */
1769        spin_lock_irq(&ev->lock);
1770
1771        events &= ~ev->pending;
1772        ev->pending |= events;
1773        *clearing_ptr &= ~clearing;
1774
1775        intv = disk_events_poll_jiffies(disk);
1776        if (!ev->block && intv)
1777                queue_delayed_work(system_freezable_power_efficient_wq,
1778                                &ev->dwork, intv);
1779
1780        spin_unlock_irq(&ev->lock);
1781
1782        /*
1783         * Tell userland about new events.  Only the events listed in
1784         * @disk->events are reported.  Unlisted events are processed the
1785         * same internally but never get reported to userland.
1786         */
1787        for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
1788                if (events & disk->events & (1 << i))
1789                        envp[nr_events++] = disk_uevents[i];
1790
1791        if (nr_events)
1792                kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
1793}
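
/*
 * Callback sketch (editor's illustration): the rough shape of a
 * driver's ->check_events() as invoked above.  It returns the mask of
 * events that occurred since the last call; @clearing names the events
 * the caller is about to consume, which most drivers can ignore.
 * my_hw_media_changed() is hypothetical.
 */
static unsigned int my_check_events(struct gendisk *disk,
                                    unsigned int clearing)
{
        struct my_dev *dev = disk->private_data;        /* set by the driver */

        return my_hw_media_changed(dev) ? DISK_EVENT_MEDIA_CHANGE : 0;
}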
1794
1795/*
1796 * A disk events enabled device has the following sysfs nodes under
1797 * its /sys/block/X/ directory.
1798 *
1799 * events               : list of all supported events
1800 * events_async         : list of events which can be detected w/o polling
1801 * events_poll_msecs    : polling interval, 0: disable, -1: system default
1802 */
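
/*
 * Example (editor's illustration) for a drive whose driver declares
 * both events, e.g. a typical CD-ROM at sr0:
 *
 *   $ cat /sys/block/sr0/events
 *   media_change eject_request
 *   $ cat /sys/block/sr0/events_poll_msecs
 *   -1
 *
 * The device name and the exact event list depend on the driver.
 */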
1803static ssize_t __disk_events_show(unsigned int events, char *buf)
1804{
1805        const char *delim = "";
1806        ssize_t pos = 0;
1807        int i;
1808
1809        for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
1810                if (events & (1 << i)) {
1811                        pos += sprintf(buf + pos, "%s%s",
1812                                       delim, disk_events_strs[i]);
1813                        delim = " ";
1814                }
1815        if (pos)
1816                pos += sprintf(buf + pos, "\n");
1817        return pos;
1818}
1819
1820static ssize_t disk_events_show(struct device *dev,
1821                                struct device_attribute *attr, char *buf)
1822{
1823        struct gendisk *disk = dev_to_disk(dev);
1824
1825        return __disk_events_show(disk->events, buf);
1826}
1827
1828static ssize_t disk_events_async_show(struct device *dev,
1829                                      struct device_attribute *attr, char *buf)
1830{
1831        struct gendisk *disk = dev_to_disk(dev);
1832
1833        return __disk_events_show(disk->async_events, buf);
1834}
1835
1836static ssize_t disk_events_poll_msecs_show(struct device *dev,
1837                                           struct device_attribute *attr,
1838                                           char *buf)
1839{
1840        struct gendisk *disk = dev_to_disk(dev);
1841
1842        return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
1843}
1844
1845static ssize_t disk_events_poll_msecs_store(struct device *dev,
1846                                            struct device_attribute *attr,
1847                                            const char *buf, size_t count)
1848{
1849        struct gendisk *disk = dev_to_disk(dev);
1850        long intv;
1851
1852        if (!count || !sscanf(buf, "%ld", &intv))
1853                return -EINVAL;
1854
1855        if (intv < 0 && intv != -1)
1856                return -EINVAL;
1857
1858        disk_block_events(disk);
1859        disk->ev->poll_msecs = intv;
1860        __disk_unblock_events(disk, true);
1861
1862        return count;
1863}
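
/*
 * Editor's note: the store above accepts -1 (follow the
 * block.events_dfl_poll_msecs default), 0 (disable polling for this
 * disk) or a positive interval in milliseconds, matching how
 * disk_events_poll_jiffies() interprets poll_msecs.
 */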
1864
1865static const DEVICE_ATTR(events, S_IRUGO, disk_events_show, NULL);
1866static const DEVICE_ATTR(events_async, S_IRUGO, disk_events_async_show, NULL);
1867static const DEVICE_ATTR(events_poll_msecs, S_IRUGO|S_IWUSR,
1868                         disk_events_poll_msecs_show,
1869                         disk_events_poll_msecs_store);
1870
1871static const struct attribute *disk_events_attrs[] = {
1872        &dev_attr_events.attr,
1873        &dev_attr_events_async.attr,
1874        &dev_attr_events_poll_msecs.attr,
1875        NULL,
1876};
1877
1878/*
1879 * The default polling interval can be specified by the kernel
1880 * parameter block.events_dfl_poll_msecs, which defaults to 0
1881 * (disabled).  It can also be modified at runtime by writing to
1882 * /sys/module/block/parameters/events_dfl_poll_msecs.
1883 */
1884static int disk_events_set_dfl_poll_msecs(const char *val,
1885                                          const struct kernel_param *kp)
1886{
1887        struct disk_events *ev;
1888        int ret;
1889
1890        ret = param_set_ulong(val, kp);
1891        if (ret < 0)
1892                return ret;
1893
1894        mutex_lock(&disk_events_mutex);
1895
1896        list_for_each_entry(ev, &disk_events, node)
1897                disk_flush_events(ev->disk, 0);
1898
1899        mutex_unlock(&disk_events_mutex);
1900
1901        return 0;
1902}
1903
1904static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
1905        .set    = disk_events_set_dfl_poll_msecs,
1906        .get    = param_get_ulong,
1907};
1908
1909#undef MODULE_PARAM_PREFIX
1910#define MODULE_PARAM_PREFIX     "block."
1911
1912module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
1913                &disk_events_dfl_poll_msecs, 0644);
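
/*
 * Example (editor's illustration): enabling a 2-second default poll
 * interval, either on the kernel command line or at runtime:
 *
 *   block.events_dfl_poll_msecs=2000
 *   # echo 2000 > /sys/module/block/parameters/events_dfl_poll_msecs
 *
 * Changing the value flushes events on all registered disks so the new
 * interval takes effect immediately.
 */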
1914
1915/*
1916 * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
1917 */
1918static void disk_alloc_events(struct gendisk *disk)
1919{
1920        struct disk_events *ev;
1921
1922        if (!disk->fops->check_events)
1923                return;
1924
1925        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
1926        if (!ev) {
1927                pr_warn("%s: failed to initialize events\n", disk->disk_name);
1928                return;
1929        }
1930
1931        INIT_LIST_HEAD(&ev->node);
1932        ev->disk = disk;
1933        spin_lock_init(&ev->lock);
1934        mutex_init(&ev->block_mutex);
1935        ev->block = 1;
1936        ev->poll_msecs = -1;
1937        INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
1938
1939        disk->ev = ev;
1940}
1941
1942static void disk_add_events(struct gendisk *disk)
1943{
1944        if (!disk->ev)
1945                return;
1946
1947        /* FIXME: error handling */
1948        if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
1949                pr_warn("%s: failed to create sysfs files for events\n",
1950                        disk->disk_name);
1951
1952        mutex_lock(&disk_events_mutex);
1953        list_add_tail(&disk->ev->node, &disk_events);
1954        mutex_unlock(&disk_events_mutex);
1955
1956        /*
1957         * Block count is initialized to 1 and the following initial
1958         * unblock kicks it into action.
1959         */
1960        __disk_unblock_events(disk, true);
1961}
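
/*
 * Registration sketch (editor's illustration): a driver opts into this
 * machinery by supplying ->check_events() in its fops and setting
 * disk->events before add_disk(); disk_alloc_events() and
 * disk_add_events() above are then called from device_add_disk().
 * my_fops reuses the hypothetical my_check_events() sketched earlier.
 */
static const struct block_device_operations my_fops = {
        .owner          = THIS_MODULE,
        .check_events   = my_check_events,
};

static void my_register(struct gendisk *disk)
{
        disk->fops = &my_fops;
        disk->events = DISK_EVENT_MEDIA_CHANGE;
        add_disk(disk);
}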
1962
1963static void disk_del_events(struct gendisk *disk)
1964{
1965        if (!disk->ev)
1966                return;
1967
1968        disk_block_events(disk);
1969
1970        mutex_lock(&disk_events_mutex);
1971        list_del_init(&disk->ev->node);
1972        mutex_unlock(&disk_events_mutex);
1973
1974        sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
1975}
1976
1977static void disk_release_events(struct gendisk *disk)
1978{
1979        /* the block count should be 1 from disk_del_events() */
1980        WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
1981        kfree(disk->ev);
1982}
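
/*
 * Editor's note: on teardown, del_gendisk() calls disk_del_events() to
 * stop polling and remove the sysfs files, and the final put of the
 * disk's device frees the structure via disk_release_events() above.
 */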
1983