linux/block/genhd.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  gendisk handling
   4 */
   5
   6#include <linux/module.h>
   7#include <linux/ctype.h>
   8#include <linux/fs.h>
   9#include <linux/genhd.h>
  10#include <linux/kdev_t.h>
  11#include <linux/kernel.h>
  12#include <linux/blkdev.h>
  13#include <linux/backing-dev.h>
  14#include <linux/init.h>
  15#include <linux/spinlock.h>
  16#include <linux/proc_fs.h>
  17#include <linux/seq_file.h>
  18#include <linux/slab.h>
  19#include <linux/kmod.h>
  20#include <linux/kobj_map.h>
  21#include <linux/mutex.h>
  22#include <linux/idr.h>
  23#include <linux/log2.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/badblocks.h>
  26
  27#include "blk.h"
  28
  29static DEFINE_MUTEX(block_class_lock);
  30static struct kobject *block_depr;
  31
  32/* for extended dynamic devt allocation, currently only one major is used */
  33#define NR_EXT_DEVT             (1 << MINORBITS)
  34
   35/* For extended devt allocation.  ext_devt_lock prevents lookup
  36 * results from going away underneath its user.
  37 */
  38static DEFINE_SPINLOCK(ext_devt_lock);
  39static DEFINE_IDR(ext_devt_idr);
  40
  41static void disk_check_events(struct disk_events *ev,
  42                              unsigned int *clearing_ptr);
  43static void disk_alloc_events(struct gendisk *disk);
  44static void disk_add_events(struct gendisk *disk);
  45static void disk_del_events(struct gendisk *disk);
  46static void disk_release_events(struct gendisk *disk);
  47
  48/*
   49 * Set disk capacity and, if the capacity changed and neither the old nor
   50 * the new size is zero, notify userspace with a resize uevent.
  51 */
  52void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
  53                                        bool revalidate)
  54{
  55        sector_t capacity = get_capacity(disk);
  56
  57        set_capacity(disk, size);
  58
  59        if (revalidate)
  60                revalidate_disk(disk);
  61
  62        if (capacity != size && capacity != 0 && size != 0) {
  63                char *envp[] = { "RESIZE=1", NULL };
  64
  65                kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
  66        }
  67}
  68
  69EXPORT_SYMBOL_GPL(set_capacity_revalidate_and_notify);
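
     /*
      * Example (editor's illustrative sketch, not part of the original file):
      * how a driver might propagate a capacity change it learned from the
      * hardware.  "my_disk" and "new_sectors" are hypothetical names.
      *
      *	static void my_resize(struct gendisk *my_disk, sector_t new_sectors)
      *	{
      *		set_capacity_revalidate_and_notify(my_disk, new_sectors, true);
      *	}
      */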
  70
  71/*
  72 * Format the device name of the indicated disk into the supplied buffer and
  73 * return a pointer to that same buffer for convenience.
  74 */
  75char *disk_name(struct gendisk *hd, int partno, char *buf)
  76{
  77        if (!partno)
  78                snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
  79        else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
  80                snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno);
  81        else
  82                snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno);
  83
  84        return buf;
  85}
  86
  87const char *bdevname(struct block_device *bdev, char *buf)
  88{
  89        return disk_name(bdev->bd_disk, bdev->bd_part->partno, buf);
  90}
  91EXPORT_SYMBOL(bdevname);
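
     /*
      * Example (editor's sketch): callers pass a stack buffer of BDEVNAME_SIZE
      * bytes, as printk_all_partitions() below does.  "bdev" is assumed to be
      * a valid, referenced block device.
      *
      *	char buf[BDEVNAME_SIZE];
      *
      *	pr_info("I/O error on %s\n", bdevname(bdev, buf));
      */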
  92
  93static void part_stat_read_all(struct hd_struct *part, struct disk_stats *stat)
  94{
  95        int cpu;
  96
  97        memset(stat, 0, sizeof(struct disk_stats));
  98        for_each_possible_cpu(cpu) {
  99                struct disk_stats *ptr = per_cpu_ptr(part->dkstats, cpu);
 100                int group;
 101
 102                for (group = 0; group < NR_STAT_GROUPS; group++) {
 103                        stat->nsecs[group] += ptr->nsecs[group];
 104                        stat->sectors[group] += ptr->sectors[group];
 105                        stat->ios[group] += ptr->ios[group];
 106                        stat->merges[group] += ptr->merges[group];
 107                }
 108
 109                stat->io_ticks += ptr->io_ticks;
 110        }
 111}
 112
 113static unsigned int part_in_flight(struct request_queue *q,
 114                struct hd_struct *part)
 115{
 116        unsigned int inflight = 0;
 117        int cpu;
 118
 119        for_each_possible_cpu(cpu) {
 120                inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
 121                            part_stat_local_read_cpu(part, in_flight[1], cpu);
 122        }
 123        if ((int)inflight < 0)
 124                inflight = 0;
 125
 126        return inflight;
 127}
 128
 129static void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
 130                unsigned int inflight[2])
 131{
 132        int cpu;
 133
 134        inflight[0] = 0;
 135        inflight[1] = 0;
 136        for_each_possible_cpu(cpu) {
 137                inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
 138                inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
 139        }
 140        if ((int)inflight[0] < 0)
 141                inflight[0] = 0;
 142        if ((int)inflight[1] < 0)
 143                inflight[1] = 0;
 144}
 145
 146struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
 147{
 148        struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
 149
 150        if (unlikely(partno < 0 || partno >= ptbl->len))
 151                return NULL;
 152        return rcu_dereference(ptbl->part[partno]);
 153}
 154
 155/**
 156 * disk_get_part - get partition
  157 * @disk: disk to look up the partition on
  158 * @partno: partition number
  159 *
  160 * Look up partition @partno on @disk.  If found, increment its
  161 * reference count and return it.
 162 *
 163 * CONTEXT:
 164 * Don't care.
 165 *
 166 * RETURNS:
 167 * Pointer to the found partition on success, NULL if not found.
 168 */
 169struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
 170{
 171        struct hd_struct *part;
 172
 173        rcu_read_lock();
 174        part = __disk_get_part(disk, partno);
 175        if (part)
 176                get_device(part_to_dev(part));
 177        rcu_read_unlock();
 178
 179        return part;
 180}
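
     /*
      * Example (editor's sketch): a looked-up partition holds a device
      * reference and must be released with disk_put_part() once the caller
      * is done with it.
      *
      *	struct hd_struct *part = disk_get_part(disk, partno);
      *
      *	if (part) {
      *		pr_info("partition %d starts at sector %llu\n", partno,
      *			(unsigned long long)part->start_sect);
      *		disk_put_part(part);
      *	}
      */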
 181
 182/**
 183 * disk_part_iter_init - initialize partition iterator
 184 * @piter: iterator to initialize
 185 * @disk: disk to iterate over
 186 * @flags: DISK_PITER_* flags
 187 *
 188 * Initialize @piter so that it iterates over partitions of @disk.
 189 *
 190 * CONTEXT:
 191 * Don't care.
 192 */
 193void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
 194                          unsigned int flags)
 195{
 196        struct disk_part_tbl *ptbl;
 197
 198        rcu_read_lock();
 199        ptbl = rcu_dereference(disk->part_tbl);
 200
 201        piter->disk = disk;
 202        piter->part = NULL;
 203
 204        if (flags & DISK_PITER_REVERSE)
 205                piter->idx = ptbl->len - 1;
 206        else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
 207                piter->idx = 0;
 208        else
 209                piter->idx = 1;
 210
 211        piter->flags = flags;
 212
 213        rcu_read_unlock();
 214}
 215EXPORT_SYMBOL_GPL(disk_part_iter_init);
 216
 217/**
  218 * disk_part_iter_next - advance the iterator and return the next partition
  219 * @piter: iterator of interest
  220 *
  221 * Advance @piter to the next partition and return it.
 222 *
 223 * CONTEXT:
 224 * Don't care.
 225 */
 226struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
 227{
 228        struct disk_part_tbl *ptbl;
 229        int inc, end;
 230
 231        /* put the last partition */
 232        disk_put_part(piter->part);
 233        piter->part = NULL;
 234
 235        /* get part_tbl */
 236        rcu_read_lock();
 237        ptbl = rcu_dereference(piter->disk->part_tbl);
 238
 239        /* determine iteration parameters */
 240        if (piter->flags & DISK_PITER_REVERSE) {
 241                inc = -1;
 242                if (piter->flags & (DISK_PITER_INCL_PART0 |
 243                                    DISK_PITER_INCL_EMPTY_PART0))
 244                        end = -1;
 245                else
 246                        end = 0;
 247        } else {
 248                inc = 1;
 249                end = ptbl->len;
 250        }
 251
 252        /* iterate to the next partition */
 253        for (; piter->idx != end; piter->idx += inc) {
 254                struct hd_struct *part;
 255
 256                part = rcu_dereference(ptbl->part[piter->idx]);
 257                if (!part)
 258                        continue;
 259                if (!part_nr_sects_read(part) &&
 260                    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
 261                    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
 262                      piter->idx == 0))
 263                        continue;
 264
 265                get_device(part_to_dev(part));
 266                piter->part = part;
 267                piter->idx += inc;
 268                break;
 269        }
 270
 271        rcu_read_unlock();
 272
 273        return piter->part;
 274}
 275EXPORT_SYMBOL_GPL(disk_part_iter_next);
 276
 277/**
 278 * disk_part_iter_exit - finish up partition iteration
  279 * @piter: iterator of interest
 280 *
 281 * Called when iteration is over.  Cleans up @piter.
 282 *
 283 * CONTEXT:
 284 * Don't care.
 285 */
 286void disk_part_iter_exit(struct disk_part_iter *piter)
 287{
 288        disk_put_part(piter->part);
 289        piter->part = NULL;
 290}
 291EXPORT_SYMBOL_GPL(disk_part_iter_exit);
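
     /*
      * Example (editor's sketch): the canonical iteration pattern, mirroring
      * the users later in this file (register_disk(), del_gendisk(),
      * show_partition()).
      *
      *	struct disk_part_iter piter;
      *	struct hd_struct *part;
      *
      *	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
      *	while ((part = disk_part_iter_next(&piter)))
      *		pr_info("found partition %d\n", part->partno);
      *	disk_part_iter_exit(&piter);
      */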
 292
 293static inline int sector_in_part(struct hd_struct *part, sector_t sector)
 294{
 295        return part->start_sect <= sector &&
 296                sector < part->start_sect + part_nr_sects_read(part);
 297}
 298
 299/**
 300 * disk_map_sector_rcu - map sector to partition
 301 * @disk: gendisk of interest
 302 * @sector: sector to map
 303 *
 304 * Find out which partition @sector maps to on @disk.  This is
 305 * primarily used for stats accounting.
 306 *
 307 * CONTEXT:
 308 * RCU read locked.  The returned partition pointer is always valid
  309 * because its refcount is grabbed, except for part0, whose lifetime
  310 * is the same as the disk's.
 311 *
 312 * RETURNS:
 313 * Found partition on success, part0 is returned if no partition matches
 314 * or the matched partition is being deleted.
 315 */
 316struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
 317{
 318        struct disk_part_tbl *ptbl;
 319        struct hd_struct *part;
 320        int i;
 321
 322        rcu_read_lock();
 323        ptbl = rcu_dereference(disk->part_tbl);
 324
 325        part = rcu_dereference(ptbl->last_lookup);
 326        if (part && sector_in_part(part, sector) && hd_struct_try_get(part))
 327                goto out_unlock;
 328
 329        for (i = 1; i < ptbl->len; i++) {
 330                part = rcu_dereference(ptbl->part[i]);
 331
 332                if (part && sector_in_part(part, sector)) {
 333                        /*
 334                         * only live partition can be cached for lookup,
 335                         * so use-after-free on cached & deleting partition
 336                         * can be avoided
 337                         */
 338                        if (!hd_struct_try_get(part))
 339                                break;
 340                        rcu_assign_pointer(ptbl->last_lookup, part);
 341                        goto out_unlock;
 342                }
 343        }
 344
 345        part = &disk->part0;
 346out_unlock:
 347        rcu_read_unlock();
 348        return part;
 349}
 350
 351/**
 352 * disk_has_partitions
 353 * @disk: gendisk of interest
 354 *
  355 * Walk through the partition table and check if a valid partition exists.
 356 *
 357 * CONTEXT:
 358 * Don't care.
 359 *
 360 * RETURNS:
 361 * True if the gendisk has at least one valid non-zero size partition.
 362 * Otherwise false.
 363 */
 364bool disk_has_partitions(struct gendisk *disk)
 365{
 366        struct disk_part_tbl *ptbl;
 367        int i;
 368        bool ret = false;
 369
 370        rcu_read_lock();
 371        ptbl = rcu_dereference(disk->part_tbl);
 372
 373        /* Iterate partitions skipping the whole device at index 0 */
 374        for (i = 1; i < ptbl->len; i++) {
 375                if (rcu_dereference(ptbl->part[i])) {
 376                        ret = true;
 377                        break;
 378                }
 379        }
 380
 381        rcu_read_unlock();
 382
 383        return ret;
 384}
 385EXPORT_SYMBOL_GPL(disk_has_partitions);
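
     /*
      * Example (editor's sketch): a driver that only supports whole-disk
      * access might refuse an operation while partitions exist.
      * "my_reconfigure" and the -EBUSY policy are hypothetical.
      *
      *	static int my_reconfigure(struct gendisk *disk)
      *	{
      *		if (disk_has_partitions(disk))
      *			return -EBUSY;
      *		return 0;
      *	}
      */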
 386
 387/*
 388 * Can be deleted altogether. Later.
 389 *
 390 */
 391#define BLKDEV_MAJOR_HASH_SIZE 255
 392static struct blk_major_name {
 393        struct blk_major_name *next;
 394        int major;
 395        char name[16];
 396} *major_names[BLKDEV_MAJOR_HASH_SIZE];
 397
 398/* index in the above - for now: assume no multimajor ranges */
 399static inline int major_to_index(unsigned major)
 400{
 401        return major % BLKDEV_MAJOR_HASH_SIZE;
 402}
 403
 404#ifdef CONFIG_PROC_FS
 405void blkdev_show(struct seq_file *seqf, off_t offset)
 406{
 407        struct blk_major_name *dp;
 408
 409        mutex_lock(&block_class_lock);
 410        for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
 411                if (dp->major == offset)
 412                        seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
 413        mutex_unlock(&block_class_lock);
 414}
 415#endif /* CONFIG_PROC_FS */
 416
 417/**
 418 * register_blkdev - register a new block device
 419 *
 420 * @major: the requested major device number [1..BLKDEV_MAJOR_MAX-1]. If
 421 *         @major = 0, try to allocate any unused major number.
  422 * @name: the name of the new block device as a zero-terminated string
 423 *
 424 * The @name must be unique within the system.
 425 *
 426 * The return value depends on the @major input parameter:
 427 *
 428 *  - if a major device number was requested in range [1..BLKDEV_MAJOR_MAX-1]
 429 *    then the function returns zero on success, or a negative error code
 430 *  - if any unused major number was requested with @major = 0 parameter
 431 *    then the return value is the allocated major number in range
 432 *    [1..BLKDEV_MAJOR_MAX-1] or a negative error code otherwise
 433 *
 434 * See Documentation/admin-guide/devices.txt for the list of allocated
 435 * major numbers.
 436 */
 437int register_blkdev(unsigned int major, const char *name)
 438{
 439        struct blk_major_name **n, *p;
 440        int index, ret = 0;
 441
 442        mutex_lock(&block_class_lock);
 443
 444        /* temporary */
 445        if (major == 0) {
 446                for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
 447                        if (major_names[index] == NULL)
 448                                break;
 449                }
 450
 451                if (index == 0) {
 452                        printk("%s: failed to get major for %s\n",
 453                               __func__, name);
 454                        ret = -EBUSY;
 455                        goto out;
 456                }
 457                major = index;
 458                ret = major;
 459        }
 460
 461        if (major >= BLKDEV_MAJOR_MAX) {
 462                pr_err("%s: major requested (%u) is greater than the maximum (%u) for %s\n",
 463                       __func__, major, BLKDEV_MAJOR_MAX-1, name);
 464
 465                ret = -EINVAL;
 466                goto out;
 467        }
 468
 469        p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
 470        if (p == NULL) {
 471                ret = -ENOMEM;
 472                goto out;
 473        }
 474
 475        p->major = major;
 476        strlcpy(p->name, name, sizeof(p->name));
 477        p->next = NULL;
 478        index = major_to_index(major);
 479
 480        for (n = &major_names[index]; *n; n = &(*n)->next) {
 481                if ((*n)->major == major)
 482                        break;
 483        }
 484        if (!*n)
 485                *n = p;
 486        else
 487                ret = -EBUSY;
 488
 489        if (ret < 0) {
 490                printk("register_blkdev: cannot get major %u for %s\n",
 491                       major, name);
 492                kfree(p);
 493        }
 494out:
 495        mutex_unlock(&block_class_lock);
 496        return ret;
 497}
 498
 499EXPORT_SYMBOL(register_blkdev);
 500
 501void unregister_blkdev(unsigned int major, const char *name)
 502{
 503        struct blk_major_name **n;
 504        struct blk_major_name *p = NULL;
 505        int index = major_to_index(major);
 506
 507        mutex_lock(&block_class_lock);
 508        for (n = &major_names[index]; *n; n = &(*n)->next)
 509                if ((*n)->major == major)
 510                        break;
 511        if (!*n || strcmp((*n)->name, name)) {
 512                WARN_ON(1);
 513        } else {
 514                p = *n;
 515                *n = p->next;
 516        }
 517        mutex_unlock(&block_class_lock);
 518        kfree(p);
 519}
 520
 521EXPORT_SYMBOL(unregister_blkdev);
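
     /*
      * Example (editor's sketch): dynamic major allocation from a driver's
      * module init/exit paths.  "example_blk" and example_major are
      * hypothetical names.
      *
      *	static int example_major;
      *
      *	static int __init example_init(void)
      *	{
      *		example_major = register_blkdev(0, "example_blk");
      *		if (example_major < 0)
      *			return example_major;
      *		return 0;
      *	}
      *
      *	static void __exit example_exit(void)
      *	{
      *		unregister_blkdev(example_major, "example_blk");
      *	}
      */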
 522
 523static struct kobj_map *bdev_map;
 524
 525/**
 526 * blk_mangle_minor - scatter minor numbers apart
 527 * @minor: minor number to mangle
 528 *
  529 * Scatter consecutively allocated @minor numbers apart if CONFIG_DEBUG_BLOCK_EXT_DEVT
 530 * is enabled.  Mangling twice gives the original value.
 531 *
 532 * RETURNS:
 533 * Mangled value.
 534 *
 535 * CONTEXT:
 536 * Don't care.
 537 */
 538static int blk_mangle_minor(int minor)
 539{
 540#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
 541        int i;
 542
 543        for (i = 0; i < MINORBITS / 2; i++) {
 544                int low = minor & (1 << i);
 545                int high = minor & (1 << (MINORBITS - 1 - i));
 546                int distance = MINORBITS - 1 - 2 * i;
 547
 548                minor ^= low | high;    /* clear both bits */
 549                low <<= distance;       /* swap the positions */
 550                high >>= distance;
 551                minor |= low | high;    /* and set */
 552        }
 553#endif
 554        return minor;
 555}
 556
 557/**
 558 * blk_alloc_devt - allocate a dev_t for a partition
 559 * @part: partition to allocate dev_t for
 560 * @devt: out parameter for resulting dev_t
 561 *
  562 * Allocate a dev_t for the block device partition @part.
 563 *
 564 * RETURNS:
 565 * 0 on success, allocated dev_t is returned in *@devt.  -errno on
 566 * failure.
 567 *
 568 * CONTEXT:
 569 * Might sleep.
 570 */
 571int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
 572{
 573        struct gendisk *disk = part_to_disk(part);
 574        int idx;
 575
 576        /* in consecutive minor range? */
 577        if (part->partno < disk->minors) {
 578                *devt = MKDEV(disk->major, disk->first_minor + part->partno);
 579                return 0;
 580        }
 581
 582        /* allocate ext devt */
 583        idr_preload(GFP_KERNEL);
 584
 585        spin_lock_bh(&ext_devt_lock);
 586        idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
 587        spin_unlock_bh(&ext_devt_lock);
 588
 589        idr_preload_end();
 590        if (idx < 0)
 591                return idx == -ENOSPC ? -EBUSY : idx;
 592
 593        *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
 594        return 0;
 595}
 596
 597/**
 598 * blk_free_devt - free a dev_t
 599 * @devt: dev_t to free
 600 *
 601 * Free @devt which was allocated using blk_alloc_devt().
 602 *
 603 * CONTEXT:
 604 * Might sleep.
 605 */
 606void blk_free_devt(dev_t devt)
 607{
 608        if (devt == MKDEV(0, 0))
 609                return;
 610
 611        if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
 612                spin_lock_bh(&ext_devt_lock);
 613                idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
 614                spin_unlock_bh(&ext_devt_lock);
 615        }
 616}
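
     /*
      * Example (editor's sketch): blk_alloc_devt()/blk_free_devt() pairing,
      * mirroring how __device_add_disk() below allocates a devt for part0.
      * Error handling beyond the allocation failure is omitted.
      *
      *	dev_t devt;
      *	int err;
      *
      *	err = blk_alloc_devt(part, &devt);
      *	if (err)
      *		return err;
      *	...
      *	blk_free_devt(devt);
      */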
 617
 618/*
  619 * We invalidate a devt by replacing its entry in the idr with a NULL pointer.
 620 */
 621void blk_invalidate_devt(dev_t devt)
 622{
 623        if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
 624                spin_lock_bh(&ext_devt_lock);
 625                idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt)));
 626                spin_unlock_bh(&ext_devt_lock);
 627        }
 628}
 629
 630static char *bdevt_str(dev_t devt, char *buf)
 631{
 632        if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
 633                char tbuf[BDEVT_SIZE];
 634                snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
 635                snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
 636        } else
 637                snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));
 638
 639        return buf;
 640}
 641
 642/*
  643 * Register device numbers devt..(devt+range-1)
 644 * range must be nonzero
 645 * The hash chain is sorted on range, so that subranges can override.
 646 */
 647void blk_register_region(dev_t devt, unsigned long range, struct module *module,
 648                         struct kobject *(*probe)(dev_t, int *, void *),
 649                         int (*lock)(dev_t, void *), void *data)
 650{
 651        kobj_map(bdev_map, devt, range, module, probe, lock, data);
 652}
 653
 654EXPORT_SYMBOL(blk_register_region);
 655
 656void blk_unregister_region(dev_t devt, unsigned long range)
 657{
 658        kobj_unmap(bdev_map, devt, range);
 659}
 660
 661EXPORT_SYMBOL(blk_unregister_region);
 662
 663static struct kobject *exact_match(dev_t devt, int *partno, void *data)
 664{
 665        struct gendisk *p = data;
 666
 667        return &disk_to_dev(p)->kobj;
 668}
 669
 670static int exact_lock(dev_t devt, void *data)
 671{
 672        struct gendisk *p = data;
 673
 674        if (!get_disk_and_module(p))
 675                return -1;
 676        return 0;
 677}
 678
 679static void register_disk(struct device *parent, struct gendisk *disk,
 680                          const struct attribute_group **groups)
 681{
 682        struct device *ddev = disk_to_dev(disk);
 683        struct block_device *bdev;
 684        struct disk_part_iter piter;
 685        struct hd_struct *part;
 686        int err;
 687
 688        ddev->parent = parent;
 689
 690        dev_set_name(ddev, "%s", disk->disk_name);
 691
  692        /* delay uevents until we have scanned the partition table */
 693        dev_set_uevent_suppress(ddev, 1);
 694
 695        if (groups) {
 696                WARN_ON(ddev->groups);
 697                ddev->groups = groups;
 698        }
 699        if (device_add(ddev))
 700                return;
 701        if (!sysfs_deprecated) {
 702                err = sysfs_create_link(block_depr, &ddev->kobj,
 703                                        kobject_name(&ddev->kobj));
 704                if (err) {
 705                        device_del(ddev);
 706                        return;
 707                }
 708        }
 709
 710        /*
  711         * avoid a probable deadlock caused by allocating memory with
  712         * GFP_KERNEL in the runtime_resume callback of any of its
  713         * ancestor devices
 714         */
 715        pm_runtime_set_memalloc_noio(ddev, true);
 716
 717        disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
 718        disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
 719
 720        if (disk->flags & GENHD_FL_HIDDEN) {
 721                dev_set_uevent_suppress(ddev, 0);
 722                return;
 723        }
 724
 725        /* No minors to use for partitions */
 726        if (!disk_part_scan_enabled(disk))
 727                goto exit;
 728
 729        /* No such device (e.g., media were just removed) */
 730        if (!get_capacity(disk))
 731                goto exit;
 732
 733        bdev = bdget_disk(disk, 0);
 734        if (!bdev)
 735                goto exit;
 736
 737        bdev->bd_invalidated = 1;
 738        err = blkdev_get(bdev, FMODE_READ, NULL);
 739        if (err < 0)
 740                goto exit;
 741        blkdev_put(bdev, FMODE_READ);
 742
 743exit:
 744        /* announce disk after possible partitions are created */
 745        dev_set_uevent_suppress(ddev, 0);
 746        kobject_uevent(&ddev->kobj, KOBJ_ADD);
 747
 748        /* announce possible partitions */
 749        disk_part_iter_init(&piter, disk, 0);
 750        while ((part = disk_part_iter_next(&piter)))
 751                kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
 752        disk_part_iter_exit(&piter);
 753
 754        if (disk->queue->backing_dev_info->dev) {
 755                err = sysfs_create_link(&ddev->kobj,
 756                          &disk->queue->backing_dev_info->dev->kobj,
 757                          "bdi");
 758                WARN_ON(err);
 759        }
 760}
 761
 762/**
 763 * __device_add_disk - add disk information to kernel list
 764 * @parent: parent device for the disk
 765 * @disk: per-device partitioning information
 766 * @groups: Additional per-device sysfs groups
 767 * @register_queue: register the queue if set to true
 768 *
 769 * This function registers the partitioning information in @disk
 770 * with the kernel.
 771 *
 772 * FIXME: error handling
 773 */
 774static void __device_add_disk(struct device *parent, struct gendisk *disk,
 775                              const struct attribute_group **groups,
 776                              bool register_queue)
 777{
 778        dev_t devt;
 779        int retval;
 780
 781        /*
 782         * The disk queue should now be all set with enough information about
 783         * the device for the elevator code to pick an adequate default
 784         * elevator if one is needed, that is, for devices requesting queue
 785         * registration.
 786         */
 787        if (register_queue)
 788                elevator_init_mq(disk->queue);
 789
  790        /* minors == 0 indicates that the ext devt from part0 is used and should
  791         * be accompanied by the GENHD_FL_EXT_DEVT flag.  Make sure all
 792         * parameters make sense.
 793         */
 794        WARN_ON(disk->minors && !(disk->major || disk->first_minor));
 795        WARN_ON(!disk->minors &&
 796                !(disk->flags & (GENHD_FL_EXT_DEVT | GENHD_FL_HIDDEN)));
 797
 798        disk->flags |= GENHD_FL_UP;
 799
 800        retval = blk_alloc_devt(&disk->part0, &devt);
 801        if (retval) {
 802                WARN_ON(1);
 803                return;
 804        }
 805        disk->major = MAJOR(devt);
 806        disk->first_minor = MINOR(devt);
 807
 808        disk_alloc_events(disk);
 809
 810        if (disk->flags & GENHD_FL_HIDDEN) {
 811                /*
 812                 * Don't let hidden disks show up in /proc/partitions,
 813                 * and don't bother scanning for partitions either.
 814                 */
 815                disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
 816                disk->flags |= GENHD_FL_NO_PART_SCAN;
 817        } else {
 818                struct backing_dev_info *bdi = disk->queue->backing_dev_info;
 819                struct device *dev = disk_to_dev(disk);
 820                int ret;
 821
 822                /* Register BDI before referencing it from bdev */
 823                dev->devt = devt;
 824                ret = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
 825                WARN_ON(ret);
 826                bdi_set_owner(bdi, dev);
 827                blk_register_region(disk_devt(disk), disk->minors, NULL,
 828                                    exact_match, exact_lock, disk);
 829        }
 830        register_disk(parent, disk, groups);
 831        if (register_queue)
 832                blk_register_queue(disk);
 833
 834        /*
 835         * Take an extra ref on queue which will be put on disk_release()
 836         * so that it sticks around as long as @disk is there.
 837         */
 838        WARN_ON_ONCE(!blk_get_queue(disk->queue));
 839
 840        disk_add_events(disk);
 841        blk_integrity_add(disk);
 842}
 843
 844void device_add_disk(struct device *parent, struct gendisk *disk,
 845                     const struct attribute_group **groups)
 846
 847{
 848        __device_add_disk(parent, disk, groups, true);
 849}
 850EXPORT_SYMBOL(device_add_disk);
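
     /*
      * Example (editor's sketch): the usual add/remove sequence in a block
      * driver.  Names ("my_fops", "my_queue", "parent_dev", MY_MINORS,
      * my_major, nr_sectors) are hypothetical; error handling is omitted.
      *
      *	struct gendisk *disk = alloc_disk(MY_MINORS);
      *
      *	disk->major = my_major;
      *	disk->first_minor = 0;
      *	disk->fops = &my_fops;
      *	disk->queue = my_queue;
      *	snprintf(disk->disk_name, DISK_NAME_LEN, "myblk0");
      *	set_capacity(disk, nr_sectors);
      *	device_add_disk(parent_dev, disk, NULL);
      *
      * and, on teardown:
      *
      *	del_gendisk(disk);
      *	put_disk(disk);
      */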
 851
 852void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
 853{
 854        __device_add_disk(parent, disk, NULL, false);
 855}
 856EXPORT_SYMBOL(device_add_disk_no_queue_reg);
 857
 858static void invalidate_partition(struct gendisk *disk, int partno)
 859{
 860        struct block_device *bdev;
 861
 862        bdev = bdget_disk(disk, partno);
 863        if (!bdev)
 864                return;
 865
 866        fsync_bdev(bdev);
 867        __invalidate_device(bdev, true);
 868
 869        /*
 870         * Unhash the bdev inode for this device so that it gets evicted as soon
 871         * as last inode reference is dropped.
 872         */
 873        remove_inode_hash(bdev->bd_inode);
 874        bdput(bdev);
 875}
 876
 877/**
 878 * del_gendisk - remove the gendisk
 879 * @disk: the struct gendisk to remove
 880 *
 881 * Removes the gendisk and all its associated resources. This deletes the
 882 * partitions associated with the gendisk, and unregisters the associated
 883 * request_queue.
 884 *
  885 * This is the counterpart to the respective __device_add_disk() call.
 886 *
 887 * The final removal of the struct gendisk happens when its refcount reaches 0
 888 * with put_disk(), which should be called after del_gendisk(), if
 889 * __device_add_disk() was used.
 890 *
  891 * Drivers exist which depend on the release of the gendisk being
  892 * synchronous; it should not be deferred.
 893 *
 894 * Context: can sleep
 895 */
 896void del_gendisk(struct gendisk *disk)
 897{
 898        struct disk_part_iter piter;
 899        struct hd_struct *part;
 900
 901        might_sleep();
 902
 903        blk_integrity_del(disk);
 904        disk_del_events(disk);
 905
 906        /*
 907         * Block lookups of the disk until all bdevs are unhashed and the
 908         * disk is marked as dead (GENHD_FL_UP cleared).
 909         */
 910        down_write(&disk->lookup_sem);
 911        /* invalidate stuff */
 912        disk_part_iter_init(&piter, disk,
 913                             DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
 914        while ((part = disk_part_iter_next(&piter))) {
 915                invalidate_partition(disk, part->partno);
 916                delete_partition(disk, part);
 917        }
 918        disk_part_iter_exit(&piter);
 919
 920        invalidate_partition(disk, 0);
 921        set_capacity(disk, 0);
 922        disk->flags &= ~GENHD_FL_UP;
 923        up_write(&disk->lookup_sem);
 924
 925        if (!(disk->flags & GENHD_FL_HIDDEN))
 926                sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
 927        if (disk->queue) {
 928                /*
 929                 * Unregister bdi before releasing device numbers (as they can
 930                 * get reused and we'd get clashes in sysfs).
 931                 */
 932                if (!(disk->flags & GENHD_FL_HIDDEN))
 933                        bdi_unregister(disk->queue->backing_dev_info);
 934                blk_unregister_queue(disk);
 935        } else {
 936                WARN_ON(1);
 937        }
 938
 939        if (!(disk->flags & GENHD_FL_HIDDEN))
 940                blk_unregister_region(disk_devt(disk), disk->minors);
 941        /*
  942         * Remove the gendisk pointer from the idr so that it cannot be
  943         * looked up during the RCU grace period that precedes freeing the
  944         * gendisk, preventing use-after-free issues. Note that the device
  945         * number stays "in-use" until we really free the gendisk.
 946         */
 947        blk_invalidate_devt(disk_devt(disk));
 948
 949        kobject_put(disk->part0.holder_dir);
 950        kobject_put(disk->slave_dir);
 951
 952        part_stat_set_all(&disk->part0, 0);
 953        disk->part0.stamp = 0;
 954        if (!sysfs_deprecated)
 955                sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
 956        pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
 957        device_del(disk_to_dev(disk));
 958}
 959EXPORT_SYMBOL(del_gendisk);
 960
 961/* sysfs access to bad-blocks list. */
 962static ssize_t disk_badblocks_show(struct device *dev,
 963                                        struct device_attribute *attr,
 964                                        char *page)
 965{
 966        struct gendisk *disk = dev_to_disk(dev);
 967
 968        if (!disk->bb)
 969                return sprintf(page, "\n");
 970
 971        return badblocks_show(disk->bb, page, 0);
 972}
 973
 974static ssize_t disk_badblocks_store(struct device *dev,
 975                                        struct device_attribute *attr,
 976                                        const char *page, size_t len)
 977{
 978        struct gendisk *disk = dev_to_disk(dev);
 979
 980        if (!disk->bb)
 981                return -ENXIO;
 982
 983        return badblocks_store(disk->bb, page, len, 0);
 984}
 985
 986/**
 987 * get_gendisk - get partitioning information for a given device
 988 * @devt: device to get partitioning information for
 989 * @partno: returned partition index
 990 *
 991 * This function gets the structure containing partitioning
 992 * information for the given device @devt.
 993 *
 994 * Context: can sleep
 995 */
 996struct gendisk *get_gendisk(dev_t devt, int *partno)
 997{
 998        struct gendisk *disk = NULL;
 999
1000        might_sleep();
1001
1002        if (MAJOR(devt) != BLOCK_EXT_MAJOR) {
1003                struct kobject *kobj;
1004
1005                kobj = kobj_lookup(bdev_map, devt, partno);
1006                if (kobj)
1007                        disk = dev_to_disk(kobj_to_dev(kobj));
1008        } else {
1009                struct hd_struct *part;
1010
1011                spin_lock_bh(&ext_devt_lock);
1012                part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
1013                if (part && get_disk_and_module(part_to_disk(part))) {
1014                        *partno = part->partno;
1015                        disk = part_to_disk(part);
1016                }
1017                spin_unlock_bh(&ext_devt_lock);
1018        }
1019
1020        if (!disk)
1021                return NULL;
1022
1023        /*
1024         * Synchronize with del_gendisk() to not return disk that is being
1025         * destroyed.
1026         */
1027        down_read(&disk->lookup_sem);
1028        if (unlikely((disk->flags & GENHD_FL_HIDDEN) ||
1029                     !(disk->flags & GENHD_FL_UP))) {
1030                up_read(&disk->lookup_sem);
1031                put_disk_and_module(disk);
1032                disk = NULL;
1033        } else {
1034                up_read(&disk->lookup_sem);
1035        }
1036        return disk;
1037}
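
     /*
      * Example (editor's sketch): get_gendisk() returns with a disk/module
      * reference held (see exact_lock() above), so a successful lookup must
      * be balanced with put_disk_and_module().
      *
      *	int partno;
      *	struct gendisk *disk = get_gendisk(devt, &partno);
      *
      *	if (disk) {
      *		pr_info("%u:%u maps to %s, partition %d\n",
      *			MAJOR(devt), MINOR(devt), disk->disk_name, partno);
      *		put_disk_and_module(disk);
      *	}
      */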
1038
1039/**
1040 * bdget_disk - do bdget() by gendisk and partition number
1041 * @disk: gendisk of interest
1042 * @partno: partition number
1043 *
1044 * Find partition @partno from @disk, do bdget() on it.
1045 *
1046 * CONTEXT:
1047 * Don't care.
1048 *
1049 * RETURNS:
1050 * Resulting block_device on success, NULL on failure.
1051 */
1052struct block_device *bdget_disk(struct gendisk *disk, int partno)
1053{
1054        struct hd_struct *part;
1055        struct block_device *bdev = NULL;
1056
1057        part = disk_get_part(disk, partno);
1058        if (part)
1059                bdev = bdget(part_devt(part));
1060        disk_put_part(part);
1061
1062        return bdev;
1063}
1064EXPORT_SYMBOL(bdget_disk);
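
     /*
      * Example (editor's sketch): bdget_disk() + bdput() pairing, mirroring
      * invalidate_partition() above.
      *
      *	struct block_device *bdev = bdget_disk(disk, partno);
      *
      *	if (bdev) {
      *		... operate on bdev ...
      *		bdput(bdev);
      *	}
      */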
1065
1066/*
1067 * print a full list of all partitions - intended for places where the root
 1068 * filesystem can't be mounted, to give the victim some idea of what
 1069 * went wrong
1070 */
1071void __init printk_all_partitions(void)
1072{
1073        struct class_dev_iter iter;
1074        struct device *dev;
1075
1076        class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
1077        while ((dev = class_dev_iter_next(&iter))) {
1078                struct gendisk *disk = dev_to_disk(dev);
1079                struct disk_part_iter piter;
1080                struct hd_struct *part;
1081                char name_buf[BDEVNAME_SIZE];
1082                char devt_buf[BDEVT_SIZE];
1083
1084                /*
1085                 * Don't show empty devices or things that have been
1086                 * suppressed
1087                 */
1088                if (get_capacity(disk) == 0 ||
1089                    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
1090                        continue;
1091
1092                /*
1093                 * Note, unlike /proc/partitions, I am showing the
1094                 * numbers in hex - the same format as the root=
1095                 * option takes.
1096                 */
1097                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
1098                while ((part = disk_part_iter_next(&piter))) {
1099                        bool is_part0 = part == &disk->part0;
1100
1101                        printk("%s%s %10llu %s %s", is_part0 ? "" : "  ",
1102                               bdevt_str(part_devt(part), devt_buf),
1103                               (unsigned long long)part_nr_sects_read(part) >> 1
1104                               , disk_name(disk, part->partno, name_buf),
1105                               part->info ? part->info->uuid : "");
1106                        if (is_part0) {
1107                                if (dev->parent && dev->parent->driver)
1108                                        printk(" driver: %s\n",
1109                                              dev->parent->driver->name);
1110                                else
1111                                        printk(" (driver?)\n");
1112                        } else
1113                                printk("\n");
1114                }
1115                disk_part_iter_exit(&piter);
1116        }
1117        class_dev_iter_exit(&iter);
1118}
1119
1120#ifdef CONFIG_PROC_FS
1121/* iterator */
1122static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
1123{
1124        loff_t skip = *pos;
1125        struct class_dev_iter *iter;
1126        struct device *dev;
1127
1128        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1129        if (!iter)
1130                return ERR_PTR(-ENOMEM);
1131
1132        seqf->private = iter;
1133        class_dev_iter_init(iter, &block_class, NULL, &disk_type);
1134        do {
1135                dev = class_dev_iter_next(iter);
1136                if (!dev)
1137                        return NULL;
1138        } while (skip--);
1139
1140        return dev_to_disk(dev);
1141}
1142
1143static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
1144{
1145        struct device *dev;
1146
1147        (*pos)++;
1148        dev = class_dev_iter_next(seqf->private);
1149        if (dev)
1150                return dev_to_disk(dev);
1151
1152        return NULL;
1153}
1154
1155static void disk_seqf_stop(struct seq_file *seqf, void *v)
1156{
1157        struct class_dev_iter *iter = seqf->private;
1158
1159        /* stop is called even after start failed :-( */
1160        if (iter) {
1161                class_dev_iter_exit(iter);
1162                kfree(iter);
1163                seqf->private = NULL;
1164        }
1165}
1166
1167static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
1168{
1169        void *p;
1170
1171        p = disk_seqf_start(seqf, pos);
1172        if (!IS_ERR_OR_NULL(p) && !*pos)
1173                seq_puts(seqf, "major minor  #blocks  name\n\n");
1174        return p;
1175}
1176
1177static int show_partition(struct seq_file *seqf, void *v)
1178{
1179        struct gendisk *sgp = v;
1180        struct disk_part_iter piter;
1181        struct hd_struct *part;
1182        char buf[BDEVNAME_SIZE];
1183
 1184        /* Don't show non-partitionable removable devices or empty devices */
1185        if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
1186                                   (sgp->flags & GENHD_FL_REMOVABLE)))
1187                return 0;
1188        if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
1189                return 0;
1190
1191        /* show the full disk and all non-0 size partitions of it */
1192        disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
1193        while ((part = disk_part_iter_next(&piter)))
1194                seq_printf(seqf, "%4d  %7d %10llu %s\n",
1195                           MAJOR(part_devt(part)), MINOR(part_devt(part)),
1196                           (unsigned long long)part_nr_sects_read(part) >> 1,
1197                           disk_name(sgp, part->partno, buf));
1198        disk_part_iter_exit(&piter);
1199
1200        return 0;
1201}
1202
1203static const struct seq_operations partitions_op = {
1204        .start  = show_partition_start,
1205        .next   = disk_seqf_next,
1206        .stop   = disk_seqf_stop,
1207        .show   = show_partition
1208};
1209#endif
1210
1211
1212static struct kobject *base_probe(dev_t devt, int *partno, void *data)
1213{
1214        if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
1215                /* Make old-style 2.4 aliases work */
1216                request_module("block-major-%d", MAJOR(devt));
1217        return NULL;
1218}
1219
1220static int __init genhd_device_init(void)
1221{
1222        int error;
1223
1224        block_class.dev_kobj = sysfs_dev_block_kobj;
1225        error = class_register(&block_class);
1226        if (unlikely(error))
1227                return error;
1228        bdev_map = kobj_map_init(base_probe, &block_class_lock);
1229        blk_dev_init();
1230
1231        register_blkdev(BLOCK_EXT_MAJOR, "blkext");
1232
1233        /* create top-level block dir */
1234        if (!sysfs_deprecated)
1235                block_depr = kobject_create_and_add("block", NULL);
1236        return 0;
1237}
1238
1239subsys_initcall(genhd_device_init);
1240
1241static ssize_t disk_range_show(struct device *dev,
1242                               struct device_attribute *attr, char *buf)
1243{
1244        struct gendisk *disk = dev_to_disk(dev);
1245
1246        return sprintf(buf, "%d\n", disk->minors);
1247}
1248
1249static ssize_t disk_ext_range_show(struct device *dev,
1250                                   struct device_attribute *attr, char *buf)
1251{
1252        struct gendisk *disk = dev_to_disk(dev);
1253
1254        return sprintf(buf, "%d\n", disk_max_parts(disk));
1255}
1256
1257static ssize_t disk_removable_show(struct device *dev,
1258                                   struct device_attribute *attr, char *buf)
1259{
1260        struct gendisk *disk = dev_to_disk(dev);
1261
1262        return sprintf(buf, "%d\n",
1263                       (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
1264}
1265
1266static ssize_t disk_hidden_show(struct device *dev,
1267                                   struct device_attribute *attr, char *buf)
1268{
1269        struct gendisk *disk = dev_to_disk(dev);
1270
1271        return sprintf(buf, "%d\n",
1272                       (disk->flags & GENHD_FL_HIDDEN ? 1 : 0));
1273}
1274
1275static ssize_t disk_ro_show(struct device *dev,
1276                                   struct device_attribute *attr, char *buf)
1277{
1278        struct gendisk *disk = dev_to_disk(dev);
1279
1280        return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
1281}
1282
1283ssize_t part_size_show(struct device *dev,
1284                       struct device_attribute *attr, char *buf)
1285{
1286        struct hd_struct *p = dev_to_part(dev);
1287
1288        return sprintf(buf, "%llu\n",
1289                (unsigned long long)part_nr_sects_read(p));
1290}
1291
1292ssize_t part_stat_show(struct device *dev,
1293                       struct device_attribute *attr, char *buf)
1294{
1295        struct hd_struct *p = dev_to_part(dev);
1296        struct request_queue *q = part_to_disk(p)->queue;
1297        struct disk_stats stat;
1298        unsigned int inflight;
1299
1300        part_stat_read_all(p, &stat);
1301        if (queue_is_mq(q))
1302                inflight = blk_mq_in_flight(q, p);
1303        else
1304                inflight = part_in_flight(q, p);
1305
1306        return sprintf(buf,
1307                "%8lu %8lu %8llu %8u "
1308                "%8lu %8lu %8llu %8u "
1309                "%8u %8u %8u "
1310                "%8lu %8lu %8llu %8u "
1311                "%8lu %8u"
1312                "\n",
1313                stat.ios[STAT_READ],
1314                stat.merges[STAT_READ],
1315                (unsigned long long)stat.sectors[STAT_READ],
1316                (unsigned int)div_u64(stat.nsecs[STAT_READ], NSEC_PER_MSEC),
1317                stat.ios[STAT_WRITE],
1318                stat.merges[STAT_WRITE],
1319                (unsigned long long)stat.sectors[STAT_WRITE],
1320                (unsigned int)div_u64(stat.nsecs[STAT_WRITE], NSEC_PER_MSEC),
1321                inflight,
1322                jiffies_to_msecs(stat.io_ticks),
1323                (unsigned int)div_u64(stat.nsecs[STAT_READ] +
1324                                      stat.nsecs[STAT_WRITE] +
1325                                      stat.nsecs[STAT_DISCARD] +
1326                                      stat.nsecs[STAT_FLUSH],
1327                                                NSEC_PER_MSEC),
1328                stat.ios[STAT_DISCARD],
1329                stat.merges[STAT_DISCARD],
1330                (unsigned long long)stat.sectors[STAT_DISCARD],
1331                (unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC),
1332                stat.ios[STAT_FLUSH],
1333                (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC));
1334}
1335
1336ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
1337                           char *buf)
1338{
1339        struct hd_struct *p = dev_to_part(dev);
1340        struct request_queue *q = part_to_disk(p)->queue;
1341        unsigned int inflight[2];
1342
1343        if (queue_is_mq(q))
1344                blk_mq_in_flight_rw(q, p, inflight);
1345        else
1346                part_in_flight_rw(q, p, inflight);
1347
1348        return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
1349}
1350
1351static ssize_t disk_capability_show(struct device *dev,
1352                                    struct device_attribute *attr, char *buf)
1353{
1354        struct gendisk *disk = dev_to_disk(dev);
1355
1356        return sprintf(buf, "%x\n", disk->flags);
1357}
1358
1359static ssize_t disk_alignment_offset_show(struct device *dev,
1360                                          struct device_attribute *attr,
1361                                          char *buf)
1362{
1363        struct gendisk *disk = dev_to_disk(dev);
1364
1365        return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
1366}
1367
1368static ssize_t disk_discard_alignment_show(struct device *dev,
1369                                           struct device_attribute *attr,
1370                                           char *buf)
1371{
1372        struct gendisk *disk = dev_to_disk(dev);
1373
1374        return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
1375}
1376
1377static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
1378static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
1379static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
1380static DEVICE_ATTR(hidden, 0444, disk_hidden_show, NULL);
1381static DEVICE_ATTR(ro, 0444, disk_ro_show, NULL);
1382static DEVICE_ATTR(size, 0444, part_size_show, NULL);
1383static DEVICE_ATTR(alignment_offset, 0444, disk_alignment_offset_show, NULL);
1384static DEVICE_ATTR(discard_alignment, 0444, disk_discard_alignment_show, NULL);
1385static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL);
1386static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
1387static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
1388static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);
1389
1390#ifdef CONFIG_FAIL_MAKE_REQUEST
1391ssize_t part_fail_show(struct device *dev,
1392                       struct device_attribute *attr, char *buf)
1393{
1394        struct hd_struct *p = dev_to_part(dev);
1395
1396        return sprintf(buf, "%d\n", p->make_it_fail);
1397}
1398
1399ssize_t part_fail_store(struct device *dev,
1400                        struct device_attribute *attr,
1401                        const char *buf, size_t count)
1402{
1403        struct hd_struct *p = dev_to_part(dev);
1404        int i;
1405
1406        if (count > 0 && sscanf(buf, "%d", &i) > 0)
1407                p->make_it_fail = (i == 0) ? 0 : 1;
1408
1409        return count;
1410}
1411
1412static struct device_attribute dev_attr_fail =
1413        __ATTR(make-it-fail, 0644, part_fail_show, part_fail_store);
1414#endif /* CONFIG_FAIL_MAKE_REQUEST */
1415
1416#ifdef CONFIG_FAIL_IO_TIMEOUT
1417static struct device_attribute dev_attr_fail_timeout =
1418        __ATTR(io-timeout-fail, 0644, part_timeout_show, part_timeout_store);
1419#endif
1420
1421static struct attribute *disk_attrs[] = {
1422        &dev_attr_range.attr,
1423        &dev_attr_ext_range.attr,
1424        &dev_attr_removable.attr,
1425        &dev_attr_hidden.attr,
1426        &dev_attr_ro.attr,
1427        &dev_attr_size.attr,
1428        &dev_attr_alignment_offset.attr,
1429        &dev_attr_discard_alignment.attr,
1430        &dev_attr_capability.attr,
1431        &dev_attr_stat.attr,
1432        &dev_attr_inflight.attr,
1433        &dev_attr_badblocks.attr,
1434#ifdef CONFIG_FAIL_MAKE_REQUEST
1435        &dev_attr_fail.attr,
1436#endif
1437#ifdef CONFIG_FAIL_IO_TIMEOUT
1438        &dev_attr_fail_timeout.attr,
1439#endif
1440        NULL
1441};
1442
1443static umode_t disk_visible(struct kobject *kobj, struct attribute *a, int n)
1444{
1445        struct device *dev = container_of(kobj, typeof(*dev), kobj);
1446        struct gendisk *disk = dev_to_disk(dev);
1447
1448        if (a == &dev_attr_badblocks.attr && !disk->bb)
1449                return 0;
1450        return a->mode;
1451}
1452
1453static struct attribute_group disk_attr_group = {
1454        .attrs = disk_attrs,
1455        .is_visible = disk_visible,
1456};
1457
1458static const struct attribute_group *disk_attr_groups[] = {
1459        &disk_attr_group,
1460        NULL
1461};
1462
1463/**
1464 * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way
1465 * @disk: disk to replace part_tbl for
1466 * @new_ptbl: new part_tbl to install
1467 *
1468 * Replace disk->part_tbl with @new_ptbl in RCU-safe way.  The
1469 * original ptbl is freed using RCU callback.
1470 *
1471 * LOCKING:
1472 * Matching bd_mutex locked or the caller is the only user of @disk.
1473 */
1474static void disk_replace_part_tbl(struct gendisk *disk,
1475                                  struct disk_part_tbl *new_ptbl)
1476{
1477        struct disk_part_tbl *old_ptbl =
1478                rcu_dereference_protected(disk->part_tbl, 1);
1479
1480        rcu_assign_pointer(disk->part_tbl, new_ptbl);
1481
1482        if (old_ptbl) {
1483                rcu_assign_pointer(old_ptbl->last_lookup, NULL);
1484                kfree_rcu(old_ptbl, rcu_head);
1485        }
1486}
1487
1488/**
1489 * disk_expand_part_tbl - expand disk->part_tbl
1490 * @disk: disk to expand part_tbl for
1491 * @partno: expand such that this partno can fit in
1492 *
1493 * Expand disk->part_tbl such that @partno can fit in.  disk->part_tbl
1494 * uses RCU to allow unlocked dereferencing for stats and other stuff.
1495 *
1496 * LOCKING:
1497 * Matching bd_mutex locked or the caller is the only user of @disk.
1498 * Might sleep.
1499 *
1500 * RETURNS:
1501 * 0 on success, -errno on failure.
1502 */
1503int disk_expand_part_tbl(struct gendisk *disk, int partno)
1504{
1505        struct disk_part_tbl *old_ptbl =
1506                rcu_dereference_protected(disk->part_tbl, 1);
1507        struct disk_part_tbl *new_ptbl;
1508        int len = old_ptbl ? old_ptbl->len : 0;
1509        int i, target;
1510
1511        /*
1512         * check for int overflow, since we can get here from blkpg_ioctl()
 1513         * with a user-passed 'partno'.
1514         */
1515        target = partno + 1;
1516        if (target < 0)
1517                return -EINVAL;
1518
1519        /* disk_max_parts() is zero during initialization, ignore if so */
1520        if (disk_max_parts(disk) && target > disk_max_parts(disk))
1521                return -EINVAL;
1522
1523        if (target <= len)
1524                return 0;
1525
1526        new_ptbl = kzalloc_node(struct_size(new_ptbl, part, target), GFP_KERNEL,
1527                                disk->node_id);
1528        if (!new_ptbl)
1529                return -ENOMEM;
1530
1531        new_ptbl->len = target;
1532
1533        for (i = 0; i < len; i++)
1534                rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
1535
1536        disk_replace_part_tbl(disk, new_ptbl);
1537        return 0;
1538}
1539
1540/**
1541 * disk_release - releases all allocated resources of the gendisk
1542 * @dev: the device representing this disk
1543 *
1544 * This function releases all allocated resources of the gendisk.
1545 *
1546 * The struct gendisk refcount is incremented with get_gendisk() or
1547 * get_disk_and_module(), and its refcount is decremented with
1548 * put_disk_and_module() or put_disk(). Once the refcount reaches 0 this
1549 * function is called.
1550 *
1551 * Drivers which used __device_add_disk() have a gendisk with a request_queue
1552 * assigned. Since the request_queue sits on top of the gendisk for these
1553 * drivers we also call blk_put_queue() for them, and we expect the
1554 * request_queue refcount to reach 0 at this point, and so the request_queue
1555 * will also be freed prior to the disk.
1556 *
1557 * Context: can sleep
1558 */
1559static void disk_release(struct device *dev)
1560{
1561        struct gendisk *disk = dev_to_disk(dev);
1562
1563        might_sleep();
1564
1565        blk_free_devt(dev->devt);
1566        disk_release_events(disk);
1567        kfree(disk->random);
1568        disk_replace_part_tbl(disk, NULL);
1569        hd_free_part(&disk->part0);
1570        if (disk->queue)
1571                blk_put_queue(disk->queue);
1572        kfree(disk);
1573}
1574struct class block_class = {
1575        .name           = "block",
1576};
1577
1578static char *block_devnode(struct device *dev, umode_t *mode,
1579                           kuid_t *uid, kgid_t *gid)
1580{
1581        struct gendisk *disk = dev_to_disk(dev);
1582
1583        if (disk->fops->devnode)
1584                return disk->fops->devnode(disk, mode);
1585        return NULL;
1586}
1587
1588const struct device_type disk_type = {
1589        .name           = "disk",
1590        .groups         = disk_attr_groups,
1591        .release        = disk_release,
1592        .devnode        = block_devnode,
1593};
1594
1595#ifdef CONFIG_PROC_FS
1596/*
1597 * aggregate disk stat collector.  Uses the same stats that the sysfs
1598 * entries do, above, but makes them available through one seq_file.
1599 *
1600 * The output looks suspiciously like /proc/partitions with a bunch of
1601 * extra fields.
1602 */
1603static int diskstats_show(struct seq_file *seqf, void *v)
1604{
1605        struct gendisk *gp = v;
1606        struct disk_part_iter piter;
1607        struct hd_struct *hd;
1608        char buf[BDEVNAME_SIZE];
1609        unsigned int inflight;
1610        struct disk_stats stat;
1611
1612        /*
1613        if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
1614                seq_puts(seqf,  "major minor name"
1615                                "     rio rmerge rsect ruse wio wmerge "
1616                                "wsect wuse running use aveq"
1617                                "\n\n");
1618        */
1619
1620        disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
1621        while ((hd = disk_part_iter_next(&piter))) {
1622                part_stat_read_all(hd, &stat);
1623                if (queue_is_mq(gp->queue))
1624                        inflight = blk_mq_in_flight(gp->queue, hd);
1625                else
1626                        inflight = part_in_flight(gp->queue, hd);
1627
1628                seq_printf(seqf, "%4d %7d %s "
1629                           "%lu %lu %lu %u "
1630                           "%lu %lu %lu %u "
1631                           "%u %u %u "
1632                           "%lu %lu %lu %u "
1633                           "%lu %u"
1634                           "\n",
1635                           MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
1636                           disk_name(gp, hd->partno, buf),
1637                           stat.ios[STAT_READ],
1638                           stat.merges[STAT_READ],
1639                           stat.sectors[STAT_READ],
1640                           (unsigned int)div_u64(stat.nsecs[STAT_READ],
1641                                                        NSEC_PER_MSEC),
1642                           stat.ios[STAT_WRITE],
1643                           stat.merges[STAT_WRITE],
1644                           stat.sectors[STAT_WRITE],
1645                           (unsigned int)div_u64(stat.nsecs[STAT_WRITE],
1646                                                        NSEC_PER_MSEC),
1647                           inflight,
1648                           jiffies_to_msecs(stat.io_ticks),
1649                           (unsigned int)div_u64(stat.nsecs[STAT_READ] +
1650                                                 stat.nsecs[STAT_WRITE] +
1651                                                 stat.nsecs[STAT_DISCARD] +
1652                                                 stat.nsecs[STAT_FLUSH],
1653                                                        NSEC_PER_MSEC),
1654                           stat.ios[STAT_DISCARD],
1655                           stat.merges[STAT_DISCARD],
1656                           stat.sectors[STAT_DISCARD],
1657                           (unsigned int)div_u64(stat.nsecs[STAT_DISCARD],
1658                                                 NSEC_PER_MSEC),
1659                           stat.ios[STAT_FLUSH],
1660                           (unsigned int)div_u64(stat.nsecs[STAT_FLUSH],
1661                                                 NSEC_PER_MSEC)
1662                        );
1663        }
1664        disk_part_iter_exit(&piter);
1665
1666        return 0;
1667}
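/*
 * Example /proc/diskstats line produced above (all values hypothetical):
 *
 *	8 0 sda 12735 2102 901474 11163 4416 4182 152854 9559 0 14700 23040 0 0 0 0 317 276
 *
 * i.e. major, minor, name, reads (ios merges sectors ms), writes (ios merges
 * sectors ms), I/Os in flight, io_ticks in ms, total wait time in ms,
 * discards (ios merges sectors ms), flushes (ios ms).
 */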
1668
1669static const struct seq_operations diskstats_op = {
1670        .start  = disk_seqf_start,
1671        .next   = disk_seqf_next,
1672        .stop   = disk_seqf_stop,
1673        .show   = diskstats_show
1674};
1675
1676static int __init proc_genhd_init(void)
1677{
1678        proc_create_seq("diskstats", 0, NULL, &diskstats_op);
1679        proc_create_seq("partitions", 0, NULL, &partitions_op);
1680        return 0;
1681}
1682module_init(proc_genhd_init);
1683#endif /* CONFIG_PROC_FS */
1684
1685dev_t blk_lookup_devt(const char *name, int partno)
1686{
1687        dev_t devt = MKDEV(0, 0);
1688        struct class_dev_iter iter;
1689        struct device *dev;
1690
1691        class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
1692        while ((dev = class_dev_iter_next(&iter))) {
1693                struct gendisk *disk = dev_to_disk(dev);
1694                struct hd_struct *part;
1695
1696                if (strcmp(dev_name(dev), name))
1697                        continue;
1698
1699                if (partno < disk->minors) {
1700                        /* We need to return the right devno, even
1701                         * if the partition doesn't exist yet.
1702                         */
1703                        devt = MKDEV(MAJOR(dev->devt),
1704                                     MINOR(dev->devt) + partno);
1705                        break;
1706                }
1707                part = disk_get_part(disk, partno);
1708                if (part) {
1709                        devt = part_devt(part);
1710                        disk_put_part(part);
1711                        break;
1712                }
1713                disk_put_part(part);
1714        }
1715        class_dev_iter_exit(&iter);
1716        return devt;
1717}
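/*
 * Example (illustrative sketch): a typical user is the early-boot root
 * device lookup (name_to_dev_t()).  A call like the following resolves a
 * name to a dev_t whether or not the partition has been scanned yet; the
 * device name and helper below are hypothetical, and whole-disk sd devices
 * conventionally use major 8.
 *
 *	dev_t devt = blk_lookup_devt("sda", 1);
 *
 *	if (devt != MKDEV(0, 0))
 *		mount_root_on(devt);	// hypothetical; devt refers to sda1
 */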
1718
1719struct gendisk *__alloc_disk_node(int minors, int node_id)
1720{
1721        struct gendisk *disk;
1722        struct disk_part_tbl *ptbl;
1723
1724        if (minors > DISK_MAX_PARTS) {
1725                printk(KERN_ERR
1726                        "block: can't allocate more than %d partitions\n",
1727                        DISK_MAX_PARTS);
1728                minors = DISK_MAX_PARTS;
1729        }
1730
1731        disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
1732        if (disk) {
1733                disk->part0.dkstats = alloc_percpu(struct disk_stats);
1734                if (!disk->part0.dkstats) {
1735                        kfree(disk);
1736                        return NULL;
1737                }
1738                init_rwsem(&disk->lookup_sem);
1739                disk->node_id = node_id;
1740                if (disk_expand_part_tbl(disk, 0)) {
1741                        free_percpu(disk->part0.dkstats);
1742                        kfree(disk);
1743                        return NULL;
1744                }
1745                ptbl = rcu_dereference_protected(disk->part_tbl, 1);
1746                rcu_assign_pointer(ptbl->part[0], &disk->part0);
1747
1748                /*
1749                 * set_capacity() and get_capacity() currently don't use
1750                 * a seqcounter to read/update part0->nr_sects. Still init
1751                 * the counter as the sectors can be read in the I/O
1752                 * submission path using sequence counters.
1753                 *
1754                 * TODO: Ideally set_capacity() and get_capacity() should be
1755                 * converted to make use of bd_mutex and sequence counters.
1756                 */
1757                hd_sects_seq_init(&disk->part0);
1758                if (hd_ref_init(&disk->part0)) {
1759                        hd_free_part(&disk->part0);
1760                        kfree(disk);
1761                        return NULL;
1762                }
1763
1764                disk->minors = minors;
1765                rand_initialize_disk(disk);
1766                disk_to_dev(disk)->class = &block_class;
1767                disk_to_dev(disk)->type = &disk_type;
1768                device_initialize(disk_to_dev(disk));
1769        }
1770        return disk;
1771}
1772EXPORT_SYMBOL(__alloc_disk_node);
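/*
 * Example (illustrative sketch, hypothetical "foo" driver): the usual way a
 * driver reaches the allocator above is through the alloc_disk() /
 * alloc_disk_node() wrappers from <linux/genhd.h>:
 *
 *	struct gendisk *disk = alloc_disk(16);	// whole disk + 15 partitions
 *
 *	if (!disk)
 *		return -ENOMEM;
 *	disk->major = foo_major;		// from register_blkdev()
 *	disk->first_minor = index * 16;
 *	disk->fops = &foo_fops;
 *	disk->private_data = foo;
 *	disk->queue = foo->queue;
 *	snprintf(disk->disk_name, sizeof(disk->disk_name), "foo%d", index);
 *	set_capacity(disk, nr_sectors);
 *	add_disk(disk);
 */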
1773
1774/**
1775 * get_disk_and_module - increments the gendisk and gendisk fops module refcount
1776 * @disk: the struct gendisk to increment the refcount for
1777 *
1778 * This increments the refcount for the struct gendisk, and the gendisk's
1779 * fops module owner.
1780 *
1781 * Context: Any context.
1782 */
1783struct kobject *get_disk_and_module(struct gendisk *disk)
1784{
1785        struct module *owner;
1786        struct kobject *kobj;
1787
1788        if (!disk->fops)
1789                return NULL;
1790        owner = disk->fops->owner;
1791        if (owner && !try_module_get(owner))
1792                return NULL;
1793        kobj = kobject_get_unless_zero(&disk_to_dev(disk)->kobj);
1794        if (kobj == NULL) {
1795                module_put(owner);
1796                return NULL;
1797        }
1798        return kobj;
1800}
1801EXPORT_SYMBOL(get_disk_and_module);
1802
1803/**
1804 * put_disk - decrements the gendisk refcount
1805 * @disk: the struct gendisk to decrement the refcount for
1806 *
1807 * This decrements the refcount for the struct gendisk. When this reaches 0
1808 * we'll have disk_release() called.
1809 *
1810 * Context: Any context, but the last reference must not be dropped from
1811 *          atomic context.
1812 */
1813void put_disk(struct gendisk *disk)
1814{
1815        if (disk)
1816                kobject_put(&disk_to_dev(disk)->kobj);
1817}
1818EXPORT_SYMBOL(put_disk);
1819
1820/**
1821 * put_disk_and_module - decrements the module and gendisk refcount
1822 * @disk: the struct gendisk to decrement the refcount for
1823 *
1824 * This is a counterpart of get_disk_and_module() and thus also of
1825 * get_gendisk().
1826 *
1827 * Context: Any context, but the last reference must not be dropped from
1828 *          atomic context.
1829 */
1830void put_disk_and_module(struct gendisk *disk)
1831{
1832        if (disk) {
1833                struct module *owner = disk->fops->owner;
1834
1835                put_disk(disk);
1836                module_put(owner);
1837        }
1838}
1839EXPORT_SYMBOL(put_disk_and_module);
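/*
 * Example (illustrative sketch): the helpers above are used as a pair.  A
 * reference taken with get_disk_and_module() (directly, or indirectly via
 * get_gendisk()) is dropped with put_disk_and_module(), which also releases
 * the fops module reference; a bare put_disk() is only appropriate when no
 * module reference was taken, e.g. when unwinding an alloc_disk().
 *
 *	if (!get_disk_and_module(disk))
 *		return -ENXIO;
 *	// ... the disk and disk->fops->owner are pinned here ...
 *	put_disk_and_module(disk);
 */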
1840
1841static void set_disk_ro_uevent(struct gendisk *gd, int ro)
1842{
1843        char event[] = "DISK_RO=1";
1844        char *envp[] = { event, NULL };
1845
1846        if (!ro)
1847                event[8] = '0';
1848        kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
1849}
1850
1851void set_device_ro(struct block_device *bdev, int flag)
1852{
1853        bdev->bd_part->policy = flag;
1854}
1855
1856EXPORT_SYMBOL(set_device_ro);
1857
1858void set_disk_ro(struct gendisk *disk, int flag)
1859{
1860        struct disk_part_iter piter;
1861        struct hd_struct *part;
1862
1863        if (disk->part0.policy != flag) {
1864                set_disk_ro_uevent(disk, flag);
1865                disk->part0.policy = flag;
1866        }
1867
1868        disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
1869        while ((part = disk_part_iter_next(&piter)))
1870                part->policy = flag;
1871        disk_part_iter_exit(&piter);
1872}
1873
1874EXPORT_SYMBOL(set_disk_ro);
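/*
 * Example (illustrative sketch, hypothetical "foo" driver): a driver that
 * discovers write-protected media propagates it to the whole disk and all
 * partitions, which also emits the DISK_RO uevent built above:
 *
 *	set_disk_ro(foo->disk, foo_media_write_protected(foo) ? 1 : 0);
 */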
1875
1876int bdev_read_only(struct block_device *bdev)
1877{
1878        if (!bdev)
1879                return 0;
1880        return bdev->bd_part->policy;
1881}
1882
1883EXPORT_SYMBOL(bdev_read_only);
1884
1885/*
1886 * Disk events - monitor disk events like media change and eject request.
1887 */
1888struct disk_events {
1889        struct list_head        node;           /* all disk_event's */
1890        struct gendisk          *disk;          /* the associated disk */
1891        spinlock_t              lock;
1892
1893        struct mutex            block_mutex;    /* protects blocking */
1894        int                     block;          /* event blocking depth */
1895        unsigned int            pending;        /* events already sent out */
1896        unsigned int            clearing;       /* events being cleared */
1897
1898        long                    poll_msecs;     /* interval, -1 for default */
1899        struct delayed_work     dwork;
1900};
1901
1902static const char *disk_events_strs[] = {
1903        [ilog2(DISK_EVENT_MEDIA_CHANGE)]        = "media_change",
1904        [ilog2(DISK_EVENT_EJECT_REQUEST)]       = "eject_request",
1905};
1906
1907static char *disk_uevents[] = {
1908        [ilog2(DISK_EVENT_MEDIA_CHANGE)]        = "DISK_MEDIA_CHANGE=1",
1909        [ilog2(DISK_EVENT_EJECT_REQUEST)]       = "DISK_EJECT_REQUEST=1",
1910};
1911
1912/* list of all disk_events */
1913static DEFINE_MUTEX(disk_events_mutex);
1914static LIST_HEAD(disk_events);
1915
1916/* disable in-kernel polling by default */
1917static unsigned long disk_events_dfl_poll_msecs;
1918
1919static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
1920{
1921        struct disk_events *ev = disk->ev;
1922        long intv_msecs = 0;
1923
1924        /*
1925         * If device-specific poll interval is set, always use it.  If
1926         * the default is being used, poll if the POLL flag is set.
1927         */
1928        if (ev->poll_msecs >= 0)
1929                intv_msecs = ev->poll_msecs;
1930        else if (disk->event_flags & DISK_EVENT_FLAG_POLL)
1931                intv_msecs = disk_events_dfl_poll_msecs;
1932
1933        return msecs_to_jiffies(intv_msecs);
1934}
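/*
 * For example (numbers hypothetical): with ev->poll_msecs == -1 and
 * DISK_EVENT_FLAG_POLL set, a default of block.events_dfl_poll_msecs=2000
 * yields a 2 second poll interval; ev->poll_msecs == 0 disables polling for
 * this disk even if the flag is set, and any positive ev->poll_msecs always
 * takes precedence over the default.
 */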
1935
1936/**
1937 * disk_block_events - block and flush disk event checking
1938 * @disk: disk to block events for
1939 *
1940 * On return from this function, it is guaranteed that event checking
1941 * isn't in progress and won't happen until unblocked by
1942 * disk_unblock_events().  Event blocking is counted and the actual
1943 * unblocking happens only after a matching number of unblocks have been done.
1944 *
1945 * Note that this intentionally does not block event checking from
1946 * disk_clear_events().
1947 *
1948 * CONTEXT:
1949 * Might sleep.
1950 */
1951void disk_block_events(struct gendisk *disk)
1952{
1953        struct disk_events *ev = disk->ev;
1954        unsigned long flags;
1955        bool cancel;
1956
1957        if (!ev)
1958                return;
1959
1960        /*
1961         * Outer mutex ensures that the first blocker completes canceling
1962         * the event work before further blockers are allowed to finish.
1963         */
1964        mutex_lock(&ev->block_mutex);
1965
1966        spin_lock_irqsave(&ev->lock, flags);
1967        cancel = !ev->block++;
1968        spin_unlock_irqrestore(&ev->lock, flags);
1969
1970        if (cancel)
1971                cancel_delayed_work_sync(&disk->ev->dwork);
1972
1973        mutex_unlock(&ev->block_mutex);
1974}
1975
1976static void __disk_unblock_events(struct gendisk *disk, bool check_now)
1977{
1978        struct disk_events *ev = disk->ev;
1979        unsigned long intv;
1980        unsigned long flags;
1981
1982        spin_lock_irqsave(&ev->lock, flags);
1983
1984        if (WARN_ON_ONCE(ev->block <= 0))
1985                goto out_unlock;
1986
1987        if (--ev->block)
1988                goto out_unlock;
1989
1990        intv = disk_events_poll_jiffies(disk);
1991        if (check_now)
1992                queue_delayed_work(system_freezable_power_efficient_wq,
1993                                &ev->dwork, 0);
1994        else if (intv)
1995                queue_delayed_work(system_freezable_power_efficient_wq,
1996                                &ev->dwork, intv);
1997out_unlock:
1998        spin_unlock_irqrestore(&ev->lock, flags);
1999}
2000
2001/**
2002 * disk_unblock_events - unblock disk event checking
2003 * @disk: disk to unblock events for
2004 *
2005 * Undo disk_block_events().  When the block count reaches zero, it
2006 * starts events polling if configured.
2007 *
2008 * CONTEXT:
2009 * Don't care.  Safe to call from irq context.
2010 */
2011void disk_unblock_events(struct gendisk *disk)
2012{
2013        if (disk->ev)
2014                __disk_unblock_events(disk, false);
2015}
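/*
 * Example (illustrative sketch): callers bracket work that must not race
 * with the event work item using the pair above; the block count nests, so
 * only the outermost unblock re-arms polling.
 *
 *	disk_block_events(disk);
 *	// ... reconfigure or revalidate the device ...
 *	disk_unblock_events(disk);
 */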
2016
2017/**
2018 * disk_flush_events - schedule immediate event checking and flushing
2019 * @disk: disk to check and flush events for
2020 * @mask: events to flush
2021 *
2022 * Schedule immediate event checking on @disk if not blocked.  Events in
2023 * @mask are scheduled to be cleared from the driver.  Note that this
2024 * doesn't clear the events from @disk->ev.
2025 *
2026 * CONTEXT:
2027 * If @mask is non-zero must be called with bdev->bd_mutex held.
2028 */
2029void disk_flush_events(struct gendisk *disk, unsigned int mask)
2030{
2031        struct disk_events *ev = disk->ev;
2032
2033        if (!ev)
2034                return;
2035
2036        spin_lock_irq(&ev->lock);
2037        ev->clearing |= mask;
2038        if (!ev->block)
2039                mod_delayed_work(system_freezable_power_efficient_wq,
2040                                &ev->dwork, 0);
2041        spin_unlock_irq(&ev->lock);
2042}
2043
2044/**
2045 * disk_clear_events - synchronously check, clear and return pending events
2046 * @disk: disk to fetch and clear events from
2047 * @mask: mask of events to be fetched and cleared
2048 *
2049 * Disk events are synchronously checked and pending events in @mask
2050 * are cleared and returned.  This ignores the block count.
2051 *
2052 * CONTEXT:
2053 * Might sleep.
2054 */
2055unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
2056{
2057        struct disk_events *ev = disk->ev;
2058        unsigned int pending;
2059        unsigned int clearing = mask;
2060
2061        if (!ev)
2062                return 0;
2063
2064        disk_block_events(disk);
2065
2066        /*
2067         * store the union of mask and ev->clearing on the stack so that the
2068         * race with disk_flush_events does not cause ambiguity (ev->clearing
2069         * can still be modified even if events are blocked).
2070         */
2071        spin_lock_irq(&ev->lock);
2072        clearing |= ev->clearing;
2073        ev->clearing = 0;
2074        spin_unlock_irq(&ev->lock);
2075
2076        disk_check_events(ev, &clearing);
2077        /*
2078         * if ev->clearing is not 0, the disk_flush_events got called in the
2079         * middle of this function, so we want to run the workfn without delay.
2080         */
2081        __disk_unblock_events(disk, ev->clearing ? true : false);
2082
2083        /* then, fetch and clear pending events */
2084        spin_lock_irq(&ev->lock);
2085        pending = ev->pending & mask;
2086        ev->pending &= ~mask;
2087        spin_unlock_irq(&ev->lock);
2088        WARN_ON_ONCE(clearing & mask);
2089
2090        return pending;
2091}
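/*
 * Example (illustrative sketch): the typical consumer is a media change
 * check on open, along the lines of check_disk_change():
 *
 *	if (disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE) &
 *	    DISK_EVENT_MEDIA_CHANGE) {
 *		// media changed: invalidate cached state, rescan partitions
 *		foo_revalidate(disk);		// hypothetical helper
 *	}
 */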
2092
2093/*
2094 * The event check is split out of the work function so that
2095 * disk_clear_events() can call it with its own clearing_ptr.
2096 */
2097static void disk_events_workfn(struct work_struct *work)
2098{
2099        struct delayed_work *dwork = to_delayed_work(work);
2100        struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
2101
2102        disk_check_events(ev, &ev->clearing);
2103}
2104
2105static void disk_check_events(struct disk_events *ev,
2106                              unsigned int *clearing_ptr)
2107{
2108        struct gendisk *disk = ev->disk;
2109        char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
2110        unsigned int clearing = *clearing_ptr;
2111        unsigned int events;
2112        unsigned long intv;
2113        int nr_events = 0, i;
2114
2115        /* check events */
2116        events = disk->fops->check_events(disk, clearing);
2117
2118        /* accumulate pending events and schedule next poll if necessary */
2119        spin_lock_irq(&ev->lock);
2120
2121        events &= ~ev->pending;
2122        ev->pending |= events;
2123        *clearing_ptr &= ~clearing;
2124
2125        intv = disk_events_poll_jiffies(disk);
2126        if (!ev->block && intv)
2127                queue_delayed_work(system_freezable_power_efficient_wq,
2128                                &ev->dwork, intv);
2129
2130        spin_unlock_irq(&ev->lock);
2131
2132        /*
2133         * Tell userland about new events.  Only the events listed in
2134         * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
2135         * is set. Otherwise, events are processed internally but never
2136         * get reported to userland.
2137         */
2138        for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
2139                if ((events & disk->events & (1 << i)) &&
2140                    (disk->event_flags & DISK_EVENT_FLAG_UEVENT))
2141                        envp[nr_events++] = disk_uevents[i];
2142
2143        if (nr_events)
2144                kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
2145}
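/*
 * Example (illustrative sketch, hypothetical "foo" driver): the
 * ->check_events() hook polled above returns the set of events that occurred
 * since the last call and may use @clearing to reset latched hardware state.
 *
 *	static unsigned int foo_check_events(struct gendisk *disk,
 *					     unsigned int clearing)
 *	{
 *		struct foo_device *foo = disk->private_data;
 *
 *		return foo_media_changed(foo) ? DISK_EVENT_MEDIA_CHANGE : 0;
 *	}
 */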
2146
2147/*
2148 * A disk events enabled device has the following sysfs nodes under
2149 * its /sys/block/X/ directory.
2150 *
2151 * events               : list of all supported events
2152 * events_async         : list of events which can be detected w/o polling
2153 *                        (always empty, only for backwards compatibility)
2154 * events_poll_msecs    : polling interval, 0: disable, -1: system default
2155 */
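/*
 * For example, an optical drive that reports both event types might show
 * something like the following (contents hypothetical):
 *
 *	/sys/block/sr0/events            -> "media_change eject_request"
 *	/sys/block/sr0/events_async      -> ""
 *	/sys/block/sr0/events_poll_msecs -> "-1"
 */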
2156static ssize_t __disk_events_show(unsigned int events, char *buf)
2157{
2158        const char *delim = "";
2159        ssize_t pos = 0;
2160        int i;
2161
2162        for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
2163                if (events & (1 << i)) {
2164                        pos += sprintf(buf + pos, "%s%s",
2165                                       delim, disk_events_strs[i]);
2166                        delim = " ";
2167                }
2168        if (pos)
2169                pos += sprintf(buf + pos, "\n");
2170        return pos;
2171}
2172
2173static ssize_t disk_events_show(struct device *dev,
2174                                struct device_attribute *attr, char *buf)
2175{
2176        struct gendisk *disk = dev_to_disk(dev);
2177
2178        if (!(disk->event_flags & DISK_EVENT_FLAG_UEVENT))
2179                return 0;
2180
2181        return __disk_events_show(disk->events, buf);
2182}
2183
2184static ssize_t disk_events_async_show(struct device *dev,
2185                                      struct device_attribute *attr, char *buf)
2186{
2187        return 0;
2188}
2189
2190static ssize_t disk_events_poll_msecs_show(struct device *dev,
2191                                           struct device_attribute *attr,
2192                                           char *buf)
2193{
2194        struct gendisk *disk = dev_to_disk(dev);
2195
2196        if (!disk->ev)
2197                return sprintf(buf, "-1\n");
2198
2199        return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
2200}
2201
2202static ssize_t disk_events_poll_msecs_store(struct device *dev,
2203                                            struct device_attribute *attr,
2204                                            const char *buf, size_t count)
2205{
2206        struct gendisk *disk = dev_to_disk(dev);
2207        long intv;
2208
2209        if (!count || !sscanf(buf, "%ld", &intv))
2210                return -EINVAL;
2211
2212        if (intv < 0 && intv != -1)
2213                return -EINVAL;
2214
2215        if (!disk->ev)
2216                return -ENODEV;
2217
2218        disk_block_events(disk);
2219        disk->ev->poll_msecs = intv;
2220        __disk_unblock_events(disk, true);
2221
2222        return count;
2223}
2224
2225static const DEVICE_ATTR(events, 0444, disk_events_show, NULL);
2226static const DEVICE_ATTR(events_async, 0444, disk_events_async_show, NULL);
2227static const DEVICE_ATTR(events_poll_msecs, 0644,
2228                         disk_events_poll_msecs_show,
2229                         disk_events_poll_msecs_store);
2230
2231static const struct attribute *disk_events_attrs[] = {
2232        &dev_attr_events.attr,
2233        &dev_attr_events_async.attr,
2234        &dev_attr_events_poll_msecs.attr,
2235        NULL,
2236};
2237
2238/*
2239 * The default polling interval can be specified by the kernel
2240 * parameter block.events_dfl_poll_msecs which defaults to 0
2241 * (disable).  This can also be modified at runtime by writing to
2242 * /sys/module/block/parameters/events_dfl_poll_msecs.
2243 */
2244static int disk_events_set_dfl_poll_msecs(const char *val,
2245                                          const struct kernel_param *kp)
2246{
2247        struct disk_events *ev;
2248        int ret;
2249
2250        ret = param_set_ulong(val, kp);
2251        if (ret < 0)
2252                return ret;
2253
2254        mutex_lock(&disk_events_mutex);
2255
2256        list_for_each_entry(ev, &disk_events, node)
2257                disk_flush_events(ev->disk, 0);
2258
2259        mutex_unlock(&disk_events_mutex);
2260
2261        return 0;
2262}
2263
2264static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
2265        .set    = disk_events_set_dfl_poll_msecs,
2266        .get    = param_get_ulong,
2267};
2268
2269#undef MODULE_PARAM_PREFIX
2270#define MODULE_PARAM_PREFIX     "block."
2271
2272module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
2273                &disk_events_dfl_poll_msecs, 0644);
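/*
 * For example (values hypothetical), a 2 second default can be requested at
 * boot with block.events_dfl_poll_msecs=2000 on the kernel command line, or
 * at runtime with:
 *
 *	echo 2000 > /sys/module/block/parameters/events_dfl_poll_msecs
 *
 * The setter above then flushes every registered disk so the new interval
 * takes effect without waiting for the old one to expire.
 */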
2274
2275/*
2276 * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
2277 */
2278static void disk_alloc_events(struct gendisk *disk)
2279{
2280        struct disk_events *ev;
2281
2282        if (!disk->fops->check_events || !disk->events)
2283                return;
2284
2285        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
2286        if (!ev) {
2287                pr_warn("%s: failed to initialize events\n", disk->disk_name);
2288                return;
2289        }
2290
2291        INIT_LIST_HEAD(&ev->node);
2292        ev->disk = disk;
2293        spin_lock_init(&ev->lock);
2294        mutex_init(&ev->block_mutex);
2295        ev->block = 1;
2296        ev->poll_msecs = -1;
2297        INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
2298
2299        disk->ev = ev;
2300}
2301
2302static void disk_add_events(struct gendisk *disk)
2303{
2304        /* FIXME: error handling */
2305        if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
2306                pr_warn("%s: failed to create sysfs files for events\n",
2307                        disk->disk_name);
2308
2309        if (!disk->ev)
2310                return;
2311
2312        mutex_lock(&disk_events_mutex);
2313        list_add_tail(&disk->ev->node, &disk_events);
2314        mutex_unlock(&disk_events_mutex);
2315
2316        /*
2317         * Block count is initialized to 1 and the following initial
2318         * unblock kicks it into action.
2319         */
2320        __disk_unblock_events(disk, true);
2321}
2322
2323static void disk_del_events(struct gendisk *disk)
2324{
2325        if (disk->ev) {
2326                disk_block_events(disk);
2327
2328                mutex_lock(&disk_events_mutex);
2329                list_del_init(&disk->ev->node);
2330                mutex_unlock(&disk_events_mutex);
2331        }
2332
2333        sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
2334}
2335
2336static void disk_release_events(struct gendisk *disk)
2337{
2338        /* the block count should be 1 from disk_del_events() */
2339        WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
2340        kfree(disk->ev);
2341}
2342