/*
 * linux/block/blk-sysfs.c
 *
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

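/*
 * Each sysfs attribute under /sys/block/<disk>/queue is described by a
 * queue_sysfs_entry: the attribute itself plus optional show() and
 * store() handlers that operate on the owning request_queue (see
 * queue_attr_show()/queue_attr_store() near the end of this file).
 */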
struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}

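/*
 * Illustrative use from userspace (the device name is hypothetical):
 *
 *   $ cat /sys/block/sda/queue/nr_requests
 *   128
 *   $ echo 256 > /sys/block/sda/queue/nr_requests
 *
 * Writes below BLKDEV_MIN_RQ are silently raised to that minimum, and
 * the update is routed to the legacy or blk-mq path as appropriate.
 */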
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!q->request_fn && !q->mq_ops)
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        if (q->request_fn)
                err = blk_update_nr_requests(q, nr);
        else
                err = blk_mq_update_nr_requests(q, nr);

        if (err)
                return err;

        return ret;
}

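/*
 * read_ahead_kb is stored internally as a page count.  As a worked
 * example, with the common PAGE_SHIFT of 12 (4 KiB pages), a stored
 * ra_pages of 32 reads back as 32 << (12 - 10) = 128 KiB, and writing
 * 128 stores 128 >> 2 = 32 pages.
 */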
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb = q->backing_dev_info->ra_pages <<
                                        (PAGE_SHIFT - 10);

        return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        if (ret < 0)
                return ret;

        q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

        return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        if (blk_queue_cluster(q))
                return queue_var_show(queue_max_segment_size(q), page);

        return queue_var_show(PAGE_SIZE, page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

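/*
 * discard_max_bytes is writable, but the value is validated before it
 * is applied: it must be a multiple of the discard granularity, it is
 * converted from bytes to 512-byte sectors, and it is clamped to the
 * hardware limit.  E.g. with a 4096-byte granularity, writing 1048576
 * (1 MiB) stores 2048 sectors, while writing 1000000 fails with EINVAL.
 */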
static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

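/*
 * max_sectors_kb accepts values between the page size and the hardware
 * limit reported by max_hw_sectors_kb.  As a hypothetical example, on a
 * 4 KiB-page system whose device advertises max_hw_sectors_kb of 1280,
 * writes of 4..1280 are accepted and anything outside that range fails
 * with EINVAL.
 */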
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (ret < 0)
                return ret;

        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
                                         q->limits.max_dev_sectors >> 1);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}

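/*
 * QUEUE_SYSFS_BIT_FNS generates a show/store pair for a single queue
 * flag.  For instance, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) below
 * expands to queue_show_nonrot() and queue_store_nonrot(); because the
 * neg argument is 1, the resulting "rotational" attribute reports the
 * inverse of QUEUE_FLAG_NONROT, so writing 0 marks a device (such as
 * an SSD) non-rotational.
 */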
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_show_##name(struct request_queue *q, char *page)                  \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        spin_lock_irq(q->queue_lock);                                   \
        if (val)                                                        \
                queue_flag_set(QUEUE_FLAG_##flag, q);                   \
        else                                                            \
                queue_flag_clear(QUEUE_FLAG_##flag, q);                 \
        spin_unlock_irq(q->queue_lock);                                 \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
                return sprintf(page, "host-aware\n");
        case BLK_ZONED_HM:
                return sprintf(page, "host-managed\n");
        default:
                return sprintf(page, "none\n");
        }
}

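/*
 * nomerges encodes two flags in one value: 0 enables all merging, 1
 * (NOXMERGES) disables only the more expensive extended merge lookups,
 * and 2 (NOMERGES) disables merging entirely.  The show side rebuilds
 * the value from the two flag bits.
 */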
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

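/*
 * rq_affinity: 0 completes requests wherever the interrupt lands, 1
 * (SAME_COMP) steers completions to the submitter's CPU group, and 2
 * (SAME_FORCE) forces completion on the exact submitting CPU.  The
 * show side folds the two flags back into 0, 1 or 2 via set << force.
 */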
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (val == 2) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
        spin_unlock_irq(q->queue_lock);
#endif
        return ret;
}

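/*
 * io_poll_delay is exposed in microseconds but kept internally in
 * nanoseconds, so writing 50 stores poll_nsec = 50000.  A value of -1
 * is stored as-is; per the queue sysfs documentation this selects
 * classic polling instead of a hybrid sleep.
 */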
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        int val;

        if (q->poll_nsec == -1)
                val = -1;
        else
                val = q->poll_nsec / 1000;

        return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                size_t count)
{
        int err, val;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        err = kstrtoint(page, 10, &val);
        if (err < 0)
                return err;

        if (val == -1)
                q->poll_nsec = -1;
        else
                q->poll_nsec = val * 1000;

        return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        unsigned long poll_on;
        ssize_t ret;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        ret = queue_var_store(&poll_on, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (poll_on)
                queue_flag_set(QUEUE_FLAG_POLL, q);
        else
                queue_flag_clear(QUEUE_FLAG_POLL, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

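/*
 * wbt_lat_usec is the writeback throttling latency target in
 * microseconds (stored in nanoseconds).  Writing -1 reverts to the
 * device-type default from wbt_default_latency_nsec(); any manual
 * write also flips the throttler from its default state to
 * WBT_STATE_ON_MANUAL so later heuristics leave it alone.
 */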
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!q->rq_wb)
                return -EINVAL;

        return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_wb *rwb;
        ssize_t ret;
        s64 val;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        rwb = q->rq_wb;
        if (!rwb) {
                ret = wbt_init(q);
                if (ret)
                        return ret;

                rwb = q->rq_wb;
                if (!rwb)
                        return -EINVAL;
        }

        if (val == -1)
                rwb->min_lat_nsec = wbt_default_latency_nsec(q);
        else if (val >= 0)
                rwb->min_lat_nsec = val * 1000ULL;

        if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
                rwb->enable_state = WBT_STATE_ON_MANUAL;

        wbt_update_limits(rwb);
        return count;
}

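/*
 * write_cache reads back as "write back" or "write through" depending
 * on QUEUE_FLAG_WC, and accepts those same strings on write ("none" is
 * treated as "write through").  E.g.:
 *
 *   $ echo "write through" > /sys/block/sda/queue/write_cache
 *
 * where the device name is a hypothetical example.
 */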
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");

        return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        int set = -1;

        if (!strncmp(page, "write back", 10))
                set = 1;
        else if (!strncmp(page, "write through", 13) ||
                 !strncmp(page, "none", 4))
                set = 0;

        if (set == -1)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        if (set)
                queue_flag_set(QUEUE_FLAG_WC, q);
        else
                queue_flag_clear(QUEUE_FLAG_WC, q);
        spin_unlock_irq(q->queue_lock);

        return count;
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_dax(q), page);
}

static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
        .attr = {.name = "max_segments", .mode = S_IRUGO },
        .show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
        .attr = {.name = "max_discard_segments", .mode = S_IRUGO },
        .show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
        .attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
        .show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
        .attr = {.name = "max_segment_size", .mode = S_IRUGO },
        .show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
        .attr = {.name = "logical_block_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .attr = {.name = "physical_block_size", .mode = S_IRUGO },
        .show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
        .attr = {.name = "chunk_sectors", .mode = S_IRUGO },
        .show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
        .show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
        .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
        .show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
        .attr = {.name = "discard_granularity", .mode = S_IRUGO },
        .show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
        .attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
        .show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
        .attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
        .show = queue_discard_max_show,
        .store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
        .show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
        .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
        .show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
        .attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
        .show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_nonrot,
        .store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
        .attr = {.name = "zoned", .mode = S_IRUGO },
        .show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nomerges_show,
        .store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
        .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
        .show = queue_rq_affinity_show,
        .store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
        .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_iostats,
        .store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
        .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_random,
        .store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
        .attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
        .show = queue_poll_show,
        .store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
        .attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
        .show = queue_poll_delay_show,
        .store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
        .attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
        .show = queue_wc_show,
        .store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
        .attr = {.name = "dax", .mode = S_IRUGO },
        .show = queue_dax_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
        .attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
        .show = queue_wb_lat_show,
        .store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
        .attr = {.name = "throttle_sample_time", .mode = S_IRUGO | S_IWUSR },
        .show = blk_throtl_sample_time_show,
        .store = blk_throtl_sample_time_store,
};
#endif

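/*
 * Every attribute listed here appears as a file directly under
 * /sys/block/<disk>/queue/ once blk_register_queue() adds the "queue"
 * kobject below the disk's device.
 */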
static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_discard_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_dax_entry.attr,
        &queue_wb_lat_entry.attr,
        &queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        &throtl_sample_time_entry.attr,
#endif
        NULL,
};

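/*
 * queue_attr_show() and queue_attr_store() are the generic sysfs entry
 * points: they recover the queue_sysfs_entry and request_queue from the
 * attribute and kobject via container_of(), then dispatch to the
 * per-attribute handlers under q->sysfs_lock, bailing out with -ENOENT
 * if the queue is already dying.
 */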
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q = container_of(rcu_head, struct request_queue,
                                               rcu_head);
        kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * __blk_release_queue - release a request queue when it is no longer needed
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     blk_release_queue is the counterpart of blk_init_queue(). It should be
 *     called when a request queue is being released; typically when a block
 *     device is being de-registered. Its primary task is to free the queue
 *     itself.
 *
 * Notes:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 *
 *     Although blk_release_queue() may be called with preemption disabled,
 *     __blk_release_queue() may sleep.
 */
static void __blk_release_queue(struct work_struct *work)
{
        struct request_queue *q = container_of(work, typeof(*q), release_work);

        if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
                blk_stat_remove_callback(q, q->poll_cb);
        blk_stat_free_callback(q->poll_cb);
        bdi_put(q->backing_dev_info);
        blkcg_exit_queue(q);

        if (q->elevator) {
                ioc_clear_queue(q);
                elevator_exit(q, q->elevator);
        }

        blk_free_queue_stats(q->stats);

        blk_exit_rl(q, &q->root_rl);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        if (!q->mq_ops) {
                if (q->exit_rq_fn)
                        q->exit_rq_fn(q, q->fq->flush_rq);
                blk_free_flush_queue(q->fq);
        } else {
                blk_mq_release(q);
        }

        blk_trace_shutdown(q);

        if (q->mq_ops)
                blk_mq_debugfs_unregister(q);

        if (q->bio_split)
                bioset_free(q->bio_split);

        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        INIT_WORK(&q->release_work, __blk_release_queue);
        schedule_work(&q->release_work);
}

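/*
 * The glue: queue_sysfs_ops routes sysfs reads and writes through
 * queue_attr_show()/queue_attr_store() above, and blk_queue_ktype's
 * release() defers final teardown to a workqueue, since the last
 * kobject_put() may happen in atomic context while
 * __blk_release_queue() can sleep.
 */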
static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = blk_release_queue,
};

int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
                  "%s is registering an already registered queue\n",
                  kobject_name(&dev->kobj));
        queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices.  Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved.  To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
                blk_queue_bypass_end(q);
        }

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        /* Prevent changes through sysfs until registration is completed. */
        mutex_lock(&q->sysfs_lock);

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0) {
                blk_trace_remove_sysfs(dev);
                goto unlock;
        }

        if (q->mq_ops) {
                __blk_mq_register_dev(dev, q);
                blk_mq_debugfs_register(q);
        }

        kobject_uevent(&q->kobj, KOBJ_ADD);

        wbt_enable_default(q);

        blk_throtl_register_queue(q);

        if (q->request_fn || (q->mq_ops && q->elevator)) {
                ret = elv_register_queue(q);
                if (ret) {
                        kobject_uevent(&q->kobj, KOBJ_REMOVE);
                        kobject_del(&q->kobj);
                        blk_trace_remove_sysfs(dev);
                        kobject_put(&dev->kobj);
                        goto unlock;
                }
        }
        ret = 0;
unlock:
        mutex_unlock(&q->sysfs_lock);
        return ret;
}

void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        queue_flag_clear_unlocked(QUEUE_FLAG_REGISTERED, q);

        wbt_exit(q);

        if (q->mq_ops)
                blk_mq_unregister_dev(disk_to_dev(disk), q);

        if (q->request_fn || (q->mq_ops && q->elevator))
                elv_unregister_queue(q);

        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));
        kobject_put(&disk_to_dev(disk)->kobj);
}
 950