linux/block/blk-settings.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_discard_segments = 1;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->virt_boundary_mask = 0;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        lim->max_dev_sectors = 0;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
        lim->max_write_zeroes_sectors = 0;
        lim->max_zone_append_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->max_hw_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce = BLK_BOUNCE_NONE;
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->zoned = BLK_ZONED_NONE;
        lim->zone_write_granularity = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        blk_set_default_limits(lim);

        /* Inherit limits from component devices */
        lim->max_segments = USHRT_MAX;
        lim->max_discard_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
        lim->max_zone_append_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
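
/*
 * Usage sketch (illustrative, not part of the upstream file): a stacking
 * driver starts from these permissive defaults and then narrows them per
 * component device via blk_stack_limits()/disk_stack_limits(), see below:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	// ...then fold in the limits of every component device
 */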

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @bounce: bounce limit to enforce
 *
 * Description:
 *    Force bouncing for ISA DMA ranges or highmem.
 *
 *    DEPRECATED, don't use in new code.
 **/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
        q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests. It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        struct queue_limits *limits = &q->limits;
        unsigned int max_sectors;

        if ((max_hw_sectors << 9) < PAGE_SIZE) {
                max_hw_sectors = 1 << (PAGE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        max_hw_sectors = round_down(max_hw_sectors,
                                    limits->logical_block_size >> SECTOR_SHIFT);
        limits->max_hw_sectors = max_hw_sectors;

        max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
        max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
        max_sectors = round_down(max_sectors,
                                 limits->logical_block_size >> SECTOR_SHIFT);
        limits->max_sectors = max_sectors;

        if (!q->disk)
                return;
        q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
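
/*
 * Usage sketch (values are assumptions, not from this file): a driver
 * whose controller can transfer at most 1 MiB per request would call
 * this from its probe path once the queue exists:
 *
 *	blk_queue_max_hw_sectors(q, SZ_1M >> SECTOR_SHIFT);	// 2048 sectors
 *
 * The soft max_sectors limit is then derived automatically: capped at
 * BLK_DEF_MAX_SECTORS and rounded down to the logical block size.
 */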

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
        q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
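
/*
 * Usage sketch (hypothetical striped device with 256 KiB chunks that
 * requests must not cross):
 *
 *	blk_queue_chunk_sectors(q, SZ_256K >> SECTOR_SHIFT);	// 512 sectors
 *
 * Zoned block drivers set this to the zone size so that no request spans
 * a zone boundary; blk_queue_max_zone_append_sectors() below relies on
 * chunk_sectors holding the zone size.
 */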

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_hw_discard_sectors = max_discard_sectors;
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
                                      unsigned int max_write_same_sectors)
{
        q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
                unsigned int max_write_zeroes_sectors)
{
        q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
                unsigned int max_zone_append_sectors)
{
        unsigned int max_sectors;

        if (WARN_ON(!blk_queue_is_zoned(q)))
                return;

        max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
        max_sectors = min(q->limits.chunk_sectors, max_sectors);

        /*
         * Warn about possible driver bugs that would leave the
         * max_zone_append_sectors limit at 0: a 0 argument, the
         * chunk_sectors limit (zone size) not set, or the max_hw_sectors
         * limit not set.
         */
        WARN_ON(!max_sectors);

        q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
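
/*
 * Usage sketch (hypothetical zoned driver; zone_sectors is an assumed
 * variable). The queue must already be marked zoned and the zone size
 * must be set in chunk_sectors, or the warnings above fire:
 *
 *	blk_queue_chunk_sectors(q, zone_sectors);
 *	blk_queue_max_zone_append_sectors(q, zone_sectors);
 */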

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
                unsigned short max_segments)
{
        q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_SIZE) {
                max_size = PAGE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        /* see blk_queue_virt_boundary() for the explanation */
        WARN_ON_ONCE(q->limits.virt_boundary_mask);

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
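
/*
 * Usage sketch (hypothetical controller with a 64-entry scatter/gather
 * table whose entries address at most 64 KiB each; both values are
 * assumptions):
 *
 *	blk_queue_max_segments(q, 64);
 *	blk_queue_max_segment_size(q, SZ_64K);
 */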

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
        struct queue_limits *limits = &q->limits;

        limits->logical_block_size = size;

        if (limits->physical_block_size < size)
                limits->physical_block_size = size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;

        limits->max_hw_sectors =
                round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
        limits->max_sectors =
                round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
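
/*
 * Usage sketch (hypothetical 4Kn drive that reports 4096-byte sectors):
 *
 *	blk_queue_logical_block_size(q, 4096);
 *
 * Note the ripple effects above: physical_block_size and io_min are
 * raised to at least 4096, and max_sectors/max_hw_sectors are rounded
 * down to a multiple of eight 512-byte sectors.
 */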

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q:  the request queue for the zoned device
 * @size:  the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size allowing writes in the
 *   sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
                                      unsigned int size)
{
        if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
                return;

        q->limits.zone_write_granularity = size;

        if (q->limits.zone_write_granularity < q->limits.logical_block_size)
                q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void disk_update_readahead(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        /*
         * For read-ahead of large files to be effective, we need to read ahead
         * at least twice the optimal I/O size.
         */
        disk->bdi->ra_pages =
                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
        disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 447 *
 448 * Description:
 449 *   Storage devices may report an optimal I/O size, which is the
 450 *   device's preferred unit for sustained I/O.  This is rarely reported
 451 *   for disk drives.  For RAID arrays it is usually the stripe width or
 452 *   the internal track size.  A properly aligned multiple of
 453 *   optimal_io_size is the preferred request size for workloads where
 454 *   sustained throughput is desired.
 455 */
 456void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
 457{
 458        limits->io_opt = opt;
 459}
 460EXPORT_SYMBOL(blk_limits_io_opt);
 461
 462/**
 463 * blk_queue_io_opt - set optimal request size for the queue
 464 * @q:  the request queue for the device
 465 * @opt:  optimal request size in bytes
 466 *
 467 * Description:
 468 *   Storage devices may report an optimal I/O size, which is the
 469 *   device's preferred unit for sustained I/O.  This is rarely reported
 470 *   for disk drives.  For RAID arrays it is usually the stripe width or
 471 *   the internal track size.  A properly aligned multiple of
 472 *   optimal_io_size is the preferred request size for workloads where
 473 *   sustained throughput is desired.
 474 */
 475void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 476{
 477        blk_limits_io_opt(&q->limits, opt);
 478        if (!q->disk)
 479                return;
 480        q->disk->bdi->ra_pages =
 481                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
 482}
 483EXPORT_SYMBOL(blk_queue_io_opt);
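
/*
 * Usage sketch (hypothetical RAID5 set, 64 KiB chunk across four data
 * disks; values are assumptions): io_min is the chunk size and io_opt
 * the full stripe width, so properly aligned full-stripe writes avoid
 * read-modify-write cycles:
 *
 *	blk_queue_io_min(q, SZ_64K);		// stripe chunk
 *	blk_queue_io_opt(q, 4 * SZ_64K);	// full stripe width
 */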

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
        sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
        if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
                sectors = PAGE_SIZE >> SECTOR_SHIFT;
        return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                                        b->max_write_zeroes_sectors);
        t->max_zone_append_sectors = min(t->max_zone_append_sectors,
                                        b->max_zone_append_sectors);
        t->bounce = max(t->bounce, b->bounce);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                            b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                                               b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

        /* Set non-power-of-2 compatible chunk_sectors boundary */
        if (b->chunk_sectors)
                t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        /* chunk_sectors a multiple of the physical block size? */
        if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
                t->chunk_sectors = 0;
                t->misaligned = 1;
                ret = -1;
        }

        t->raid_partial_stripes_expensive =
                max(t->raid_partial_stripes_expensive,
                    b->raid_partial_stripes_expensive);

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
        t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
        t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }

        t->zone_write_granularity = max(t->zone_write_granularity,
                                        b->zone_write_granularity);
        t->zoned = max(t->zoned, b->zoned);
        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
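
/*
 * Worked example for the alignment check above (numbers are assumptions):
 * the top device has physical_block_size 4096 and alignment_offset 0; the
 * bottom device also has 4096-byte physical blocks but its data starts at
 * sector 7, so queue_limit_alignment_offset() returns 512.  Then
 * top = 4096 + 0 and bottom = 4096 + 512 = 4608; 4608 % 4096 != 0, so the
 * intervals do not line up, the top limits are flagged misaligned, and
 * -1 is returned.
 */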

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
                        get_start_sect(bdev) + (offset >> 9)) < 0)
                pr_notice("%s: Warning: Device %pg is misaligned\n",
                        disk->disk_name, bdev);

        disk_update_readahead(disk);
}
EXPORT_SYMBOL(disk_stack_limits);
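
/*
 * Usage sketch (schematic DM-style stacking; the component list and its
 * fields here are hypothetical, not a real API):
 *
 *	blk_set_stacking_limits(&disk->queue->limits);
 *	list_for_each_entry(dd, &table->devices, list)
 *		disk_stack_limits(disk, dd->bdev, dd->start_sect);
 */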

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_SIZE - 1) {
                mask = PAGE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
        q->limits.virt_boundary_mask = mask;

        /*
         * Devices that require a virtual boundary do not support scatter/gather
         * I/O natively, but instead require a descriptor list entry for each
         * page (which might not be identical to the Linux PAGE_SIZE).  Because
         * of that they are not limited by our notion of "segment size".
         */
        if (mask)
                q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
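
/*
 * Usage sketch: controllers whose per-page descriptors must not cross a
 * fixed boundary (NVMe PRP lists are the classic case) set a page-sized
 * mask; the value below is illustrative:
 *
 *	blk_queue_virt_boundary(q, PAGE_SIZE - 1);
 *
 * Bios with gaps misaligned to this mask are then split rather than
 * merged, so each descriptor entry maps a whole page.
 */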

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
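
/*
 * Usage sketch (hypothetical device that can only DMA to and from
 * 512-byte aligned buffers in 512-byte multiples): as with the boundary
 * masks above, the mask is the alignment minus one:
 *
 *	blk_queue_dma_alignment(q, 512 - 1);
 */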

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:      the request queue for the device
 * @depth:  queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
        q->queue_depth = depth;
        rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:          the request queue for the device
 * @wc:         write back cache on or off
 * @fua:        device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
        if (wc)
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);
        if (fua)
                blk_queue_flag_set(QUEUE_FLAG_FUA, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

        wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
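
/*
 * Usage sketch (hypothetical driver that has discovered, e.g. from an
 * identify command, that the device has a volatile write back cache and
 * honors FUA writes):
 *
 *	blk_queue_write_cache(q, true, true);
 *
 * The block layer then knows it must issue flushes for durability and
 * that REQ_FUA writes can be passed straight to the device.
 */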

/**
 * blk_queue_required_elevator_features - set required elevator features for a queue
 * @q:          the request queue for the target device
 * @features:   Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
                                          unsigned int features)
{
        q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:          the request queue for the device
 * @dev:        the device pointer for dma
 *
 * Tell the block layer that the segments of @q may be merged according to
 * the DMA map merge boundary of @dev. Returns false if the device does not
 * define a merge boundary, true if the boundary was applied.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
                                       struct device *dev)
{
        unsigned long boundary = dma_get_merge_boundary(dev);

        if (!boundary)
                return false;

        /* No need to update max_segment_size. see blk_queue_virt_boundary() */
        blk_queue_virt_boundary(q, boundary);

        return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

static bool disk_has_partitions(struct gendisk *disk)
{
        unsigned long idx;
        struct block_device *part;
        bool ret = false;

        rcu_read_lock();
        xa_for_each(&disk->part_tbl, idx, part) {
                if (bdev_is_partition(part)) {
                        ret = true;
                        break;
                }
        }
        rcu_read_unlock();

        return ret;
}

/**
 * blk_queue_set_zoned - configure a disk queue zoned model.
 * @disk:       the gendisk of the queue to configure
 * @model:      the zoned model to set
 *
 * Set the zoned model of the request queue of @disk according to @model.
 * When @model is BLK_ZONED_HM (host managed), this should be called only
 * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
 * If @model specifies BLK_ZONED_HA (host aware), the effective model used
 * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
 * on the disk.
 */
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
        struct request_queue *q = disk->queue;

        switch (model) {
        case BLK_ZONED_HM:
                /*
                 * Host managed devices are supported only if
                 * CONFIG_BLK_DEV_ZONED is enabled.
                 */
                WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
                break;
        case BLK_ZONED_HA:
                /*
                 * Host aware devices can be treated either as regular block
                 * devices (similar to drive managed devices) or as zoned block
                 * devices to take advantage of the zone command set, similarly
                 * to host managed devices. We try the latter if there are no
                 * partitions and zoned block device support is enabled, else
                 * we do nothing special as far as the block layer is concerned.
                 */
                if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
                    disk_has_partitions(disk))
                        model = BLK_ZONED_NONE;
                break;
        case BLK_ZONED_NONE:
        default:
                if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
                        model = BLK_ZONED_NONE;
                break;
        }

        q->limits.zoned = model;
        if (model != BLK_ZONED_NONE) {
                /*
                 * Set the zone write granularity to the device logical block
                 * size by default. The driver can change this value if needed.
                 */
                blk_queue_zone_write_granularity(q,
                                                queue_logical_block_size(q));
        } else {
                blk_queue_clear_zone_settings(q);
        }
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
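
/*
 * Usage sketch (hypothetical host-managed SMR driver revalidation; the
 * zone_sectors value would come from the device):
 *
 *	blk_queue_set_zoned(disk, BLK_ZONED_HM);
 *	blk_queue_chunk_sectors(disk->queue, zone_sectors);
 *
 * Passing BLK_ZONED_HA for a partitioned disk instead degrades to
 * BLK_ZONED_NONE above and clears any zone settings.
 */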