linux/block/blk-settings.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/memblock.h>     /* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_discard_segments = 1;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->virt_boundary_mask = 0;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        lim->max_dev_sectors = 0;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
        lim->max_write_zeroes_sectors = 0;
        lim->max_zone_append_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->max_hw_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        blk_set_default_limits(lim);

        /* Inherit limits from component devices */
        lim->max_segments = USHRT_MAX;
        lim->max_discard_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
        lim->max_zone_append_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
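
/*
 * Usage sketch (illustrative only, not part of the kernel sources): a
 * hypothetical bio-based stacking driver would start from the permissive
 * stacking defaults before folding in the limits of its component devices
 * with blk_stack_limits() or disk_stack_limits():
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 */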

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
        unsigned long b_pfn = max_addr >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
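
/*
 * Example (hypothetical driver, for illustration only): a controller that
 * can only DMA to the first 4GB of memory would ask for bounce buffers for
 * any page above that boundary:
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 */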

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests. It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        struct queue_limits *limits = &q->limits;
        unsigned int max_sectors;

        if ((max_hw_sectors << 9) < PAGE_SIZE) {
                max_hw_sectors = 1 << (PAGE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        limits->max_hw_sectors = max_hw_sectors;
        max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
        max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
        limits->max_sectors = max_sectors;
        q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
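
/*
 * Example (hypothetical values, for illustration only): a controller that
 * can transfer at most 1MB per request would register a hard limit of 2048
 * 512-byte sectors; the block layer then derives the default soft
 * max_sectors limit from it:
 *
 *	blk_queue_max_hw_sectors(q, 2048);
 */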

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the chunk size
 *    must currently be a power-of-2 in sectors. Also note that the block
 *    layer must accept a page worth of data at any offset. So if the
 *    crossing of chunks is a hard limitation in the driver, it must still be
 *    prepared to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
        BUG_ON(!is_power_of_2(chunk_sectors));
        q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
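
/*
 * Example (illustrative only): a device that cannot merge I/O across 128MB
 * boundaries would set the chunk size to 262144 sectors (a power of two):
 *
 *	blk_queue_chunk_sectors(q, 262144);
 */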

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_hw_discard_sectors = max_discard_sectors;
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
                                      unsigned int max_write_same_sectors)
{
        q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
                unsigned int max_write_zeroes_sectors)
{
        q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
                unsigned int max_zone_append_sectors)
{
        unsigned int max_sectors;

        if (WARN_ON(!blk_queue_is_zoned(q)))
                return;

        max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
        max_sectors = min(q->limits.chunk_sectors, max_sectors);

        /*
         * Warn about likely driver bugs that leave the
         * max_zone_append_sectors limit at 0: a zero argument, an unset
         * chunk_sectors limit (zone size), or an unset max_hw_sectors limit.
         */
        WARN_ON(!max_sectors);

        q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
                unsigned short max_segments)
{
        q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_SIZE) {
                max_size = PAGE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        /* see blk_queue_virt_boundary() for the explanation */
        WARN_ON_ONCE(q->limits.virt_boundary_mask);

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
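
/*
 * Example (illustrative only): a 512-emulation drive addresses 512-byte
 * logical blocks but performs I/O internally on 4096-byte physical blocks;
 * a driver would typically report both:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 */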

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
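
/*
 * Example (hypothetical RAID driver, for illustration only): a RAID5 set
 * with a 64KB chunk and three data disks per stripe would report the chunk
 * as the minimum and the full stripe as the optimal I/O size:
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 3 * 64 * 1024);
 */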

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:  the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                                        b->max_write_zeroes_sectors);
        t->max_zone_append_sectors = min(t->max_zone_append_sectors,
                                        b->max_zone_append_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                            b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                                               b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        t->raid_partial_stripes_expensive =
                max(t->raid_partial_stripes_expensive,
                    b->raid_partial_stripes_expensive);

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }

        if (b->chunk_sectors)
                t->chunk_sectors = min_not_zero(t->chunk_sectors,
                                                b->chunk_sectors);

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
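
/*
 * Usage sketch (illustrative only; "c", "components" and "data_start" are
 * hypothetical driver state): a stacking driver starts from the stacking
 * defaults and folds in every component device, passing the sector where
 * that component's data begins:
 *
 *	blk_set_stacking_limits(&t->limits);
 *	list_for_each_entry(c, &components, list)
 *		if (blk_stack_limits(&t->limits, &c->queue->limits,
 *				     c->data_start) < 0)
 *			pr_warn("component device is misaligned\n");
 */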

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:  the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                      sector_t start)
{
        struct request_queue *bq = bdev_get_queue(bdev);

        start += get_start_sect(bdev);

        return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }

        t->backing_dev_info->io_pages =
                t->limits.max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_SIZE - 1) {
                mask = PAGE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
        q->limits.virt_boundary_mask = mask;

        /*
         * Devices that require a virtual boundary do not support scatter/gather
         * I/O natively, but instead require a descriptor list entry for each
         * page (which might not be identical to the Linux PAGE_SIZE).  Because
         * of that they are not limited by our notion of "segment size".
         */
        if (mask)
                q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
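
/*
 * Example (illustrative only): a controller whose DMA descriptors can only
 * address naturally aligned 4KB chunks (NVMe-style PRPs) would set a
 * 4095-byte mask, which also lifts the segment size limit as described
 * above:
 *
 *	blk_queue_virt_boundary(q, 4096 - 1);
 */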

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
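
/*
 * Example (illustrative only): the strictest mask wins, so a controller
 * needing 4-byte alignment followed by a transport needing 512-byte
 * alignment leaves the queue with a 511 mask:
 *
 *	blk_queue_dma_alignment(q, 3);
 *	blk_queue_update_dma_alignment(q, 511);
 */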

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
        q->queue_depth = depth;
        rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
        if (wc)
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);
        if (fua)
                blk_queue_flag_set(QUEUE_FLAG_FUA, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

        wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
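
/*
 * Example (hypothetical driver, for illustration only): a device that
 * advertises a volatile write cache and FUA support would be registered
 * with both flags; a write-through device would pass false for both:
 *
 *	blk_queue_write_cache(q, true, true);
 */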

/**
 * blk_queue_required_elevator_features - set required elevator features for a queue
 * @q:		the request queue for the target device
 * @features:	Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, only
 * elevators that implement at least the set of features specified by
 * @features can be used.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
                                          unsigned int features)
{
        q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:		the request queue for the device
 * @dev:	the device pointer for dma
 *
 * Tell the block layer that segments of @q can be merged according to the
 * DMA merge boundary of @dev.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
                                       struct device *dev)
{
        unsigned long boundary = dma_get_merge_boundary(dev);

        if (!boundary)
                return false;

        /* No need to update max_segment_size. see blk_queue_virt_boundary() */
        blk_queue_virt_boundary(q, boundary);

        return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);