linux/block/blk-settings.c
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:          queue
 * @pfn:        prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used to build a
 * cdb from the request data, for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q:          queue
 * @ufn:        unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
        q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);
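
/*
 * Illustrative sketch (not part of this file): a hypothetical request-based
 * driver wiring up prepare/unprepare callbacks at init time.  The "mydrv_*"
 * names are made up; the callback prototypes and the BLKPREP_* return
 * values come from <linux/blkdev.h>.
 */
static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
{
        /* e.g. build a command descriptor here and stash it in the request */
        return BLKPREP_OK;
}

static void mydrv_unprep_rq(struct request_queue *q, struct request *rq)
{
        /* free whatever mydrv_prep_rq() allocated for this request */
}

static void mydrv_setup_queue(struct request_queue *q)
{
        blk_queue_prep_rq(q, mydrv_prep_rq);
        blk_queue_unprep_rq(q, mydrv_unprep_rq);
}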

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
        q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
        q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
        q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->virt_boundary_mask = 0;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        lim->max_dev_sectors = 0;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->max_hw_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->discard_zeroes_data = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        blk_set_default_limits(lim);

        /* Inherit limits from component devices */
        lim->discard_zeroes_data = 1;
        lim->max_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;

        q->make_request_fn = mfn;
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        blk_set_default_limits(&q->limits);

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
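
/*
 * Illustrative sketch (not part of this file): a hypothetical bio-based
 * driver that bypasses the request queue entirely.  The "mybrd_*" names are
 * made up; the make_request_fn prototype (returning blk_qc_t) matches the
 * kernel generation this file belongs to.
 */
static blk_qc_t mybrd_make_request(struct request_queue *q, struct bio *bio)
{
        /* service the bio directly, e.g. copy to/from an in-memory store */
        bio_endio(bio);
        return BLK_QC_T_NONE;
}

static struct request_queue *mybrd_alloc_queue(void)
{
        struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

        if (!q)
                return NULL;

        blk_queue_make_request(q, mybrd_make_request);
        return q;
}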

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
        unsigned long b_pfn = max_addr >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
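
/*
 * Illustrative sketch (not part of this file): a hypothetical driver picks
 * a bounce limit based on what its DMA engine can reach.  BLK_BOUNCE_ISA
 * and BLK_BOUNCE_HIGH come from <linux/blkdev.h>; the driver name and the
 * flag are made up.
 */
static void mydrv_set_dma_limit(struct request_queue *q, bool isa_dma_only)
{
        if (isa_dma_only)
                blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);  /* bounce above 16MB */
        else
                blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); /* bounce highmem only */
}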

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests. It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit cannot exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        struct queue_limits *limits = &q->limits;
        unsigned int max_sectors;

        if ((max_hw_sectors << 9) < PAGE_SIZE) {
                max_hw_sectors = 1 << (PAGE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        limits->max_hw_sectors = max_hw_sectors;
        max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
        max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
        limits->max_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
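
/*
 * Illustrative sketch (not part of this file): a hypothetical HBA whose
 * scatter/gather engine tops out at 64KB per request advertises that limit
 * in 512-byte sectors.
 */
static void mydrv_set_transfer_limit(struct request_queue *q)
{
        blk_queue_max_hw_sectors(q, 128);       /* 128 * 512 bytes = 64KB */
}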

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the chunk size
 *    must currently be a power-of-2 in sectors. Also note that the block
 *    layer must accept a page worth of data at any offset. So if the
 *    crossing of chunks is a hard limitation in the driver, it must still be
 *    prepared to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
        BUG_ON(!is_power_of_2(chunk_sectors));
        q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
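
/*
 * Illustrative sketch (not part of this file): a hypothetical device laid
 * out in 128KB stripes that cannot service an I/O spanning two stripes
 * declares a 256-sector chunk (128KB / 512 bytes; a power of two, as the
 * BUG_ON above requires).
 */
static void mydrv_set_stripe_boundary(struct request_queue *q)
{
        blk_queue_chunk_sectors(q, 256);        /* 256 * 512 bytes = 128KB */
}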

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_hw_discard_sectors = max_discard_sectors;
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
                                      unsigned int max_write_same_sectors)
{
        q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
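
/*
 * Illustrative sketch (not part of this file): a hypothetical SSD driver
 * advertising how large a single discard or WRITE SAME command may be.
 * The numbers are made up; real drivers derive them from the device's
 * identify/capability data.
 */
static void mydrv_set_discard_limits(struct request_queue *q)
{
        /* up to 2GB per discard: 4194304 sectors of 512 bytes */
        blk_queue_max_discard_sectors(q, 2U << 21);
        q->limits.discard_granularity = 4096;

        /* no more than 32MB per WRITE SAME command */
        blk_queue_max_write_same_sectors(q, (32 << 20) >> 9);
}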

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_SIZE) {
                max_size = PAGE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
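
/*
 * Illustrative sketch (not part of this file): a hypothetical controller
 * with 128 scatter/gather table entries, each able to address at most
 * 64KB, publishes both limits so blk_rq_map_sg() never builds a list the
 * hardware cannot take.
 */
static void mydrv_set_sg_limits(struct request_queue *q)
{
        blk_queue_max_segments(q, 128);                 /* S/G table entries */
        blk_queue_max_segment_size(q, 64 * 1024);       /* bytes per entry */
}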

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
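
/*
 * Illustrative sketch (not part of this file): a hypothetical 512e disk
 * (4KB physical sectors addressed in 512-byte logical blocks) whose first
 * logical block sits 3584 bytes into a physical sector, as on drives that
 * compensate for the legacy 63-sector partition offset.
 */
static void mydrv_set_block_sizes(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);
        blk_queue_physical_block_size(q, 4096);
        blk_queue_alignment_offset(q, 3584);    /* 7 * 512 bytes */
}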

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
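
/*
 * Illustrative sketch (not part of this file): a hypothetical 4-disk RAID5
 * driver with a 64KB chunk size.  The minimum I/O hint is the chunk size;
 * the optimal I/O hint is the full stripe width (3 data disks * 64KB).
 */
static void myraid_set_io_hints(struct request_queue *q)
{
        blk_queue_io_min(q, 64 * 1024);         /* stripe chunk */
        blk_queue_io_opt(q, 3 * 64 * 1024);     /* full stripe write */
}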

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:  the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                            b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

        t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        t->raid_partial_stripes_expensive =
                max(t->raid_partial_stripes_expensive,
                    b->raid_partial_stripes_expensive);

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:  the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                      sector_t start)
{
        struct request_queue *bq = bdev_get_queue(bdev);

        start += get_start_sect(bdev);

        return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);
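
/*
 * Illustrative sketch (not part of this file): how a hypothetical DM/MD
 * style stacking driver would build the combined limits for its top-level
 * queue.  The "mystack_*" names and the component list are made up.
 */
static void mystack_compute_limits(struct request_queue *q,
                                   struct block_device **parts, int nr,
                                   sector_t *data_start)
{
        struct queue_limits lim;
        int i;

        /* start from "no restrictions" so components only tighten limits */
        blk_set_stacking_limits(&lim);

        for (i = 0; i < nr; i++)
                if (bdev_stack_limits(&lim, parts[i], data_start[i]) < 0)
                        pr_warn("mystack: component %d is misaligned\n", i);

        q->limits = lim;
}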

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:        physically contiguous buffer
 * @size:       size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size)
{
        if (queue_max_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
        blk_queue_max_segments(q, queue_max_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
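
/*
 * Illustrative sketch (not part of this file): roughly what an ATAPI-style
 * driver does at probe time.  It reserves a small drain area and registers
 * a callback that asks for draining whenever the transfer length is not a
 * whole multiple of 512 bytes.  Names and the drain policy are made up;
 * only the blk_queue_dma_drain() call itself comes from this file.
 */
static char mydrv_drain_buf[1024];

static int mydrv_drain_needed(struct request *rq)
{
        return blk_rq_bytes(rq) & 511;
}

static int mydrv_setup_drain(struct request_queue *q)
{
        return blk_queue_dma_drain(q, mydrv_drain_needed,
                                   mydrv_drain_buf, sizeof(mydrv_drain_buf));
}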

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_SIZE - 1) {
                mask = PAGE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
        q->limits.virt_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
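
/*
 * Illustrative sketch (not part of this file): a hypothetical controller
 * whose DMA engine cannot cross a 64KB physical boundary within one
 * segment, and whose PRP-style data pointers (as in NVMe) require every
 * segment after the first to start on a page boundary.
 */
static void mydrv_set_boundaries(struct request_queue *q)
{
        blk_queue_segment_boundary(q, 0xffffUL);        /* 64KB - 1 */
        blk_queue_virt_boundary(q, PAGE_SIZE - 1);
}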

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
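
/*
 * Illustrative sketch (not part of this file): a SCSI-like stack where the
 * host driver and the transport each raise the DMA alignment they need;
 * blk_queue_update_dma_alignment() keeps whichever mask is stricter.
 */
static void mydrv_raise_dma_alignment(struct request_queue *q)
{
        /* host controller needs 4-byte aligned buffers and lengths */
        blk_queue_update_dma_alignment(q, 3);

        /* the transport later demands 512-byte alignment; this one wins */
        blk_queue_update_dma_alignment(q, 511);
}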

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q:          the request queue for the device
 * @flush:      0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell the block layer about the cache flush capability of @q.  If it
 * supports flushing, REQ_FLUSH should be set.  If it supports bypassing
 * the write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
        WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

        if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
                flush &= ~REQ_FUA;

        q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
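
/*
 * Illustrative sketch (not part of this file): a hypothetical driver for a
 * disk with a volatile write cache that also honours FUA writes, versus one
 * that writes through and needs no flushing at all.
 */
static void mydrv_set_cache_flags(struct request_queue *q, bool write_cache)
{
        if (write_cache)
                blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
        else
                blk_queue_flush(q, 0);
}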

void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
        q->flush_not_queueable = !queueable;
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);
 856