/*
 * Functions related to setting various queue properties from drivers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/memblock.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

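/**
 * blk_queue_rq_timeout - set the timeout for completing requests
 * @q:		the request queue for the device
 * @timeout:	timeout value in jiffies
 */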
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

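/**
 * blk_set_default_limits - reset limits to default values
 * @lim:	the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */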
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_discard_segments = 1;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_zone_append_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);

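/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:	the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */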
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

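/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q:		the request queue for the device
 * @max_addr:	the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 */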
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
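	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but there is no way to
	 * test that here.
	 */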
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

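/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests.  It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 */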
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	limits->max_hw_sectors = max_hw_sectors;
	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
	limits->max_sectors = max_sectors;
	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

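/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks.  The chunk size must
 *    currently be a power-of-2 number of sectors.
 */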
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	BUG_ON(!is_power_of_2(chunk_sectors));
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

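/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 */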
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

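/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 */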
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

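/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 */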
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

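/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 */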
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

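	/*
	 * Signal eventual driver bugs resulting in the max_zone_append
	 * sectors limit being 0 due to a 0 argument, the chunk_sectors
	 * limit (zone size) not set, or the max_hw_sectors limit not set.
	 */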
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

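/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 */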
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

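/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 */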
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

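/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 */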
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

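/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 */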
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

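/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */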
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

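/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should use this function for devices whose first sector is not
 *   naturally aligned.
 */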
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

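/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */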
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

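/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.
 */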
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

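/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  For RAID arrays it is
 *   often the stripe width or the internal track size.  A properly
 *   aligned multiple of optimal_io_size is the preferred request size
 *   for workloads where sustained throughput is desired.
 */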
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

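/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   See blk_limits_io_opt().
 */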
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);

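/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:	the underlying device (bottom)
 */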
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

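/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */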
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					 b->max_zone_append_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	if (b->chunk_sectors)
		t->chunk_sectors = min_not_zero(t->chunk_sectors,
						b->chunk_sectors);

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

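/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */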
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

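/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */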
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	t->backing_dev_info->io_pages =
		t->limits.max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(disk_stack_limits);

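/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Description:
 *    Update the dma pad mask.  The new mask is only applied if it is
 *    larger than the current one.
 */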
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

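/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 */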
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

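/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 */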
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

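	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
	 * of that they are not limited by our notion of "segment size".
	 */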
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

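/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 */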
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

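/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma
 *    transactions.  If the requested alignment is larger than the current
 *    alignment, then the current queue alignment is updated to the new
 *    value, otherwise it is left alone.  This allows multiple objects
 *    (driver, device, transport etc) to set their respective alignments
 *    without having them interfere.
 */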
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

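/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */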
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

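/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */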
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);

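/**
 * blk_queue_required_elevator_features - set required elevator features
 * @q:		the request queue for the target device
 * @features:	required elevator features, OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, only
 * elevators that implement at least the set of features specified by
 * @features can be used.
 */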
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

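/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments
 * @q:		the request queue for the device
 * @dev:	the device pointer for dma
 *
 * Tell the block layer about merging the segments by dma map of @q.
 */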
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* Use the DMA merge boundary as the queue's virtual boundary */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);