/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"
#include "blk-wbt.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_discard_segments = 1;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->cluster = 1;
	lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory", either by calling kmap_atomic() to
 *    get a temporary kernel mapping, or by calling blk_queue_bounce()
 *    to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);

	blk_set_default_limits(&q->limits);
}
EXPORT_SYMBOL(blk_queue_make_request);
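
/*
 * Illustrative sketch (not part of this file): a simple bio-based driver
 * might wire up its make_request handler roughly as follows.  The names
 * my_make_request(), my_setup_queue() and struct my_dev are hypothetical.
 *
 *	static blk_qc_t my_make_request(struct request_queue *q,
 *					struct bio *bio)
 *	{
 *		// handle the bio directly, then complete it
 *		bio_endio(bio);
 *		return BLK_QC_T_NONE;
 *	}
 *
 *	static int my_setup_queue(struct my_dev *dev)
 *	{
 *		dev->queue = blk_alloc_queue(GFP_KERNEL);
 *		if (!dev->queue)
 *			return -ENOMEM;
 *		blk_queue_make_request(dev->queue, my_make_request);
 *		return 0;
 *	}
 */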

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
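
/*
 * Illustrative sketch (not part of this file): a driver whose hardware
 * cannot reach highmem pages could request bouncing with BLK_BOUNCE_HIGH,
 * while one limited to 32-bit addressing could pass an explicit mask.
 * "my_dev" is hypothetical.
 *
 *	blk_queue_bounce_limit(my_dev->queue, BLK_BOUNCE_HIGH);
 *	// or
 *	blk_queue_bounce_limit(my_dev->queue, DMA_BIT_MASK(32));
 */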

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests. It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	limits->max_hw_sectors = max_hw_sectors;
	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
	limits->max_sectors = max_sectors;
	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
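
/*
 * Illustrative sketch (not part of this file): a driver whose controller
 * can transfer at most 1MB per request would cap requests at 2048
 * 512-byte sectors.  "my_dev" is hypothetical.
 *
 *	blk_queue_max_hw_sectors(my_dev->queue, 1024 * 1024 >> 9);
 */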

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the chunk size
 *    must currently be a power-of-2 in sectors. Also note that the block
 *    layer must accept a page worth of data at any offset. So if the
 *    crossing of chunks is a hard limitation in the driver, it must still be
 *    prepared to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	BUG_ON(!is_power_of_2(chunk_sectors));
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
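
/*
 * Illustrative sketch (not part of this file): a driver for a 512e drive
 * (512-byte logical sectors on 4KB physical sectors) would typically
 * report both sizes so the I/O stack can align writes and avoid
 * read-modify-write cycles.  "my_dev" is hypothetical.
 *
 *	blk_queue_logical_block_size(my_dev->queue, 512);
 *	blk_queue_physical_block_size(my_dev->queue, 4096);
 */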

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
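
/*
 * Illustrative sketch (not part of this file): a RAID5-style driver with a
 * 64KB chunk and four data disks might report the chunk as the minimum I/O
 * and the full stripe as the optimal I/O.  "my_dev" is hypothetical.
 *
 *	blk_queue_io_min(my_dev->queue, 64 * 1024);
 *	blk_queue_io_opt(my_dev->queue, 4 * 64 * 1024);
 */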

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

	t->cluster &= b->cluster;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	if (b->chunk_sectors)
		t->chunk_sectors = min_not_zero(t->chunk_sectors,
						b->chunk_sectors);

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
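
/*
 * Illustrative sketch (not part of this file): a stacking driver would
 * typically start from the wide-open stacking defaults and fold in each
 * component device's limits, checking for misalignment.  The "my_table"
 * structure and its fields below are hypothetical.
 *
 *	struct queue_limits lim;
 *	int i;
 *
 *	blk_set_stacking_limits(&lim);
 *	for (i = 0; i < my_table->num_devices; i++) {
 *		if (bdev_stack_limits(&lim, my_table->bdev[i],
 *				      my_table->start[i]) < 0)
 *			pr_warn("component %d is misaligned\n", i);
 *	}
 *	my_table->queue->limits = lim;
 */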

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask, but only if the new mask is larger than the
 * current one.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that @buf is always appended silently
 * to the scatterlist.
 *
 * Note: This routine adjusts the segment limit to make room for
 * appending the drain buffer.  If you call blk_queue_max_segments()
 * after calling this routine, you must set the limit to one fewer than
 * your device can support, otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma
 *    transactions.  If the requested alignment is larger than the
 *    current alignment, then the current queue alignment is updated to
 *    the new value, otherwise it is left alone.  The design of this is
 *    to allow multiple objects (driver, device, transport etc) to set
 *    their respective alignments without having them interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	wbt_set_queue_depth(q, depth);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
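
/*
 * Illustrative sketch (not part of this file): a driver for a device with a
 * volatile write-back cache that also honors FUA would advertise both, so
 * the block layer issues flush/FUA requests rather than assuming writes are
 * durable on completion.  "my_dev" is hypothetical.
 *
 *	blk_queue_write_cache(my_dev->queue, true, true);
 */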

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);