/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O, it can be used to build a
 * cdb from the request data for instance.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q:		queue
 * @ufn:	unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
        q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
        q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
        q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
        q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->virt_boundary_mask = 0;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        lim->max_dev_sectors = 0;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->max_hw_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->discard_zeroes_data = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        blk_set_default_limits(lim);

        /* Inherit limits from component devices */
        lim->discard_zeroes_data = 1;
        lim->max_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device driver is
 *    for them to be collected into requests on a request queue, and then
 *    to allow the device driver to select requests off that queue when
 *    it is ready.  This works well for many block devices.  However some
 *    block devices (typically virtual devices such as md or lvm) do not
 *    benefit from the processing on the request queue, and are served
 *    best by having the requests passed directly to them.  This can be
 *    achieved by providing a function to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately with
 *    buffers in "highmemory", for example by calling blk_queue_bounce()
 *    to create a buffer in normal memory.
 */
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;

        q->make_request_fn = mfn;
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        blk_set_default_limits(&q->limits);

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
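
/*
 * Illustrative usage (not part of this file): a bio-based driver such as a
 * ramdisk or stacking driver pairs blk_queue_make_request() with its own
 * bio handler.  The mydrv_* names below are hypothetical.
 *
 *	static blk_qc_t mydrv_make_request(struct request_queue *q,
 *					   struct bio *bio)
 *	{
 *		... service the bio directly, no request queueing ...
 *		bio_endio(bio);
 *		return BLK_QC_T_NONE;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (q)
 *		blk_queue_make_request(q, mydrv_make_request);
 */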

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to.  A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 */
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
        unsigned long b_pfn = max_addr >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but we don't have a way
         * to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests.  It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 */
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        struct queue_limits *limits = &q->limits;
        unsigned int max_sectors;

        if ((max_hw_sectors << 9) < PAGE_SIZE) {
                max_hw_sectors = 1 << (PAGE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        limits->max_hw_sectors = max_hw_sectors;
        max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
        max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
        limits->max_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks.  Note that the chunk
 *    size must currently be a power-of-2 in sectors.  Also note that the
 *    block layer must accept a page worth of data at any offset, so if
 *    the crossing of chunks is a hard limitation in the driver, it must
 *    still be prepared to split single page bios.
 */
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
        BUG_ON(!is_power_of_2(chunk_sectors));
        q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 */
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_hw_discard_sectors = max_discard_sectors;
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 */
void blk_queue_max_write_same_sectors(struct request_queue *q,
                                      unsigned int max_write_same_sectors)
{
        q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 */
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 */
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_SIZE) {
                max_size = PAGE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 */
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
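
/*
 * Illustrative usage (not part of this file): a driver probing a device
 * with 4KiB physical sectors and 512-byte logical sectors might describe
 * its topology with the helpers above; the values here are made up.
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_alignment_offset(q, 0);
 *	blk_queue_io_min(q, 4096);
 *	blk_queue_io_opt(q, 128 * 1024);
 *	blk_queue_max_hw_sectors(q, 2048);
 */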

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 */
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                             b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

        t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        t->raid_partial_stripes_expensive =
                max(t->raid_partial_stripes_expensive,
                    b->raid_partial_stripes_expensive);

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                      sector_t start)
{
        struct request_queue *bq = bdev_get_queue(bdev);

        start += get_start_sect(bdev);

        return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }
}
EXPORT_SYMBOL(disk_stack_limits);
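
/*
 * Illustrative usage (not part of this file): a stacking driver (MD/DM
 * style) starts from the permissive stacking defaults and then folds in
 * each component device; top_disk, component_bdev and data_start_sector
 * are hypothetical names.
 *
 *	blk_set_stacking_limits(&top_disk->queue->limits);
 *	...
 *	disk_stack_limits(top_disk, component_bdev,
 *			  data_start_sector << 9);
 */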

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 */
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.  The mask is only raised, never lowered.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 */
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                        dma_drain_needed_fn *dma_drain_needed,
                        void *buf, unsigned int size)
{
        if (queue_max_segments(q) < 2)
                return -EINVAL;

        /* make room for appending the drain */
        blk_queue_max_segments(q, queue_max_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
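
/*
 * Illustrative usage (not part of this file): an ATAPI-style driver that
 * must always have somewhere to put trailing "excess" DMA data could attach
 * a drain buffer at queue setup time; mydrv_drain_needed and DRAIN_SIZE are
 * hypothetical names.
 *
 *	buf = kmalloc(DRAIN_SIZE, GFP_KERNEL);
 *	if (buf && blk_queue_dma_drain(q, mydrv_drain_needed,
 *				       buf, DRAIN_SIZE))
 *		... fall back to operating without a drain buffer ...
 */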

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 */
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_SIZE - 1) {
                mask = PAGE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 */
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
        q->limits.virt_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 */
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma
 *    transactions.  If the requested alignment is larger than the current
 *    alignment, then the current queue alignment is updated to the new
 *    value, otherwise it is left alone.  The design of this is to allow
 *    multiple objects (driver, device, transport etc) to set their
 *    respective alignments without having them interfere.
 */
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q:		the request queue for the device
 * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell block layer cache flush capability of @q.  If it supports
 * flushing, REQ_FLUSH should be set.  If it supports bypassing
 * write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
        WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

        if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
                flush &= ~REQ_FUA;

        q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
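
/*
 * Illustrative usage (not part of this file): a driver whose device has a
 * volatile write cache and supports FUA writes would advertise both flags;
 * a write-through device would pass 0 instead.
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 */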

/**
 * blk_queue_flush_queueable - initialize queue flush queueability flag
 * @q:		the request queue for the device
 * @queueable:	flag to set to true/false
 *
 * Tell block layer that a flush request can be queued alongside other
 * requests.
 */
void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
        q->flush_not_queueable = !queueable;
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);