/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O, it can be used to build a
 * cdb from the request data for instance.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q:		queue
 * @ufn:	unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
	q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec function
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.  Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = BLK_DEF_MAX_SECTORS;
	lim->max_hw_sectors = INT_MAX;
	lim->max_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = -1;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;
	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);
	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
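
/*
 * Example (illustrative sketch, not part of this file): a bio-based driver
 * such as a ramdisk pairs blk_queue_make_request() with its own
 * make_request_fn during queue setup.  The names below are hypothetical.
 */
#if 0	/* sketch only, not compiled */
static int example_make_request(struct request_queue *q, struct bio *bio)
{
	/* Service the bio directly and complete it; no request queueing. */
	bio_endio(bio, 0);
	return 0;
}

static void example_init_queue(struct request_queue *q)
{
	blk_queue_make_request(q, example_make_request);
}
#endif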

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but there is no way to
	 * test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
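
/*
 * Example (illustrative): a driver whose controller can only address the
 * low 4GB would ask the block layer to bounce pages above that mask:
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 *
 * Drivers with no addressing restriction beyond highmem typically pass
 * BLK_BOUNCE_HIGH or BLK_BOUNCE_ANY instead.
 */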

/**
 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
 * @limits: the queue limits
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	limits->max_hw_sectors = max_hw_sectors;
	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
				    BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    See description for blk_limits_max_hw_sectors().
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
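
/*
 * Example (illustrative): a 512-byte-emulation drive with 4KiB physical
 * sectors would typically be advertised as:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *
 * io_min is bumped to 4096 automatically by the calls above, steering
 * filesystems away from sub-physical-block writes.
 */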

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   See description for blk_limits_io_opt().
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
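
/*
 * Example (illustrative): a RAID5 array with a 64KiB chunk and four data
 * disks would report the chunk as io_min and the full stripe as io_opt:
 *
 *	blk_queue_io_min(q, 64 * 1024);		// stripe chunk
 *	blk_queue_io_opt(q, 4 * 64 * 1024);	// full stripe width
 */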

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) & (min(top, bottom) - 1)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm(t->io_opt, b->io_opt);

	t->cluster &= b->cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm(t->alignment_offset, alignment)
		& (max(t->physical_block_size, t->io_min) - 1);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if (max(top, bottom) & (min(top, bottom) - 1))
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm(t->discard_alignment, alignment) &
			(t->discard_granularity - 1);
	}

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
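
/*
 * Example (illustrative): a stacking driver building its limits from two
 * component queues; "lim", "q1" and "q2" are hypothetical.
 *
 *	struct queue_limits lim;
 *
 *	blk_set_default_limits(&lim);
 *	if (blk_stack_limits(&lim, &q1->limits, 0) < 0 ||
 *	    blk_stack_limits(&lim, &q2->limits, 0) < 0)
 *		pr_warning("stacked device is misaligned\n");
 */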

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.  The new mask is adopted only if it is larger
 * than the current one.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
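
/*
 * Example (illustrative): an ATAPI-style driver setting up a drain buffer
 * at queue init.  "example_drain_needed" and the 256-byte size are
 * hypothetical; real drivers size the buffer for their hardware.
 *
 *	static int example_drain_needed(struct request *rq)
 *	{
 *		return rq->cmd_type == REQ_TYPE_BLOCK_PC;	// packet cmds
 *	}
 *	...
 *	buf = kmalloc(256, GFP_KERNEL);
 *	if (buf)
 *		blk_queue_dma_drain(q, example_drain_needed, buf, 256);
 */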

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q:		the request queue for the device
 * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell block layer cache flush capability of @q.  If it supports
 * flushing, REQ_FLUSH should be set.  If it supports bypassing
 * write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
		flush &= ~REQ_FUA;

	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
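
/*
 * Example (illustrative): a driver for a device with a volatile write-back
 * cache that also honors FUA writes would advertise both capabilities:
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 *
 * A device without a write cache passes 0 and the block layer will not
 * issue flushes to it.
 */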
807
808static int __init blk_settings_init(void)
809{
810 blk_max_low_pfn = max_low_pfn - 1;
811 blk_max_pfn = max_pfn - 1;
812 return 0;
813}
814subsys_initcall(blk_settings_init);
815