// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler
 *  for the blk-mq scheduling framework.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters; for throughput */

enum dd_data_dir {
	DD_READ		= READ,
	DD_WRITE	= WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
	DD_RT_PRIO	= 0,
	DD_BE_PRIO	= 1,
	DD_IDLE_PRIO	= 2,
	DD_PRIO_MAX	= 2,
};

enum { DD_PRIO_COUNT = 3 };

/* Per-priority request statistics, updated through per-CPU counters. */
struct io_stats_per_prio {
	local_t inserted;
	local_t merged;
	local_t dispatched;
	local_t completed;
};

/* I/O statistics for all priority levels of one scheduler instance. */
struct io_stats {
	struct io_stats_per_prio stats[DD_PRIO_COUNT];
};

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Next request in FIFO order. Read, write or both are NULL. */
	struct request *next_rq[DD_DIR_COUNT];
};

struct deadline_data {
	/*
	 * run time data
	 */

	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of latest dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	struct io_stats __percpu *stats;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;

	spinlock_t lock;
	spinlock_t zone_lock;
};

/* Count one event of type 'event_type' and with I/O priority 'prio' */
#define dd_count(dd, event_type, prio) do {				\
	struct io_stats *io_stats = get_cpu_ptr((dd)->stats);		\
									\
	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
	local_inc(&io_stats->stats[(prio)].event_type);			\
	put_cpu_ptr(io_stats);						\
} while (0)

/*
 * Returns the total number of dd_count(dd, event_type, prio) calls across all
 * CPUs. No locking or barriers since it is fine if the returned sum is
 * slightly outdated.
 */
#define dd_sum(dd, event_type, prio) ({					\
	unsigned int cpu;						\
	u32 sum = 0;							\
									\
	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
	for_each_present_cpu(cpu)					\
		sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->	\
				  stats[(prio)].event_type);		\
	sum;								\
})

/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
};

static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	if (per_prio->next_rq[data_dir] == rq)
		per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

	dd_count(dd, merged, prio);

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires that the fifo list for @data_dir is not empty.
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
				      enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = per_prio->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		list_del_init(&rq->queuelist);
		goto done;
	}

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */
	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */
	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;

		data_dir = DD_WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd_count(dd, dispatched, prio);
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	struct request *rq;
	enum dd_prio prio;

	spin_lock(&dd->lock);
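	/*
	 * Scan the priority levels from highest (DD_RT_PRIO) to lowest
	 * (DD_IDLE_PRIO) and dispatch from the first level that has a
	 * request pending.
	 */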
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
		if (rq)
			break;
	}
	spin_unlock(&dd->lock);

	return rq;
}

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(op) && !op_is_write(op))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;

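	/*
	 * Limit asynchronous requests to at most 3/4 of the scheduler tags so
	 * that synchronous requests can still allocate tags; see
	 * dd_limit_depth().
	 */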
	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}

/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);
	return 0;
}

static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
	}

	free_percpu(dd->stats);

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
				     GFP_KERNEL | __GFP_ZERO);
	if (!dd->stats)
		goto free_dd;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}
	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	q->elevator = eq;
	return 0;

free_dd:
	kfree(dd);

put_eq:
	kobject_put(&eq->kobj);
	return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

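	/*
	 * A front merge candidate is a queued request in the same direction
	 * that starts exactly where @bio ends; look it up in the rbtree that
	 * is sorted by request start sector.
	 */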
	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
			 unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;
	LIST_HEAD(free);

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	prio = ioprio_class_to_prio[ioprio_class];
	dd_count(dd, inserted, prio);
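	/*
	 * Mark the request as having been inserted through the I/O scheduler
	 * so that dd_finish_request() counts its completion; requests that
	 * bypass the scheduler keep the NULL set by dd_prepare_request().
	 */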
	rq->elv.priv[0] = (void *)(uintptr_t)1;

	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
		blk_mq_free_requests(&free);
		return;
	}

	trace_block_rq_insert(rq);

	per_prio = &dd->per_prio[prio];
	if (at_head) {
		list_add(&rq->queuelist, &per_prio->dispatch);
	} else {
		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
	}
}

/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}

/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
	rq->elv.priv[0] = NULL;
}

/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of completed write
 * requests. Do this while holding the zone lock spinlock so that the zone is
 * never unlocked while deadline_fifo_request() or deadline_next_request() are
 * executing. This function is called for all requests, whether or not these
 * requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(rq);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * The block layer core may call dd_finish_request() without having
	 * called dd_insert_requests(). Skip requests that bypassed I/O
	 * scheduling. See also blk_mq_request_bypass_insert().
	 */
	if (rq->elv.priv[0])
		dd_count(dd, completed, prio);

	if (blk_queue_is_zoned(q)) {
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}

static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
	return !list_empty_careful(&per_prio->dispatch) ||
		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
		if (dd_has_work_for_prio(&dd->per_prio[prio]))
			return true;

	return false;
}

/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data, __ret;						\
									\
	__ret = kstrtoint(page, 0, &__data);				\
	if (__ret < 0)							\
		return __ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __CONV(__data);					\
	return count;							\
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(async_depth),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
	struct request *rq = per_prio->next_rq[data_dir];		\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->async_depth);
	return 0;
}

static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
		   dd_queued(dd, DD_BE_PRIO),
		   dd_queued(dd, DD_IDLE_PRIO));
	return 0;
}

/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
		- dd_sum(dd, completed, prio);
}

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
		   dd_owned_by_driver(dd, DD_BE_PRIO),
		   dd_owned_by_driver(dd, DD_IDLE_PRIO));
	return 0;
}

#define DEADLINE_DISPATCH_ATTR(prio)					\
static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
					     loff_t *pos)		\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->dispatch, *pos);		\
}									\
									\
static void *deadline_dispatch##prio##_next(struct seq_file *m,	\
					    void *v, loff_t *pos)	\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->dispatch, pos);		\
}									\
									\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
	.start	= deadline_dispatch##prio##_start,			\
	.next	= deadline_dispatch##prio##_next,			\
	.stop	= deadline_dispatch##prio##_stop,			\
	.show	= blk_mq_debugfs_rq_show,				\
}

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR

#define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
	{#name "_fifo_list", 0400,					\
			.seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)					\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read0),
	DEADLINE_QUEUE_DDIR_ATTRS(write0),
	DEADLINE_QUEUE_DDIR_ATTRS(read1),
	DEADLINE_QUEUE_DDIR_ATTRS(write1),
	DEADLINE_QUEUE_DDIR_ATTRS(read2),
	DEADLINE_QUEUE_DDIR_ATTRS(write2),
	DEADLINE_NEXT_RQ_ATTR(read0),
	DEADLINE_NEXT_RQ_ATTR(write0),
	DEADLINE_NEXT_RQ_ATTR(read1),
	DEADLINE_NEXT_RQ_ATTR(write1),
	DEADLINE_NEXT_RQ_ATTR(read2),
	DEADLINE_NEXT_RQ_ATTR(write2),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"async_depth", 0400, dd_async_depth_show},
	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
	{"owned_by_driver", 0400, dd_owned_by_driver_show},
	{"queued", 0400, dd_queued_show},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
	.ops = {
		.depth_updated		= dd_depth_updated,
		.limit_depth		= dd_limit_depth,
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_sched,
		.exit_sched		= dd_exit_sched,
		.init_hctx		= dd_init_hctx,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");