/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	blk_status_t error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
	unsigned completed;
};

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}

void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_quiesce_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once error occurred, just let clone->end_io know
		 * the remainder of the request is completed by
		 * unmapping the remaining bios in dm_softirq_done
		 * (see end_clone_request).
		 */
		return;
	else if (error) {
		/*
		 * Don't notice the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_mq_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

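/*
 * Account the completed I/O in dm-stats; pairs with the start-of-request
 * accounting done in dm_start_request().
 */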
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md)
{
	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

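/*
 * Exported so request-based targets (e.g. dm-mpath) can re-kick the
 * requeue list when requeued requests become dispatchable again.
 */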
void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

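/*
 * Release the clone (if one was made) and hand the original request back
 * to the block layer to be requeued, optionally after a short delay.
 */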
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}

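/*
 * Clone completion: consult the target's rq_end_io hook (if the clone was
 * actually mapped), then finish, requeue or delay-requeue the original
 * request based on the returned DM_ENDIO_* code.
 */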
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_SAME &&
			 !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
}

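/*
 * Insert the fully prepared clone into the underlying device's queue.
 * On BLK_STS_RESOURCE/BLK_STS_DEV_RESOURCE the caller is expected to
 * requeue the original request; any other error completes it here.
 */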
static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	blk_status_t r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time_ns = ktime_get_ns();
	r = blk_insert_cloned_request(clone->q, clone);
	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
	return r;
}

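/*
 * blk_rq_prep_clone() callback: link each cloned bio back to its original
 * bio and tio, and route its completion to end_clone_bio().
 */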
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

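/*
 * Prepare the clone of the original request: copy its bios, then wire up
 * the clone's end_io so completion is reported back through the tio.
 */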
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

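/*
 * Reset the per-request state that changes with every dispatch.
 */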
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;

	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = dm_dispatch_clone_request(clone, rq);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			blk_rq_unprep_clone(clone);
			tio->ti->type->release_clone_rq(clone, &tio->info);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}

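/*
 * Mark the original request as started and, if dm-stats is enabled, record
 * its start time and size so rq_end_stats() can account the completion.
 */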
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() as well.
	 */
	dm_get(md);
}

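/*
 * blk-mq .init_request hook: runs once per preallocated request when the
 * tag set is created, not per I/O, so only the invariant parts of the tio
 * are set up here.
 */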
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

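/*
 * blk-mq .queue_rq hook: resolve the target for this device, then map and
 * dispatch the request; returning BLK_STS_RESOURCE asks blk-mq to retry.
 */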
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/* Establish tio->ti before calling map_request() */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

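/*
 * Allocate the blk-mq tag set for this mapped device and bind it to the
 * already-allocated request queue.  If the table has an immutable target
 * with per-io data, that data is carved out of the request pdu.
 */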
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);
	md->tag_set = NULL;	/* avoid a double free from dm_mq_cleanup_mapped_device() */

	return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");