/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>

#include <trace/events/block.h>

#include "trace_output.h"
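
/*
 * Overview (descriptive note): every event below is emitted through one of
 * two sinks.  The classic blktrace path hands records to a relay channel
 * whose per-cpu files live under debugfs, while the "blk" ftrace plugin
 * writes the same struct blk_io_trace records into the ftrace ring buffer
 * (blk_tr).  blk_tracer_enabled selects between them at trace time.
 */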

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output than the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* Default: disable the minimalistic output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;

	if (blk_tracer) {
		buffer = blk_tr->trace_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(buffer, event, 0, pc);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm));
	}
	spin_unlock_irqrestore(&running_trace_lock, flags);
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
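
/*
 * Worked example (derived from the macro above): for SYNC, the REQ_SYNC
 * bit sits at bit __REQ_SYNC in @rw.  Shifting left by
 * ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC moves that single bit
 * up to bit ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT, which is exactly the
 * BLK_TC_ACT(BLK_TC_SYNC) position in the blktrace action word.
 */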

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, SYNC);
	what |= MASK_TC_BIT(rw, RAHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);
	what |= MASK_TC_BIT(rw, FLUSH);
	what |= MASK_TC_BIT(rw, FUA);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->trace_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing framework functions.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}

static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	msg[count] = '\0';
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};
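
/*
 * Usage note (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug): while a trace is running,
 *
 *	echo "hit checkpoint 3" > /sys/kernel/debug/block/sda/msg
 *
 * injects a BLK_TN_MESSAGE note into the trace stream via the "msg"
 * file created in do_blk_trace_setup() below.
 */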

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
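/*
 * Layout note (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug): a successful setup for device "sda" creates
 * /sys/kernel/debug/block/sda/ containing "dropped", "msg" and one
 * relay file per cpu ("trace0", "trace1", ...) read by blktrace(8).
 */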
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct block_device *bdev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	mutex_lock(&blk_tree_mutex);
	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			mutex_unlock(&blk_tree_mutex);
			goto err;
		}
	}
	mutex_unlock(&blk_tree_mutex);

	dir = debugfs_create_dir(buts->name, blk_tree_root);

	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();

	return 0;
err:
	blk_trace_free(bt);
	return ret;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
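
/*
 * Userspace reaches blk_trace_setup() through the BLKTRACESETUP ioctl
 * handled below.  A minimal sketch (illustrative only; error handling
 * and teardown omitted, field values are examples):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr   = 4,
 *		.act_mask = 0,		// 0 means "trace everything"
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 */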
561
562#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
563static int compat_blk_trace_setup(struct request_queue *q, char *name,
564 dev_t dev, struct block_device *bdev,
565 char __user *arg)
566{
567 struct blk_user_trace_setup buts;
568 struct compat_blk_user_trace_setup cbuts;
569 int ret;
570
571 if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
572 return -EFAULT;
573
574 buts = (struct blk_user_trace_setup) {
575 .act_mask = cbuts.act_mask,
576 .buf_size = cbuts.buf_size,
577 .buf_nr = cbuts.buf_nr,
578 .start_lba = cbuts.start_lba,
579 .end_lba = cbuts.end_lba,
580 .pid = cbuts.pid,
581 };
582
583 ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
584 if (ret)
585 return ret;
586
587 if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
588 blk_trace_remove(q);
589 return -EFAULT;
590 }
591
592 return 0;
593}
594#endif

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			spin_lock_irq(&running_trace_lock);
			list_add(&bt->running_list, &running_trace_list);
			spin_unlock_irq(&running_trace_lock);

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			spin_lock_irq(&running_trace_lock);
			list_del_init(&bt->running_list);
			spin_unlock_irq(&running_trace_lock);
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     unsigned int nr_bytes, u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
				what, rq->errors, rq->cmd_len, rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
				rq->cmd_flags, what, rq->errors, 0, NULL);
	}
}

static void blk_add_trace_rq_abort(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(void *ignore,
				    struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(void *ignore,
				     struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(void *ignore,
				      struct request_queue *q,
				      struct request *rq,
				      unsigned int nr_bytes)
{
	blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (!error && !bio_flagged(bio, BIO_UPTODATE))
		error = EIO;

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_rw, what, error, 0, NULL);
}

static void blk_add_trace_bio_bounce(void *ignore,
				     struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio,
				       int error)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}

static void blk_add_trace_bio_backmerge(void *ignore,
					struct request_queue *q,
					struct request *rq,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
}

static void blk_add_trace_bio_frontmerge(void *ignore,
					 struct request_queue *q,
					 struct request *rq,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
}

static void blk_add_trace_bio_queue(void *ignore,
				    struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore,
				struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_sleeprq(void *ignore,
				  struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_split(void *ignore,
				struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
				!bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper or raid target sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_bio_remap(void *ignore,
				    struct request_queue *q, struct bio *bio,
				    dev_t dev, sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_rw, BLK_TA_REMAP,
			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
				   struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
			sizeof(r), &r);
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, rq->errors, len, data);
	else
		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);

	/* Wait for any in-flight probe calls to finish before returning */
	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_trace formatting routines
 */
static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if (t->action == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
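
/*
 * Example (derived from fill_rwbs() above): a synchronous write renders
 * as "WS", a discard as "D", and a flush+FUA write as "FWF".
 */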

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector_from = __r->sector_from;

	r->device_from = be32_to_cpu(__r->device_from);
	r->device_to   = be32_to_cpu(__r->device_to);
	r->sector_from = be64_to_cpu(sector_from);
}

typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static void blk_log_action(struct trace_iterator *iter, const char *act)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static void blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent);
	pdu_len = te_blk_io_trace(ent)->pdu_len;

	if (!pdu_len)
		return;

	/* find the last non-zero byte; end indexes the first trailing zero */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {

		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}

static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
					 t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			       const struct trace_entry *ent)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device_from = 0, };

	get_pdu_remap(ent, &r);
	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(r.device_from), MINOR(r.device_from),
			 (unsigned long long)r.sector_from);
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
	const struct blk_io_trace *t = te_blk_io_trace(ent);

	trace_seq_putmem(s, t + 1, t->pdu_len);
	trace_seq_putc(s, '\n');
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{ "Q",  "queue" },	    blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{ "M",  "backmerge" },   blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{ "F",  "frontmerge" },  blk_log_generic },
	[__BLK_TA_GETRQ]	= {{ "G",  "getrq" },	    blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{ "S",  "sleeprq" },	    blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{ "R",  "requeue" },	    blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{ "D",  "issue" },	    blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{ "C",  "complete" },    blk_log_with_error },
	[__BLK_TA_PLUG]		= {{ "P",  "plug" },	    blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{ "U",  "unplug_io" },   blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{ "I",  "insert" },	    blk_log_generic },
	[__BLK_TA_SPLIT]	= {{ "X",  "split" },	    blk_log_split },
	[__BLK_TA_BOUNCE]	= {{ "B",  "bounce" },	    blk_log_generic },
	[__BLK_TA_REMAP]	= {{ "A",  "remap" },	    blk_log_remap },
};
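
/*
 * Illustrative classic-format output line (values invented for the
 * example), as produced by blk_log_action_classic() + blk_log_generic():
 *
 *	  8,0    3     1.012345678   123  Q  WS 1024 + 8 [dd]
 */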

static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;

	t	   = te_blk_io_trace(iter->ent);
	what	   = t->action & ((1 << BLK_TC_SHIFT) - 1);
	long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;

	if (t->action == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m");
		blk_log_msg(s, iter->ent);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act]);
		what2act[what].print(s, iter->ent);
	}

	return trace_handle_return(s);
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		ret = -EBUSY;
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}
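
/*
 * Note: unlike do_blk_trace_setup(), this sysfs-driven path allocates no
 * relay channel or debugfs files; events from a queue enabled here are
 * only visible through the ftrace "blk" plugin output.
 */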

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}
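
/*
 * Usage note (illustrative): the attributes declared above appear under
 * the "trace" group of a disk or partition device in sysfs, e.g.
 *
 *	echo "read,write,issue,complete" > /sys/block/sda/trace/act_mask
 *	echo 1 > /sys/block/sda/trace/enable
 */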

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

void blk_dump_cmd(char *buf, struct request *rq)
{
	int i, end;
	int len = rq->cmd_len;
	unsigned char *cmd = rq->cmd;

	if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
		buf[0] = '\0';
		return;
	}

	for (end = len - 1; end >= 0; end--)
		if (cmd[end])
			break;
	end++;

	for (i = 0; i < len; i++) {
		buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
		if (i == end && end != len - 1) {
			sprintf(buf, " ..");
			break;
		}
	}
}

void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
{
	int i = 0;

	if (rw & REQ_FLUSH)
		rwbs[i++] = 'F';

	if (rw & WRITE)
		rwbs[i++] = 'W';
	else if (rw & REQ_DISCARD)
		rwbs[i++] = 'D';
	else if (bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (rw & REQ_FUA)
		rwbs[i++] = 'F';
	if (rw & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (rw & REQ_SYNC)
		rwbs[i++] = 'S';
	if (rw & REQ_META)
		rwbs[i++] = 'M';
	if (rw & REQ_SECURE)
		rwbs[i++] = 'E';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);
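
/*
 * Example (derived from the function above):
 *
 *	char rwbs[RWBS_LEN];
 *	blk_fill_rwbs(rwbs, WRITE | REQ_SYNC, 4096);	// rwbs == "WS"
 */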

#endif /* CONFIG_EVENT_TRACING */