// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

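/*
 * Per-queue stats state: the list of registered callbacks, the lock
 * protecting it, and a flag that keeps QUEUE_FLAG_STATS set even when the
 * callback list is empty.
 */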
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	/*
	 * src->batch holds the sum of src's samples, so the merged mean is
	 * (batch + mean * old nr_samples) / combined nr_samples.
	 */
	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

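/* record one sample: track min/max and accumulate into the batch sum */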
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

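/*
 * Called at request completion time: compute the I/O duration from
 * io_start_time_ns and feed it to blk-throttle and to each active
 * callback's per-cpu bucket for this CPU.
 */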
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

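/*
 * Window timer: fold every CPU's buckets into cb->stat, reset the per-cpu
 * counters, and hand the aggregated result to the owner's timer_fn.
 */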
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

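/*
 * Typical callback lifecycle (a sketch, not upstream documentation;
 * my_timer_fn and my_bucket_fn are hypothetical, and activation helpers
 * such as blk_stat_activate_msecs() are declared in blk-stat.h):
 *
 *	cb = blk_stat_alloc_callback(my_timer_fn, my_bucket_fn, nr_buckets, data);
 *	blk_stat_add_callback(q, cb);
 *	blk_stat_activate_msecs(cb, window_msecs);
 *	...
 *	blk_stat_remove_callback(q, cb);
 *	blk_stat_free_callback(cb);
 */

/**
 * blk_stat_alloc_callback() - Allocate a block statistics callback.
 * @timer_fn: Timer callback function.
 * @bucket_fn: Bucket callback function.
 * @buckets: Number of buckets.
 * @data: Value for the @data field of the &struct blk_stat_callback.
 *
 * See &struct blk_stat_callback for details on the callback functions.
 *
 * Return: &struct blk_stat_callback on success or NULL on ENOMEM.
 */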
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

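/**
 * blk_stat_add_callback() - Add a block statistics callback to be run on a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * Note that a single &struct blk_stat_callback can only be added to a single
 * &struct request_queue.
 */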
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}

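/**
 * blk_stat_remove_callback() - Remove a block statistics callback from a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * When this returns, the callback is not running on any CPUs and will not run
 * again unless it is readded.
 */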
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}

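/* RCU callback that does the actual freeing once readers are done */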
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

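/**
 * blk_stat_free_callback() - Free a block statistics callback.
 * @cb: The callback.
 *
 * @cb may be NULL, in which case this does nothing. If it is not NULL, @cb must
 * not be associated with a request queue. I.e., if it was previously added with
 * blk_stat_add_callback(), it must also have been removed since then with
 * blk_stat_remove_callback().
 */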
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

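/*
 * Permanently enable accounting on the queue: QUEUE_FLAG_STATS stays set
 * even after the last callback is removed.
 */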
void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

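/*
 * Queue-lifetime setup/teardown for the stats container; the WARN_ON in
 * blk_free_queue_stats() catches callbacks leaked past queue release.
 */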
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}