/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"

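/*
 * Number of samples batched up per cpu before they are folded into the
 * running mean, see __blk_stat_add() and blk_stat_flush_batch().
 */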
#define BLK_RQ_STAT_BATCH	64

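/*
 * Per-queue callback bookkeeping. The callback list is walked under RCU
 * by blk_stat_add() and modified under ->lock.
 */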
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
};

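/**
 * blk_stat_rq_ddir() - Bucket callback function for the request data direction.
 * @rq: Request.
 *
 * This is the same as rq_data_dir() but as a function so it can be used as
 * @bucket_fn for blk_stat_alloc_callback().
 *
 * Return: Data direction of the request, either READ or WRITE.
 */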
int blk_stat_rq_ddir(const struct request *rq)
{
	return rq_data_dir(rq);
}
EXPORT_SYMBOL_GPL(blk_stat_rq_ddir);

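/* Reset a stat bucket; min starts at -1ULL so the first sample always wins. */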
static void blk_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = stat->nr_batch = 0;
}

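/*
 * Fold the batched samples into the running mean and sample count, then
 * reset the batch.
 */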
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}

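/*
 * Merge @src into @dst: combine min/max and compute the mean weighted by
 * the number of samples on each side.
 */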
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples)
		dst->mean = src->mean;
	else {
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}
	dst->nr_samples += src->nr_samples;
}

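/*
 * Add a single sample. Samples are batched so that the mean (a 64-bit
 * division) only has to be recomputed when the batch fills up or its sum
 * would overflow.
 */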
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);

	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}

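/*
 * Record a completed request: the elapsed time since the request's issue
 * timestamp is added to the matching bucket of every active callback on
 * the queue. The callback list is walked under rcu_read_lock() so
 * callbacks can be added and removed concurrently.
 */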
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq_aux(rq)->issue_stat))
		return;

	value = now - blk_stat_time(&rq_aux(rq)->issue_stat);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (blk_stat_is_active(cb)) {
			bucket = cb->bucket_fn(rq);
			if (bucket < 0)
				continue;
			stat = &this_cpu_ptr(cb->cpu_stat)[bucket];
			__blk_stat_add(stat, value);
		}
	}
	rcu_read_unlock();
}

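/*
 * Timer callback: fold every online cpu's buckets into cb->stat, reset
 * the per-cpu buckets, and hand the aggregated result to the user's
 * timer_fn.
 */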
static void blk_stat_timer_fn(unsigned long data)
{
	struct blk_stat_callback *cb = (void *)data;
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

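/**
 * blk_stat_alloc_callback() - Allocate a block statistics callback.
 * @timer_fn: Timer callback function.
 * @bucket_fn: Bucket callback function. Returns the bucket a request should
 * be accounted in, or a negative value to skip the request.
 * @buckets: Number of statistics buckets.
 * @data: Value for the @data field of the &struct blk_stat_callback.
 *
 * See &struct blk_stat_callback for details on the callback functions.
 *
 * Return: &struct blk_stat_callback on success or NULL on a memory
 * allocation failure.
 */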
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);

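/**
 * blk_stat_add_callback() - Add a block statistics callback to be run on a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * Note that a single &struct blk_stat_callback can only be added to a
 * single &struct request_queue at a time.
 */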
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

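/**
 * blk_stat_remove_callback() - Remove a block statistics callback from a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * When this returns, the callback's timer is no longer running.
 */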
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks))
		clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

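/* RCU callback that does the actual freeing once a grace period has elapsed. */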
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

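/**
 * blk_stat_free_callback() - Free a block statistics callback.
 * @cb: The callback. May be NULL.
 *
 * The callback must already have been removed from its request queue;
 * freeing is deferred by one RCU grace period so that concurrent
 * blk_stat_add() walkers are not left with a dangling pointer.
 */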
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

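/* Allocate and initialize the per-queue callback bookkeeping. */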
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);

	return stats;
}

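/*
 * Free the per-queue stats bookkeeping. All callbacks are expected to
 * have been removed already, hence the WARN_ON().
 */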
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}