#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)

/* Max number of counters that can be read back in a single bulk query */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)

/* Locking scheme:
 *
 * It is the responsibility of the caller to prevent concurrent calls or bad
 * ordering of mlx5_fc_create(), mlx5_fc_destroy() and accesses to a given
 * struct mlx5_fc reference.
 *
 * - create (user context): mlx5_fc_create() allocates the counter in HW and,
 *   when aging is requested, only links it onto the addlist, which is
 *   protected by addlist_lock; the periodic work later moves it into the
 *   counters rb-tree.
 *
 * - destroy (user context): an aging counter is only marked as deleted;
 *   the periodic work erases it from the rb-tree and frees it.
 *
 * - query (user context): mlx5_fc_query_cached() returns the values that
 *   were last cached by the periodic work, without touching the firmware.
 *
 * The counters rb-tree itself is touched only from the single-threaded
 * workqueue (and from mlx5_cleanup_fc_stats() after that work is stopped),
 * so it needs no locking of its own.
 */

static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
		int result = counter->id - this->id;

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&counter->node, parent, new);
	rb_insert_color(&counter->node, root);
}

/* Returns the last rb_node that was queried so the caller can continue
 * calling it until all counters have been queried.
 */
static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
					   struct mlx5_fc *first,
					   u32 last_id)
{
	struct mlx5_cmd_fc_bulk *b;
	struct rb_node *node = NULL;
	u32 afirst_id;
	int num;
	int err;
	int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
			     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));

	/* first id must be aligned to 4 when using bulk query */
	afirst_id = first->id & ~0x3;

	/* number of counters to query inc. the last counter */
	num = ALIGN(last_id - afirst_id + 1, 4);
	if (num > max_bulk) {
		num = max_bulk;
		last_id = afirst_id + num - 1;
	}

	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
	if (!b) {
		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
		return NULL;
	}

	err = mlx5_cmd_fc_bulk_query(dev, b);
	if (err) {
		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
		goto out;
	}

	for (node = &first->node; node; node = rb_next(node)) {
		struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
		struct mlx5_fc_cache *c = &counter->cache;
		u64 packets;
		u64 bytes;

		if (counter->id > last_id)
			break;

		mlx5_cmd_fc_bulk_get(dev, b,
				     counter->id, &packets, &bytes);

		if (c->packets == packets)
			continue;

		c->packets = packets;
		c->bytes = bytes;
		c->lastuse = jiffies;
	}

out:
	mlx5_cmd_fc_bulk_free(b);

	return node;
}

static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	unsigned long now = jiffies;
	struct mlx5_fc *counter = NULL;
	struct mlx5_fc *last = NULL;
	struct rb_node *node;
	LIST_HEAD(tmplist);

	spin_lock(&fc_stats->addlist_lock);

	/* Take ownership of the counters added since the last run. */
	list_splice_tail_init(&fc_stats->addlist, &tmplist);

	if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   fc_stats->sampling_interval);

	spin_unlock(&fc_stats->addlist_lock);

	list_for_each_entry(counter, &tmplist, list)
		mlx5_fc_stats_insert(&fc_stats->counters, counter);

	/* Release counters marked for deletion and track the highest
	 * counter id still present in the tree.
	 */
	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		if (counter->deleted) {
			rb_erase(&counter->node, &fc_stats->counters);

			mlx5_cmd_fc_free(dev, counter->id);

			kfree(counter);
			continue;
		}

		last = counter;
	}

	if (time_before(now, fc_stats->next_query) || !last)
		return;

	/* Refresh the cached values of all remaining counters in bulk. */
	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = mlx5_fc_stats_query(dev, counter, last->id);
	}

	fc_stats->next_query = now + fc_stats->sampling_interval;
}

struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err)
		goto err_out;

	if (aging) {
		counter->cache.lastuse = jiffies;
		counter->aging = true;

		spin_lock(&fc_stats->addlist_lock);
		list_add(&counter->list, &fc_stats->addlist);
		spin_unlock(&fc_stats->addlist_lock);

		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	}

	return counter;

err_out:
	kfree(counter);

	return ERR_PTR(err);
}
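
/* Caller-side sketch (illustrative, not part of this driver): mlx5_fc_create()
 * returns an ERR_PTR() on failure, so callers are expected to check it with
 * IS_ERR() rather than against NULL, e.g.:
 *
 *	counter = mlx5_fc_create(dev, true);
 *	if (IS_ERR(counter))
 *		return PTR_ERR(counter);
 */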

void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		counter->deleted = true;
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}

int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->counters = RB_ROOT;
	INIT_LIST_HEAD(&fc_stats->addlist);
	spin_lock_init(&fc_stats->addlist_lock);

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		return -ENOMEM;

	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	return 0;
}

void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;
	struct rb_node *node;

	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
		list_del(&counter->list);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		rb_erase(&counter->node, &fc_stats->counters);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}
}

int mlx5_fc_query(struct mlx5_core_dev *dev, u32 id,
		  u64 *packets, u64 *bytes)
{
	return mlx5_cmd_fc_query(dev, id, packets, bytes);
}

void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}
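
/* Usage sketch (illustrative, not part of this driver): a counter created
 * with aging enabled is sampled by the periodic work, so callers can read
 * the cached values without issuing a firmware command.  The byte/packet
 * values returned are deltas since the previous call, e.g.:
 *
 *	u64 bytes, packets, lastuse;
 *
 *	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 *	if (time_after(lastuse, last_reported))
 *		report_traffic(bytes, packets);
 *
 * report_traffic() and last_reported above are hypothetical caller-side names.
 */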

void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
			      struct delayed_work *dwork,
			      unsigned long delay)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	queue_delayed_work(fc_stats->wq, dwork, delay);
}

void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
				      unsigned long interval)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->sampling_interval = min_t(unsigned long, interval,
					    fc_stats->sampling_interval);
}