/* Copyright (c) Mellanox Technologies. All rights reserved. */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)

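/* Max number of counters to query in a single bulk read is 32K */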
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)

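/* Counter lifetime and locking, as implemented below:
 *
 * - mlx5_fc_create() adds aging counters to fc_stats->addlist (protected by
 *   fc_stats->addlist_lock) and kicks the stats work; the work later moves
 *   them into the counters rbtree.
 * - mlx5_fc_destroy() never removes an aging counter directly; it marks the
 *   counter as deleted and lets the stats work erase and free it.
 * - The counters rbtree is touched only by mlx5_fc_stats_work(), which runs
 *   on a single-threaded workqueue, so tree accesses need no extra locking.
 */
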
static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
		int result = counter->id - this->id;

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&counter->node, parent, new);
	rb_insert_color(&counter->node, root);
}

/* Queries counters in bulk, starting at @first and up to @last_id, and
 * refreshes their cached values. Returns the first node that was not
 * queried so the caller can continue from it, or NULL once the whole
 * tree has been traversed.
 */
static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
					   struct mlx5_fc *first,
					   u32 last_id)
{
	struct mlx5_cmd_fc_bulk *b;
	struct rb_node *node = NULL;
	u32 afirst_id;
	int num;
	int err;

	int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
			     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));

	/* first id must be aligned to 4 when using bulk query */
	afirst_id = first->id & ~0x3;

	/* number of counters to query inc. the last counter */
	num = ALIGN(last_id - afirst_id + 1, 4);
	if (num > max_bulk) {
		num = max_bulk;
		last_id = afirst_id + num - 1;
	}

	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
	if (!b) {
		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
		return NULL;
	}

	err = mlx5_cmd_fc_bulk_query(dev, b);
	if (err) {
		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
		goto out;
	}

	for (node = &first->node; node; node = rb_next(node)) {
		struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
		struct mlx5_fc_cache *c = &counter->cache;
		u64 packets;
		u64 bytes;

		if (counter->id > last_id)
			break;

		mlx5_cmd_fc_bulk_get(dev, b,
				     counter->id, &packets, &bytes);

		if (c->packets == packets)
			continue;

		c->packets = packets;
		c->bytes = bytes;
		c->lastuse = jiffies;
	}

out:
	mlx5_cmd_fc_bulk_free(b);

	return node;
}

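/* Periodic work: move newly created counters from the addlist into the
 * rbtree, release counters marked as deleted, and, once the sampling
 * interval has elapsed, refresh the cached statistics via bulk queries.
 */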
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	unsigned long now = jiffies;
	struct mlx5_fc *counter = NULL;
	struct mlx5_fc *last = NULL;
	struct rb_node *node;
	LIST_HEAD(tmplist);

	spin_lock(&fc_stats->addlist_lock);

	list_splice_tail_init(&fc_stats->addlist, &tmplist);

	if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   fc_stats->sampling_interval);

	spin_unlock(&fc_stats->addlist_lock);

	list_for_each_entry(counter, &tmplist, list)
		mlx5_fc_stats_insert(&fc_stats->counters, counter);

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		if (counter->deleted) {
			rb_erase(&counter->node, &fc_stats->counters);

			mlx5_cmd_fc_free(dev, counter->id);

			kfree(counter);
			continue;
		}

		last = counter;
	}

	if (time_before(now, fc_stats->next_query) || !last)
		return;

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = mlx5_fc_stats_query(dev, counter, last->id);
	}

	fc_stats->next_query = now + fc_stats->sampling_interval;
}

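/* Allocate a hardware flow counter. When @aging is true the counter is
 * handed to the stats work for periodic querying and its cached values can
 * later be read with mlx5_fc_query_cached().
 *
 * Hypothetical usage sketch for a caller of this API (the attachment of the
 * counter to a flow rule is up to the caller and is not shown here):
 *
 *	struct mlx5_fc *fc;
 *	u64 bytes, packets, lastuse;
 *
 *	fc = mlx5_fc_create(dev, true);
 *	if (IS_ERR(fc))
 *		return PTR_ERR(fc);
 *	... attach fc to a flow rule and let traffic hit it ...
 *	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);
 *	mlx5_fc_destroy(dev, fc);
 */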
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err)
		goto err_out;

	if (aging) {
		counter->cache.lastuse = jiffies;
		counter->aging = true;

		spin_lock(&fc_stats->addlist_lock);
		list_add(&counter->list, &fc_stats->addlist);
		spin_unlock(&fc_stats->addlist_lock);

		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	}

	return counter;

err_out:
	kfree(counter);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(mlx5_fc_create);

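/* Release a flow counter. Aging counters are only marked as deleted here;
 * the stats work removes them from the rbtree, frees the HW counter and the
 * memory. Non-aging counters are released immediately.
 */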
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		counter->deleted = true;
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);

int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->counters = RB_ROOT;
	INIT_LIST_HEAD(&fc_stats->addlist);
	spin_lock_init(&fc_stats->addlist_lock);

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		return -ENOMEM;

	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	return 0;
}

void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;
	struct rb_node *node;

	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
		list_del(&counter->list);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		rb_erase(&counter->node, &fc_stats->counters);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}
}

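/* Query the counter directly from the device, bypassing the cached values. */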
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
		  u64 *packets, u64 *bytes)
{
	return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);

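/* Report the bytes/packets delta since the previous call, plus the last time
 * the cached counter was seen changing. Only the values cached by the stats
 * work are used; no device command is issued.
 */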
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}

void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
			      struct delayed_work *dwork,
			      unsigned long delay)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	queue_delayed_work(fc_stats->wq, dwork, delay);
}

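/* Only ever shortens the sampling interval: the smallest interval requested
 * by any caller wins.
 */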
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
				      unsigned long interval)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->sampling_interval = min_t(unsigned long, interval,
					    fc_stats->sampling_interval);
}