/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)

/* Locking scheme:
 *
 * It is the user's responsibility to prevent concurrent calls or bad
 * ordering of mlx5_fc_create(), mlx5_fc_destroy() and accesses to a
 * struct mlx5_fc reference.
 *
 * - create (user context): mlx5_fc_create() only appends the counter to
 *   addlist, which is protected by addlist_lock.
 * - destroy (user context): the counter is only marked as deleted; the
 *   actual rb-tree removal, firmware free and kfree() happen later in
 *   mlx5_fc_stats_work().
 * - query (single-threaded workqueue context): the counters rb-tree is
 *   touched only from the workqueue, so it needs no locking. Cache updates
 *   are not atomic, so a concurrent reader of the cache may see packets
 *   and bytes from two different hardware queries.
 */

static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
		int result = counter->id - this->id;

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	/* Add the new node and rebalance the tree. */
	rb_link_node(&counter->node, parent, new);
	rb_insert_color(&counter->node, root);
}

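/* Issue a single bulk query covering counters [first->id, last_id] and
 * refresh the per-counter caches. Returns the first tree node whose stats
 * were not refreshed (its id lies beyond what one bulk command can cover),
 * or NULL when the whole range was handled or the query failed.
 */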
static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
					   struct mlx5_fc *first,
					   u16 last_id)
{
	struct mlx5_cmd_fc_bulk *b;
	struct rb_node *node = NULL;
	u16 afirst_id;
	int num;
	int err;
	int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk);

	/* The first id must be aligned to 4 when using a bulk query. */
	afirst_id = first->id & ~0x3;

	/* Number of counters to query, including the last counter. */
	num = ALIGN(last_id - afirst_id + 1, 4);
	if (num > max_bulk) {
		num = max_bulk;
		last_id = afirst_id + num - 1;
	}

	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
	if (!b) {
		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
		return NULL;
	}

	err = mlx5_cmd_fc_bulk_query(dev, b);
	if (err) {
		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
		goto out;
	}

	for (node = &first->node; node; node = rb_next(node)) {
		struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
		struct mlx5_fc_cache *c = &counter->cache;
		u64 packets;
		u64 bytes;

		if (counter->id > last_id)
			break;

		mlx5_cmd_fc_bulk_get(dev, b, counter->id, &packets, &bytes);

		/* Skip the cache update when the counter did not move. */
		if (c->packets == packets)
			continue;

		c->packets = packets;
		c->bytes = bytes;
		c->lastuse = jiffies;
	}

out:
	mlx5_cmd_fc_bulk_free(b);

	return node;
}

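/* Periodic work: splice newly created counters from addlist into the
 * rb-tree, free counters marked as deleted, and, at most once per
 * MLX5_FC_STATS_PERIOD, refresh the cached stats of all remaining
 * counters via bulk queries.
 */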
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	unsigned long now = jiffies;
	struct mlx5_fc *counter = NULL;
	struct mlx5_fc *last = NULL;
	struct rb_node *node;
	LIST_HEAD(tmplist);

	spin_lock(&fc_stats->addlist_lock);

	list_splice_tail_init(&fc_stats->addlist, &tmplist);

	if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   MLX5_FC_STATS_PERIOD);

	spin_unlock(&fc_stats->addlist_lock);

	list_for_each_entry(counter, &tmplist, list)
		mlx5_fc_stats_insert(&fc_stats->counters, counter);

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		if (counter->deleted) {
			rb_erase(&counter->node, &fc_stats->counters);

			mlx5_cmd_fc_free(dev, counter->id);

			kfree(counter);
			continue;
		}

		last = counter;
	}

	if (time_before(now, fc_stats->next_query) || !last)
		return;

	/* Each call to mlx5_fc_stats_query() covers as many counters as a
	 * single bulk command allows and returns the resume point, so this
	 * loop walks the whole tree in a handful of bulk queries.
	 */
	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = mlx5_fc_stats_query(dev, counter, last->id);
	}

	fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
}

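/* Allocate a hardware flow counter. With @aging set, the counter is handed
 * over to the stats workqueue, which keeps its cached stats up to date;
 * aging counters must be released with mlx5_fc_destroy(). Returns the
 * counter or ERR_PTR() on failure.
 */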
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err)
		goto err_out;

	if (aging) {
		counter->cache.lastuse = jiffies;
		counter->aging = true;

		spin_lock(&fc_stats->addlist_lock);
		list_add(&counter->list, &fc_stats->addlist);
		spin_unlock(&fc_stats->addlist_lock);

		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	}

	return counter;

err_out:
	kfree(counter);

	return ERR_PTR(err);
}

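/* Release a flow counter. An aging counter is only marked as deleted here;
 * the workqueue performs the actual free, so a query in flight cannot race
 * with the destruction.
 */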
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		counter->deleted = true;
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}

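/* Initialize the counter rb-tree, the addlist and the single-threaded
 * workqueue used for the periodic bulk queries.
 */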
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->counters = RB_ROOT;
	INIT_LIST_HEAD(&fc_stats->addlist);
	spin_lock_init(&fc_stats->addlist_lock);

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	return 0;
}

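/* Tear down the stats machinery and free every remaining counter, both
 * those still pending on addlist and those already in the tree.
 */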
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;
	struct rb_node *node;

	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
		list_del(&counter->list);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		rb_erase(&counter->node, &fc_stats->counters);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}
}

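/* Report the counter's cached stats as a delta since the previous call.
 * The cache is copied without locking, so the values may lag the hardware
 * by up to MLX5_FC_STATS_PERIOD, and packets/bytes can be mutually
 * inconsistent if the copy races with a cache update.
 */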
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}