1
2
3
4#include <linux/kernel.h>
5#include <linux/errno.h>
6#include <linux/netdevice.h>
7#include <net/pkt_cls.h>
8#include <net/red.h>
9
10#include "spectrum.h"
11#include "spectrum_span.h"
12#include "reg.h"
13
/* Band 0 is the highest-priority band while TC 7 is the highest-priority
 * traffic class, hence the reversal. Arguments are parenthesized so the
 * macros expand correctly for compound expressions such as "a + b".
 */
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)
/* TC class identifiers are 1-based; convert to a 0-based band first. */
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS(((child) - 1))
17
/* Kinds of qdiscs this driver can offload. MLXSW_SP_QDISC_NO_QDISC marks a
 * slot that currently has no offloaded qdisc.
 */
enum mlxsw_sp_qdisc_type {
	MLXSW_SP_QDISC_NO_QDISC,
	MLXSW_SP_QDISC_RED,
	MLXSW_SP_QDISC_PRIO,
	MLXSW_SP_QDISC_ETS,
	MLXSW_SP_QDISC_TBF,
	MLXSW_SP_QDISC_FIFO,
};
26
struct mlxsw_sp_qdisc;

/* Per-qdisc-type operations. The entry points that invoke these do so
 * with the port's qdisc mutex held (see mlxsw_sp_setup_tc_*()).
 */
struct mlxsw_sp_qdisc_ops {
	enum mlxsw_sp_qdisc_type type;
	/* Validate @params before any hardware state is touched. */
	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
			    void *params);
	/* Apply @params to hardware; used for both create and change. */
	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	/* Undo the hardware configuration installed by ->replace. */
	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr);
	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr);
	/* Re-snapshot the statistics baselines so subsequent reads report
	 * deltas starting from "now".
	 */
	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* Called when a replace fails, before the qdisc is destroyed, to
	 * fix up software-visible counters (see mlxsw_sp_qdisc_change()).
	 */
	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	/* Map a class identifier (@parent) to a child qdisc slot. */
	struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
					     u32 parent);
	/* Number of child slots to allocate for this qdisc type. */
	unsigned int num_classes;
};
54
struct mlxsw_sp_qdisc {
	u32 handle;		/* TC handle; TC_H_UNSPEC when slot is free */
	int tclass_num;		/* hardware traffic class backing this qdisc */
	u8 prio_bitmap;		/* switch priorities mapped to this qdisc */
	union {
		struct red_stats red;
	} xstats_base;		/* xstats snapshot used as the zero baseline */
	struct mlxsw_sp_qdisc_stats {
		u64 tx_bytes;
		u64 tx_packets;
		u64 drops;
		u64 overlimits;
		/* Kept in hardware cells; converted to bytes only when
		 * reported (see mlxsw_sp_qdisc_update_stats()).
		 */
		u64 backlog;
	} stats_base;		/* stats snapshot used as the zero baseline */

	struct mlxsw_sp_qdisc_ops *ops;	/* NULL when nothing is offloaded here */
	struct mlxsw_sp_qdisc *parent;	/* NULL for the root qdisc */
	struct mlxsw_sp_qdisc *qdiscs;	/* array of num_classes child slots */
	unsigned int num_classes;
};
75
/* Per-port qdisc offload state, hung off struct mlxsw_sp_port::qdisc. */
struct mlxsw_sp_qdisc_state {
	struct mlxsw_sp_qdisc root_qdisc;

	/* NOTE(review): inferred from __mlxsw_sp_setup_tc_fifo() and
	 * __mlxsw_sp_qdisc_ets_replace() — confirm: FIFO children may be
	 * offered for bands of a PRIO/ETS qdisc that is not offloaded yet.
	 * future_handle records the major handle of that future parent, and
	 * future_fifos marks which of its bands should get a FIFO child
	 * once the parent itself is replaced.
	 */
	u32 future_handle;
	bool future_fifos[IEEE_8021QAZ_MAX_TCS];
	struct mutex lock;	/* serializes all qdisc offload operations */
};
94
95static bool
96mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle)
97{
98 return mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->handle == handle;
99}
100
101static struct mlxsw_sp_qdisc *
102mlxsw_sp_qdisc_walk(struct mlxsw_sp_qdisc *qdisc,
103 struct mlxsw_sp_qdisc *(*pre)(struct mlxsw_sp_qdisc *,
104 void *),
105 void *data)
106{
107 struct mlxsw_sp_qdisc *tmp;
108 unsigned int i;
109
110 if (pre) {
111 tmp = pre(qdisc, data);
112 if (tmp)
113 return tmp;
114 }
115
116 if (qdisc->ops) {
117 for (i = 0; i < qdisc->num_classes; i++) {
118 tmp = &qdisc->qdiscs[i];
119 if (qdisc->ops) {
120 tmp = mlxsw_sp_qdisc_walk(tmp, pre, data);
121 if (tmp)
122 return tmp;
123 }
124 }
125 }
126
127 return NULL;
128}
129
130static struct mlxsw_sp_qdisc *
131mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
132{
133 u32 parent = *(u32 *)data;
134
135 if (qdisc->ops && TC_H_MAJ(qdisc->handle) == TC_H_MAJ(parent)) {
136 if (qdisc->ops->find_class)
137 return qdisc->ops->find_class(qdisc, parent);
138 }
139
140 return NULL;
141}
142
143static struct mlxsw_sp_qdisc *
144mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
145 bool root_only)
146{
147 struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
148
149 if (!qdisc_state)
150 return NULL;
151 if (parent == TC_H_ROOT)
152 return &qdisc_state->root_qdisc;
153 if (root_only)
154 return NULL;
155 return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
156 mlxsw_sp_qdisc_walk_cb_find, &parent);
157}
158
159static struct mlxsw_sp_qdisc *
160mlxsw_sp_qdisc_walk_cb_find_by_handle(struct mlxsw_sp_qdisc *qdisc, void *data)
161{
162 u32 handle = *(u32 *)data;
163
164 if (qdisc->ops && qdisc->handle == handle)
165 return qdisc;
166 return NULL;
167}
168
169static struct mlxsw_sp_qdisc *
170mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
171{
172 struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
173
174 if (!qdisc_state)
175 return NULL;
176 return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
177 mlxsw_sp_qdisc_walk_cb_find_by_handle,
178 &handle);
179}
180
181static void
182mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
183{
184 struct mlxsw_sp_qdisc *tmp;
185
186 for (tmp = mlxsw_sp_qdisc->parent; tmp; tmp = tmp->parent)
187 tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
188}
189
/* Tear down an offloaded qdisc: restore headroom for the root, run the
 * type's destroy and clean_stats callbacks, and mark the slot free. Safe
 * to call with a NULL or already-empty slot. Returns the first error
 * encountered (headroom error takes precedence).
 */
static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	int err_hdroom = 0;
	int err = 0;

	if (!mlxsw_sp_qdisc)
		return 0;

	/* Destroying the root hands buffer headroom back to DCB mode. */
	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;

		hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
		err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	}

	if (!mlxsw_sp_qdisc->ops)
		return 0;

	/* The backlog attributed to this qdisc must stop being accounted
	 * for by its ancestors.
	 */
	mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
	if (mlxsw_sp_qdisc->ops->destroy)
		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	if (mlxsw_sp_qdisc->ops->clean_stats)
		mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);

	/* Reset the slot; freeing qdiscs also drops all child slots. */
	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	mlxsw_sp_qdisc->num_classes = 0;
	kfree(mlxsw_sp_qdisc->qdiscs);
	mlxsw_sp_qdisc->qdiscs = NULL;
	return err_hdroom ?: err;
}
228
/* Offload a new qdisc into an empty slot: validate parameters, allocate
 * child slots, switch the root's headroom to TC mode, and install the
 * hardware configuration via ops->replace. On failure all intermediate
 * state is unwound and the slot is left free.
 */
static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
				 u32 handle,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 struct mlxsw_sp_qdisc_ops *ops, void *params)
{
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	struct mlxsw_sp_hdroom orig_hdroom;
	unsigned int i;
	int err;

	err = ops->check_params(mlxsw_sp_port, params);
	if (err)
		return err;

	/* Classful qdiscs get an array of child slots, each pointing back
	 * at this qdisc as its parent.
	 */
	if (ops->num_classes) {
		mlxsw_sp_qdisc->qdiscs = kcalloc(ops->num_classes,
						 sizeof(*mlxsw_sp_qdisc->qdiscs),
						 GFP_KERNEL);
		if (!mlxsw_sp_qdisc->qdiscs)
			return -ENOMEM;

		for (i = 0; i < ops->num_classes; i++)
			mlxsw_sp_qdisc->qdiscs[i].parent = mlxsw_sp_qdisc;
	}

	/* Keep a copy of the headroom configuration so a failed replace
	 * can restore it.
	 */
	orig_hdroom = *mlxsw_sp_port->hdroom;
	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = orig_hdroom;

		hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

		err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
		if (err)
			goto err_hdroom_configure;
	}

	/* Mark the slot occupied before ->replace so callbacks can see a
	 * fully formed qdisc.
	 */
	mlxsw_sp_qdisc->num_classes = ops->num_classes;
	mlxsw_sp_qdisc->ops = ops;
	mlxsw_sp_qdisc->handle = handle;
	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
	if (err)
		goto err_replace;

	return 0;

err_replace:
	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	mlxsw_sp_qdisc->num_classes = 0;
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
err_hdroom_configure:
	kfree(mlxsw_sp_qdisc->qdiscs);
	mlxsw_sp_qdisc->qdiscs = NULL;
	return err;
}
287
/* Re-configure an already-offloaded qdisc of the same type. On any
 * failure the qdisc stops being offloaded: unoffload fixes up software
 * counters and the slot is destroyed.
 */
static int
mlxsw_sp_qdisc_change(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params)
{
	struct mlxsw_sp_qdisc_ops *ops = mlxsw_sp_qdisc->ops;
	int err;

	err = ops->check_params(mlxsw_sp_port, params);
	if (err)
		goto unoffload;

	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
	if (err)
		goto unoffload;

	/* A different handle means the slot now represents a different
	 * qdisc instance, so restart statistics from a fresh baseline.
	 */
	if (mlxsw_sp_qdisc->handle != handle) {
		if (ops->clean_stats)
			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
	}

	mlxsw_sp_qdisc->handle = handle;
	return 0;

unoffload:
	if (ops->unoffload)
		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	return err;
}
322
323static int
324mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
325 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
326 struct mlxsw_sp_qdisc_ops *ops, void *params)
327{
328 if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
329
330
331
332
333
334 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
335
336 if (!mlxsw_sp_qdisc->ops)
337 return mlxsw_sp_qdisc_create(mlxsw_sp_port, handle,
338 mlxsw_sp_qdisc, ops, params);
339 else
340 return mlxsw_sp_qdisc_change(mlxsw_sp_port, handle,
341 mlxsw_sp_qdisc, params);
342}
343
344static int
345mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
346 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
347 struct tc_qopt_offload_stats *stats_ptr)
348{
349 if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
350 mlxsw_sp_qdisc->ops->get_stats)
351 return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
352 mlxsw_sp_qdisc,
353 stats_ptr);
354
355 return -EOPNOTSUPP;
356}
357
358static int
359mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
360 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
361 void *xstats_ptr)
362{
363 if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
364 mlxsw_sp_qdisc->ops->get_xstats)
365 return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
366 mlxsw_sp_qdisc,
367 xstats_ptr);
368
369 return -EOPNOTSUPP;
370}
371
372static u64
373mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
374{
375 return xstats->backlog[tclass_num] +
376 xstats->backlog[tclass_num + 8];
377}
378
379static u64
380mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
381{
382 return xstats->tail_drop[tclass_num] +
383 xstats->tail_drop[tclass_num + 8];
384}
385
386static void
387mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
388 u8 prio_bitmap, u64 *tx_packets,
389 u64 *tx_bytes)
390{
391 int i;
392
393 *tx_packets = 0;
394 *tx_bytes = 0;
395 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
396 if (prio_bitmap & BIT(i)) {
397 *tx_packets += xstats->tx_packets[i];
398 *tx_bytes += xstats->tx_bytes[i];
399 }
400 }
401}
402
/* Accumulate the qdisc's current hardware counters into the caller's
 * running totals. Drops cover both WRED and tail drops; backlog is
 * reported in hardware cells.
 */
static void
mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				u64 *p_tx_bytes, u64 *p_tx_packets,
				u64 *p_drops, u64 *p_backlog)
{
	int tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_port_xstats *xstats;
	u64 tx_bytes, tx_packets;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &tx_packets, &tx_bytes);

	*p_tx_packets += tx_packets;
	*p_tx_bytes += tx_bytes;
	*p_drops += xstats->wred_drop[tclass_num] +
		    mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
	*p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
}
424
/* Convert absolute hardware counters into deltas against the stored
 * baseline, publish those deltas to @stats_ptr, and advance the baseline
 * so the same delta is never reported twice. The incoming @backlog is in
 * cells; it is converted to bytes only for reporting.
 */
static void
mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    u64 tx_bytes, u64 tx_packets,
			    u64 drops, u64 backlog,
			    struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;

	/* Absolute counters -> deltas since the last report. */
	tx_bytes -= stats_base->tx_bytes;
	tx_packets -= stats_base->tx_packets;
	drops -= stats_base->drops;
	backlog -= stats_base->backlog;

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->drops += drops;
	stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);

	/* Move the baseline forward. */
	stats_base->backlog += backlog;
	stats_base->drops += drops;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
}
448
449static void
450mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
451 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
452 struct tc_qopt_offload_stats *stats_ptr)
453{
454 u64 tx_packets = 0;
455 u64 tx_bytes = 0;
456 u64 backlog = 0;
457 u64 drops = 0;
458
459 mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
460 &tx_bytes, &tx_packets,
461 &drops, &backlog);
462 mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
463 tx_bytes, tx_packets, drops, backlog,
464 stats_ptr);
465}
466
/* Program a congestion (RED/ECN) profile for @tclass_num: write the
 * thresholds via the CWTP register, then bind the profile with CWTPM.
 * @min/@max are in cells. NOTE(review): @probability is presumably a
 * percentage (see mlxsw_sp_qdisc_red_replace()) — confirm against the
 * CWTP register documentation.
 */
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
				  u32 probability, bool is_wred, bool is_ecn)
{
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
	/* Thresholds are rounded up to the register's minimum granularity. */
	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
				    probability);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
	if (err)
		return err;

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
492
493static int
494mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
495 int tclass_num)
496{
497 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
498 char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
499
500 mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
501 MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
502 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
503}
504
/* Re-baseline a RED qdisc's statistics: snapshot the current hardware
 * counters so future reads report deltas starting from "now". Backlog
 * starts over from zero.
 */
static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	int tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *red_base;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;
	red_base = &mlxsw_sp_qdisc->xstats_base.red;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &stats_base->tx_packets,
					       &stats_base->tx_bytes);
	/* RED early drops map to prob_drop, tail drops to pdrop. */
	red_base->prob_drop = xstats->wred_drop[tclass_num];
	red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);

	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
	stats_base->drops = red_base->prob_drop + red_base->pdrop;

	stats_base->backlog = 0;
}
530
531static int
532mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
533 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
534{
535 return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
536 mlxsw_sp_qdisc->tclass_num);
537}
538
539static int
540mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
541 void *params)
542{
543 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
544 struct tc_red_qopt_offload_params *p = params;
545
546 if (p->min > p->max) {
547 dev_err(mlxsw_sp->bus_info->dev,
548 "spectrum: RED: min %u is bigger then max %u\n", p->min,
549 p->max);
550 return -EINVAL;
551 }
552 if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
553 GUARANTEED_SHARED_BUFFER)) {
554 dev_err(mlxsw_sp->bus_info->dev,
555 "spectrum: RED: max value %u is too big\n", p->max);
556 return -EINVAL;
557 }
558 if (p->min == 0 || p->max == 0) {
559 dev_err(mlxsw_sp->bus_info->dev,
560 "spectrum: RED: 0 value is illegal for min and max\n");
561 return -EINVAL;
562 }
563 return 0;
564}
565
/* Program RED on the qdisc's backing traffic class. */
static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;
	int tclass_num = mlxsw_sp_qdisc->tclass_num;
	u32 min, max;
	u64 prob;

	/* Convert p->probability — a fixed-point fraction scaled by 2^32 —
	 * into a rounded-up percentage. The division by 2^32 is done in
	 * two 2^16 steps.
	 */
	prob = p->probability;
	prob *= 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
	/* Thresholds arrive in bytes but are programmed in cells. */
	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
						 min, max, prob,
						 !p->is_nodrop, p->is_ecn);
}
588
589static void
590mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
591 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
592 struct gnet_stats_queue *qstats)
593{
594 u64 backlog;
595
596 backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
597 mlxsw_sp_qdisc->stats_base.backlog);
598 qstats->backlog -= backlog;
599 mlxsw_sp_qdisc->stats_base.backlog = 0;
600}
601
602static void
603mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
604 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
605 void *params)
606{
607 struct tc_red_qopt_offload_params *p = params;
608
609 mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
610}
611
612static int
613mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
614 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
615 void *xstats_ptr)
616{
617 struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
618 int tclass_num = mlxsw_sp_qdisc->tclass_num;
619 struct mlxsw_sp_port_xstats *xstats;
620 struct red_stats *res = xstats_ptr;
621 int early_drops, pdrops;
622
623 xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
624
625 early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
626 pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
627 xstats_base->pdrop;
628
629 res->pdrop += pdrops;
630 res->prob_drop += early_drops;
631
632 xstats_base->pdrop += pdrops;
633 xstats_base->prob_drop += early_drops;
634 return 0;
635}
636
637static int
638mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
639 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
640 struct tc_qopt_offload_stats *stats_ptr)
641{
642 int tclass_num = mlxsw_sp_qdisc->tclass_num;
643 struct mlxsw_sp_qdisc_stats *stats_base;
644 struct mlxsw_sp_port_xstats *xstats;
645 u64 overlimits;
646
647 xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
648 stats_base = &mlxsw_sp_qdisc->stats_base;
649
650 mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
651 overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits;
652
653 stats_ptr->qstats->overlimits += overlimits;
654 stats_base->overlimits += overlimits;
655
656 return 0;
657}
658
/* Leaf qdiscs have no classes, so class lookup always fails. */
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			       u32 parent)
{
	return NULL;
}
665
/* Traffic class that all priorities map to when no classful qdisc is
 * offloaded.
 */
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0

/* RED offload operations. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
	.find_class = mlxsw_sp_qdisc_leaf_find_class,
};
679
680static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
681 struct tc_red_qopt_offload *p)
682{
683 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
684
685 mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
686 if (!mlxsw_sp_qdisc)
687 return -EOPNOTSUPP;
688
689 if (p->command == TC_RED_REPLACE)
690 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
691 mlxsw_sp_qdisc,
692 &mlxsw_sp_qdisc_ops_red,
693 &p->set);
694
695 if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
696 return -EOPNOTSUPP;
697
698 switch (p->command) {
699 case TC_RED_DESTROY:
700 return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
701 case TC_RED_XSTATS:
702 return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
703 p->xstats);
704 case TC_RED_STATS:
705 return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
706 &p->stats);
707 default:
708 return -EOPNOTSUPP;
709 }
710}
711
712int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
713 struct tc_red_qopt_offload *p)
714{
715 int err;
716
717 mutex_lock(&mlxsw_sp_port->qdisc->lock);
718 err = __mlxsw_sp_setup_tc_red(mlxsw_sp_port, p);
719 mutex_unlock(&mlxsw_sp_port->qdisc->lock);
720
721 return err;
722}
723
724static void
725mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
726 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
727{
728 u64 backlog_cells = 0;
729 u64 tx_packets = 0;
730 u64 tx_bytes = 0;
731 u64 drops = 0;
732
733 mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
734 &tx_bytes, &tx_packets,
735 &drops, &backlog_cells);
736
737 mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
738 mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
739 mlxsw_sp_qdisc->stats_base.drops = drops;
740 mlxsw_sp_qdisc->stats_base.backlog = 0;
741}
742
743static int
744mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
745 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
746{
747 return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
748 MLXSW_REG_QEEC_HR_SUBGROUP,
749 mlxsw_sp_qdisc->tclass_num, 0,
750 MLXSW_REG_QEEC_MAS_DIS, 0);
751}
752
/* Convert a TBF burst size in bytes to the device's encoding and store it
 * in *p_burst_size. Returns -EINVAL unless @max_size is an exact power of
 * two within the device's supported shaper burst range.
 */
static int
mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
		      u32 max_size, u8 *p_burst_size)
{
	/* The device encodes burst size as an exponent of 512-bit units;
	 * 64 bytes == 512 bits, hence the division.
	 */
	u32 bs512 = max_size / 64;
	u8 bs = fls(bs512);

	if (!bs)
		return -EINVAL;
	--bs;

	/* Only exact powers of two can be encoded. */
	if ((1 << bs) != bs512)
		return -EINVAL;

	if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
	    bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
		return -EINVAL;

	*p_burst_size = bs;
	return 0;
}
778
779static u32
780mlxsw_sp_qdisc_tbf_max_size(u8 bs)
781{
782 return (1U << bs) * 64;
783}
784
/* Convert the TBF rate from bytes/s to Kbits/s. Dividing before the
 * multiplication keeps the intermediate value from overflowing u64 at
 * very high rates, at the cost of rounding down to whole kilobytes/s.
 */
static u64
mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
{
	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
}
793
794static int
795mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
796 void *params)
797{
798 struct tc_tbf_qopt_offload_replace_params *p = params;
799 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
800 u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
801 u8 burst_size;
802 int err;
803
804 if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
805 dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
806 "spectrum: TBF: rate of %lluKbps must be below %u\n",
807 rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
808 return -EINVAL;
809 }
810
811 err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
812 if (err) {
813 u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;
814
815 dev_err(mlxsw_sp->bus_info->dev,
816 "spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
817 p->max_size,
818 mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
819 mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
820 return -EINVAL;
821 }
822
823 return 0;
824}
825
/* Program the TBF shaper on the qdisc's backing traffic class. */
static int
mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct tc_tbf_qopt_offload_replace_params *p = params;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
	u8 burst_size;
	int err;

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
	if (WARN_ON_ONCE(err))
		/* check_params was supposed to have rejected this value. */
		return -EINVAL;

	/* The shaper is applied at the subgroup hierarchy level.
	 * NOTE(review): presumably so the whole traffic class, not a
	 * single queue, is subject to the configured rate — confirm
	 * against the QEEC register documentation.
	 */
	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					     MLXSW_REG_QEEC_HR_SUBGROUP,
					     mlxsw_sp_qdisc->tclass_num, 0,
					     rate_kbps, burst_size);
}
854
855static void
856mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
857 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
858 void *params)
859{
860 struct tc_tbf_qopt_offload_replace_params *p = params;
861
862 mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
863}
864
/* TBF exposes only the generic per-TC statistics. */
static int
mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	return 0;
}
874
/* TBF offload operations. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
	.type = MLXSW_SP_QDISC_TBF,
	.check_params = mlxsw_sp_qdisc_tbf_check_params,
	.replace = mlxsw_sp_qdisc_tbf_replace,
	.unoffload = mlxsw_sp_qdisc_tbf_unoffload,
	.destroy = mlxsw_sp_qdisc_tbf_destroy,
	.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
	.find_class = mlxsw_sp_qdisc_leaf_find_class,
};
885
886static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
887 struct tc_tbf_qopt_offload *p)
888{
889 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
890
891 mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
892 if (!mlxsw_sp_qdisc)
893 return -EOPNOTSUPP;
894
895 if (p->command == TC_TBF_REPLACE)
896 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
897 mlxsw_sp_qdisc,
898 &mlxsw_sp_qdisc_ops_tbf,
899 &p->replace_params);
900
901 if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
902 return -EOPNOTSUPP;
903
904 switch (p->command) {
905 case TC_TBF_DESTROY:
906 return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
907 case TC_TBF_STATS:
908 return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
909 &p->stats);
910 default:
911 return -EOPNOTSUPP;
912 }
913}
914
915int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
916 struct tc_tbf_qopt_offload *p)
917{
918 int err;
919
920 mutex_lock(&mlxsw_sp_port->qdisc->lock);
921 err = __mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, p);
922 mutex_unlock(&mlxsw_sp_port->qdisc->lock);
923
924 return err;
925}
926
/* FIFO takes no offloadable parameters; nothing to validate. */
static int
mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 void *params)
{
	return 0;
}
933
/* FIFO needs no hardware configuration; offloading it is a no-op. */
static int
mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	return 0;
}
941
/* FIFO exposes only the generic per-TC statistics. */
static int
mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	return 0;
}
951
/* FIFO offload operations; no destroy/unoffload/find_class needed. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
	.type = MLXSW_SP_QDISC_FIFO,
	.check_params = mlxsw_sp_qdisc_fifo_check_params,
	.replace = mlxsw_sp_qdisc_fifo_replace,
	.get_stats = mlxsw_sp_qdisc_get_fifo_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};
959
/* Handle a TC_SETUP_QDISC_FIFO request. FIFOs may be offered for bands of
 * a PRIO/ETS parent that is not offloaded yet; such requests are recorded
 * in future_fifos and realized when the parent is replaced.
 */
static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tc_fifo_qopt_offload *p)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
	unsigned int band;
	u32 parent_handle;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
	if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
		/* The parent qdisc is not offloaded (yet): remember which
		 * of its bands should get a FIFO child later.
		 */
		parent_handle = TC_H_MAJ(p->parent);
		if (parent_handle != qdisc_state->future_handle) {
			/* A different future parent: forget any bands
			 * recorded for the previous one.
			 */
			memset(qdisc_state->future_fifos, 0,
			       sizeof(qdisc_state->future_fifos));
			qdisc_state->future_handle = parent_handle;
		}

		/* Minor number of the parent class is the 1-based band. */
		band = TC_H_MIN(p->parent) - 1;
		if (band < IEEE_8021QAZ_MAX_TCS) {
			if (p->command == TC_FIFO_REPLACE)
				qdisc_state->future_fifos[band] = true;
			else if (p->command == TC_FIFO_DESTROY)
				qdisc_state->future_fifos[band] = false;
		}
	}
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_FIFO_REPLACE) {
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_fifo, NULL);
	}

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_FIFO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_FIFO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_FIFO_REPLACE: /* Handled above. */
		break;
	}

	return -EOPNOTSUPP;
}
1012
1013int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
1014 struct tc_fifo_qopt_offload *p)
1015{
1016 int err;
1017
1018 mutex_lock(&mlxsw_sp_port->qdisc->lock);
1019 err = __mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, p);
1020 mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1021
1022 return err;
1023}
1024
/* Tear down a PRIO/ETS qdisc: for every band, remap its priority to the
 * default traffic class, reset the subgroup ETS element, destroy the
 * child qdisc, and clear its priority bitmap.
 */
static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	int i;

	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
					  MLXSW_SP_PORT_DEFAULT_TCLASS);
		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      i, 0, false, 0);
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
				       &mlxsw_sp_qdisc->qdiscs[i]);
		mlxsw_sp_qdisc->qdiscs[i].prio_bitmap = 0;
	}

	return 0;
}
1043
/* PRIO destroy is identical to the shared PRIO/ETS teardown. */
static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
}
1050
1051static int
1052__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
1053{
1054 if (nbands > IEEE_8021QAZ_MAX_TCS)
1055 return -EOPNOTSUPP;
1056
1057 return 0;
1058}
1059
1060static int
1061mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
1062 void *params)
1063{
1064 struct tc_prio_qopt_offload_params *p = params;
1065
1066 return __mlxsw_sp_qdisc_ets_check_params(p->bands);
1067}
1068
/* Shared PRIO/ETS replace: program each band's subgroup ETS element, remap
 * priorities to the band's traffic class, and retire bands beyond @nbands.
 * Also instantiates FIFO children that were requested before this parent
 * existed (see future_fifos). Partial failure leaves hardware partially
 * reconfigured; the caller destroys the qdisc in that case.
 */
static int
__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     u32 handle, unsigned int nbands,
			     const unsigned int *quanta,
			     const unsigned int *weights,
			     const u8 *priomap)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *child_qdisc;
	int tclass, i, band, backlog;
	u8 old_priomap;
	int err;

	for (band = 0; band < nbands; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
		old_priomap = child_qdisc->prio_bitmap;
		child_qdisc->prio_bitmap = 0;

		/* A zero quantum disables DWRR for the band. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP,
					    tclass, 0, !!quanta[band],
					    weights[band]);
		if (err)
			return err;

		/* Point every priority assigned to this band at its TC;
		 * skip priorities that were already mapped here.
		 */
		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			if (priomap[i] == band) {
				child_qdisc->prio_bitmap |= BIT(i);
				if (BIT(i) & old_priomap)
					continue;
				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
								i, tclass);
				if (err)
					return err;
			}
		}

		child_qdisc->tclass_num = tclass;

		/* The priority set changed, so the per-priority counters
		 * no longer describe this band; re-baseline while keeping
		 * the backlog, which is still attributed to the band.
		 */
		if (old_priomap != child_qdisc->prio_bitmap &&
		    child_qdisc->ops && child_qdisc->ops->clean_stats) {
			backlog = child_qdisc->stats_base.backlog;
			child_qdisc->ops->clean_stats(mlxsw_sp_port,
						      child_qdisc);
			child_qdisc->stats_base.backlog = backlog;
		}

		/* Realize FIFO children that were requested before this
		 * parent was offloaded.
		 */
		if (handle == qdisc_state->future_handle &&
		    qdisc_state->future_fifos[band]) {
			err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
						     child_qdisc,
						     &mlxsw_sp_qdisc_ops_fifo,
						     NULL);
			if (err)
				return err;
		}
	}
	/* Retire the bands beyond the new band count. */
	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
		child_qdisc->prio_bitmap = 0;
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      tclass, 0, false, 0);
	}

	/* All pending FIFO requests have been consumed. */
	qdisc_state->future_handle = TC_H_UNSPEC;
	memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
	return 0;
}
1142
1143static int
1144mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
1145 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1146 void *params)
1147{
1148 struct tc_prio_qopt_offload_params *p = params;
1149 unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
1150
1151 return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
1152 handle, p->bands, zeroes,
1153 zeroes, p->priomap);
1154}
1155
1156static void
1157__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1158 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1159 struct gnet_stats_queue *qstats)
1160{
1161 u64 backlog;
1162
1163 backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
1164 mlxsw_sp_qdisc->stats_base.backlog);
1165 qstats->backlog -= backlog;
1166}
1167
1168static void
1169mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1170 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1171 void *params)
1172{
1173 struct tc_prio_qopt_offload_params *p = params;
1174
1175 __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
1176 p->qstats);
1177}
1178
/* Aggregate statistics over all child bands of a PRIO/ETS qdisc and report
 * them through @stats_ptr. Shared by the PRIO and ETS ops tables.
 */
static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc *tc_qdisc;
	u64 tx_packets = 0;
	u64 tx_bytes = 0;
	u64 backlog = 0;
	u64 drops = 0;
	int i;

	/* Accumulate the per-traffic-class counters of every band. */
	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
		tc_qdisc = &mlxsw_sp_qdisc->qdiscs[i];
		mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
						&tx_bytes, &tx_packets,
						&drops, &backlog);
	}

	/* Fold the raw totals into the offload stats structure (relative to
	 * the stored stats base).
	 */
	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
				    tx_bytes, tx_packets, drops, backlog,
				    stats_ptr);
	return 0;
}
1203
1204static void
1205mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1206 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1207{
1208 struct mlxsw_sp_qdisc_stats *stats_base;
1209 struct mlxsw_sp_port_xstats *xstats;
1210 struct rtnl_link_stats64 *stats;
1211 int i;
1212
1213 xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
1214 stats = &mlxsw_sp_port->periodic_hw_stats.stats;
1215 stats_base = &mlxsw_sp_qdisc->stats_base;
1216
1217 stats_base->tx_packets = stats->tx_packets;
1218 stats_base->tx_bytes = stats->tx_bytes;
1219
1220 stats_base->drops = 0;
1221 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1222 stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
1223 stats_base->drops += xstats->wred_drop[i];
1224 }
1225
1226 mlxsw_sp_qdisc->stats_base.backlog = 0;
1227}
1228
1229static struct mlxsw_sp_qdisc *
1230mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1231 u32 parent)
1232{
1233 int child_index = TC_H_MIN(parent);
1234 int band = child_index - 1;
1235
1236 if (band < 0 || band >= mlxsw_sp_qdisc->num_classes)
1237 return NULL;
1238 return &mlxsw_sp_qdisc->qdiscs[band];
1239}
1240
/* Ops for an offloaded PRIO root qdisc. Stats, stats cleanup and class lookup
 * are shared with ETS, since both are offloaded onto the same per-band
 * traffic-class scheme.
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
	.type = MLXSW_SP_QDISC_PRIO,
	.check_params = mlxsw_sp_qdisc_prio_check_params,
	.replace = mlxsw_sp_qdisc_prio_replace,
	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
	.destroy = mlxsw_sp_qdisc_prio_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
	.find_class = mlxsw_sp_qdisc_prio_find_class,
	.num_classes = IEEE_8021QAZ_MAX_TCS,
};
1252
1253static int
1254mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
1255 void *params)
1256{
1257 struct tc_ets_qopt_offload_replace_params *p = params;
1258
1259 return __mlxsw_sp_qdisc_ets_check_params(p->bands);
1260}
1261
1262static int
1263mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
1264 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1265 void *params)
1266{
1267 struct tc_ets_qopt_offload_replace_params *p = params;
1268
1269 return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
1270 handle, p->bands, p->quanta,
1271 p->weights, p->priomap);
1272}
1273
1274static void
1275mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1276 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1277 void *params)
1278{
1279 struct tc_ets_qopt_offload_replace_params *p = params;
1280
1281 __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
1282 p->qstats);
1283}
1284
/* Thin ops adapter around the shared ETS/PRIO teardown helper. */
static int
mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
}
1291
/* Ops for an offloaded ETS root qdisc. get_stats, clean_stats and find_class
 * are deliberately the PRIO implementations — both qdiscs map bands onto
 * hardware traffic classes the same way.
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
	.type = MLXSW_SP_QDISC_ETS,
	.check_params = mlxsw_sp_qdisc_ets_check_params,
	.replace = mlxsw_sp_qdisc_ets_replace,
	.unoffload = mlxsw_sp_qdisc_ets_unoffload,
	.destroy = mlxsw_sp_qdisc_ets_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
	.find_class = mlxsw_sp_qdisc_prio_find_class,
	.num_classes = IEEE_8021QAZ_MAX_TCS,
};
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329static int
1330__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
1331 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1332 u8 band, u32 child_handle)
1333{
1334 struct mlxsw_sp_qdisc *old_qdisc;
1335
1336 if (band < mlxsw_sp_qdisc->num_classes &&
1337 mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
1338 return 0;
1339
1340 if (!child_handle) {
1341
1342
1343
1344 return 0;
1345 }
1346
1347
1348
1349
1350 old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
1351 child_handle);
1352 if (old_qdisc)
1353 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
1354
1355 mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc, band);
1356 if (!WARN_ON(!mlxsw_sp_qdisc))
1357 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1358
1359 return -EOPNOTSUPP;
1360}
1361
1362static int
1363mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
1364 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1365 struct tc_prio_qopt_offload_graft_params *p)
1366{
1367 return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
1368 p->band, p->child_handle);
1369}
1370
/* Dispatch a PRIO offload request. Must be called with the port's qdisc lock
 * held (see mlxsw_sp_setup_tc_prio()).
 */
static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tc_prio_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	/* Locate the offloaded qdisc at the requested parent location. */
	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	/* REPLACE may install a new qdisc, so it is handled before the
	 * handle-match check below.
	 */
	if (p->command == TC_PRIO_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_prio,
					      &p->replace_params);

	/* All other commands must target the currently offloaded qdisc. */
	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_PRIO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_PRIO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_PRIO_GRAFT:
		return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
						 &p->graft_params);
	default:
		return -EOPNOTSUPP;
	}
}
1402
1403int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
1404 struct tc_prio_qopt_offload *p)
1405{
1406 int err;
1407
1408 mutex_lock(&mlxsw_sp_port->qdisc->lock);
1409 err = __mlxsw_sp_setup_tc_prio(mlxsw_sp_port, p);
1410 mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1411
1412 return err;
1413}
1414
/* Dispatch an ETS offload request. Must be called with the port's qdisc lock
 * held (see mlxsw_sp_setup_tc_ets()). Mirrors __mlxsw_sp_setup_tc_prio().
 */
static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_ets_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	/* Locate the offloaded qdisc at the requested parent location. */
	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	/* REPLACE may install a new qdisc, so it bypasses the handle check. */
	if (p->command == TC_ETS_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_ets,
					      &p->replace_params);

	/* All other commands must target the currently offloaded qdisc. */
	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_ETS_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_ETS_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_ETS_GRAFT:
		return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
						  p->graft_params.band,
						  p->graft_params.child_handle);
	default:
		return -EOPNOTSUPP;
	}
}
1447
1448int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
1449 struct tc_ets_qopt_offload *p)
1450{
1451 int err;
1452
1453 mutex_lock(&mlxsw_sp_port->qdisc->lock);
1454 err = __mlxsw_sp_setup_tc_ets(mlxsw_sp_port, p);
1455 mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1456
1457 return err;
1458}
1459
/* State of one qevent flow block: the matchall filters added to the block
 * (mall_entry_list) and the (port, qdisc, trigger) bindings the block is
 * attached to (binding_list).
 */
struct mlxsw_sp_qevent_block {
	struct list_head binding_list;		/* of mlxsw_sp_qevent_binding */
	struct list_head mall_entry_list;	/* of mlxsw_sp_mall_entry */
	struct mlxsw_sp *mlxsw_sp;
};
1465
/* One attachment of a qevent block to a qevent of a specific offloaded qdisc
 * on a specific port.
 */
struct mlxsw_sp_qevent_binding {
	struct list_head list;			/* node in block->binding_list */
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 handle;				/* handle of the bound qdisc */
	int tclass_num;				/* hw traffic class of that qdisc */
	enum mlxsw_sp_span_trigger span_trigger;
};
1473
1474static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
1475
/* Set up a SPAN session for a qevent entry: obtain a mirroring agent, mark
 * the port as analyzed, bind the agent to the qevent trigger, and enable the
 * trigger on the bound traffic class. On success the allocated SPAN agent id
 * is returned through @p_span_id (to be released later via
 * mlxsw_sp_qevent_span_deconfigure()). On failure, already-acquired resources
 * are released in reverse order via the goto chain.
 */
static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_mall_entry *mall_entry,
					  struct mlxsw_sp_qevent_binding *qevent_binding,
					  const struct mlxsw_sp_span_agent_parms *agent_parms,
					  int *p_span_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	int span_id;
	int err;

	err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
	if (err)
		return err;

	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
	if (err)
		goto err_analyzed_port_get;

	trigger_parms.span_id = span_id;
	/* Mirror every triggering packet (no sampling). */
	trigger_parms.probability_rate = 1;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
				       &trigger_parms);
	if (err)
		goto err_agent_bind;

	err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger,
					   qevent_binding->tclass_num);
	if (err)
		goto err_trigger_enable;

	*p_span_id = span_id;
	return 0;

err_trigger_enable:
	mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
				   &trigger_parms);
err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
	return err;
}
1519
/* Tear down a SPAN session previously set up by
 * mlxsw_sp_qevent_span_configure(), releasing resources in exact reverse
 * order of acquisition.
 */
static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_qevent_binding *qevent_binding,
					     int span_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {
		.span_id = span_id,
	};

	mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
				      qevent_binding->tclass_num);
	mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
				   &trigger_parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}
1536
1537static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
1538 struct mlxsw_sp_mall_entry *mall_entry,
1539 struct mlxsw_sp_qevent_binding *qevent_binding)
1540{
1541 struct mlxsw_sp_span_agent_parms agent_parms = {
1542 .to_dev = mall_entry->mirror.to_dev,
1543 };
1544
1545 return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
1546 &agent_parms, &mall_entry->mirror.span_id);
1547}
1548
/* Undo mlxsw_sp_qevent_mirror_configure(). */
static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_mall_entry *mall_entry,
					       struct mlxsw_sp_qevent_binding *qevent_binding)
{
	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
}
1555
/* Offload a trap action: direct triggering packets to the CPU through the
 * buffer-drops trap group's policer, using a SPAN agent with the dedicated
 * buffer session id.
 */
static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_mall_entry *mall_entry,
					  struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_span_agent_parms agent_parms = {
		.session_id = MLXSW_SP_SPAN_SESSION_ID_BUFFER,
	};
	int err;

	/* Rate-limit trapped packets with the policer bound to the generic
	 * buffer-drops trap group.
	 */
	err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
						    DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
						    &agent_parms.policer_enable,
						    &agent_parms.policer_id);
	if (err)
		return err;

	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
					      &agent_parms, &mall_entry->trap.span_id);
}
1575
/* Undo mlxsw_sp_qevent_trap_configure(). */
static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_mall_entry *mall_entry,
					     struct mlxsw_sp_qevent_binding *qevent_binding)
{
	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
}
1582
/* Offload one matchall entry on one binding, dispatching on the action type
 * set up in mlxsw_sp_qevent_mall_replace().
 */
static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_mall_entry *mall_entry,
					   struct mlxsw_sp_qevent_binding *qevent_binding)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
		return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
	default:
		/* mall_replace() only creates MIRROR/TRAP entries, so this
		 * should be unreachable.
		 */
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}
1598
1599static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
1600 struct mlxsw_sp_mall_entry *mall_entry,
1601 struct mlxsw_sp_qevent_binding *qevent_binding)
1602{
1603 switch (mall_entry->type) {
1604 case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
1605 return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
1606 case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
1607 return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
1608 default:
1609 WARN_ON(1);
1610 return;
1611 }
1612}
1613
/* Offload all of a block's entries on one binding. On failure, entries that
 * were already configured are torn down again (reverse walk from the failing
 * entry), leaving the binding fully unconfigured.
 */
static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
					     struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	int err;

	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
		err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
						      qevent_binding);
		if (err)
			goto err_entry_configure;
	}

	return 0;

err_entry_configure:
	list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
						  qevent_binding);
	return err;
}
1635
1636static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
1637 struct mlxsw_sp_qevent_binding *qevent_binding)
1638{
1639 struct mlxsw_sp_mall_entry *mall_entry;
1640
1641 list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
1642 mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
1643 qevent_binding);
1644}
1645
/* Offload the block's entries on every binding. On failure, bindings already
 * configured are deconfigured again (reverse walk), leaving the block fully
 * unconfigured.
 */
static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;
	int err;

	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
		err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
		if (err)
			goto err_binding_configure;
	}

	return 0;

err_binding_configure:
	list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
	return err;
}
1664
1665static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
1666{
1667 struct mlxsw_sp_qevent_binding *qevent_binding;
1668
1669 list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
1670 mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
1671}
1672
1673static struct mlxsw_sp_mall_entry *
1674mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
1675{
1676 struct mlxsw_sp_mall_entry *mall_entry;
1677
1678 list_for_each_entry(mall_entry, &block->mall_entry_list, list)
1679 if (mall_entry->cookie == cookie)
1680 return mall_entry;
1681
1682 return NULL;
1683}
1684
/* Add a matchall filter to a qevent block and offload it on all bindings.
 * Only a single filter per block, a single action per filter, chain 0, and
 * protocol "all" are supported; the action must be mirred or trap.
 */
static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_qevent_block *qevent_block,
					struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	struct flow_action_entry *act;
	int err;

	/* Only one filter per block is supported, so a non-empty list means
	 * this request cannot be honored. (NOTE(review): presumably replace
	 * of an existing rule arrives as destroy + replace — confirm.)
	 */
	if (!list_empty(&qevent_block->mall_entry_list)) {
		NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
		return -EOPNOTSUPP;
	}
	if (f->rule->action.num_entries != 1) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
		return -EOPNOTSUPP;
	}
	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}
	if (f->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
		return -EOPNOTSUPP;
	}

	/* The hardware path offers no per-filter counters here; the filter
	 * must have been requested with HW stats disabled.
	 */
	act = &f->rule->action.entries[0];
	if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
		NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
		return -EOPNOTSUPP;
	}

	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;

	if (act->id == FLOW_ACTION_MIRRED) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
	} else if (act->id == FLOW_ACTION_TRAP) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
	} else {
		NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
		err = -EOPNOTSUPP;
		goto err_unsupported_action;
	}

	/* Link the entry before configuring: block_configure() walks the
	 * entry list.
	 */
	list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);

	err = mlxsw_sp_qevent_block_configure(qevent_block);
	if (err)
		goto err_block_configure;

	return 0;

err_block_configure:
	list_del(&mall_entry->list);
err_unsupported_action:
	kfree(mall_entry);
	return err;
}
1749
1750static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
1751 struct tc_cls_matchall_offload *f)
1752{
1753 struct mlxsw_sp_mall_entry *mall_entry;
1754
1755 mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
1756 if (!mall_entry)
1757 return;
1758
1759 mlxsw_sp_qevent_block_deconfigure(qevent_block);
1760
1761 list_del(&mall_entry->list);
1762 kfree(mall_entry);
1763}
1764
1765static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
1766 struct tc_cls_matchall_offload *f)
1767{
1768 struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;
1769
1770 switch (f->command) {
1771 case TC_CLSMATCHALL_REPLACE:
1772 return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
1773 case TC_CLSMATCHALL_DESTROY:
1774 mlxsw_sp_qevent_mall_destroy(qevent_block, f);
1775 return 0;
1776 default:
1777 return -EOPNOTSUPP;
1778 }
1779}
1780
1781static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
1782{
1783 struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
1784
1785 switch (type) {
1786 case TC_SETUP_CLSMATCHALL:
1787 return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
1788 default:
1789 return -EOPNOTSUPP;
1790 }
1791}
1792
/* Allocate and initialize a qevent block. Returns NULL on allocation
 * failure.
 *
 * NOTE(review): @net is currently unused here — presumably kept for symmetry
 * with other block-create callbacks; confirm before removing.
 */
static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
								  struct net *net)
{
	struct mlxsw_sp_qevent_block *qevent_block;

	qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL);
	if (!qevent_block)
		return NULL;

	INIT_LIST_HEAD(&qevent_block->binding_list);
	INIT_LIST_HEAD(&qevent_block->mall_entry_list);
	qevent_block->mlxsw_sp = mlxsw_sp;
	return qevent_block;
}
1807
1808static void
1809mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
1810{
1811 WARN_ON(!list_empty(&qevent_block->binding_list));
1812 WARN_ON(!list_empty(&qevent_block->mall_entry_list));
1813 kfree(qevent_block);
1814}
1815
/* flow_block_cb release hook: destroy the block carried as callback priv. */
static void mlxsw_sp_qevent_block_release(void *cb_priv)
{
	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;

	mlxsw_sp_qevent_block_destroy(qevent_block);
}
1822
1823static struct mlxsw_sp_qevent_binding *
1824mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
1825 enum mlxsw_sp_span_trigger span_trigger)
1826{
1827 struct mlxsw_sp_qevent_binding *binding;
1828
1829 binding = kzalloc(sizeof(*binding), GFP_KERNEL);
1830 if (!binding)
1831 return ERR_PTR(-ENOMEM);
1832
1833 binding->mlxsw_sp_port = mlxsw_sp_port;
1834 binding->handle = handle;
1835 binding->tclass_num = tclass_num;
1836 binding->span_trigger = span_trigger;
1837 return binding;
1838}
1839
/* Free a qevent binding; must already be deconfigured and unlinked. */
static void
mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
{
	kfree(binding);
}
1845
1846static struct mlxsw_sp_qevent_binding *
1847mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
1848 struct mlxsw_sp_port *mlxsw_sp_port,
1849 u32 handle,
1850 enum mlxsw_sp_span_trigger span_trigger)
1851{
1852 struct mlxsw_sp_qevent_binding *qevent_binding;
1853
1854 list_for_each_entry(qevent_binding, &block->binding_list, list)
1855 if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port &&
1856 qevent_binding->handle == handle &&
1857 qevent_binding->span_trigger == span_trigger)
1858 return qevent_binding;
1859 return NULL;
1860}
1861
/* Bind a qevent flow block to a port's qdisc qevent. The block callback is
 * shared across bindings and refcounted: the first bind allocates the block
 * and registers the callback, later binds only take a reference. A new
 * binding is created, all of the block's filters are offloaded on it, and it
 * is linked into the block.
 */
static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
					       struct flow_block_offload *f,
					       enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;
	struct mlxsw_sp_qdisc *qdisc;
	bool register_block = false;
	int err;

	/* Reuse an existing callback for this block, or create one. */
	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb) {
		qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
		if (!qevent_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
					       mlxsw_sp_qevent_block_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_qevent_block_destroy(qevent_block);
			return PTR_ERR(block_cb);
		}
		/* Defer flow_block_cb_add() until the bind fully succeeds. */
		register_block = true;
	} else {
		qevent_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);

	/* The qevent belongs to a qdisc, which must already be offloaded. */
	qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
	if (!qdisc) {
		NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
		err = -ENOENT;
		goto err_find_qdisc;
	}

	/* A duplicate bind for the same (port, handle, trigger) would be a
	 * driver/core bug.
	 */
	if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
						   span_trigger))) {
		err = -EEXIST;
		goto err_binding_exists;
	}

	qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle,
							qdisc->tclass_num, span_trigger);
	if (IS_ERR(qevent_binding)) {
		err = PTR_ERR(qevent_binding);
		goto err_binding_create;
	}

	/* Offload the block's existing filters on the new binding. */
	err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
	if (err)
		goto err_binding_configure;

	list_add(&qevent_binding->list, &qevent_block->binding_list);

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
	}

	return 0;

err_binding_configure:
	mlxsw_sp_qevent_binding_destroy(qevent_binding);
err_binding_create:
err_binding_exists:
err_find_qdisc:
	/* Dropping the last reference frees the callback and, through the
	 * release hook, the block itself.
	 */
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}
1933
/* Unbind a qevent flow block from a port's qdisc qevent: deconfigure and free
 * the matching binding, and drop the block callback reference, removing the
 * callback when this was the last binding.
 */
static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
						  struct flow_block_offload *f,
						  enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb)
		return;
	qevent_block = flow_block_cb_priv(block_cb);

	qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
							span_trigger);
	if (!qevent_binding)
		return;

	list_del(&qevent_binding->list);
	mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
	mlxsw_sp_qevent_binding_destroy(qevent_binding);

	/* Last reference: unregister the callback; its release hook destroys
	 * the block.
	 */
	if (!flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}
1962
1963static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
1964 struct flow_block_offload *f,
1965 enum mlxsw_sp_span_trigger span_trigger)
1966{
1967 f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
1968
1969 switch (f->command) {
1970 case FLOW_BLOCK_BIND:
1971 return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger);
1972 case FLOW_BLOCK_UNBIND:
1973 mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
1974 return 0;
1975 default:
1976 return -EOPNOTSUPP;
1977 }
1978}
1979
/* Entry point for binding a flow block to a qdisc's early_drop qevent. */
int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
					      struct flow_block_offload *f)
{
	return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP);
}
1985
/* Allocate and initialize the per-port qdisc offload state. Returns 0 on
 * success, -ENOMEM on allocation failure.
 */
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_qdisc_state *qdisc_state;

	qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
	if (!qdisc_state)
		return -ENOMEM;

	mutex_init(&qdisc_state->lock);
	/* The root qdisc covers all priorities and starts out on the port's
	 * default traffic class.
	 */
	qdisc_state->root_qdisc.prio_bitmap = 0xff;
	qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
	mlxsw_sp_port->qdisc = qdisc_state;
	return 0;
}
2000
/* Release the per-port qdisc offload state set up by
 * mlxsw_sp_tc_qdisc_init().
 */
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mutex_destroy(&mlxsw_sp_port->qdisc->lock);
	kfree(mlxsw_sp_port->qdisc);
}
2006