// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/pkt_cls.h>
#include <net/red.h>

#include "spectrum.h"
#include "reg.h"

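/* PRIO band 0 carries the highest-priority traffic, while in hardware the
 * highest-numbered traffic class is scheduled first, so bands are mapped to
 * traffic classes in reverse order. Child class minor numbers are 1-based,
 * hence the "child - 1" below.
 */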
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))

enum mlxsw_sp_qdisc_type {
	MLXSW_SP_QDISC_NO_QDISC,
	MLXSW_SP_QDISC_RED,
	MLXSW_SP_QDISC_PRIO,
};

struct mlxsw_sp_qdisc_ops {
	enum mlxsw_sp_qdisc_type type;
	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params);
	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr);
	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr);
	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* unoffload - to be used for a qdisc that stops being offloaded
	 * without being destroyed.
	 */
	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
};

struct mlxsw_sp_qdisc {
	u32 handle;
	u8 tclass_num;
	u8 prio_bitmap;
	union {
		struct red_stats red;
	} xstats_base;
	struct mlxsw_sp_qdisc_stats {
		u64 tx_bytes;
		u64 tx_packets;
		u64 drops;
		u64 overlimits;
		u64 backlog;
	} stats_base;

	struct mlxsw_sp_qdisc_ops *ops;
};

static bool
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
		       enum mlxsw_sp_qdisc_type type)
{
	return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	       mlxsw_sp_qdisc->ops->type == type &&
	       mlxsw_sp_qdisc->handle == handle;
}

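/* Resolve the offloaded qdisc that a TC message with the given parent refers
 * to: TC_H_ROOT selects the port's root qdisc, while any other parent is
 * interpreted as a band (child class) of the root qdisc, identified by the
 * minor number of the parent handle.
 */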
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
		    bool root_only)
{
	int tclass, child_index;

	if (parent == TC_H_ROOT)
		return mlxsw_sp_port->root_qdisc;

	if (root_only || !mlxsw_sp_port->root_qdisc ||
	    !mlxsw_sp_port->root_qdisc->ops ||
	    TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
	    TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
		return NULL;

	child_index = TC_H_MIN(parent);
	tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
	return &mlxsw_sp_port->tclass_qdiscs[tclass];
}

static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
	int i;

	if (mlxsw_sp_port->root_qdisc->handle == handle)
		return mlxsw_sp_port->root_qdisc;

	if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
		return NULL;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
			return &mlxsw_sp_port->tclass_qdiscs[i];

	return NULL;
}

static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	int err = 0;

	if (!mlxsw_sp_qdisc)
		return 0;

	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
						   mlxsw_sp_qdisc);

	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	return err;
}

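/* Offload a qdisc at the given location. Parameters are validated first and
 * then applied; on failure the location is left unoffloaded. Statistics are
 * reset only when the handle changes, i.e. when a new qdisc is offloaded
 * rather than an existing one being reconfigured.
 */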
static int
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
		       struct mlxsw_sp_qdisc_ops *ops, void *params)
{
	int err;

	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
		/* A qdisc of a different type is currently offloaded at this
		 * location; remove it before configuring the new one. A qdisc
		 * of the same type is simply reconfigured in place.
		 */
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
	if (err)
		goto err_bad_param;

	err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
	if (err)
		goto err_config;

	if (mlxsw_sp_qdisc->handle != handle) {
		mlxsw_sp_qdisc->ops = ops;
		if (ops->clean_stats)
			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
	}

	mlxsw_sp_qdisc->handle = handle;
	return 0;

err_bad_param:
err_config:
	if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	return err;
}

static int
mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr)
{
	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	    mlxsw_sp_qdisc->ops->get_stats)
		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
						      mlxsw_sp_qdisc,
						      stats_ptr);

	return -EOPNOTSUPP;
}

static int
mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr)
{
	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	    mlxsw_sp_qdisc->ops->get_xstats)
		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
						       mlxsw_sp_qdisc,
						       xstats_ptr);

	return -EOPNOTSUPP;
}

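/* Per-tclass counters are reported separately for the unicast traffic class i
 * and its multicast counterpart i + 8; sum the two to get the per-band value.
 */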
static u64
mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
{
	return xstats->backlog[tclass_num] +
	       xstats->backlog[tclass_num + 8];
}

static u64
mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
{
	return xstats->tail_drop[tclass_num] +
	       xstats->tail_drop[tclass_num + 8];
}

static void
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
				       u8 prio_bitmap, u64 *tx_packets,
				       u64 *tx_bytes)
{
	int i;

	*tx_packets = 0;
	*tx_bytes = 0;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (prio_bitmap & BIT(i)) {
			*tx_packets += xstats->tx_packets[i];
			*tx_bytes += xstats->tx_bytes[i];
		}
	}
}

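/* Configure RED/ECN for a traffic class: program the WRED profile thresholds
 * and drop probability through the CWTP register, then enable the profile for
 * the traffic class through CWTPM. Thresholds are given in buffer cells.
 */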
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
				  u32 probability, bool is_ecn)
{
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
				    probability);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
	if (err)
		return err;

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static int
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   int tclass_num)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

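/* Snapshot the relevant hardware counters at offload time. Later stats reads
 * report only the delta against this baseline, so counts accumulated before
 * the qdisc was offloaded are not attributed to it.
 */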
static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *red_base;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;
	red_base = &mlxsw_sp_qdisc->xstats_base.red;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &stats_base->tx_packets,
					       &stats_base->tx_bytes);
	red_base->prob_mark = xstats->ecn;
	red_base->prob_drop = xstats->wred_drop[tclass_num];
	red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);

	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
	stats_base->drops = red_base->prob_drop + red_base->pdrop;

	stats_base->backlog = 0;
}

static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;

	if (root_qdisc != mlxsw_sp_qdisc)
		root_qdisc->stats_base.backlog -=
					mlxsw_sp_qdisc->stats_base.backlog;

	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
						  mlxsw_sp_qdisc->tclass_num);
}

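/* RED parameters are validated against the device: min must not exceed max,
 * both must be non-zero, and max must fit within the guaranteed shared
 * buffer size.
 */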
static int
mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;

	if (p->min > p->max) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: min %u is bigger than max %u\n", p->min,
			p->max);
		return -EINVAL;
	}
	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
					GUARANTEED_SHARED_BUFFER)) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: max value %u is too big\n", p->max);
		return -EINVAL;
	}
	if (p->min == 0 || p->max == 0) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: 0 value is illegal for min and max\n");
		return -EINVAL;
	}
	return 0;
}

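/* Apply RED parameters to the traffic class backing this qdisc. The qdisc's
 * drop probability is a fixed-point fraction scaled by 2^32; it is converted
 * to a percentage, and the min/max thresholds are converted from bytes to
 * buffer cells.
 */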
static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	u32 min, max;
	u64 prob;

	/* calculate probability in percentage */
	prob = p->probability;
	prob *= 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
						 max, prob, p->is_ecn);
}

static void
mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     void *params)
{
	struct tc_red_qopt_offload_params *p = params;
	u64 backlog;

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	p->qstats->backlog -= backlog;
	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *xstats_ptr)
{
	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *res = xstats_ptr;
	int early_drops, marks, pdrops;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
	marks = xstats->ecn - xstats_base->prob_mark;
	pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
		 xstats_base->pdrop;

	res->pdrop += pdrops;
	res->prob_drop += early_drops;
	res->prob_mark += marks;

	xstats_base->pdrop += pdrops;
	xstats_base->prob_drop += early_drops;
	xstats_base->prob_mark += marks;
	return 0;
}

static int
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	u64 tx_bytes, tx_packets, overlimits, drops, backlog;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &tx_packets, &tx_bytes);
	tx_bytes = tx_bytes - stats_base->tx_bytes;
	tx_packets = tx_packets - stats_base->tx_packets;

	overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
		     stats_base->overlimits;
	drops = xstats->wred_drop[tclass_num] +
		mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
		stats_base->drops;
	backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num);

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->overlimits += overlimits;
	stats_ptr->qstats->drops += drops;
	stats_ptr->qstats->backlog +=
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     backlog) -
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     stats_base->backlog);

	stats_base->backlog = backlog;
	stats_base->drops += drops;
	stats_base->overlimits += overlimits;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
	return 0;
}

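/* Traffic class to which all priorities are restored when an offloaded PRIO
 * qdisc is destroyed.
 */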
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
};

int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_red_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_RED_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_red,
					      &p->set);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_RED))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_RED_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_RED_XSTATS:
		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
						 p->xstats);
	case TC_RED_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	default:
		return -EOPNOTSUPP;
	}
}

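/* Destroying the root PRIO qdisc restores the default priority to traffic
 * class mapping and destroys any offloaded child qdiscs.
 */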
static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
					  MLXSW_SP_PORT_DEFAULT_TCLASS);
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
				       &mlxsw_sp_port->tclass_qdiscs[i]);
		mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
	}

	return 0;
}

static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 void *params)
{
	struct tc_prio_qopt_offload_params *p = params;

	if (p->bands > IEEE_8021QAZ_MAX_TCS)
		return -EOPNOTSUPP;

	return 0;
}

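/* Offload a PRIO configuration: for each band, point the priorities mapped to
 * it at the corresponding traffic class. When a band's priority set changes,
 * the child qdisc's statistics base is refreshed (preserving its backlog).
 * Bands beyond the configured count have their child qdiscs destroyed.
 */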
static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	struct tc_prio_qopt_offload_params *p = params;
	struct mlxsw_sp_qdisc *child_qdisc;
	int tclass, i, band, backlog;
	u8 old_priomap;
	int err;

	for (band = 0; band < p->bands; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
		old_priomap = child_qdisc->prio_bitmap;
		child_qdisc->prio_bitmap = 0;
		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			if (p->priomap[i] == band) {
				child_qdisc->prio_bitmap |= BIT(i);
				if (BIT(i) & old_priomap)
					continue;
				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
								i, tclass);
				if (err)
					return err;
			}
		}
		if (old_priomap != child_qdisc->prio_bitmap &&
		    child_qdisc->ops && child_qdisc->ops->clean_stats) {
			backlog = child_qdisc->stats_base.backlog;
			child_qdisc->ops->clean_stats(mlxsw_sp_port,
						      child_qdisc);
			child_qdisc->stats_base.backlog = backlog;
		}
	}
	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
		child_qdisc->prio_bitmap = 0;
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
	}
	return 0;
}

static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *params)
{
	struct tc_prio_qopt_offload_params *p = params;
	u64 backlog;

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	p->qstats->backlog -= backlog;
}

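/* The root PRIO qdisc sees all traffic on the port, so its bstats are taken
 * from the port-wide counters, while drops and backlog are summed over all
 * traffic classes.
 */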
static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct rtnl_link_stats64 *stats;
	int i;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
	tx_packets = stats->tx_packets - stats_base->tx_packets;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		drops += mlxsw_sp_xstats_tail_drop(xstats, i);
		drops += xstats->wred_drop[i];
		backlog += mlxsw_sp_xstats_backlog(xstats, i);
	}
	drops = drops - stats_base->drops;

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->drops += drops;
	stats_ptr->qstats->backlog +=
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     backlog) -
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     stats_base->backlog);
	stats_base->backlog = backlog;
	stats_base->drops += drops;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
	return 0;
}

static void
mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct rtnl_link_stats64 *stats;
	int i;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	stats_base->tx_packets = stats->tx_packets;
	stats_base->tx_bytes = stats->tx_bytes;

	stats_base->drops = 0;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
		stats_base->drops += xstats->wred_drop[i];
	}

	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
	.type = MLXSW_SP_QDISC_PRIO,
	.check_params = mlxsw_sp_qdisc_prio_check_params,
	.replace = mlxsw_sp_qdisc_prio_replace,
	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
	.destroy = mlxsw_sp_qdisc_prio_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};

/* Grafting is not supported in mlxsw. Grafting a qdisc results in
 * un-offloading of the grafted qdisc as well as of the qdisc at the new
 * location. The only exception is grafting a qdisc to the location where it
 * already is, which is ignored: it typically happens as part of a qdisc
 * replace, and the replace itself will re-offload whatever is needed.
 */
static int
mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  struct tc_prio_qopt_offload_graft_params *p)
{
	int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
	struct mlxsw_sp_qdisc *old_qdisc;

	/* Check if the grafted qdisc is already offloaded at its new
	 * location. If so, nothing needs to be done.
	 */
	if (p->band < IEEE_8021QAZ_MAX_TCS &&
	    mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
		return 0;

	if (!p->child_handle) {
		/* This is an invisible FIFO replacing the original qdisc.
		 * Nothing is offloaded for it, so ignore the graft; the
		 * original qdisc's destroy will follow.
		 */
		return 0;
	}

	/* See if the grafted qdisc is already offloaded on some other traffic
	 * class. If so, unoffload it first.
	 */
	old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
						  p->child_handle);
	if (old_qdisc)
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
			       &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
	return -EOPNOTSUPP;
}

int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct tc_prio_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_PRIO_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_prio,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_PRIO))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_PRIO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_PRIO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_PRIO_GRAFT:
		return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
						 &p->graft_params);
	default:
		return -EOPNOTSUPP;
	}
}

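/* Allocate the per-port qdisc state: one root qdisc and one qdisc per traffic
 * class for potential children of an offloaded PRIO qdisc.
 */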
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
	int i;

	mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
	if (!mlxsw_sp_qdisc)
		goto err_root_qdisc_init;

	mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
	mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
	mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;

	mlxsw_sp_qdisc = kcalloc(IEEE_8021QAZ_MAX_TCS,
				 sizeof(*mlxsw_sp_qdisc),
				 GFP_KERNEL);
	if (!mlxsw_sp_qdisc)
		goto err_tclass_qdiscs_init;

	mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;

	return 0;

err_tclass_qdiscs_init:
	kfree(mlxsw_sp_port->root_qdisc);
err_root_qdisc_init:
	return -ENOMEM;
}

void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(mlxsw_sp_port->tclass_qdiscs);
	kfree(mlxsw_sp_port->root_qdisc);
}