1
2
3
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/dcbnl.h>
7#include <linux/if_ether.h>
8#include <linux/list.h>
9#include <linux/netlink.h>
10
11#include "spectrum.h"
12#include "core.h"
13#include "port.h"
14#include "reg.h"
15
/* Shared-buffer pool record: caches the values last written to the SBPR
 * register. The freeze_* bits mark attributes user space must not change.
 */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
	u8 freeze_mode:1,	/* threshold type may not be changed */
	   freeze_size:1;	/* pool size may not be changed */
};

/* Occupancy sample: current level and watermark, in cells. */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};

/* Per-{port, TC, direction} quota cache (SBCM register). */
struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;		/* flat index into the pool descriptors */
	struct mlxsw_cp_sb_occ occ;
	u8 freeze_pool:1,	/* pool binding may not be changed */
	   freeze_thresh:1;	/* threshold may not be changed */
};

/* Sentinel sizes used in the default pool tables below. */
#define MLXSW_SP_SB_INFI -1U	/* infinite size */
#define MLXSW_SP_SB_REST -2U	/* remainder of the shared buffer */

/* Per-{port, pool} quota cache (SBPM register). */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};

/* Per-priority multicast quota (SBMM register). */
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
};

/* Maps one flat pool index onto the (direction, FW pool number) pair
 * that the SBXX registers expect.
 */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};

/* Well-known flat pool indices into the descriptor arrays below. */
#define MLXSW_SP_SB_POOL_ING		0
#define MLXSW_SP_SB_POOL_EGR		4
#define MLXSW_SP_SB_POOL_EGR_MC		8
#define MLXSW_SP_SB_POOL_ING_CPU	9
#define MLXSW_SP_SB_POOL_EGR_CPU	10
62
/* Pool descriptors, indexed by flat pool index. The layout matches the
 * MLXSW_SP_SB_POOL_* defines above: 0-3 ingress, 4-7 egress, 8 egress MC
 * (FW pool 15), 9 ingress CPU, 10 egress CPU.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},
};

/* Spectrum-2 pool descriptors; currently identical to Spectrum-1 but kept
 * separate so the two ASIC generations can diverge independently.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
90
/* Number of traffic classes tracked per port in each direction. */
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

/* Per-port shared-buffer state. */
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm *pms;	/* one entry per pool */
};

/* Top-level shared-buffer state, allocated in mlxsw_sp_buffers_init(). */
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr *prs;	/* one entry per pool */
	struct mlxsw_sp_sb_port *ports;	/* indexed by local port */
	u32 cell_size;			/* bytes per buffer cell */
	u32 max_headroom_cells;
	u64 sb_size;			/* shared buffer size, in bytes */
};

/* Per-ASIC default tables and sizes; one instance per generation
 * (mlxsw_sp1_sb_vals / mlxsw_sp2_sb_vals below).
 */
struct mlxsw_sp_sb_vals {
	unsigned int pool_count;
	const struct mlxsw_sp_sb_pool_des *pool_dess;
	const struct mlxsw_sp_sb_pm *pms;
	const struct mlxsw_sp_sb_pm *pms_cpu;
	const struct mlxsw_sp_sb_pr *prs;
	const struct mlxsw_sp_sb_mm *mms;
	const struct mlxsw_sp_sb_cm *cms_ingress;
	const struct mlxsw_sp_sb_cm *cms_egress;
	const struct mlxsw_sp_sb_cm *cms_cpu;
	unsigned int mms_count;
	unsigned int cms_ingress_count;
	unsigned int cms_egress_count;
	unsigned int cms_cpu_count;
};
123
124u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
125{
126 return mlxsw_sp->sb->cell_size * cells;
127}
128
129u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
130{
131 return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
132}
133
134u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
135{
136 return mlxsw_sp->sb->max_headroom_cells;
137}
138
139static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
140 u16 pool_index)
141{
142 return &mlxsw_sp->sb->prs[pool_index];
143}
144
145static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
146{
147 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
148 return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
149 else
150 return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
151}
152
153static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
154 u8 local_port, u8 pg_buff,
155 enum mlxsw_reg_sbxx_dir dir)
156{
157 struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
158
159 WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
160 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
161 return &sb_port->ing_cms[pg_buff];
162 else
163 return &sb_port->eg_cms[pg_buff];
164}
165
166static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
167 u8 local_port, u16 pool_index)
168{
169 return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
170}
171
/* Program pool @pool_index via the SBPR register and, on success, cache
 * the new mode and size. When @infi_size is set, the cached size is the
 * whole shared buffer converted to cells.
 */
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				enum mlxsw_reg_sbpr_mode mode,
				u32 size, bool infi_size)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
			    size, infi_size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	if (infi_size)
		size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pr->mode = mode;
	pr->size = size;
	return 0;
}
195
/* Program a per-{port, TC} quota via the SBCM register and, on success,
 * cache it. Some (port, pg_buff, dir) combinations are written to hardware
 * but have no cache slot (see mlxsw_sp_sb_cm_exists()); those are skipped.
 * When @infi_max is set, the cached max is the whole shared buffer in cells.
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				bool infi_max, u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, infi_max, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		if (infi_max)
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
							mlxsw_sp->sb->sb_size);

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}
225
/* Program a per-{port, pool} quota via the SBPM register and, on success,
 * cache the written values.
 */
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u16 pool_index, u32 min_buff, u32 max_buff)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}
246
/* Queue a "clear occupancy watermark" SBPM query on @bulk_list.
 * CPU-port ingress pools are not exposed, so they are silently skipped.
 */
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	if (local_port == MLXSW_PORT_CPU_PORT &&
	    des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return 0;

	/* clr=true: reading the register resets the max watermark. */
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}
263
/* Completion callback for an SBPM occupancy query: unpack the current and
 * watermark occupancy into the cache entry passed via @cb_priv.
 */
static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}
272
/* Queue an SBPM occupancy query on @bulk_list; the result lands in the
 * per-{port, pool} cache via mlxsw_sp_sb_pm_occ_query_cb(). CPU-port
 * ingress pools are not exposed, so they are silently skipped.
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	if (local_port == MLXSW_PORT_CPU_PORT &&
	    des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return 0;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
293
294
295#define MLXSW_SP_PB_HEADROOM 25632
296#define MLXSW_SP_PB_UNUSED 8
297
298static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
299{
300 const u32 pbs[] = {
301 [0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
302 [9] = MLXSW_PORT_MAX_MTU,
303 };
304 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
305 char pbmc_pl[MLXSW_REG_PBMC_LEN];
306 int i;
307
308 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
309 0xffff, 0xffff / 2);
310 for (i = 0; i < ARRAY_SIZE(pbs); i++) {
311 u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);
312
313 if (i == MLXSW_SP_PB_UNUSED)
314 continue;
315 size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size);
316 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
317 }
318 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
319 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
320 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
321}
322
/* Map every IEEE priority of the port to headroom buffer 0 via PPTB. */
static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	int i;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
			       pptb_pl);
}
334
/* Initialize port headroom: size the buffers, then map priorities. */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err = mlxsw_sp_port_pb_init(mlxsw_sp_port);

	return err ? err : mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
344
345static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
346 struct mlxsw_sp_sb_port *sb_port)
347{
348 struct mlxsw_sp_sb_pm *pms;
349
350 pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
351 GFP_KERNEL);
352 if (!pms)
353 return -ENOMEM;
354 sb_port->pms = pms;
355 return 0;
356}
357
/* Free the per-pool quota cache of one port. */
static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
{
	kfree(sb_port->pms);
}
362
/* Allocate the port array, the pool record array and each port's per-pool
 * cache. On failure, unwinds in reverse order via the goto labels.
 */
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_sb_pr *prs;
	int i;
	int err;

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;

	prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
		      GFP_KERNEL);
	if (!prs) {
		err = -ENOMEM;
		goto err_alloc_prs;
	}
	mlxsw_sp->sb->prs = prs;

	for (i = 0; i < max_ports; i++) {
		err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
		if (err)
			goto err_sb_port_init;
	}

	return 0;

err_sb_port_init:
	/* Only ports [0, i) were initialized; free them in reverse. */
	for (i--; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
err_alloc_prs:
	kfree(mlxsw_sp->sb->ports);
	return err;
}
400
/* Free everything allocated by mlxsw_sp_sb_ports_init(). */
static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
	int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	int i;

	for (i = max_ports - 1; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
	kfree(mlxsw_sp->sb->ports);
}
411
/* Pool record initializer; _size may be a byte count or one of the
 * MLXSW_SP_SB_INFI / MLXSW_SP_SB_REST sentinels.
 */
#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

/* As above, but also fixing the mode and/or size against user changes. */
#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size)	\
	{								\
		.mode = _mode,						\
		.size = _size,						\
		.freeze_mode = _freeze_mode,				\
		.freeze_size = _freeze_size,				\
	}
425
/* Size of the dedicated CPU pools on Spectrum-1, in bytes. */
#define MLXSW_SP1_SB_PR_CPU_SIZE	(256 * 1000)

/* Spectrum-1 default pool configuration, indexed like the descriptors:
 * the main ingress/egress pools take the remainder of the buffer, the
 * egress MC pool is infinite, and the CPU pools have a fixed size.
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
			   true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
446
/* Size of the dedicated CPU pools on Spectrum-2, in bytes. */
#define MLXSW_SP2_SB_PR_CPU_SIZE	(256 * 1000)

/* Spectrum-2 default pool configuration; differs from Spectrum-1 in that
 * the unused zero-size pools are static rather than dynamic.
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
			   true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
};
467
/* Program all pools from the @prs defaults table. Pools marked
 * MLXSW_SP_SB_REST share whatever is left of the shared buffer in their
 * direction after all fixed-size pools have been accounted for.
 */
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_sb_pr *prs,
				const struct mlxsw_sp_sb_pool_des *pool_dess,
				size_t prs_len)
{
	/* Remaining cells per direction, indexed by enum mlxsw_reg_sbxx_dir;
	 * both directions start with the full shared buffer.
	 */
	u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
	u32 rest_cells[2] = {sb_cells, sb_cells};
	int i;
	int err;

	/* First pass: subtract every fixed-size pool from its direction's
	 * budget. INFI and REST pools do not consume a fixed amount here.
	 */
	for (i = 0; i < prs_len; i++) {
		enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir;
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST)
			continue;

		size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
		/* A table over-committing the buffer is a driver bug. */
		if (WARN_ON_ONCE(size_cells > rest_cells[dir]))
			continue;

		rest_cells[dir] -= size_cells;
	}

	/* Second pass: program each pool with its final size. */
	for (i = 0; i < prs_len; i++) {
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   0, true);
		} else if (size == MLXSW_SP_SB_REST) {
			size_cells = rest_cells[pool_dess[i].dir];
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		} else {
			size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		}
		if (err)
			return err;
	}
	return 0;
}
518
/* Per-TC quota initializer bound to an explicit pool. */
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

/* Quota bound to the main ingress pool. */
#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_ING,	\
	}

/* Quota bound to the main egress pool. */
#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}

/* Quota bound to the egress MC pool; the binding and threshold are fixed
 * and may not be changed from user space.
 */
#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR_MC,	\
		.freeze_pool = true,			\
		.freeze_thresh = true,			\
	}
548
/* Spectrum-1 default ingress quotas: entries 0-7 are TCs 0-7, entry 8 is a
 * placeholder (PG 8 does not exist), entry 9 is the CPU traffic quota.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(10000, 8),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0),	/* dummy, PG 8 is skipped */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
561
/* Spectrum-2 default ingress quotas; same layout as the SP1 table. */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(0, 7),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0),	/* dummy, PG 8 is skipped */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
574
/* Spectrum-1 default egress quotas: entries 0-7 are UC TCs, 8-15 are MC
 * TCs (bound to the infinite MC pool), entry 16 is the control TC.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
594
/* Spectrum-2 default egress quotas; same layout as the SP1 table. */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
614
/* Zero quota bound to the egress CPU pool - used for TCs that carry no
 * meaningful CPU traffic.
 */
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)

/* Egress quotas for the CPU port itself: a handful of TCs get a real
 * budget; the rest are zeroed.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};
651
652static bool
653mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
654{
655 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
656
657 return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
658}
659
/* Program per-TC quotas for one port in one direction from the @cms
 * defaults table. min_buff is always given in bytes and converted to
 * cells; max_buff is bytes only for static pools (for dynamic pools it
 * is an alpha index and is written as-is).
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;
		u32 max_buff;

		/* PG number 8 does not exist on ingress; skip it. */
		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue;
		cm = &cms[i];
		/* A table entry pointing at a pool of the wrong direction
		 * is a driver bug.
		 */
		if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		max_buff = cm->max_buff;
		if (max_buff == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, 0,
						   true, cm->pool_index);
		} else {
			if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
						       cm->pool_index))
				max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
								max_buff);
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, max_buff,
						   false, cm->pool_index);
		}
		if (err)
			return err;
	}
	return 0;
}
700
701static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
702{
703 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
704 int err;
705
706 err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
707 mlxsw_sp_port->local_port,
708 MLXSW_REG_SBXX_DIR_INGRESS,
709 mlxsw_sp->sb_vals->cms_ingress,
710 mlxsw_sp->sb_vals->cms_ingress_count);
711 if (err)
712 return err;
713 return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
714 mlxsw_sp_port->local_port,
715 MLXSW_REG_SBXX_DIR_EGRESS,
716 mlxsw_sp->sb_vals->cms_egress,
717 mlxsw_sp->sb_vals->cms_egress_count);
718}
719
/* Program egress quotas for the CPU port (local port 0). */
static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp->sb_vals->cms_cpu,
				      mlxsw_sp->sb_vals->cms_cpu_count);
}
726
/* Per-{port, pool} quota initializer. */
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

/* Spectrum-1 default per-port pool quotas, indexed by flat pool index. */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(10000, 90000),
	MLXSW_SP_SB_PM(0, 8),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
747
748
/* Spectrum-2 default per-port pool quotas, indexed by flat pool index. */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(10000, 90000),
	MLXSW_SP_SB_PM(0, 8),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
762
763
/* Pool quotas for the CPU port; only the MC and egress CPU pools get a
 * non-zero budget. Ingress entries are skipped at programming time.
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 90000),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
777
/* Program per-pool quotas for one port from the @pms defaults table.
 * @skip_ingress is used for the CPU port, whose ingress pools are not
 * configured. max_buff is converted from bytes to cells only for static
 * pools; for dynamic pools it is an alpha index written as-is.
 */
static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				const struct mlxsw_sp_sb_pm *pms,
				bool skip_ingress)
{
	int i, err;

	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
		const struct mlxsw_sp_sb_pm *pm = &pms[i];
		const struct mlxsw_sp_sb_pool_des *des;
		u32 max_buff;
		u32 min_buff;

		des = &mlxsw_sp->sb_vals->pool_dess[i];
		if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
		max_buff = pm->max_buff;
		if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
					   max_buff);
		if (err)
			return err;
	}
	return 0;
}
805
806static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
807{
808 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
809
810 return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
811 mlxsw_sp->sb_vals->pms, false);
812}
813
/* Program per-pool quotas for the CPU port, skipping ingress pools. */
static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
				    true);
}
819
/* Per-priority multicast quota initializer, bound to the main egress pool. */
#define MLXSW_SP_SB_MM(_min_buff, _max_buff)		\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}

/* Default MC quotas, one entry per switch priority. */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
};
844
/* Program the per-priority multicast quotas via the SBMM register. */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
		const struct mlxsw_sp_sb_pool_des *des;
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp->sb_vals->mms[i];
		des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];

		/* min_buff is in bytes; max_buff is a dynamic-threshold
		 * alpha index and is written as-is.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    des->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
870
871static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
872 u16 *p_ingress_len, u16 *p_egress_len)
873{
874 int i;
875
876 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
877 if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
878 MLXSW_REG_SBXX_DIR_INGRESS)
879 (*p_ingress_len)++;
880 else
881 (*p_egress_len)++;
882 }
883
884 WARN(*p_egress_len == 0, "No egress pools\n");
885}
886
/* Shared-buffer defaults bundle for Spectrum-1 ASICs. */
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
	.pool_dess = mlxsw_sp1_sb_pool_dess,
	.pms = mlxsw_sp1_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp1_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp1_sb_cms_ingress,
	.cms_egress = mlxsw_sp1_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};

/* Shared-buffer defaults bundle for Spectrum-2 ASICs. */
const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
	.pool_dess = mlxsw_sp2_sb_pool_dess,
	.pms = mlxsw_sp2_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp2_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp2_sb_cms_ingress,
	.cms_egress = mlxsw_sp2_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
918
/* Driver-init entry point: read buffer geometry from firmware resources,
 * allocate the caches, program pool/quota defaults and register the
 * shared buffer with devlink. Unwinds fully on any failure.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u32 max_headroom_size;
	u16 ing_pool_count = 0;
	u16 eg_pool_count = 0;
	int err;

	/* All three firmware resources are mandatory. */
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
		return -EIO;

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
	mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   GUARANTEED_SHARED_BUFFER);
	max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					       MAX_HEADROOM_SIZE);
	/* Round headroom down to the nearest whole cell. */
	mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
					   mlxsw_sp->sb->cell_size;

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
				   mlxsw_sp->sb_vals->pool_dess,
				   mlxsw_sp->sb_vals->pool_count);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_pms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
				  mlxsw_sp->sb->sb_size,
				  ing_pool_count,
				  eg_pool_count,
				  MLXSW_SP_SB_ING_TC_COUNT,
				  MLXSW_SP_SB_EG_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_pms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}
986
/* Tear down everything set up by mlxsw_sp_buffers_init(). */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}
993
/* Per-port init: headroom first, then TC quotas, then pool quotas. */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
}
1008
/* devlink op: report a pool's type, size (in bytes), threshold type and
 * cell size from the cached pool record.
 */
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	enum mlxsw_reg_sbxx_dir dir;
	struct mlxsw_sp_sb_pr *pr;

	dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	/* The driver enums deliberately mirror the devlink ones. */
	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	pool_info->cell_size = mlxsw_sp->sb->cell_size;
	return 0;
}
1025
/* devlink op: resize/retype a pool. Rejects sizes beyond the shared
 * buffer and any change to frozen attributes.
 */
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
	const struct mlxsw_sp_sb_pr *pr;
	enum mlxsw_reg_sbpr_mode mode;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	/* Freeze flags come from the immutable defaults table, not the
	 * runtime cache.
	 */
	pr = &mlxsw_sp->sb_vals->prs[pool_index];

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
				      GUARANTEED_SHARED_BUFFER)) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
		return -EINVAL;
	}

	if (pr->freeze_mode && pr->mode != mode) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
		return -EINVAL;
	}

	if (pr->freeze_size && pr->size != size) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
		return -EINVAL;
	}

	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
				    pool_size, false);
}
1058
/* Offset between the devlink threshold value and the hardware alpha
 * index for dynamic pools. Note it is negative: subtracting it below
 * actually adds 2.
 */
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2)

/* Convert a cached max_buff into the devlink-facing threshold value:
 * alpha index shifted by the offset for dynamic pools, bytes for static.
 */
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				     u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}
1070
1071static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
1072 u32 threshold, u32 *p_max_buff,
1073 struct netlink_ext_ack *extack)
1074{
1075 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1076
1077 if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
1078 int val;
1079
1080 val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
1081 if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
1082 val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
1083 NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
1084 return -EINVAL;
1085 }
1086 *p_max_buff = val;
1087 } else {
1088 *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
1089 }
1090 return 0;
1091}
1092
1093int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1094 unsigned int sb_index, u16 pool_index,
1095 u32 *p_threshold)
1096{
1097 struct mlxsw_sp_port *mlxsw_sp_port =
1098 mlxsw_core_port_driver_priv(mlxsw_core_port);
1099 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1100 u8 local_port = mlxsw_sp_port->local_port;
1101 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1102 pool_index);
1103
1104 *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
1105 pm->max_buff);
1106 return 0;
1107}
1108
1109int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
1110 unsigned int sb_index, u16 pool_index,
1111 u32 threshold, struct netlink_ext_ack *extack)
1112{
1113 struct mlxsw_sp_port *mlxsw_sp_port =
1114 mlxsw_core_port_driver_priv(mlxsw_core_port);
1115 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1116 u8 local_port = mlxsw_sp_port->local_port;
1117 u32 max_buff;
1118 int err;
1119
1120 if (local_port == MLXSW_PORT_CPU_PORT) {
1121 NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden");
1122 return -EINVAL;
1123 }
1124
1125 err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1126 threshold, &max_buff, extack);
1127 if (err)
1128 return err;
1129
1130 return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
1131 0, max_buff);
1132}
1133
1134int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1135 unsigned int sb_index, u16 tc_index,
1136 enum devlink_sb_pool_type pool_type,
1137 u16 *p_pool_index, u32 *p_threshold)
1138{
1139 struct mlxsw_sp_port *mlxsw_sp_port =
1140 mlxsw_core_port_driver_priv(mlxsw_core_port);
1141 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1142 u8 local_port = mlxsw_sp_port->local_port;
1143 u8 pg_buff = tc_index;
1144 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1145 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1146 pg_buff, dir);
1147
1148 *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
1149 cm->max_buff);
1150 *p_pool_index = cm->pool_index;
1151 return 0;
1152}
1153
1154int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
1155 unsigned int sb_index, u16 tc_index,
1156 enum devlink_sb_pool_type pool_type,
1157 u16 pool_index, u32 threshold,
1158 struct netlink_ext_ack *extack)
1159{
1160 struct mlxsw_sp_port *mlxsw_sp_port =
1161 mlxsw_core_port_driver_priv(mlxsw_core_port);
1162 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1163 u8 local_port = mlxsw_sp_port->local_port;
1164 const struct mlxsw_sp_sb_cm *cm;
1165 u8 pg_buff = tc_index;
1166 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1167 u32 max_buff;
1168 int err;
1169
1170 if (local_port == MLXSW_PORT_CPU_PORT) {
1171 NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden");
1172 return -EINVAL;
1173 }
1174
1175 if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
1176 NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
1177 return -EINVAL;
1178 }
1179
1180 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
1181 cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
1182 else
1183 cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];
1184
1185 if (cm->freeze_pool && cm->pool_index != pool_index) {
1186 NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
1187 return -EINVAL;
1188 }
1189
1190 if (cm->freeze_thresh && cm->max_buff != threshold) {
1191 NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
1192 return -EINVAL;
1193 }
1194
1195 err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1196 threshold, &max_buff, extack);
1197 if (err)
1198 return err;
1199
1200 return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
1201 0, max_buff, false, pool_index);
1202}
1203
/* Maximal number of ports that fit into a single SBSR query: each port
 * contributes one record per ingress TC and one per egress TC, and the
 * register returns at most MLXSW_REG_SBSR_REC_MAX_COUNT records.
 */
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / \
	 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))

/* Per-batch context handed to the SBSR completion callback, packed by
 * value into the unsigned long cb_priv.
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;	/* number of ports included in the query */
	u8 local_port_1;	/* first local port of the batch */
};
1212
/* Completion callback for the SBSR occupancy query issued by
 * mlxsw_sp_sb_occ_snapshot(). Unpacks the returned records into the
 * per-port, per-TC occupancy cache (cm->occ). Records are consumed in
 * the order the query was built: all ingress TC records of the batch's
 * ports first, then all egress TC records.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	/* The batch context was packed into cb_priv by value. */
	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	/* First pass: ingress records. */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port == MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			masked_count++;
			continue;
		}
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		/* Stop once every port of the batch has been handled. */
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	/* Second pass: egress records for the same batch of ports. */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
1261
/* devlink op: take an occupancy snapshot of shared buffer sb_index.
 * Ports are queried in batches of at most MASKED_COUNT_MAX via the SBSR
 * register; each transaction's records are unpacked asynchronously by
 * mlxsw_sp_sb_sr_occ_query_cb(). Per-port pool occupancy (SBPM) is
 * queried alongside each batch.
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	/* Start from the CPU port and walk all local ports in batches. */
	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	/* Request records for every ingress and egress traffic class. */
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		/* SBSR holds a limited number of records; fire the query
		 * once enough ports have been masked.
		 */
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	/* Pass the batch context to the callback by value via cb_priv. */
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	/* More ports left? Continue with the next batch. */
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		local_port++;
		goto next_batch;
	}

out:
	/* Wait for all queued transactions, including their callbacks. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1330
/* devlink op: clear the maximal occupancy watermarks of shared buffer
 * sb_index. Mirrors the batching of mlxsw_sp_sb_occ_snapshot(), but
 * packs SBSR with the clear flag set and needs no completion callback.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	/* Start from the CPU port and walk all local ports in batches. */
	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	masked_count = 0;
	/* Second argument set: clear the max occupancy on read. */
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			/* Also clear the per-port pool (SBPM) watermarks. */
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		/* SBSR holds a limited number of records; fire the query
		 * once enough ports have been masked.
		 */
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	/* More ports left? Continue with the next batch. */
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		local_port++;
		goto next_batch;
	}

out:
	/* Wait for all queued transactions before returning. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1391
1392int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1393 unsigned int sb_index, u16 pool_index,
1394 u32 *p_cur, u32 *p_max)
1395{
1396 struct mlxsw_sp_port *mlxsw_sp_port =
1397 mlxsw_core_port_driver_priv(mlxsw_core_port);
1398 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1399 u8 local_port = mlxsw_sp_port->local_port;
1400 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1401 pool_index);
1402
1403 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1404 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1405 return 0;
1406}
1407
1408int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1409 unsigned int sb_index, u16 tc_index,
1410 enum devlink_sb_pool_type pool_type,
1411 u32 *p_cur, u32 *p_max)
1412{
1413 struct mlxsw_sp_port *mlxsw_sp_port =
1414 mlxsw_core_port_driver_priv(mlxsw_core_port);
1415 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1416 u8 local_port = mlxsw_sp_port->local_port;
1417 u8 pg_buff = tc_index;
1418 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1419 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1420 pg_buff, dir);
1421
1422 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1423 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
1424 return 0;
1425}
1426