1
2
3
4
5#include <rte_io.h>
6#include <rte_ethdev.h>
7
8#include "hns3_logs.h"
9#include "hns3_ethdev.h"
10#include "hns3_dcb.h"
11
12#define HNS3_SHAPER_BS_U_DEF 5
13#define HNS3_SHAPER_BS_S_DEF 20
14#define BW_MAX_PERCENT 100
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
/*
 * Convert a rate into the ir_b/ir_u/ir_s fields of the hardware
 * token-bucket shaper at the given scheduling level.
 *
 * The arithmetic below implies the hardware rate is roughly:
 *	rate = (ir_b * 1000 * 8 * 2^ir_u) / (tick * 2^ir_s)
 * NOTE(review): formula inferred from the computation — confirm against
 * the hardware/firmware manual.
 *
 * Returns 0 on success, -EINVAL for an invalid level or a rate above
 * hw->max_tm_rate.
 */
static int
hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
		      struct hns3_shaper_parameter *shaper_para)
{
#define SHAPER_DEFAULT_IR_B 126
#define DIVISOR_CLK (1000 * 8)
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)

	/* Per-level tick; index order must match the HNS3_SHAPER_LVL_* ids. */
	const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	uint8_t ir_u_calc = 0;
	uint8_t ir_s_calc = 0;
	uint32_t denominator;
	uint32_t ir_calc;
	uint32_t tick;

	/* Calc tick */
	if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
		hns3_err(hw,
			 "shaper_level(%u) is greater than HNS3_SHAPER_LVL_CNT(%d)",
			 shaper_level, HNS3_SHAPER_LVL_CNT);
		return -EINVAL;
	}

	if (ir > hw->max_tm_rate) {
		hns3_err(hw, "rate(%u) exceeds the max rate(%u) driver "
			 "supported.", ir, hw->max_tm_rate);
		return -EINVAL;
	}

	tick = tick_array[shaper_level];

	/*
	 * Calc the speed with ir_b = 126, ir_u = 0 and ir_s = 0.
	 * The formula is reduced to:
	 *		126 * 1000 * 8
	 *	ir_calc = --------------  (rounded up by adding tick/2 - 1)
	 *		      tick
	 */
	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
	} else if (ir_calc > ir) {
		/* Increasing the denominator (2^ir_s) to select ir_s value. */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
		}

		/* Recompute ir_b from the chosen ir_s, rounding to nearest. */
		shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				    (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/*
		 * Increasing the numerator (2^ir_u) to select ir_u value.
		 * The loop always executes at least once, so ir_u_calc >= 1
		 * on exit; ir_u_calc stays small because ir is bounded by
		 * hw->max_tm_rate (checked above).
		 */
		uint32_t numerator;
		do {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		} while (ir_calc < ir);

		if (ir_calc == ir) {
			shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
		} else {
			/*
			 * Step back one ir_u so ir_calc <= ir, then derive
			 * ir_b. ir_u_calc >= 1 before the decrement, so the
			 * denominator below cannot be zero.
			 */
			--ir_u_calc;
			denominator = DIVISOR_CLK * (1 << ir_u_calc);
			shaper_para->ir_b =
				(ir * tick + (denominator >> 1)) / denominator;
		}
	}

	shaper_para->ir_u = ir_u_calc;
	shaper_para->ir_s = ir_s_calc;

	return 0;
}
123
124static int
125hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
126{
127#define HNS3_HALF_BYTE_BIT_OFFSET 4
128 uint8_t tc = hw->dcb_info.prio_tc[pri_id];
129
130 if (tc >= hw->dcb_info.num_tc)
131 return -EINVAL;
132
133
134
135
136
137
138
139
140
141
142 pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
143
144 return 0;
145}
146
147static int
148hns3_up_to_tc_map(struct hns3_hw *hw)
149{
150 struct hns3_cmd_desc desc;
151 uint8_t *pri = (uint8_t *)desc.data;
152 uint8_t pri_id;
153 int ret;
154
155 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
156
157 for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
158 ret = hns3_fill_pri_array(hw, pri, pri_id);
159 if (ret)
160 return ret;
161 }
162
163 return hns3_cmd_send(hw, &desc, 1);
164}
165
166static int
167hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
168{
169 struct hns3_pg_to_pri_link_cmd *map;
170 struct hns3_cmd_desc desc;
171
172 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
173
174 map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
175
176 map->pg_id = pg_id;
177 map->pri_bit_map = pri_bit_map;
178
179 return hns3_cmd_send(hw, &desc, 1);
180}
181
182static int
183hns3_pg_to_pri_map(struct hns3_hw *hw)
184{
185 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
186 struct hns3_pf *pf = &hns->pf;
187 struct hns3_pg_info *pg_info;
188 int ret, i;
189
190 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
191 return -EINVAL;
192
193 for (i = 0; i < hw->dcb_info.num_pg; i++) {
194
195 pg_info = &hw->dcb_info.pg_info[i];
196 ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
197 if (ret)
198 return ret;
199 }
200
201 return 0;
202}
203
204static int
205hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
206{
207 struct hns3_qs_to_pri_link_cmd *map;
208 struct hns3_cmd_desc desc;
209
210 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
211
212 map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
213
214 map->qs_id = rte_cpu_to_le_16(qs_id);
215 map->priority = pri;
216 map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
217
218 return hns3_cmd_send(hw, &desc, 1);
219}
220
221static int
222hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
223{
224 struct hns3_qs_weight_cmd *weight;
225 struct hns3_cmd_desc desc;
226
227 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
228
229 weight = (struct hns3_qs_weight_cmd *)desc.data;
230
231 weight->qs_id = rte_cpu_to_le_16(qs_id);
232 weight->dwrr = dwrr;
233
234 return hns3_cmd_send(hw, &desc, 1);
235}
236
237static int
238hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
239{
240#define DEFAULT_TC_WEIGHT 1
241#define DEFAULT_TC_OFFSET 14
242 struct hns3_ets_tc_weight_cmd *ets_weight;
243 struct hns3_cmd_desc desc;
244 uint8_t i;
245
246 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
247 ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
248
249 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
250 struct hns3_pg_info *pg_info;
251
252 ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
253
254 if (!(hw->hw_tc_map & BIT(i)))
255 continue;
256
257 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
258 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
259 }
260
261 ets_weight->weight_offset = DEFAULT_TC_OFFSET;
262
263 return hns3_cmd_send(hw, &desc, 1);
264}
265
266static int
267hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
268{
269 struct hns3_priority_weight_cmd *weight;
270 struct hns3_cmd_desc desc;
271
272 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
273
274 weight = (struct hns3_priority_weight_cmd *)desc.data;
275
276 weight->pri_id = pri_id;
277 weight->dwrr = dwrr;
278
279 return hns3_cmd_send(hw, &desc, 1);
280}
281
282static int
283hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
284{
285 struct hns3_pg_weight_cmd *weight;
286 struct hns3_cmd_desc desc;
287
288 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
289
290 weight = (struct hns3_pg_weight_cmd *)desc.data;
291
292 weight->pg_id = pg_id;
293 weight->dwrr = dwrr;
294
295 return hns3_cmd_send(hw, &desc, 1);
296}
297static int
298hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
299{
300 struct hns3_cmd_desc desc;
301
302 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
303
304 if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
305 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
306 else
307 desc.data[1] = 0;
308
309 desc.data[0] = rte_cpu_to_le_32(pg_id);
310
311 return hns3_cmd_send(hw, &desc, 1);
312}
313
314static uint32_t
315hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
316 uint8_t bs_b, uint8_t bs_s)
317{
318 uint32_t shapping_para = 0;
319
320
321 if (ir_b == 0)
322 return shapping_para;
323
324 hns3_dcb_set_field(shapping_para, IR_B, ir_b);
325 hns3_dcb_set_field(shapping_para, IR_U, ir_u);
326 hns3_dcb_set_field(shapping_para, IR_S, ir_s);
327 hns3_dcb_set_field(shapping_para, BS_B, bs_b);
328 hns3_dcb_set_field(shapping_para, BS_S, bs_s);
329
330 return shapping_para;
331}
332
333static int
334hns3_dcb_port_shaper_cfg(struct hns3_hw *hw, uint32_t speed)
335{
336 struct hns3_port_shapping_cmd *shap_cfg_cmd;
337 struct hns3_shaper_parameter shaper_parameter;
338 uint32_t shapping_para;
339 uint32_t ir_u, ir_b, ir_s;
340 struct hns3_cmd_desc desc;
341 int ret;
342
343 ret = hns3_shaper_para_calc(hw, speed,
344 HNS3_SHAPER_LVL_PORT, &shaper_parameter);
345 if (ret) {
346 hns3_err(hw, "calculate shaper parameter failed: %d", ret);
347 return ret;
348 }
349
350 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
351 shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
352
353 ir_b = shaper_parameter.ir_b;
354 ir_u = shaper_parameter.ir_u;
355 ir_s = shaper_parameter.ir_s;
356 shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
357 HNS3_SHAPER_BS_U_DEF,
358 HNS3_SHAPER_BS_S_DEF);
359
360 shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
361
362
363
364
365
366
367
368
369 shap_cfg_cmd->port_rate = rte_cpu_to_le_32(speed);
370 hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
371
372 return hns3_cmd_send(hw, &desc, 1);
373}
374
375int
376hns3_port_shaper_update(struct hns3_hw *hw, uint32_t speed)
377{
378 int ret;
379
380 ret = hns3_dcb_port_shaper_cfg(hw, speed);
381 if (ret)
382 hns3_err(hw, "configure port shappering failed: ret = %d", ret);
383
384 return ret;
385}
386
387static int
388hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
389 uint8_t pg_id, uint32_t shapping_para, uint32_t rate)
390{
391 struct hns3_pg_shapping_cmd *shap_cfg_cmd;
392 enum hns3_opcode_type opcode;
393 struct hns3_cmd_desc desc;
394
395 opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
396 HNS3_OPC_TM_PG_C_SHAPPING;
397 hns3_cmd_setup_basic_desc(&desc, opcode, false);
398
399 shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
400
401 shap_cfg_cmd->pg_id = pg_id;
402
403 shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
404
405
406
407
408
409
410
411
412 shap_cfg_cmd->pg_rate = rte_cpu_to_le_32(rate);
413 hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
414
415 return hns3_cmd_send(hw, &desc, 1);
416}
417
418static int
419hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate)
420{
421 struct hns3_shaper_parameter shaper_parameter;
422 uint32_t ir_u, ir_b, ir_s;
423 uint32_t shaper_para;
424 int ret;
425
426
427 ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
428 &shaper_parameter);
429 if (ret) {
430 hns3_err(hw, "calculate shaper parameter fail, ret = %d.",
431 ret);
432 return ret;
433 }
434
435 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
436 HNS3_SHAPER_BS_U_DEF,
437 HNS3_SHAPER_BS_S_DEF);
438
439 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, pg_id,
440 shaper_para, rate);
441 if (ret) {
442 hns3_err(hw, "config PG CIR shaper parameter fail, ret = %d.",
443 ret);
444 return ret;
445 }
446
447 ir_b = shaper_parameter.ir_b;
448 ir_u = shaper_parameter.ir_u;
449 ir_s = shaper_parameter.ir_s;
450 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
451 HNS3_SHAPER_BS_U_DEF,
452 HNS3_SHAPER_BS_S_DEF);
453
454 ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, pg_id,
455 shaper_para, rate);
456 if (ret) {
457 hns3_err(hw, "config PG PIR shaper parameter fail, ret = %d.",
458 ret);
459 return ret;
460 }
461
462 return 0;
463}
464
465static int
466hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
467{
468 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
469 uint32_t rate;
470 uint8_t i;
471 int ret;
472
473
474 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
475 return -EINVAL;
476
477
478 for (i = 0; i < hw->dcb_info.num_pg; i++) {
479 rate = hw->dcb_info.pg_info[i].bw_limit;
480 ret = hns3_pg_shaper_rate_cfg(hw, i, rate);
481 if (ret)
482 return ret;
483 }
484
485 return 0;
486}
487
488static int
489hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
490{
491 struct hns3_cmd_desc desc;
492
493 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
494
495 if (mode == HNS3_SCH_MODE_DWRR)
496 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
497 else
498 desc.data[1] = 0;
499
500 desc.data[0] = rte_cpu_to_le_32(qs_id);
501
502 return hns3_cmd_send(hw, &desc, 1);
503}
504
505static int
506hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
507{
508 struct hns3_cmd_desc desc;
509
510 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
511
512 if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
513 desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
514 else
515 desc.data[1] = 0;
516
517 desc.data[0] = rte_cpu_to_le_32(pri_id);
518
519 return hns3_cmd_send(hw, &desc, 1);
520}
521
522static int
523hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
524 uint8_t pri_id, uint32_t shapping_para, uint32_t rate)
525{
526 struct hns3_pri_shapping_cmd *shap_cfg_cmd;
527 enum hns3_opcode_type opcode;
528 struct hns3_cmd_desc desc;
529
530 opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
531 HNS3_OPC_TM_PRI_C_SHAPPING;
532
533 hns3_cmd_setup_basic_desc(&desc, opcode, false);
534
535 shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
536
537 shap_cfg_cmd->pri_id = pri_id;
538
539 shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
540
541
542
543
544
545
546
547
548 shap_cfg_cmd->pri_rate = rte_cpu_to_le_32(rate);
549 hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
550
551 return hns3_cmd_send(hw, &desc, 1);
552}
553
554static int
555hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate)
556{
557 struct hns3_shaper_parameter shaper_parameter;
558 uint32_t ir_u, ir_b, ir_s;
559 uint32_t shaper_para;
560 int ret;
561
562 ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
563 &shaper_parameter);
564 if (ret) {
565 hns3_err(hw, "calculate shaper parameter failed: %d.",
566 ret);
567 return ret;
568 }
569
570 shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
571 HNS3_SHAPER_BS_U_DEF,
572 HNS3_SHAPER_BS_S_DEF);
573
574 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, tc_no,
575 shaper_para, rate);
576 if (ret) {
577 hns3_err(hw,
578 "config priority CIR shaper parameter failed: %d.",
579 ret);
580 return ret;
581 }
582
583 ir_b = shaper_parameter.ir_b;
584 ir_u = shaper_parameter.ir_u;
585 ir_s = shaper_parameter.ir_s;
586 shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
587 HNS3_SHAPER_BS_U_DEF,
588 HNS3_SHAPER_BS_S_DEF);
589
590 ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, tc_no,
591 shaper_para, rate);
592 if (ret) {
593 hns3_err(hw,
594 "config priority PIR shaper parameter failed: %d.",
595 ret);
596 return ret;
597 }
598
599 return 0;
600}
601
602static int
603hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
604{
605 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
606 uint32_t rate;
607 uint8_t i;
608 int ret;
609
610 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
611 return -EINVAL;
612
613 for (i = 0; i < hw->dcb_info.num_tc; i++) {
614 rate = hw->dcb_info.tc_info[i].bw_limit;
615 ret = hns3_pri_shaper_rate_cfg(hw, i, rate);
616 if (ret) {
617 hns3_err(hw, "config pri shaper failed: %d.", ret);
618 return ret;
619 }
620 }
621
622 return 0;
623}
624
/*
 * Derive the per-TC Rx queue count from the total Rx queue number and
 * update hw->alloc_rss_size / hw->used_rx_queues accordingly.
 *
 * Returns 0 on success, -EINVAL when the per-TC count exceeds the
 * hardware RSS limit or nb_rx_q is not a multiple of the TC count.
 */
static int
hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
{
	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
	uint16_t rx_qnum_per_tc;
	uint16_t used_rx_queues;
	int i;

	rx_qnum_per_tc = nb_rx_q / hw->num_tc;
	if (rx_qnum_per_tc > hw->rss_size_max) {
		hns3_err(hw, "rx queue number of per tc (%u) is greater than "
			 "value (%u) hardware supported.",
			 rx_qnum_per_tc, hw->rss_size_max);
		return -EINVAL;
	}

	used_rx_queues = hw->num_tc * rx_qnum_per_tc;
	if (used_rx_queues != nb_rx_q) {
		hns3_err(hw, "rx queue number (%u) configured must be an "
			 "integral multiple of valid tc number (%u).",
			 nb_rx_q, hw->num_tc);
		return -EINVAL;
	}
	hw->alloc_rss_size = rx_qnum_per_tc;
	hw->used_rx_queues = used_rx_queues;

	/*
	 * Rebuild the default RSS indirection table only when no reset is
	 * in progress; during a reset the table is preserved so that the
	 * user's redirection configuration survives recovery.
	 * NOTE(review): rationale inferred from the resetting check —
	 * confirm against the reset-restore path.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		for (i = 0; i < hw->rss_ind_tbl_size; i++)
			rss_cfg->rss_indirection_tbl[i] =
							i % hw->alloc_rss_size;
	}

	return 0;
}
666
667static int
668hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
669{
670 struct hns3_tc_queue_info *tc_queue;
671 uint16_t used_tx_queues;
672 uint16_t tx_qnum_per_tc;
673 uint8_t i;
674
675 tx_qnum_per_tc = nb_tx_q / hw->num_tc;
676 used_tx_queues = hw->num_tc * tx_qnum_per_tc;
677 if (used_tx_queues != nb_tx_q) {
678 hns3_err(hw, "tx queue number (%u) configured must be an "
679 "integral multiple of valid tc number (%u).",
680 nb_tx_q, hw->num_tc);
681 return -EINVAL;
682 }
683
684 hw->used_tx_queues = used_tx_queues;
685 hw->tx_qnum_per_tc = tx_qnum_per_tc;
686 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
687 tc_queue = &hw->tc_queue[i];
688 if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
689 tc_queue->enable = true;
690 tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
691 tc_queue->tqp_count = hw->tx_qnum_per_tc;
692 tc_queue->tc = i;
693 } else {
694
695 tc_queue->enable = false;
696 tc_queue->tqp_offset = 0;
697 tc_queue->tqp_count = 0;
698 tc_queue->tc = 0;
699 }
700 }
701
702 return 0;
703}
704
705uint8_t
706hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no)
707{
708 struct hns3_tc_queue_info *tc_queue;
709 uint8_t i;
710
711 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
712 tc_queue = &hw->tc_queue[i];
713 if (!tc_queue->enable)
714 continue;
715
716 if (txq_no >= tc_queue->tqp_offset &&
717 txq_no < tc_queue->tqp_offset + tc_queue->tqp_count)
718 return i;
719 }
720
721
722 return 0;
723}
724
725int
726hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
727{
728 int ret;
729
730 if (nb_rx_q < hw->num_tc) {
731 hns3_err(hw, "number of Rx queues(%u) is less than number of TC(%u).",
732 nb_rx_q, hw->num_tc);
733 return -EINVAL;
734 }
735
736 if (nb_tx_q < hw->num_tc) {
737 hns3_err(hw, "number of Tx queues(%u) is less than number of TC(%u).",
738 nb_tx_q, hw->num_tc);
739 return -EINVAL;
740 }
741
742 ret = hns3_set_rss_size(hw, nb_rx_q);
743 if (ret)
744 return ret;
745
746 return hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
747}
748
749static int
750hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
751 uint16_t nb_tx_q)
752{
753 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
754 struct hns3_pf *pf = &hns->pf;
755 int ret;
756
757 hw->num_tc = hw->dcb_info.num_tc;
758 ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
759 if (ret)
760 return ret;
761
762 if (!hns->is_vf)
763 memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
764
765 return 0;
766}
767
/*
 * Initialize the DCB bookkeeping to its defaults: a single priority
 * group (PG0) owning every enabled TC with the full bandwidth, all user
 * priorities mapped to TC0, and every TC in DWRR mode limited by PG0's
 * bandwidth.
 *
 * Returns 0 on success, -EINVAL when a non-TC scheduling mode is
 * combined with more than one PG.
 */
int
hns3_dcb_info_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int i, k;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
	    hw->dcb_info.num_pg != 1)
		return -EINVAL;

	/* Initializing PG information */
	memset(hw->dcb_info.pg_info, 0,
	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
	for (i = 0; i < hw->dcb_info.num_pg; i++) {
		/* Only PG0 carries weight; the other PGs get zero DWRR. */
		hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
		hw->dcb_info.pg_info[i].pg_id = i;
		hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.pg_info[i].bw_limit = hw->max_tm_rate;

		if (i != 0)
			continue;

		/* PG0 owns every enabled TC, each with full weight. */
		hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
		for (k = 0; k < hw->dcb_info.num_tc; k++)
			hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
	}

	/* All user priorities map to TC0 by default. */
	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
		hw->dcb_info.prio_tc[i] = 0;

	/* Initializing tc information */
	memset(hw->dcb_info.tc_info, 0,
	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		hw->dcb_info.tc_info[i].tc_id = i;
		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.tc_info[i].pgid = 0;
		hw->dcb_info.tc_info[i].bw_limit =
					hw->dcb_info.pg_info[0].bw_limit;
	}

	return 0;
}
813
814static int
815hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
816{
817 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
818 struct hns3_pf *pf = &hns->pf;
819 int ret, i;
820
821
822 if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
823 return -EINVAL;
824
825 for (i = 0; i < hw->dcb_info.num_pg; i++) {
826 ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
827 if (ret)
828 return ret;
829 }
830
831 return 0;
832}
833
834static int
835hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
836{
837 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
838 struct hns3_pf *pf = &hns->pf;
839 uint8_t i;
840 int ret;
841
842 if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
843 for (i = 0; i < hw->dcb_info.num_tc; i++) {
844 ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
845 if (ret)
846 return ret;
847
848 ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
849 HNS3_SCH_MODE_DWRR);
850 if (ret)
851 return ret;
852 }
853 }
854
855 return 0;
856}
857
/* Program the scheduling modes: PG level first, then priority/queue-set. */
static int
hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
	if (ret != 0) {
		hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
	if (ret != 0)
		hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);

	return ret;
}
875
876static int
877hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
878{
879 struct hns3_pg_info *pg_info;
880 uint8_t dwrr;
881 int ret, i;
882
883 for (i = 0; i < hw->dcb_info.num_tc; i++) {
884 pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
885 dwrr = pg_info->tc_dwrr[i];
886
887 ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
888 if (ret) {
889 hns3_err(hw,
890 "fail to send priority weight cmd: %d, ret = %d",
891 i, ret);
892 return ret;
893 }
894
895 ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
896 if (ret) {
897 hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
898 i, ret);
899 return ret;
900 }
901 }
902
903 return 0;
904}
905
906static int
907hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
908{
909 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
910 struct hns3_pf *pf = &hns->pf;
911 uint32_t version;
912 int ret;
913
914 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
915 return -EINVAL;
916
917 ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
918 if (ret)
919 return ret;
920
921 if (!hns3_dev_dcb_supported(hw))
922 return 0;
923
924 ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
925 if (ret == -EOPNOTSUPP) {
926 version = hw->fw_version;
927 hns3_warn(hw,
928 "fw %lu.%lu.%lu.%lu doesn't support ets tc weight cmd",
929 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
930 HNS3_FW_VERSION_BYTE3_S),
931 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
932 HNS3_FW_VERSION_BYTE2_S),
933 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
934 HNS3_FW_VERSION_BYTE1_S),
935 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
936 HNS3_FW_VERSION_BYTE0_S));
937 ret = 0;
938 }
939
940 return ret;
941}
942
943static int
944hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
945{
946 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
947 struct hns3_pf *pf = &hns->pf;
948 int ret, i;
949
950
951 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
952 return -EINVAL;
953
954
955 for (i = 0; i < hw->dcb_info.num_pg; i++) {
956
957 ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
958 if (ret)
959 return ret;
960 }
961
962 return 0;
963}
964
/* Program all DWRR weights: PG level first, then priority level. */
static int
hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_pg_dwrr_cfg(hw);
	if (ret != 0) {
		hns3_err(hw, "config pg_dwrr failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_pri_dwrr_cfg(hw);
	if (ret != 0)
		hns3_err(hw, "config pri_dwrr failed: %d", ret);

	return ret;
}
982
983static int
984hns3_dcb_shaper_cfg(struct hns3_hw *hw)
985{
986 int ret;
987
988 ret = hns3_dcb_port_shaper_cfg(hw, hw->mac.link_speed);
989 if (ret) {
990 hns3_err(hw, "config port shaper failed: %d", ret);
991 return ret;
992 }
993
994 ret = hns3_dcb_pg_shaper_cfg(hw);
995 if (ret) {
996 hns3_err(hw, "config pg shaper failed: %d", ret);
997 return ret;
998 }
999
1000 return hns3_dcb_pri_shaper_cfg(hw);
1001}
1002
/*
 * Link Tx queue q_id to queue set qs_id and mark the link valid.
 *
 * The qset id is re-packed before being written: the low 10 bits of
 * qs_id stay in place, while the high bits are moved into an extended
 * field so that qset ids larger than the original 10-bit range can be
 * expressed. NOTE(review): the exact field layout (and which hardware
 * revisions require it) is inferred from the mask names — confirm
 * against the command definition in hns3_dcb.h.
 */
static int
hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
{
	struct hns3_nq_to_qs_link_cmd *map;
	struct hns3_cmd_desc desc;
	uint16_t tmp_qs_id = 0;
	uint16_t qs_id_l;
	uint16_t qs_id_h;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hns3_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = rte_cpu_to_le_16(q_id);

	/* Split qs_id into its low and high parts, then re-pack the high
	 * part into the extended field of the command word.
	 */
	qs_id_l = hns3_get_field(qs_id, HNS3_DCB_QS_ID_L_MSK,
				 HNS3_DCB_QS_ID_L_S);
	qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
				 HNS3_DCB_QS_ID_H_S);
	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_L_MSK, HNS3_DCB_QS_ID_L_S,
		       qs_id_l);
	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
		       HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
	map->qset_id = rte_cpu_to_le_16(tmp_qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);

	return hns3_cmd_send(hw, &desc, 1);
}
1040
1041static int
1042hns3_q_to_qs_map(struct hns3_hw *hw)
1043{
1044 struct hns3_tc_queue_info *tc_queue;
1045 uint16_t q_id;
1046 uint32_t i, j;
1047 int ret;
1048
1049 for (i = 0; i < hw->num_tc; i++) {
1050 tc_queue = &hw->tc_queue[i];
1051 for (j = 0; j < tc_queue->tqp_count; j++) {
1052 q_id = tc_queue->tqp_offset + j;
1053 ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
1054 if (ret)
1055 return ret;
1056 }
1057 }
1058
1059 return 0;
1060}
1061
1062static int
1063hns3_pri_q_qs_cfg(struct hns3_hw *hw)
1064{
1065 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1066 struct hns3_pf *pf = &hns->pf;
1067 uint32_t i;
1068 int ret;
1069
1070 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
1071 return -EINVAL;
1072
1073
1074 for (i = 0; i < hw->num_tc; i++) {
1075 ret = hns3_qs_to_pri_map_cfg(hw, i, i);
1076 if (ret) {
1077 hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
1078 return ret;
1079 }
1080 }
1081
1082
1083 ret = hns3_q_to_qs_map(hw);
1084 if (ret)
1085 hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
1086
1087 return ret;
1088}
1089
/* Program all DCB mapping tables: prio->TC, PG->prio, then the queues. */
static int
hns3_dcb_map_cfg(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_up_to_tc_map(hw);
	if (ret != 0) {
		hns3_err(hw, "up_to_tc mapping fail: %d", ret);
		return ret;
	}

	ret = hns3_pg_to_pri_map(hw);
	if (ret != 0) {
		hns3_err(hw, "pri_to_pg mapping fail: %d", ret);
		return ret;
	}

	return hns3_pri_q_qs_cfg(hw);
}
1109
/* Full scheduler bring-up: mappings, shapers, weights, then modes. */
static int
hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
{
	int ret;

	/* Step 1: mapping tables (prio/PG/queue-set/queue). */
	ret = hns3_dcb_map_cfg(hw);
	if (ret != 0)
		return ret;

	/* Step 2: rate shapers at port/PG/priority levels. */
	ret = hns3_dcb_shaper_cfg(hw);
	if (ret != 0)
		return ret;

	/* Step 3: DWRR weights. */
	ret = hns3_dcb_dwrr_cfg(hw);
	if (ret != 0)
		return ret;

	/* Step 4: SP/DWRR scheduling modes. */
	return hns3_dcb_schd_mode_cfg(hw);
}
1133
1134static int
1135hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
1136 uint8_t pause_trans_gap, uint16_t pause_trans_time)
1137{
1138 struct hns3_cfg_pause_param_cmd *pause_param;
1139 struct hns3_cmd_desc desc;
1140
1141 pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1142
1143 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
1144
1145 memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1146 memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1147 pause_param->pause_trans_gap = pause_trans_gap;
1148 pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1149
1150 return hns3_cmd_send(hw, &desc, 1);
1151}
1152
1153int
1154hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
1155{
1156 struct hns3_cfg_pause_param_cmd *pause_param;
1157 struct hns3_cmd_desc desc;
1158 uint16_t trans_time;
1159 uint8_t trans_gap;
1160 int ret;
1161
1162 pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1163
1164 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);
1165
1166 ret = hns3_cmd_send(hw, &desc, 1);
1167 if (ret)
1168 return ret;
1169
1170 trans_gap = pause_param->pause_trans_gap;
1171 trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);
1172
1173 return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
1174}
1175
/*
 * Derive the pause transmission gap from the pause time and program
 * both into the hardware.
 *
 * The gap is chosen so that refresh frames are sent well before the
 * peer's pause timer expires: capped at the default gap for large pause
 * times, otherwise pause_time / 2 - 1. Pause times below the minimum
 * are clamped to PAUSE_TIME_MIN_VALUE with a warning.
 * NOTE(review): the "refresh before expiry" rationale is inferred from
 * the arithmetic — confirm against IEEE 802.3 flow-control guidance.
 */
static int
hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
{
#define PAUSE_TIME_DIV_BY	2
#define PAUSE_TIME_MIN_VALUE	0x4

	struct hns3_mac *mac = &hw->mac;
	uint8_t pause_trans_gap;

	/*
	 * Pause transmit gap must be less than "pause_time / 2", otherwise
	 * the behavior of MAC is undefined.
	 */
	if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
		pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
	else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
		 pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
		pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
	else {
		hns3_warn(hw, "pause_time(%u) is adjusted to 4", pause_time);
		pause_time = PAUSE_TIME_MIN_VALUE;
		pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
	}

	return hns3_pause_param_cfg(hw, mac->mac_addr,
				    pause_trans_gap, pause_time);
}
1203
1204static int
1205hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1206{
1207 struct hns3_cmd_desc desc;
1208
1209 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1210
1211 desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1212 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1213
1214 return hns3_cmd_send(hw, &desc, 1);
1215}
1216
1217static int
1218hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1219{
1220 struct hns3_cmd_desc desc;
1221 struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1222
1223 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1224
1225 pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1226 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1227
1228 pfc->pri_en_bitmap = pfc_bitmap;
1229
1230 return hns3_cmd_send(hw, &desc, 1);
1231}
1232
1233static int
1234hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1235{
1236 struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1237 struct hns3_cmd_desc desc;
1238
1239 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1240
1241 bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1242
1243 bp_to_qs_map_cmd->tc_id = tc;
1244 bp_to_qs_map_cmd->qs_group_id = grp_id;
1245 bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1246
1247 return hns3_cmd_send(hw, &desc, 1);
1248}
1249
1250static void
1251hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1252{
1253 switch (hw->requested_fc_mode) {
1254 case HNS3_FC_NONE:
1255 *tx_en = false;
1256 *rx_en = false;
1257 break;
1258 case HNS3_FC_RX_PAUSE:
1259 *tx_en = false;
1260 *rx_en = true;
1261 break;
1262 case HNS3_FC_TX_PAUSE:
1263 *tx_en = true;
1264 *rx_en = false;
1265 break;
1266 case HNS3_FC_FULL:
1267 *tx_en = true;
1268 *rx_en = true;
1269 break;
1270 default:
1271 *tx_en = false;
1272 *rx_en = false;
1273 break;
1274 }
1275}
1276
1277static int
1278hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1279{
1280 bool tx_en, rx_en;
1281
1282 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1283 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1284 else {
1285 tx_en = false;
1286 rx_en = false;
1287 }
1288
1289 return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
1290}
1291
1292static int
1293hns3_pfc_setup_hw(struct hns3_hw *hw)
1294{
1295 bool tx_en, rx_en;
1296
1297 if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1298 hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1299 else {
1300 tx_en = false;
1301 rx_en = false;
1302 }
1303
1304 return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1305}
1306
1307
1308
1309
1310
1311
1312static int
1313hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1314{
1315 uint32_t qs_bitmap;
1316 int ret;
1317 int i;
1318
1319 for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1320 uint8_t grp, sub_grp;
1321 qs_bitmap = 0;
1322
1323 grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1324 sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1325 HNS3_BP_SUB_GRP_ID_S);
1326 if (i == grp)
1327 qs_bitmap |= (1 << sub_grp);
1328
1329 ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
1330 if (ret)
1331 return ret;
1332 }
1333
1334 return 0;
1335}
1336
1337static int
1338hns3_dcb_bp_setup(struct hns3_hw *hw)
1339{
1340 int ret, i;
1341
1342 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1343 ret = hns3_bp_setup_hw(hw, i);
1344 if (ret)
1345 return ret;
1346 }
1347
1348 return 0;
1349}
1350
/*
 * Program all pause-related hardware state: pause frame parameters
 * (pause time), MAC-level link pause, and — on DCB-capable devices —
 * per-priority flow control plus the TC back-pressure mapping.
 *
 * Returns 0 on success, a negative errno on the first failing step.
 */
static int
hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
	if (ret) {
		hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
		return ret;
	}

	ret = hns3_mac_pause_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
		return ret;
	}

	/* Only DCB-capable devices support PFC; plain MAC pause is done. */
	if (!hns3_dev_dcb_supported(hw))
		return 0;

	ret = hns3_pfc_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "config pfc failed! ret = %d", ret);
		return ret;
	}

	return hns3_dcb_bp_setup(hw);
}
1382
1383static uint8_t
1384hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1385{
1386 uint8_t pfc_map = 0;
1387 uint8_t *prio_tc;
1388 uint8_t i, j;
1389
1390 prio_tc = hw->dcb_info.prio_tc;
1391 for (i = 0; i < hw->dcb_info.num_tc; i++) {
1392 for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1393 if (prio_tc[j] == i && pfc_en & BIT(j)) {
1394 pfc_map |= BIT(i);
1395 break;
1396 }
1397 }
1398 }
1399
1400 return pfc_map;
1401}
1402
1403static uint8_t
1404hns3_dcb_parse_num_tc(struct hns3_adapter *hns)
1405{
1406 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1407 struct hns3_hw *hw = &hns->hw;
1408 uint8_t max_tc_id = 0;
1409 int i;
1410
1411 dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1412 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1413 if (dcb_rx_conf->dcb_tc[i] > max_tc_id)
1414 max_tc_id = dcb_rx_conf->dcb_tc[i];
1415 }
1416
1417
1418 return max_tc_id + 1;
1419}
1420
/*
 * Fill the driver's DCB bookkeeping (single priority group, per-TC DWRR
 * weights, priority-to-TC map) from the user-supplied DCB Rx
 * configuration, then push the resulting TC/queue mapping down to the
 * queue layer. Returns 0 on success, a negative errno otherwise.
 */
static int
hns3_dcb_info_cfg(struct hns3_adapter *hns)
{
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint8_t tc_bw, bw_rest;
	uint8_t i, j;
	int ret;

	dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
	pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;

	/* All TCs hang under priority group 0, which owns full bandwidth. */
	memset(hw->dcb_info.pg_info, 0,
	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
	hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
	hw->dcb_info.pg_info[0].pg_id = 0;
	hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
	hw->dcb_info.pg_info[0].bw_limit = hw->max_tm_rate;
	hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;

	/* Give every valid TC an equal share of the 100% bandwidth. */
	tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
	for (i = 0; i < hw->dcb_info.num_tc; i++)
		hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;

	/*
	 * 100 may not divide evenly by num_tc; hand the remainder out one
	 * extra percent at a time to the first TCs so the weights sum to 100.
	 */
	bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
	for (j = 0; j < bw_rest; j++)
		hw->dcb_info.pg_info[0].tc_dwrr[j]++;
	/* TCs configured but beyond num_tc get zero weight. */
	for (; i < dcb_rx_conf->nb_tcs; i++)
		hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;

	/* Every valid TC maps to pg0 with DWRR and the PG's rate limit. */
	memset(hw->dcb_info.tc_info, 0,
	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		hw->dcb_info.tc_info[i].tc_id = i;
		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.tc_info[i].pgid = 0;
		hw->dcb_info.tc_info[i].bw_limit =
					hw->dcb_info.pg_info[0].bw_limit;
	}

	/* User priority to TC map is taken verbatim from the config. */
	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
		hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];

	ret = hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
					       hw->data->nb_tx_queues);
	if (ret)
		hns3_err(hw, "update tc queue mapping failed, ret = %d.", ret);

	return ret;
}
1476
1477static int
1478hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
1479{
1480 struct hns3_pf *pf = &hns->pf;
1481 struct hns3_hw *hw = &hns->hw;
1482 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1483 uint16_t nb_tx_q = hw->data->nb_tx_queues;
1484 uint8_t bit_map = 0;
1485 uint8_t i;
1486
1487 if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1488 hw->dcb_info.num_pg != 1)
1489 return -EINVAL;
1490
1491 if (nb_rx_q < num_tc) {
1492 hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
1493 nb_rx_q, num_tc);
1494 return -EINVAL;
1495 }
1496
1497 if (nb_tx_q < num_tc) {
1498 hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
1499 nb_tx_q, num_tc);
1500 return -EINVAL;
1501 }
1502
1503
1504 hw->dcb_info.num_tc = num_tc;
1505 for (i = 0; i < hw->dcb_info.num_tc; i++)
1506 bit_map |= BIT(i);
1507
1508 if (!bit_map) {
1509 bit_map = 1;
1510 hw->dcb_info.num_tc = 1;
1511 }
1512 hw->hw_tc_map = bit_map;
1513
1514 return hns3_dcb_info_cfg(hns);
1515}
1516
/*
 * Apply the full DCB configuration to hardware: scheduler setup,
 * PFC enable state derived from dcb_capability_en, packet buffer
 * allocation and pause/PFC programming.
 *
 * The flow control state (mode, status, pfc_en, hw_pfc_map) is
 * snapshotted on entry and rolled back on any failure after it has
 * been modified, so a failed configure leaves the software state
 * consistent with the hardware.
 */
static int
hns3_dcb_hw_configure(struct hns3_adapter *hns)
{
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_fc_status fc_status = hw->current_fc_status;
	enum hns3_fc_mode requested_fc_mode = hw->requested_fc_mode;
	uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
	uint8_t pfc_en = hw->dcb_info.pfc_en;
	int ret;

	/* Only TC-based and VNET-based Tx scheduling are supported. */
	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
	    pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
		return -ENOTSUP;

	ret = hns3_dcb_schd_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "dcb schdule configure failed! ret = %d", ret);
		return ret;
	}

	if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		/* nb_tcs == 0 means "all priorities"; else enable the
		 * lowest nb_tcs priorities via a contiguous mask.
		 */
		if (dcb_rx_conf->nb_tcs == 0)
			hw->dcb_info.pfc_en = 1;
		else
			hw->dcb_info.pfc_en =
			RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);

		hw->dcb_info.hw_pfc_map =
			hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);

		hw->current_fc_status = HNS3_FC_STATUS_PFC;
		hw->requested_fc_mode = HNS3_FC_FULL;
	} else {
		hw->current_fc_status = HNS3_FC_STATUS_NONE;
		hw->requested_fc_mode = HNS3_FC_NONE;
		hw->dcb_info.pfc_en = 0;
		hw->dcb_info.hw_pfc_map = 0;
	}

	/* Buffer allocation depends on the hw_pfc_map set above. */
	ret = hns3_buffer_alloc(hw);
	if (ret)
		goto cfg_fail;

	ret = hns3_dcb_pause_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "setup pfc failed! ret = %d", ret);
		goto cfg_fail;
	}

	return 0;

cfg_fail:
	/* Restore the snapshot taken on entry. */
	hw->requested_fc_mode = requested_fc_mode;
	hw->current_fc_status = fc_status;
	hw->dcb_info.pfc_en = pfc_en;
	hw->dcb_info.hw_pfc_map = hw_pfc_map;

	return ret;
}
1579
1580
1581
1582
1583
1584
1585int
1586hns3_dcb_configure(struct hns3_adapter *hns)
1587{
1588 struct hns3_hw *hw = &hns->hw;
1589 uint8_t num_tc;
1590 int ret;
1591
1592 num_tc = hns3_dcb_parse_num_tc(hns);
1593 ret = hns3_dcb_info_update(hns, num_tc);
1594 if (ret) {
1595 hns3_err(hw, "dcb info update failed: %d", ret);
1596 return ret;
1597 }
1598
1599 ret = hns3_dcb_hw_configure(hns);
1600 if (ret) {
1601 hns3_err(hw, "dcb sw configure failed: %d", ret);
1602 return ret;
1603 }
1604
1605 return 0;
1606}
1607
/*
 * Program the DCB scheduler and pause/PFC hardware state. Returns 0 on
 * success, a negative errno from the first failing step otherwise.
 */
int
hns3_dcb_init_hw(struct hns3_hw *hw)
{
	int ret = hns3_dcb_schd_setup_hw(hw);

	if (ret != 0) {
		hns3_err(hw, "dcb schedule setup failed: %d", ret);
	} else {
		ret = hns3_dcb_pause_setup_hw(hw);
		if (ret != 0)
			hns3_err(hw, "PAUSE setup failed: %d", ret);
	}

	return ret;
}
1625
/*
 * One-time / post-reset DCB initialization. The software defaults
 * (flow control mode, pause time, DCB info, TC/queue mapping) are only
 * established on first init (adapter still UNINITIALIZED); on a reset
 * recovery the previously configured software state is kept and only
 * the hardware is reprogrammed.
 */
int
hns3_dcb_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t default_tqp_num;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Initialize software flow-control defaults only on the very first
	 * init; across resets the user's configuration must survive.
	 */
	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
		hw->requested_fc_mode = HNS3_FC_NONE;
		pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
		hw->current_fc_status = HNS3_FC_STATUS_NONE;

		ret = hns3_dcb_info_init(hw);
		if (ret) {
			hns3_err(hw, "dcb info init failed, ret = %d.", ret);
			return ret;
		}

		/*
		 * The number of queues configured by the user is not yet
		 * known here, so size the default mapping from the RSS size
		 * and the per-TC share of the total TQPs.
		 */
		default_tqp_num = RTE_MIN(hw->rss_size_max,
					  hw->tqps_num / hw->dcb_info.num_tc);
		ret = hns3_dcb_update_tc_queue_mapping(hw, default_tqp_num,
						       default_tqp_num);
		if (ret) {
			hns3_err(hw,
				 "update tc queue mapping failed, ret = %d.",
				 ret);
			return ret;
		}
	}

	/*
	 * Hardware is reprogrammed unconditionally: after a reset the
	 * device registers are back at their defaults even though the
	 * software state above was preserved.
	 */
	ret = hns3_dcb_init_hw(hw);
	if (ret) {
		hns3_err(hw, "dcb init hardware failed, ret = %d.", ret);
		return ret;
	}

	return 0;
}
1686
1687int
1688hns3_update_queue_map_configure(struct hns3_adapter *hns)
1689{
1690 struct hns3_hw *hw = &hns->hw;
1691 enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1692 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1693 uint16_t nb_tx_q = hw->data->nb_tx_queues;
1694 int ret;
1695
1696 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
1697 return 0;
1698
1699 ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1700 if (ret) {
1701 hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
1702 ret);
1703 return ret;
1704 }
1705 ret = hns3_q_to_qs_map(hw);
1706 if (ret)
1707 hns3_err(hw, "failed to map nq to qs, ret = %d.", ret);
1708
1709 return ret;
1710}
1711
1712static void
1713hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
1714{
1715 switch (mode) {
1716 case RTE_FC_NONE:
1717 hw->requested_fc_mode = HNS3_FC_NONE;
1718 break;
1719 case RTE_FC_RX_PAUSE:
1720 hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
1721 break;
1722 case RTE_FC_TX_PAUSE:
1723 hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
1724 break;
1725 case RTE_FC_FULL:
1726 hw->requested_fc_mode = HNS3_FC_FULL;
1727 break;
1728 default:
1729 hw->requested_fc_mode = HNS3_FC_NONE;
1730 hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
1731 "configured to RTE_FC_NONE", mode);
1732 break;
1733 }
1734}
1735
1736
1737
1738
1739
1740
1741
/*
 * Enable PFC for the priority given in pfc_conf, reallocating the
 * packet buffer for the new undrop-TC set and reprogramming pause/PFC
 * hardware state.
 *
 * All mutated software state is snapshotted on entry. The two rollback
 * labels restore exactly what was changed before the failing step:
 * buffer_alloc_fail undoes only pfc_en/hw_pfc_map, pfc_setup_fail
 * additionally undoes the mode, status and pause time set afterwards.
 */
int
hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_fc_status fc_status = hw->current_fc_status;
	enum hns3_fc_mode old_fc_mode = hw->requested_fc_mode;
	uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
	uint8_t pfc_en = hw->dcb_info.pfc_en;
	uint8_t priority = pfc_conf->priority;
	uint16_t pause_time = pf->pause_time;
	int ret;

	/* Add this priority to the enable set and recompute undrop TCs. */
	hw->dcb_info.pfc_en |= BIT(priority);
	hw->dcb_info.hw_pfc_map =
			hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
	ret = hns3_buffer_alloc(hw);
	if (ret) {
		hns3_err(hw, "update packet buffer failed, ret = %d", ret);
		goto buffer_alloc_fail;
	}

	pf->pause_time = pfc_conf->fc.pause_time;
	hns3_get_fc_mode(hw, pfc_conf->fc.mode);
	if (hw->requested_fc_mode == HNS3_FC_NONE)
		hw->current_fc_status = HNS3_FC_STATUS_NONE;
	else
		hw->current_fc_status = HNS3_FC_STATUS_PFC;

	/*
	 * Push the new pause/PFC state to hardware; this is the step the
	 * whole update hinges on, so all software changes above are rolled
	 * back if it fails.
	 */
	ret = hns3_dcb_pause_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "enable pfc failed! ret = %d", ret);
		goto pfc_setup_fail;
	}

	return 0;

pfc_setup_fail:
	hw->requested_fc_mode = old_fc_mode;
	hw->current_fc_status = fc_status;
	pf->pause_time = pause_time;
buffer_alloc_fail:
	hw->dcb_info.pfc_en = pfc_en;
	hw->dcb_info.hw_pfc_map = hw_pfc_map;

	return ret;
}
1793
1794
1795
1796
1797
1798
1799
1800int
1801hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1802{
1803 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1804 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1805 enum hns3_fc_mode old_fc_mode = hw->requested_fc_mode;
1806 enum hns3_fc_status fc_status = hw->current_fc_status;
1807 uint16_t pause_time = pf->pause_time;
1808 int ret;
1809
1810 pf->pause_time = fc_conf->pause_time;
1811 hns3_get_fc_mode(hw, fc_conf->mode);
1812
1813
1814
1815
1816
1817 if (hw->requested_fc_mode == HNS3_FC_NONE)
1818 hw->current_fc_status = HNS3_FC_STATUS_NONE;
1819 else
1820 hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1821
1822 ret = hns3_dcb_pause_setup_hw(hw);
1823 if (ret) {
1824 hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1825 goto setup_fc_fail;
1826 }
1827
1828 return 0;
1829
1830setup_fc_fail:
1831 hw->requested_fc_mode = old_fc_mode;
1832 hw->current_fc_status = fc_status;
1833 pf->pause_time = pause_time;
1834
1835 return ret;
1836}
1837