/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 Hisilicon Limited.
 */

#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_dcb.h"
#include "hns3_logs.h"
#include "hns3_tm.h"

static inline uint32_t
hns3_tm_max_tx_queues_get(struct rte_eth_dev *dev)
{
	/*
	 * This function is called during PCI device probe, before
	 * rte_eth_dev_info_get() is usable (rte_eth_devices is not set up
	 * yet), so query the device info directly via hns3_dev_infos_get().
	 */
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	(void)hns3_dev_infos_get(dev, &dev_info);
	return RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT);
}

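/*
 * Set up the default TM configuration for the PF: capacity limits derived
 * from the Tx queue count, empty shaper profile and node lists, no root
 * node, and the hierarchy marked as not committed.
 */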
void
hns3_tm_conf_init(struct rte_eth_dev *dev)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (!hns3_dev_tm_supported(hw))
		return;

	pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
	pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
	pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;

	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
	pf->tm_conf.nb_shaper_profile = 0;

	pf->tm_conf.root = NULL;
	TAILQ_INIT(&pf->tm_conf.tc_list);
	TAILQ_INIT(&pf->tm_conf.queue_list);
	pf->tm_conf.nb_tc_node = 0;
	pf->tm_conf.nb_queue_node = 0;

	pf->tm_conf.committed = false;
}

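/*
 * Release all TM configuration: the queue and TC node lists, the root node
 * and every shaper profile, then reset the capacity limits to zero.
 */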
void
hns3_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;
	struct hns3_tm_node *tm_node;

	if (!hns3_dev_tm_supported(hw))
		return;

	if (pf->tm_conf.nb_queue_node > 0) {
		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
			TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
			rte_free(tm_node);
		}
		pf->tm_conf.nb_queue_node = 0;
	}

	if (pf->tm_conf.nb_tc_node > 0) {
		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
			TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
			rte_free(tm_node);
		}
		pf->tm_conf.nb_tc_node = 0;
	}

	if (pf->tm_conf.root != NULL) {
		rte_free(pf->tm_conf.root);
		pf->tm_conf.root = NULL;
	}

	if (pf->tm_conf.nb_shaper_profile > 0) {
		while ((shaper_profile =
			TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
			TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
				     shaper_profile, node);
			rte_free(shaper_profile);
		}
		pf->tm_conf.nb_shaper_profile = 0;
	}

	pf->tm_conf.nb_leaf_nodes_max = 0;
	pf->tm_conf.nb_nodes_max = 0;
	pf->tm_conf.nb_shaper_profile_max = 0;
}

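/*
 * The firmware expresses rates in Mbit/s while the rte_tm API uses bytes
 * per second: 1 Mbit/s = 1000000 / 8 = 125000 bytes/s, hence the scale
 * factor used by the two helpers below.
 */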
static inline uint64_t
hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate)
{
#define FIRMWARE_TO_TM_RATE_SCALE	125000

	return ((uint64_t)firmware_rate) * FIRMWARE_TO_TM_RATE_SCALE;
}

static inline uint32_t
hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate)
{
#define TM_TO_FIRMWARE_RATE_SCALE	125000

	return (uint32_t)(tm_rate / TM_TO_FIRMWARE_RATE_SCALE);
}

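/*
 * Report the global TM capabilities: one port node, up to HNS3_MAX_TC_NUM
 * TC nodes and one leaf node per Tx queue, with one private shaper for the
 * port and one per TC.
 */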
static int
hns3_tm_capabilities_get(struct rte_eth_dev *dev,
			 struct rte_tm_capabilities *cap,
			 struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (cap == NULL || error == NULL)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	cap->n_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
	cap->n_levels_max = HNS3_TM_NODE_LEVEL_MAX;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM;
	cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM;
	cap->shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);

	cap->sched_n_children_max = max_tx_queues;
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_weight_max = 1;

	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;

	return 0;
}

static struct hns3_tm_shaper_profile *
hns3_tm_shaper_profile_search(struct rte_eth_dev *dev,
			      uint32_t shaper_profile_id)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_shaper_profile_list *shaper_profile_list =
		&pf->tm_conf.shaper_profile_list;
	struct hns3_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}

static int
hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
				   struct rte_tm_shaper_params *profile,
				   struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}

	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}

	if (profile->peak.rate >
	    hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate)) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
		error->message = "peak rate too large";
		return -EINVAL;
	}

	if (profile->peak.rate < hns3_tm_rate_convert_firmware2tm(1)) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
		error->message = "peak rate must be at least 1 Mbps";
		return -EINVAL;
	}

	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}

	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	if (profile->packet_mode) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE;
		error->message = "packet mode not supported";
		return -EINVAL;
	}

	return 0;
}

static int
hns3_tm_shaper_profile_add(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id,
			   struct rte_tm_shaper_params *profile,
			   struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;
	int ret;

	if (profile == NULL || error == NULL)
		return -EINVAL;

	if (pf->tm_conf.nb_shaper_profile >=
	    pf->tm_conf.nb_shaper_profile_max) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "too many profiles";
		return -EINVAL;
	}

	ret = hns3_tm_shaper_profile_param_check(dev, profile, error);
	if (ret)
		return ret;

	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("hns3_tm_shaper_profile",
				     sizeof(struct hns3_tm_shaper_profile),
				     0);
	if (shaper_profile == NULL)
		return -ENOMEM;

	shaper_profile->shaper_profile_id = shaper_profile_id;
	memcpy(&shaper_profile->profile, profile,
	       sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
			  shaper_profile, node);
	pf->tm_conf.nb_shaper_profile++;

	return 0;
}

static int
hns3_tm_shaper_profile_del(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id,
			   struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_shaper_profile *shaper_profile;

	if (error == NULL)
		return -EINVAL;

	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
	if (shaper_profile == NULL) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);
	pf->tm_conf.nb_shaper_profile--;

	return 0;
}

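/*
 * Look up a node by ID across the three levels: the root (port) node first,
 * then the TC list, then the queue list. The level found is reported
 * through @node_type.
 */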
static struct hns3_tm_node *
hns3_tm_node_search(struct rte_eth_dev *dev,
		    uint32_t node_id,
		    enum hns3_tm_node_type *node_type)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node_list *queue_list = &pf->tm_conf.queue_list;
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;

	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
		*node_type = HNS3_TM_NODE_TYPE_PORT;
		return pf->tm_conf.root;
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = HNS3_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = HNS3_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}

static int
hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
				 struct rte_tm_node_params *params,
				 struct rte_tm_error *error)
{
	struct hns3_tm_shaper_profile *shaper_profile;

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = hns3_tm_shaper_profile_search(dev,
				 params->shaper_profile_id);
		if (shaper_profile == NULL) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile does not exist";
			return -EINVAL;
		}
	}

	if (params->nonleaf.wfq_weight_mode) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
		error->message = "WFQ not supported";
		return -EINVAL;
	}

	if (params->nonleaf.n_sp_priorities != 1) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
		error->message = "SP priority not supported";
		return -EINVAL;
	}

	return 0;
}

static int
hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
			      struct rte_tm_node_params *params,
			      struct rte_tm_error *error)
{
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper not supported";
		return -EINVAL;
	}

	if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "congestion management not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}

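/*
 * Node IDs encode the level: IDs in [0, nb_leaf_nodes_max) are queue (leaf)
 * nodes, IDs in [nb_leaf_nodes_max, nb_nodes_max - 1) are TC nodes, and
 * ID nb_nodes_max - 1 is the single port (root) node.
 */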
static int
hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
			 uint32_t priority, uint32_t weight,
			 struct rte_tm_node_params *params,
			 struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (hns3_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	if (node_id >= pf->tm_conf.nb_leaf_nodes_max)
		return hns3_tm_nonleaf_node_param_check(dev, params, error);
	else
		return hns3_tm_leaf_node_param_check(dev, params, error);
}

static int
hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		      uint32_t level_id, struct rte_tm_node_params *params,
		      struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_PORT) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	if (node_id != pf->tm_conf.nb_nodes_max - 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid port node ID";
		return -EINVAL;
	}

	if (pf->tm_conf.root) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "already have a root";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = NULL;
	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
				  params->shaper_profile_id);
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	pf->tm_conf.root = tm_node;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count++;

	return 0;
}

static int
hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		    uint32_t level_id, struct hns3_tm_node *parent_node,
		    struct rte_tm_node_params *params,
		    struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	if (node_id >= pf->tm_conf.nb_nodes_max - 1 ||
	    node_id < pf->tm_conf.nb_leaf_nodes_max ||
	    hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id) >= hw->num_tc) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid tc node ID";
		return -EINVAL;
	}

	if (pf->tm_conf.nb_tc_node >= hw->num_tc) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "too many TCs";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
				  params->shaper_profile_id);
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, tm_node, node);
	pf->tm_conf.nb_tc_node++;
	tm_node->parent->reference_count++;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count++;

	return 0;
}

static int
hns3_tm_queue_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		       uint32_t level_id, struct hns3_tm_node *parent_node,
		       struct rte_tm_node_params *params,
		       struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct hns3_tm_node *tm_node;

	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != HNS3_TM_NODE_LEVEL_QUEUE) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "wrong level";
		return -EINVAL;
	}

	/* note: dev->data->nb_tx_queues cannot exceed max_tx_queues */
	if (node_id >= dev->data->nb_tx_queues) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid queue node ID";
		return -EINVAL;
	}

	if (hns3_txq_mapped_tc_get(hw, node_id) !=
	    hns3_tm_calc_node_tc_no(&pf->tm_conf, parent_node->id)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "queue's TC does not match parent's TC";
		return -EINVAL;
	}

	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
	if (tm_node == NULL)
		return -ENOMEM;

	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, tm_node, node);
	pf->tm_conf.nb_queue_node++;
	tm_node->parent->reference_count++;

	return 0;
}

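/*
 * rte_tm node_add hook: validate the common parameters, then dispatch to
 * the port, TC or queue add helper based on the parent node.
 */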
static int
hns3_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		 uint32_t parent_node_id, uint32_t priority,
		 uint32_t weight, uint32_t level_id,
		 struct rte_tm_node_params *params,
		 struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type parent_node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *parent_node;
	int ret;

	if (params == NULL || error == NULL)
		return -EINVAL;

	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = hns3_tm_node_param_check(dev, node_id, priority, weight,
				       params, error);
	if (ret)
		return ret;

	/* The root node has no parent. */
	if (parent_node_id == RTE_TM_NODE_ID_NULL)
		return hns3_tm_port_node_add(dev, node_id, level_id,
					     params, error);

	parent_node = hns3_tm_node_search(dev, parent_node_id,
					  &parent_node_type);
	if (parent_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}

	if (parent_node_type != HNS3_TM_NODE_TYPE_PORT &&
	    parent_node_type != HNS3_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}

	if (parent_node_type == HNS3_TM_NODE_TYPE_PORT)
		return hns3_tm_tc_node_add(dev, node_id, level_id,
					   parent_node, params, error);
	else
		return hns3_tm_queue_node_add(dev, node_id, level_id,
					      parent_node, params, error);
}

static void
hns3_tm_node_do_delete(struct hns3_pf *pf,
		       enum hns3_tm_node_type node_type,
		       struct hns3_tm_node *tm_node)
{
	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
		if (tm_node->shaper_profile)
			tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		pf->tm_conf.root = NULL;
		return;
	}

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == HNS3_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
		pf->tm_conf.nb_tc_node--;
	} else {
		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
		pf->tm_conf.nb_queue_node--;
	}
	rte_free(tm_node);
}

static int
hns3_tm_node_delete(struct rte_eth_dev *dev,
		    uint32_t node_id,
		    struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *tm_node;

	if (error == NULL)
		return -EINVAL;

	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "cannot delete a node which has children";
		return -EINVAL;
	}

	hns3_tm_node_do_delete(pf, node_type, tm_node);

	return 0;
}

static int
hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		      int *is_leaf, struct rte_tm_error *error)
{
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_node *tm_node;

	if (is_leaf == NULL || error == NULL)
		return -EINVAL;

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type == HNS3_TM_NODE_TYPE_QUEUE)
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}

static void
hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t level_id,
				       struct rte_tm_level_capabilities *cap)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	if (level_id == HNS3_TM_NODE_LEVEL_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->n_nodes_leaf_max = 0;
	} else {
		cap->n_nodes_max = HNS3_MAX_TC_NUM;
		cap->n_nodes_nonleaf_max = HNS3_MAX_TC_NUM;
		cap->n_nodes_leaf_max = 0;
	}

	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->nonleaf.shaper_private_supported = true;
	cap->nonleaf.shaper_private_dual_rate_supported = false;
	cap->nonleaf.shaper_private_rate_min = 0;
	cap->nonleaf.shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->nonleaf.shaper_shared_n_max = 0;
	if (level_id == HNS3_TM_NODE_LEVEL_PORT)
		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
	else
		cap->nonleaf.sched_n_children_max = max_tx_queues;
	cap->nonleaf.sched_sp_n_priorities_max = 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
	cap->nonleaf.sched_wfq_n_groups_max = 0;
	cap->nonleaf.sched_wfq_weight_max = 1;
	cap->nonleaf.stats_mask = 0;
}

static void
hns3_tm_leaf_level_capabilities_get(struct rte_eth_dev *dev,
				    struct rte_tm_level_capabilities *cap)
{
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	cap->n_nodes_max = max_tx_queues;
	cap->n_nodes_nonleaf_max = 0;
	cap->n_nodes_leaf_max = max_tx_queues;

	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->leaf.shaper_private_supported = false;
	cap->leaf.shaper_private_dual_rate_supported = false;
	cap->leaf.shaper_private_rate_min = 0;
	cap->leaf.shaper_private_rate_max = 0;
	cap->leaf.shaper_shared_n_max = 0;
	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = false;
	cap->leaf.cman_wred_context_shared_n_max = 0;
	cap->leaf.stats_mask = 0;
}

static int
hns3_tm_level_capabilities_get(struct rte_eth_dev *dev,
			       uint32_t level_id,
			       struct rte_tm_level_capabilities *cap,
			       struct rte_tm_error *error)
{
	if (cap == NULL || error == NULL)
		return -EINVAL;

	if (level_id >= HNS3_TM_NODE_LEVEL_MAX) {
		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
		error->message = "too deep level";
		return -EINVAL;
	}

	memset(cap, 0, sizeof(struct rte_tm_level_capabilities));

	if (level_id != HNS3_TM_NODE_LEVEL_QUEUE)
		hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap);
	else
		hns3_tm_leaf_level_capabilities_get(dev, cap);

	return 0;
}

static void
hns3_tm_nonleaf_node_capabilities_get(struct rte_eth_dev *dev,
				      enum hns3_tm_node_type node_type,
				      struct rte_tm_node_capabilities *cap)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

	cap->shaper_private_supported = true;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max =
		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
	cap->shaper_shared_n_max = 0;

	if (node_type == HNS3_TM_NODE_TYPE_PORT)
		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
	else
		cap->nonleaf.sched_n_children_max = max_tx_queues;
	cap->nonleaf.sched_sp_n_priorities_max = 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
	cap->nonleaf.sched_wfq_n_groups_max = 0;
	cap->nonleaf.sched_wfq_weight_max = 1;

	cap->stats_mask = 0;
}

static void
hns3_tm_leaf_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
				   struct rte_tm_node_capabilities *cap)
{
	cap->shaper_private_supported = false;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	cap->shaper_private_rate_max = 0;
	cap->shaper_shared_n_max = 0;

	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = false;
	cap->leaf.cman_wred_context_shared_n_max = 0;

	cap->stats_mask = 0;
}

static int
hns3_tm_node_capabilities_get(struct rte_eth_dev *dev,
			      uint32_t node_id,
			      struct rte_tm_node_capabilities *cap,
			      struct rte_tm_error *error)
{
	enum hns3_tm_node_type node_type;
	struct hns3_tm_node *tm_node;

	if (cap == NULL || error == NULL)
		return -EINVAL;

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	memset(cap, 0, sizeof(struct rte_tm_node_capabilities));

	if (node_type != HNS3_TM_NODE_TYPE_QUEUE)
		hns3_tm_nonleaf_node_capabilities_get(dev, node_type, cap);
	else
		hns3_tm_leaf_node_capabilities_get(dev, cap);

	return 0;
}

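/*
 * Program the port-level rate limit through the
 * HNS3_OPC_TM_PORT_LIMIT_RATE firmware command. A NULL profile restores
 * the hardware maximum rate.
 */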
static int
hns3_tm_config_port_rate(struct hns3_hw *hw,
			 struct hns3_tm_shaper_profile *shaper_profile)
{
	struct hns3_port_limit_rate_cmd *cfg;
	struct hns3_cmd_desc desc;
	uint32_t firmware_rate;
	uint64_t rate;
	int ret;

	if (shaper_profile) {
		rate = shaper_profile->profile.peak.rate;
		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
	} else {
		firmware_rate = hw->max_tm_rate;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_LIMIT_RATE, false);
	cfg = (struct hns3_port_limit_rate_cmd *)desc.data;
	cfg->speed = rte_cpu_to_le_32(firmware_rate);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "failed to config port rate, ret = %d", ret);

	return ret;
}

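/*
 * Program one TC's rate limit through the HNS3_OPC_TM_TC_LIMIT_RATE
 * firmware command. A NULL profile restores the bandwidth limit from the
 * DCB configuration.
 */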
static int
hns3_tm_config_tc_rate(struct hns3_hw *hw, uint8_t tc_no,
		       struct hns3_tm_shaper_profile *shaper_profile)
{
	struct hns3_tc_limit_rate_cmd *cfg;
	struct hns3_cmd_desc desc;
	uint32_t firmware_rate;
	uint64_t rate;
	int ret;

	if (shaper_profile) {
		rate = shaper_profile->profile.peak.rate;
		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
	} else {
		firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_TC_LIMIT_RATE, false);
	cfg = (struct hns3_tc_limit_rate_cmd *)desc.data;
	cfg->speed = rte_cpu_to_le_32(firmware_rate);
	cfg->tc_id = tc_no;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "failed to config tc (%u) rate, ret = %d",
			 tc_no, ret);

	return ret;
}

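/*
 * Validate the hierarchy before it is committed: every TC node must have at
 * least one queue and map to an existing TC, and every queue node must
 * refer to a configured Tx queue whose TC matches its parent node.
 */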
static bool
hns3_tm_configure_check(struct hns3_hw *hw, struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_conf *tm_conf = &pf->tm_conf;
	struct hns3_tm_node_list *tc_list = &tm_conf->tc_list;
	struct hns3_tm_node_list *queue_list = &tm_conf->queue_list;
	struct hns3_tm_node *tm_node;

	/* Check the TC nodes. */
	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (!tm_node->reference_count) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "TC without queue assigned";
			return false;
		}

		if (hns3_tm_calc_node_tc_no(tm_conf, tm_node->id) >=
		    hw->num_tc) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "node's TC does not exist";
			return false;
		}
	}

	/* Check the queue nodes. */
	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id >= hw->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "node's queue invalid";
			return false;
		}

		if (hns3_txq_mapped_tc_get(hw, tm_node->id) !=
		    hns3_tm_calc_node_tc_no(tm_conf, tm_node->parent->id)) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "queue's TC does not match parent's TC";
			return false;
		}
	}

	return true;
}

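/* Apply the committed port and per-TC shaper profiles to the firmware. */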
static int
hns3_tm_hierarchy_do_commit(struct hns3_hw *hw,
			    struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;
	uint8_t tc_no;
	int ret;

	/* Configure the port node's rate limit. */
	tm_node = pf->tm_conf.root;
	if (tm_node->shaper_profile) {
		ret = hns3_tm_config_port_rate(hw, tm_node->shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "failed to set port peak rate";
			return -EIO;
		}
	}

	/* Configure each TC node's rate limit. */
	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->shaper_profile == NULL)
			continue;

		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
		ret = hns3_tm_config_tc_rate(hw, tc_no,
					     tm_node->shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "failed to set TC peak rate";
			return -EIO;
		}
	}

	return 0;
}

static int
hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
			 int clear_on_fail,
			 struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	if (error == NULL)
		return -EINVAL;

	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "device is resetting";
		/* Don't goto fail_clear; the user may try again later. */
		return -EBUSY;
	}

	if (pf->tm_conf.root == NULL)
		goto done;

	/* Validate the configuration before committing it to hardware. */
	if (!hns3_tm_configure_check(hw, error))
		goto fail_clear;

	ret = hns3_tm_hierarchy_do_commit(hw, error);
	if (ret)
		goto fail_clear;

done:
	pf->tm_conf.committed = true;
	return 0;

fail_clear:
	if (clear_on_fail) {
		hns3_tm_conf_uninit(dev);
		hns3_tm_conf_init(dev);
	}
	return -EINVAL;
}

static int
hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
			      int clear_on_fail,
			      struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

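/*
 * Apply a shaper change to the hardware: queue (leaf) nodes cannot carry a
 * shaper, and nothing is written until the hierarchy has been committed.
 */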
static int
hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
			      uint32_t node_id,
			      enum hns3_tm_node_type node_type,
			      struct hns3_tm_shaper_profile *shaper_profile,
			      struct rte_tm_error *error)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	uint8_t tc_no;
	int ret;

	if (node_type == HNS3_TM_NODE_TYPE_QUEUE) {
		if (shaper_profile != NULL) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
			error->message = "queue node shaper not supported";
			return -EINVAL;
		}
		return 0;
	}

	if (!pf->tm_conf.committed)
		return 0;

	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
		ret = hns3_tm_config_port_rate(hw, shaper_profile);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "failed to update port peak rate";
		}

		return ret;
	}

	/* Update the TC node's rate limit. */
	tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id);
	ret = hns3_tm_config_tc_rate(hw, tc_no, shaper_profile);
	if (ret) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "failed to update TC peak rate";
	}

	return ret;
}

static int
hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
			   uint32_t node_id,
			   uint32_t shaper_profile_id,
			   struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
	struct hns3_tm_shaper_profile *profile = NULL;
	struct hns3_tm_node *tm_node;

	if (error == NULL)
		return -EINVAL;

	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "device is resetting";
		return -EBUSY;
	}

	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
	if (tm_node == NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (shaper_profile_id == tm_node->params.shaper_profile_id)
		return 0;

	if (shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
		if (profile == NULL) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
			error->message = "profile ID does not exist";
			return -EINVAL;
		}
	}

	if (hns3_tm_node_shaper_do_update(hw, node_id, node_type,
					  profile, error))
		return -EINVAL;

	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->shaper_profile = profile;
	tm_node->params.shaper_profile_id = shaper_profile_id;
	if (profile != NULL)
		profile->reference_count++;

	return 0;
}

static int
hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
				uint32_t node_id,
				uint32_t shaper_profile_id,
				struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_node_shaper_update(dev, node_id,
					 shaper_profile_id, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static const struct rte_tm_ops hns3_tm_ops = {
	.capabilities_get = hns3_tm_capabilities_get,
	.shaper_profile_add = hns3_tm_shaper_profile_add,
	.shaper_profile_delete = hns3_tm_shaper_profile_del,
	.node_add = hns3_tm_node_add,
	.node_delete = hns3_tm_node_delete,
	.node_type_get = hns3_tm_node_type_get,
	.level_capabilities_get = hns3_tm_level_capabilities_get,
	.node_capabilities_get = hns3_tm_node_capabilities_get,
	.hierarchy_commit = hns3_tm_hierarchy_commit_wrap,
	.node_shaper_update = hns3_tm_node_shaper_update_wrap,
};

int
hns3_tm_ops_get(struct rte_eth_dev *dev, void *arg)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (arg == NULL)
		return -EINVAL;

	if (!hns3_dev_tm_supported(hw))
		return -EOPNOTSUPP;

	*(const void **)arg = &hns3_tm_ops;

	return 0;
}

void
hns3_tm_dev_start_proc(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

	if (!hns3_dev_tm_supported(hw))
		return;

	if (pf->tm_conf.root && !pf->tm_conf.committed)
		hns3_warn(hw,
			  "please call hierarchy_commit() before starting the port.");
}

/*
 * Clear the tm_conf committed flag when the device stops so that the user
 * can reconfigure and recommit the hierarchy, and restore the default port
 * and TC rate limits while the port is stopped.
 */
void
hns3_tm_dev_stop_proc(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;
	uint8_t tc_no;

	if (!pf->tm_conf.committed)
		return;

	tm_node = pf->tm_conf.root;
	if (tm_node != NULL && tm_node->shaper_profile)
		(void)hns3_tm_config_port_rate(hw, NULL);

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->shaper_profile == NULL)
			continue;
		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
		(void)hns3_tm_config_tc_rate(hw, tc_no, NULL);
	}

	pf->tm_conf.committed = false;
}

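/*
 * Re-apply a previously committed hierarchy to the hardware, used when the
 * shaper configuration has to be restored (e.g. after a reset).
 */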
int
hns3_tm_conf_update(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct rte_tm_error error;

	if (!hns3_dev_tm_supported(hw))
		return 0;

	if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
		return 0;

	memset(&error, 0, sizeof(struct rte_tm_error));
	return hns3_tm_hierarchy_do_commit(hw, &error);
}