dpdk/drivers/net/ice/ice_dcf_sched.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <rte_tm_driver.h>

#include "base/ice_sched.h"
#include "ice_dcf_ethdev.h"

static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
                                 __rte_unused int clear_on_fail,
                                 __rte_unused struct rte_tm_error *error);
static int ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id,
              uint32_t parent_node_id, uint32_t priority,
              uint32_t weight, uint32_t level_id,
              struct rte_tm_node_params *params,
              struct rte_tm_error *error);
static int ice_dcf_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
                            struct rte_tm_error *error);
static int ice_dcf_shaper_profile_add(struct rte_eth_dev *dev,
                        uint32_t shaper_profile_id,
                        struct rte_tm_shaper_params *profile,
                        struct rte_tm_error *error);
static int ice_dcf_shaper_profile_del(struct rte_eth_dev *dev,
                                   uint32_t shaper_profile_id,
                                   struct rte_tm_error *error);

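/* Generic traffic manager (rte_tm) callbacks exposed for the DCF port */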
const struct rte_tm_ops ice_dcf_tm_ops = {
        .shaper_profile_add = ice_dcf_shaper_profile_add,
        .shaper_profile_delete = ice_dcf_shaper_profile_del,
        .hierarchy_commit = ice_dcf_hierarchy_commit,
        .node_add = ice_dcf_node_add,
        .node_delete = ice_dcf_node_delete,
};

#define ICE_DCF_SCHED_TC_NODE 0xffff
#define ICE_DCF_VFID    0

void
ice_dcf_tm_conf_init(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;

        /* initialize shaper profile list */
        TAILQ_INIT(&hw->tm_conf.shaper_profile_list);

        /* initialize node configuration */
        hw->tm_conf.root = NULL;
        TAILQ_INIT(&hw->tm_conf.tc_list);
        TAILQ_INIT(&hw->tm_conf.vsi_list);
        hw->tm_conf.nb_tc_node = 0;
        hw->tm_conf.nb_vsi_node = 0;
        hw->tm_conf.committed = false;
}

void
ice_dcf_tm_conf_uninit(struct rte_eth_dev *dev)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;
        struct ice_dcf_tm_shaper_profile *shaper_profile;
        struct ice_dcf_tm_node *tm_node;

        /* clear node configuration */
        while ((tm_node = TAILQ_FIRST(&hw->tm_conf.vsi_list))) {
                TAILQ_REMOVE(&hw->tm_conf.vsi_list, tm_node, node);
                rte_free(tm_node);
        }
        hw->tm_conf.nb_vsi_node = 0;
        while ((tm_node = TAILQ_FIRST(&hw->tm_conf.tc_list))) {
                TAILQ_REMOVE(&hw->tm_conf.tc_list, tm_node, node);
                rte_free(tm_node);
        }
        hw->tm_conf.nb_tc_node = 0;
        if (hw->tm_conf.root) {
                rte_free(hw->tm_conf.root);
                hw->tm_conf.root = NULL;
        }

        /* Remove all shaper profiles */
        while ((shaper_profile =
               TAILQ_FIRST(&hw->tm_conf.shaper_profile_list))) {
                TAILQ_REMOVE(&hw->tm_conf.shaper_profile_list,
                             shaper_profile, node);
                rte_free(shaper_profile);
        }
}

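/*
 * Look up a node by id across the root (port), TC and VSI lists and
 * report which level it was found on via @node_type.
 */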
static inline struct ice_dcf_tm_node *
ice_dcf_tm_node_search(struct rte_eth_dev *dev,
                    uint32_t node_id, enum ice_dcf_tm_node_type *node_type)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;
        struct ice_dcf_tm_node_list *vsi_list = &hw->tm_conf.vsi_list;
        struct ice_dcf_tm_node_list *tc_list = &hw->tm_conf.tc_list;
        struct ice_dcf_tm_node *tm_node;

        if (hw->tm_conf.root && hw->tm_conf.root->id == node_id) {
                *node_type = ICE_DCF_TM_NODE_TYPE_PORT;
                return hw->tm_conf.root;
        }

        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (tm_node->id == node_id) {
                        *node_type = ICE_DCF_TM_NODE_TYPE_TC;
                        return tm_node;
                }
        }

        TAILQ_FOREACH(tm_node, vsi_list, node) {
                if (tm_node->id == node_id) {
                        *node_type = ICE_DCF_TM_NODE_TYPE_VSI;
                        return tm_node;
                }
        }

        return NULL;
}

static inline struct ice_dcf_tm_shaper_profile *
ice_dcf_shaper_profile_search(struct rte_eth_dev *dev,
                           uint32_t shaper_profile_id)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;
        struct ice_dcf_shaper_profile_list *shaper_profile_list =
                &hw->tm_conf.shaper_profile_list;
        struct ice_dcf_tm_shaper_profile *shaper_profile;

        TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
                if (shaper_profile_id == shaper_profile->shaper_profile_id)
                        return shaper_profile;
        }

        return NULL;
}

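/*
 * Check the node parameters this driver does not support: non-zero
 * priority, weight other than 1, shared shapers, WFQ/SP modes on
 * non-leaf nodes and congestion management/WRED on leaf nodes.
 */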
static int
ice_dcf_node_param_check(struct ice_dcf_hw *hw, uint32_t node_id,
                      uint32_t priority, uint32_t weight,
                      struct rte_tm_node_params *params,
                      struct rte_tm_error *error)
{
        /* check all the unsupported parameters */
        if (node_id == RTE_TM_NODE_ID_NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid node id";
                return -EINVAL;
        }

        if (priority) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
                error->message = "priority should be 0";
                return -EINVAL;
        }

        if (weight != 1) {
                error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
                error->message = "weight must be 1";
                return -EINVAL;
        }

        /* shared shapers are not supported */
        if (params->shared_shaper_id) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
                error->message = "shared shaper not supported";
                return -EINVAL;
        }
        if (params->n_shared_shapers) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
                error->message = "shared shaper not supported";
                return -EINVAL;
        }

        /* for non-leaf node */
        if (node_id >= 8 * hw->num_vfs) {
                if (params->nonleaf.wfq_weight_mode) {
                        error->type =
                                RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
                        error->message = "WFQ not supported";
                        return -EINVAL;
                }
                if (params->nonleaf.n_sp_priorities != 1) {
                        error->type =
                                RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
                        error->message = "SP priority not supported";
                        return -EINVAL;
                } else if (params->nonleaf.wfq_weight_mode &&
                           !(*params->nonleaf.wfq_weight_mode)) {
                        error->type =
                                RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
                        error->message = "WFQ should be byte mode";
                        return -EINVAL;
                }

                return 0;
        }

        /* for leaf node */
        if (params->leaf.cman) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
                error->message = "Congestion management not supported";
                return -EINVAL;
        }
        if (params->leaf.wred.wred_profile_id !=
            RTE_TM_WRED_PROFILE_ID_NONE) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
                error->message = "WRED not supported";
                return -EINVAL;
        }
        if (params->leaf.wred.shared_wred_context_id) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
                error->message = "WRED not supported";
                return -EINVAL;
        }
        if (params->leaf.wred.n_shared_wred_contexts) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
                error->message = "WRED not supported";
                return -EINVAL;
        }

        return 0;
}

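/*
 * Add a node to the three-level hierarchy: port (root) -> TC -> VSI.
 * TC nodes hang off the root node, VSI nodes hang off a TC node, with
 * at most one VSI node per VF under each TC.
 */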
static int
ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id,
              uint32_t parent_node_id, uint32_t priority,
              uint32_t weight, uint32_t level_id,
              struct rte_tm_node_params *params,
              struct rte_tm_error *error)
{
        enum ice_dcf_tm_node_type parent_node_type = ICE_DCF_TM_NODE_TYPE_MAX;
        enum ice_dcf_tm_node_type node_type = ICE_DCF_TM_NODE_TYPE_MAX;
        struct ice_dcf_tm_shaper_profile *shaper_profile = NULL;
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;
        struct ice_dcf_tm_node *parent_node;
        struct ice_dcf_tm_node *tm_node;
        uint16_t tc_nb = 1;
        int i, ret;

        if (!params || !error)
                return -EINVAL;

        /* if already committed */
        if (hw->tm_conf.committed) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "already committed";
                return -EINVAL;
        }

        ret = ice_dcf_node_param_check(hw, node_id, priority, weight,
                                   params, error);
        if (ret)
                return ret;

        for (i = 1; i < ICE_MAX_TRAFFIC_CLASS; i++) {
                if (hw->ets_config->tc_valid_bits & (1 << i))
                        tc_nb++;
        }

        /* check if the node already exists */
        if (ice_dcf_tm_node_search(dev, node_id, &node_type)) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "node id already used";
                return -EINVAL;
        }

        /* check the shaper profile id */
        if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
                shaper_profile = ice_dcf_shaper_profile_search(dev,
                        params->shaper_profile_id);
                if (!shaper_profile) {
                        error->type =
                                RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
                        error->message = "shaper profile not exist";
                        return -EINVAL;
                }
        }

        /* add the root node if it has no parent */
        if (parent_node_id == RTE_TM_NODE_ID_NULL) {
                /* check level */
                if (level_id != ICE_DCF_TM_NODE_TYPE_PORT) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                        error->message = "Wrong level";
                        return -EINVAL;
                }

                /* obviously no more than one root */
                if (hw->tm_conf.root) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                        error->message = "already have a root";
                        return -EINVAL;
                }

                /* add the root node */
                tm_node = rte_zmalloc("ice_dcf_tm_node",
                                      sizeof(struct ice_dcf_tm_node),
                                      0);
                if (!tm_node)
                        return -ENOMEM;
                tm_node->id = node_id;
                tm_node->parent = NULL;
                tm_node->reference_count = 0;
                rte_memcpy(&tm_node->params, params,
                                 sizeof(struct rte_tm_node_params));
                hw->tm_conf.root = tm_node;

                return 0;
        }

        /* TC or VSI node */
        /* check the parent node */
        parent_node = ice_dcf_tm_node_search(dev, parent_node_id,
                                          &parent_node_type);
        if (!parent_node) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "parent not exist";
                return -EINVAL;
        }
        if (parent_node_type != ICE_DCF_TM_NODE_TYPE_PORT &&
            parent_node_type != ICE_DCF_TM_NODE_TYPE_TC) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "parent is not port or TC";
                return -EINVAL;
        }
        /* check level */
        if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
            level_id != (uint32_t)(parent_node_type + 1)) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "Wrong level";
                return -EINVAL;
        }

        /* check the TC node number */
        if (parent_node_type == ICE_DCF_TM_NODE_TYPE_PORT) {
                /* check the TC number */
                if (hw->tm_conf.nb_tc_node >= tc_nb) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "too many TCs";
                        return -EINVAL;
                }
        } else {
                /* check the VSI node number */
                if (parent_node->reference_count >= hw->num_vfs) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "too many VSI for one TC";
                        return -EINVAL;
                }
                /* check the VSI node id */
                if (node_id > (uint32_t)(tc_nb * hw->num_vfs)) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "too large VSI id";
                        return -EINVAL;
                }
        }

        /* add the TC or VSI node */
        tm_node = rte_zmalloc("ice_dcf_tm_node",
                              sizeof(struct ice_dcf_tm_node),
                              0);
        if (!tm_node)
                return -ENOMEM;
        tm_node->id = node_id;
        tm_node->priority = priority;
        tm_node->weight = weight;
        tm_node->shaper_profile = shaper_profile;
        tm_node->reference_count = 0;
        tm_node->parent = parent_node;
        rte_memcpy(&tm_node->params, params,
                         sizeof(struct rte_tm_node_params));
        if (parent_node_type == ICE_DCF_TM_NODE_TYPE_PORT) {
                TAILQ_INSERT_TAIL(&hw->tm_conf.tc_list,
                                  tm_node, node);
                tm_node->tc = hw->tm_conf.nb_tc_node;
                hw->tm_conf.nb_tc_node++;
        } else {
                TAILQ_INSERT_TAIL(&hw->tm_conf.vsi_list,
                                  tm_node, node);
                tm_node->tc = parent_node->tc;
                hw->tm_conf.nb_vsi_node++;
        }
        tm_node->parent->reference_count++;

        /* increase the reference counter of the shaper profile */
        if (shaper_profile)
                shaper_profile->reference_count++;

        return 0;
}

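/* Delete a node from the hierarchy; only nodes without children can be removed. */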
static int
ice_dcf_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
                 struct rte_tm_error *error)
{
        enum ice_dcf_tm_node_type node_type = ICE_DCF_TM_NODE_TYPE_MAX;
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;
        struct ice_dcf_tm_node *tm_node;

        if (!error)
                return -EINVAL;

        /* if already committed */
        if (hw->tm_conf.committed) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "already committed";
                return -EINVAL;
        }

        if (node_id == RTE_TM_NODE_ID_NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid node id";
                return -EINVAL;
        }

        /* check if the node id exists */
        tm_node = ice_dcf_tm_node_search(dev, node_id, &node_type);
        if (!tm_node) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        /* the node should have no child */
        if (tm_node->reference_count) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message =
                        "cannot delete a node which has children";
                return -EINVAL;
        }

        /* root node */
        if (node_type == ICE_DCF_TM_NODE_TYPE_PORT) {
                if (tm_node->shaper_profile)
                        tm_node->shaper_profile->reference_count--;
                rte_free(tm_node);
                hw->tm_conf.root = NULL;
                return 0;
        }

        /* TC or VSI node */
        if (tm_node->shaper_profile)
                tm_node->shaper_profile->reference_count--;
        tm_node->parent->reference_count--;
        if (node_type == ICE_DCF_TM_NODE_TYPE_TC) {
                TAILQ_REMOVE(&hw->tm_conf.tc_list, tm_node, node);
                hw->tm_conf.nb_tc_node--;
        } else {
                TAILQ_REMOVE(&hw->tm_conf.vsi_list, tm_node, node);
                hw->tm_conf.nb_vsi_node--;
        }
        rte_free(tm_node);

        return 0;
}

static int
ice_dcf_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
                                struct rte_tm_error *error)
{
        /* min bucket size not supported */
        if (profile->committed.size) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
                error->message = "committed bucket size not supported";
                return -EINVAL;
        }
        /* max bucket size not supported */
        if (profile->peak.size) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
                error->message = "peak bucket size not supported";
                return -EINVAL;
        }
        /* length adjustment not supported */
        if (profile->pkt_length_adjust) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
                error->message = "packet length adjustment not supported";
                return -EINVAL;
        }

        return 0;
}

static int
ice_dcf_shaper_profile_add(struct rte_eth_dev *dev,
                        uint32_t shaper_profile_id,
                        struct rte_tm_shaper_params *profile,
                        struct rte_tm_error *error)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;
        struct ice_dcf_tm_shaper_profile *shaper_profile;
        int ret;

        if (!profile || !error)
                return -EINVAL;

        ret = ice_dcf_shaper_profile_param_check(profile, error);
        if (ret)
                return ret;

        shaper_profile = ice_dcf_shaper_profile_search(dev, shaper_profile_id);

        if (shaper_profile) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                error->message = "profile ID exist";
                return -EINVAL;
        }

        shaper_profile = rte_zmalloc("ice_dcf_tm_shaper_profile",
                                     sizeof(struct ice_dcf_tm_shaper_profile),
                                     0);
        if (!shaper_profile)
                return -ENOMEM;
        shaper_profile->shaper_profile_id = shaper_profile_id;
        rte_memcpy(&shaper_profile->profile, profile,
                         sizeof(struct rte_tm_shaper_params));
        TAILQ_INSERT_TAIL(&hw->tm_conf.shaper_profile_list,
                          shaper_profile, node);

        return 0;
}

static int
ice_dcf_shaper_profile_del(struct rte_eth_dev *dev,
                        uint32_t shaper_profile_id,
                        struct rte_tm_error *error)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;
        struct ice_dcf_tm_shaper_profile *shaper_profile;

        if (!error)
                return -EINVAL;

        shaper_profile = ice_dcf_shaper_profile_search(dev, shaper_profile_id);

        if (!shaper_profile) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                error->message = "profile ID not exist";
                return -EINVAL;
        }

        /* don't delete a profile if it's used by one or several nodes */
        if (shaper_profile->reference_count) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                error->message = "profile in use";
                return -EINVAL;
        }

        TAILQ_REMOVE(&hw->tm_conf.shaper_profile_list, shaper_profile, node);
        rte_free(shaper_profile);

        return 0;
}

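/* Send a bandwidth configuration list to the PF via virtchnl. */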
static int
ice_dcf_set_vf_bw(struct ice_dcf_hw *hw,
                        struct virtchnl_dcf_bw_cfg_list *vf_bw,
                        uint16_t len)
{
        struct dcf_virtchnl_cmd args;
        int err;

        memset(&args, 0, sizeof(args));
        args.v_op = VIRTCHNL_OP_DCF_CONFIG_BW;
        args.req_msg = (uint8_t *)vf_bw;
        args.req_msglen = len;
        err = ice_dcf_execute_virtchnl_cmd(hw, &args);
        if (err)
                PMD_DRV_LOG(ERR, "fail to execute command %s",
                            "VIRTCHNL_OP_DCF_CONFIG_BW");
        return err;
}

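/*
 * Validate the aggregated per-TC shaper values against the port
 * bandwidth: each TC's committed rate must fit alongside the other
 * TCs' peak rates, be at least ICE_SCHED_MIN_BW and not exceed its
 * own peak rate or the port link speed.
 */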
static int
ice_dcf_validate_tc_bw(struct virtchnl_dcf_bw_cfg_list *tc_bw,
                        uint32_t port_bw)
{
        struct virtchnl_dcf_bw_cfg *cfg;
        bool lowest_cir_mark = false;
        u32 total_peak, rest_peak;
        u32 committed, peak;
        int i;

        total_peak = 0;
        for (i = 0; i < tc_bw->num_elem; i++)
                total_peak += tc_bw->cfg[i].shaper.peak;

        for (i = 0; i < tc_bw->num_elem; i++) {
                cfg = &tc_bw->cfg[i];
                peak = cfg->shaper.peak;
                committed = cfg->shaper.committed;
                rest_peak = total_peak - peak;

                if (lowest_cir_mark && peak == 0) {
                        PMD_DRV_LOG(ERR, "Max bandwidth must be configured for TC%u",
                                cfg->tc_num);
                        return -EINVAL;
                }

                if (!lowest_cir_mark && committed)
                        lowest_cir_mark = true;

                if (committed && committed + rest_peak > port_bw) {
                        PMD_DRV_LOG(ERR, "Total value of TC%u min bandwidth and other TCs' max bandwidth %ukbps should be less than port link speed %ukbps",
                                cfg->tc_num, committed + rest_peak, port_bw);
                        return -EINVAL;
                }

                if (committed && committed < ICE_SCHED_MIN_BW) {
                        PMD_DRV_LOG(ERR, "If TC%u min Tx bandwidth is set, it cannot be less than 500Kbps",
                                cfg->tc_num);
                        return -EINVAL;
                }

                if (peak && committed > peak) {
                        PMD_DRV_LOG(ERR, "TC%u Min Tx bandwidth cannot be greater than max Tx bandwidth",
                                cfg->tc_num);
                        return -EINVAL;
                }

                if (peak > port_bw) {
                        PMD_DRV_LOG(ERR, "TC%u max Tx bandwidth %uKbps is greater than current link speed %uKbps",
                                cfg->tc_num, peak, port_bw);
                        return -EINVAL;
                }
        }

        return 0;
}

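/* Verify the QoS capability is negotiated and the configured hierarchy is complete before commit. */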
static int ice_dcf_commit_check(struct ice_dcf_hw *hw)
{
        struct ice_dcf_tm_node_list *tc_list = &hw->tm_conf.tc_list;
        struct ice_dcf_tm_node_list *vsi_list = &hw->tm_conf.vsi_list;
        struct ice_dcf_tm_node *tm_node;

        if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)) {
                PMD_DRV_LOG(ERR, "Configure VF bandwidth is not supported");
                return ICE_ERR_NOT_SUPPORTED;
        }

        /* check if all TC nodes are set */
        if (BIT(hw->tm_conf.nb_tc_node) & hw->ets_config->tc_valid_bits) {
                PMD_DRV_LOG(ERR, "Not all enabled TC nodes are set");
                return ICE_ERR_PARAM;
        }

        /* check if all VF VSI nodes are bound to all TCs */
        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (tm_node->reference_count != hw->num_vfs) {
                        PMD_DRV_LOG(ERR, "Not all VFs are binded to TC%u",
                                        tm_node->tc);
                        return ICE_ERR_PARAM;
                }
        }

        /* check if the VF VSI node ids start with 0 */
        tm_node = TAILQ_FIRST(vsi_list);
        if (tm_node->id != 0) {
                PMD_DRV_LOG(ERR, "VF vsi node id must start with 0");
                return ICE_ERR_PARAM;
        }

        return ICE_SUCCESS;
}

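/*
 * Re-apply the stored bandwidth configuration for one VF, provided the
 * port ETS configuration queried from the PF has not changed since the
 * last commit.
 */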
int
ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id)
{
        struct ice_aqc_port_ets_elem old_ets_config;
        struct ice_dcf_adapter *adapter;
        struct ice_hw *parent_hw;
        int ret, size;

        adapter = hw->eth_dev->data->dev_private;
        parent_hw = &adapter->parent.hw;

        /* store the old ets config */
        old_ets_config = *hw->ets_config;

        ice_memset(hw->ets_config, 0, sizeof(*hw->ets_config), ICE_NONDMA_MEM);
        ret = ice_aq_query_port_ets(parent_hw->port_info,
                        hw->ets_config, sizeof(*hw->ets_config),
                        NULL);
        if (ret) {
                PMD_DRV_LOG(ERR, "DCF Query Port ETS failed");
                return ret;
        }

        if (memcmp(&old_ets_config, hw->ets_config, sizeof(old_ets_config))) {
                PMD_DRV_LOG(DEBUG, "ETS config changes, do not replay BW");
                return ICE_SUCCESS;
        }

        size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
                sizeof(struct virtchnl_dcf_bw_cfg) *
                (hw->tm_conf.nb_tc_node - 1);

        ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size);
        if (ret) {
                PMD_DRV_LOG(DEBUG, "VF %u BW replay failed", vf_id);
                return ICE_ERR_CFG;
        }

        return ICE_SUCCESS;
}

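/* Zero the stored peak/committed rates for every VF and push the cleared
 * configuration to the PF.
 */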
int
ice_dcf_clear_bw(struct ice_dcf_hw *hw)
{
        uint16_t vf_id;
        uint32_t tc;
        int ret, size;

        size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
                sizeof(struct virtchnl_dcf_bw_cfg) *
                (hw->tm_conf.nb_tc_node - 1);

        for (vf_id = 0; vf_id < hw->num_vfs; vf_id++) {
                for (tc = 0; tc < hw->tm_conf.nb_tc_node; tc++) {
                        hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.peak = 0;
                        hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.committed = 0;
                }
                ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size);
                if (ret) {
                        PMD_DRV_LOG(DEBUG, "VF %u BW clear failed", vf_id);
                        return ICE_ERR_CFG;
                }
        }

        return ICE_SUCCESS;
}

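/*
 * Translate the software TM hierarchy into per-VF and per-TC virtchnl
 * bandwidth messages and send them to the PF. On failure the TM
 * configuration is cleared when clear_on_fail is set.
 */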
static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
                                 int clear_on_fail,
                                 __rte_unused struct rte_tm_error *error)
{
        struct ice_dcf_adapter *adapter = dev->data->dev_private;
        struct ice_dcf_hw *hw = &adapter->real_hw;
        struct virtchnl_dcf_bw_cfg_list *vf_bw;
        struct virtchnl_dcf_bw_cfg_list *tc_bw;
        struct ice_dcf_tm_node_list *vsi_list = &hw->tm_conf.vsi_list;
        struct rte_tm_shaper_params *profile;
        struct ice_dcf_tm_node *tm_node;
        uint32_t port_bw, cir_total;
        uint16_t size, vf_id;
        uint8_t num_elem = 0;
        int i, ret_val;

        /* check if the port is stopped */
        if (!adapter->parent.pf.adapter_stopped) {
                PMD_DRV_LOG(ERR, "Please stop port first");
                ret_val = ICE_ERR_NOT_READY;
                goto err;
        }

        ret_val = ice_dcf_commit_check(hw);
        if (ret_val)
                goto fail_clear;

        size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
                sizeof(struct virtchnl_dcf_bw_cfg) *
                (hw->tm_conf.nb_tc_node - 1);
        vf_bw = rte_zmalloc("vf_bw", size, 0);
        if (!vf_bw) {
                ret_val = ICE_ERR_NO_MEMORY;
                goto fail_clear;
        }
        tc_bw = rte_zmalloc("tc_bw", size, 0);
        if (!tc_bw) {
                ret_val = ICE_ERR_NO_MEMORY;
                goto fail_clear;
        }

        /* port bandwidth (Kbps) */
        port_bw = hw->link_speed * 1000;
        cir_total = 0;

        /* init the TC bw configuration */
        tc_bw->vf_id = ICE_DCF_SCHED_TC_NODE;
        tc_bw->node_type = VIRTCHNL_DCF_TARGET_TC_BW;
        tc_bw->num_elem = hw->tm_conf.nb_tc_node;
        for (i = 0; i < tc_bw->num_elem; i++) {
                tc_bw->cfg[i].tc_num = i;
                tc_bw->cfg[i].type = VIRTCHNL_BW_SHAPER;
                tc_bw->cfg[i].bw_type |=
                        VIRTCHNL_DCF_BW_PIR | VIRTCHNL_DCF_BW_CIR;
        }

        /* start with VF1, skip VF0 since DCF does not need to configure
         * bandwidth for itself
         */
        for (vf_id = 1; vf_id < hw->num_vfs; vf_id++) {
                num_elem = 0;
                vf_bw->vf_id = vf_id;
                vf_bw->node_type = VIRTCHNL_DCF_TARGET_VF_BW;
                TAILQ_FOREACH(tm_node, vsi_list, node) {
                        /* scan the VSI nodes that belong to this VF */
                        if (tm_node->id - hw->num_vfs * tm_node->tc != vf_id)
                                continue;
                        vf_bw->cfg[num_elem].tc_num = tm_node->tc;
                        vf_bw->cfg[num_elem].type = VIRTCHNL_BW_SHAPER;
                        if (tm_node->shaper_profile) {
                                /* convert from bytes per second to Kbps */
                                profile = &tm_node->shaper_profile->profile;
                                vf_bw->cfg[num_elem].shaper.peak =
                                profile->peak.rate / 1000 * BITS_PER_BYTE;
                                vf_bw->cfg[num_elem].shaper.committed =
                                profile->committed.rate / 1000 * BITS_PER_BYTE;
                                vf_bw->cfg[num_elem].bw_type |=
                                        VIRTCHNL_DCF_BW_PIR |
                                        VIRTCHNL_DCF_BW_CIR;
                        }

                        /* update the TC node bw configuration */
                        tc_bw->cfg[tm_node->tc].shaper.peak +=
                                vf_bw->cfg[num_elem].shaper.peak;
                        tc_bw->cfg[tm_node->tc].shaper.committed +=
                                vf_bw->cfg[num_elem].shaper.committed;

                        cir_total += vf_bw->cfg[num_elem].shaper.committed;
                        num_elem++;
                }

                vf_bw->num_elem = num_elem;
                ret_val = ice_dcf_set_vf_bw(hw, vf_bw, size);
                if (ret_val)
                        goto fail_clear;

                hw->qos_bw_cfg[vf_id] = rte_zmalloc("vf_bw_cfg", size, 0);
                if (!hw->qos_bw_cfg[vf_id]) {
                        ret_val = ICE_ERR_NO_MEMORY;
                        goto fail_clear;
                }
                /* store the bandwidth information for replay */
                ice_memcpy(hw->qos_bw_cfg[vf_id], vf_bw, size,
                           ICE_NONDMA_TO_NONDMA);
                ice_memset(vf_bw, 0, size, ICE_NONDMA_MEM);
        }

        /* check if the total CIR is larger than the port bandwidth */
        if (cir_total > port_bw) {
                PMD_DRV_LOG(ERR, "Total CIR of all VFs is larger than port bandwidth");
                ret_val = ICE_ERR_PARAM;
                goto fail_clear;
        }

        /* check and commit the TC node bw configuration */
        ret_val = ice_dcf_validate_tc_bw(tc_bw, port_bw);
        if (ret_val)
                goto fail_clear;
        ret_val = ice_dcf_set_vf_bw(hw, tc_bw, size);
        if (ret_val)
                goto fail_clear;

        /* store the TC node bw configuration */
        hw->qos_bw_cfg[ICE_DCF_VFID] = rte_zmalloc("tc_bw_cfg", size, 0);
        if (!hw->qos_bw_cfg[ICE_DCF_VFID]) {
                ret_val = ICE_ERR_NO_MEMORY;
                goto fail_clear;
        }
        ice_memcpy(hw->qos_bw_cfg[ICE_DCF_VFID], tc_bw, size,
                   ICE_NONDMA_TO_NONDMA);

        hw->tm_conf.committed = true;
        return ret_val;

fail_clear:
        /* clear all the traffic manager configuration */
        if (clear_on_fail) {
                ice_dcf_tm_conf_uninit(dev);
                ice_dcf_tm_conf_init(dev);
        }
err:
        return ret_val;
}