dpdk/drivers/net/ipn3ke/ipn3ke_tm.c
<<
>>
Prefs
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2019 Intel Corporation
   3 */
   4
   5#include <stdint.h>
   6#include <stdlib.h>
   7#include <string.h>
   8
   9#include <rte_bus_pci.h>
  10#include <rte_ethdev.h>
  11#include <rte_pci.h>
  12#include <rte_malloc.h>
  13#include <rte_tm_driver.h>
  14
  15#include <rte_mbuf.h>
  16#include <rte_sched.h>
  17#include <ethdev_driver.h>
  18
  19#include <rte_io.h>
  20#include <rte_rawdev.h>
  21#include <rte_rawdev_pmd.h>
  22#include <rte_bus_ifpga.h>
  23#include <ifpga_logs.h>
  24
  25#include "ipn3ke_rawdev_api.h"
  26#include "ipn3ke_flow.h"
  27#include "ipn3ke_logs.h"
  28#include "ipn3ke_ethdev.h"
  29
  30#define BYTES_IN_MBPS     (1000 * 1000 / 8)
  31#define SUBPORT_TC_PERIOD 10
  32#define PIPE_TC_PERIOD    40
  33
  34struct ipn3ke_tm_shaper_params_range_type {
  35        uint32_t m1;
  36        uint32_t m2;
  37        uint32_t exp;
  38        uint32_t exp2;
  39        uint32_t low;
  40        uint32_t high;
  41};
  42struct ipn3ke_tm_shaper_params_range_type ipn3ke_tm_shaper_params_rang[] = {
  43        {  0,       1,     0,        1,           0,            4},
  44        {  2,       3,     0,        1,           8,           12},
  45        {  4,       7,     0,        1,          16,           28},
  46        {  8,      15,     0,        1,          32,           60},
  47        { 16,      31,     0,        1,          64,          124},
  48        { 32,      63,     0,        1,         128,          252},
  49        { 64,     127,     0,        1,         256,          508},
  50        {128,     255,     0,        1,         512,         1020},
  51        {256,     511,     0,        1,        1024,         2044},
  52        {512,    1023,     0,        1,        2048,         4092},
  53        {512,    1023,     1,        2,        4096,         8184},
  54        {512,    1023,     2,        4,        8192,        16368},
  55        {512,    1023,     3,        8,       16384,        32736},
  56        {512,    1023,     4,       16,       32768,        65472},
  57        {512,    1023,     5,       32,       65536,       130944},
  58        {512,    1023,     6,       64,      131072,       261888},
  59        {512,    1023,     7,      128,      262144,       523776},
  60        {512,    1023,     8,      256,      524288,      1047552},
  61        {512,    1023,     9,      512,     1048576,      2095104},
  62        {512,    1023,    10,     1024,     2097152,      4190208},
  63        {512,    1023,    11,     2048,     4194304,      8380416},
  64        {512,    1023,    12,     4096,     8388608,     16760832},
  65        {512,    1023,    13,     8192,    16777216,     33521664},
  66        {512,    1023,    14,    16384,    33554432,     67043328},
  67        {512,    1023,    15,    32768,    67108864,    134086656},
  68};
  69
  70#define IPN3KE_TM_SHAPER_RANGE_NUM (sizeof(ipn3ke_tm_shaper_params_rang) / \
  71        sizeof(struct ipn3ke_tm_shaper_params_range_type))
  72
  73#define IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX \
  74        (ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)
  75
  76#define IPN3KE_TM_SHAPER_PEAK_RATE_MAX \
  77        (ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)
  78
  79int
  80ipn3ke_hw_tm_init(struct ipn3ke_hw *hw)
  81{
  82#define SCRATCH_DATA 0xABCDEF
  83        struct ipn3ke_tm_node *nodes;
  84        struct ipn3ke_tm_tdrop_profile *tdrop_profile;
  85        int node_num;
  86        int i;
  87
  88        if (hw == NULL)
  89                return -EINVAL;
  90#if IPN3KE_TM_SCRATCH_RW
  91        uint32_t scratch_data;
  92        IPN3KE_MASK_WRITE_REG(hw,
  93                                        IPN3KE_TM_SCRATCH,
  94                                        0,
  95                                        SCRATCH_DATA,
  96                                        0xFFFFFFFF);
  97        scratch_data = IPN3KE_MASK_READ_REG(hw,
  98                                        IPN3KE_TM_SCRATCH,
  99                                        0,
 100                                        0xFFFFFFFF);
 101        if (scratch_data != SCRATCH_DATA)
 102                return -EINVAL;
 103#endif
 104        /* alloc memory for all hierarchy nodes */
 105        node_num = hw->port_num +
 106                IPN3KE_TM_VT_NODE_NUM +
 107                IPN3KE_TM_COS_NODE_NUM;
 108
 109        nodes = rte_zmalloc("ipn3ke_tm_nodes",
 110                        sizeof(struct ipn3ke_tm_node) * node_num,
 111                        0);
 112        if (!nodes)
 113                return -ENOMEM;
 114
 115        /* alloc memory for Tail Drop Profile */
 116        tdrop_profile = rte_zmalloc("ipn3ke_tm_tdrop_profile",
 117                                sizeof(struct ipn3ke_tm_tdrop_profile) *
 118                                IPN3KE_TM_TDROP_PROFILE_NUM,
 119                                0);
 120        if (!tdrop_profile) {
 121                rte_free(nodes);
 122                return -ENOMEM;
 123        }
 124
 125        hw->nodes = nodes;
 126        hw->port_nodes = nodes;
 127        hw->vt_nodes = hw->port_nodes + hw->port_num;
 128        hw->cos_nodes = hw->vt_nodes + IPN3KE_TM_VT_NODE_NUM;
 129        hw->tdrop_profile = tdrop_profile;
 130        hw->tdrop_profile_num = IPN3KE_TM_TDROP_PROFILE_NUM;
 131
 132        for (i = 0, nodes = hw->port_nodes;
 133                i < hw->port_num;
 134                i++, nodes++) {
 135                nodes->node_index = i;
 136                nodes->level = IPN3KE_TM_NODE_LEVEL_PORT;
 137                nodes->tm_id = RTE_TM_NODE_ID_NULL;
 138                nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
 139                nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
 140                nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
 141                nodes->weight = 0;
 142                nodes->parent_node = NULL;
 143                nodes->shaper_profile.valid = 0;
 144                nodes->tdrop_profile = NULL;
 145                nodes->n_children = 0;
 146                TAILQ_INIT(&nodes->children_node_list);
 147        }
 148
 149        for (i = 0, nodes = hw->vt_nodes;
 150                i < IPN3KE_TM_VT_NODE_NUM;
 151                i++, nodes++) {
 152                nodes->node_index = i;
 153                nodes->level = IPN3KE_TM_NODE_LEVEL_VT;
 154                nodes->tm_id = RTE_TM_NODE_ID_NULL;
 155                nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
 156                nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
 157                nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
 158                nodes->weight = 0;
 159                nodes->parent_node = NULL;
 160                nodes->shaper_profile.valid = 0;
 161                nodes->tdrop_profile = NULL;
 162                nodes->n_children = 0;
 163                TAILQ_INIT(&nodes->children_node_list);
 164        }
 165
 166        for (i = 0, nodes = hw->cos_nodes;
 167                i < IPN3KE_TM_COS_NODE_NUM;
 168                i++, nodes++) {
 169                nodes->node_index = i;
 170                nodes->level = IPN3KE_TM_NODE_LEVEL_COS;
 171                nodes->tm_id = RTE_TM_NODE_ID_NULL;
 172                nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
 173                nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
 174                nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
 175                nodes->weight = 0;
 176                nodes->parent_node = NULL;
 177                nodes->shaper_profile.valid = 0;
 178                nodes->tdrop_profile = NULL;
 179                nodes->n_children = 0;
 180                TAILQ_INIT(&nodes->children_node_list);
 181        }
 182
 183        for (i = 0, tdrop_profile = hw->tdrop_profile;
 184                i < IPN3KE_TM_TDROP_PROFILE_NUM;
 185                i++, tdrop_profile++) {
 186                tdrop_profile->tdrop_profile_id = i;
 187                tdrop_profile->n_users = 0;
 188                tdrop_profile->valid = 0;
 189        }
 190
 191        return 0;
 192}
 193
 194void
 195ipn3ke_tm_init(struct ipn3ke_rpst *rpst)
 196{
 197        struct ipn3ke_tm_internals *tm;
 198        struct ipn3ke_tm_node *port_node;
 199
 200        tm = &rpst->tm;
 201
 202        port_node = &rpst->hw->port_nodes[rpst->port_id];
 203        tm->h.port_node = port_node;
 204
 205        tm->h.n_shaper_profiles = 0;
 206        tm->h.n_tdrop_profiles = 0;
 207        tm->h.n_vt_nodes = 0;
 208        tm->h.n_cos_nodes = 0;
 209
 210        tm->h.port_commit_node = NULL;
 211        TAILQ_INIT(&tm->h.vt_commit_node_list);
 212        TAILQ_INIT(&tm->h.cos_commit_node_list);
 213
 214        tm->hierarchy_frozen = 0;
 215        tm->tm_started = 1;
 216        tm->tm_id = rpst->port_id;
 217}
 218
 219static struct ipn3ke_tm_shaper_profile *
 220ipn3ke_hw_tm_shaper_profile_search(struct ipn3ke_hw *hw,
 221        uint32_t shaper_profile_id, struct rte_tm_error *error)
 222{
 223        struct ipn3ke_tm_shaper_profile *sp = NULL;
 224        uint32_t level_of_node_id;
 225        uint32_t node_index;
 226
 227        /* Shaper profile ID must not be NONE. */
 228        if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE) {
 229                rte_tm_error_set(error,
 230                                EINVAL,
 231                                RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
 232                                NULL,
 233                                rte_strerror(EINVAL));
 234
 235                return NULL;
 236        }
 237
 238        level_of_node_id = shaper_profile_id / IPN3KE_TM_NODE_LEVEL_MOD;
 239        node_index = shaper_profile_id % IPN3KE_TM_NODE_LEVEL_MOD;
 240
 241        switch (level_of_node_id) {
 242        case IPN3KE_TM_NODE_LEVEL_PORT:
 243                if (node_index >= hw->port_num)
 244                        rte_tm_error_set(error,
 245                                        EEXIST,
 246                                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
 247                                        NULL,
 248                                        rte_strerror(EEXIST));
 249                else
 250                        sp = &hw->port_nodes[node_index].shaper_profile;
 251
 252                break;
 253
 254        case IPN3KE_TM_NODE_LEVEL_VT:
 255                if (node_index >= IPN3KE_TM_VT_NODE_NUM)
 256                        rte_tm_error_set(error,
 257                                        EEXIST,
 258                                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
 259                                        NULL,
 260                                        rte_strerror(EEXIST));
 261                else
 262                        sp = &hw->vt_nodes[node_index].shaper_profile;
 263
 264                break;
 265
 266        case IPN3KE_TM_NODE_LEVEL_COS:
 267                if (node_index >= IPN3KE_TM_COS_NODE_NUM)
 268                        rte_tm_error_set(error,
 269                                        EEXIST,
 270                                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
 271                                        NULL,
 272                                        rte_strerror(EEXIST));
 273                else
 274                        sp = &hw->cos_nodes[node_index].shaper_profile;
 275
 276                break;
 277        default:
 278                rte_tm_error_set(error,
 279                                EEXIST,
 280                                RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
 281                                NULL,
 282                                rte_strerror(EEXIST));
 283        }
 284
 285        return sp;
 286}
 287
 288static struct ipn3ke_tm_tdrop_profile *
 289ipn3ke_hw_tm_tdrop_profile_search(struct ipn3ke_hw *hw,
 290        uint32_t tdrop_profile_id)
 291{
 292        struct ipn3ke_tm_tdrop_profile *tdrop_profile;
 293
 294        if (tdrop_profile_id >= hw->tdrop_profile_num)
 295                return NULL;
 296
 297        tdrop_profile = &hw->tdrop_profile[tdrop_profile_id];
 298        if (tdrop_profile->valid)
 299                return tdrop_profile;
 300
 301        return NULL;
 302}
 303
 304static struct ipn3ke_tm_node *
 305ipn3ke_hw_tm_node_search(struct ipn3ke_hw *hw, uint32_t tm_id,
 306        uint32_t node_id, uint32_t state_mask)
 307{
 308        uint32_t level_of_node_id;
 309        uint32_t node_index;
 310        struct ipn3ke_tm_node *n;
 311
 312        level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
 313        node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
 314
 315        switch (level_of_node_id) {
 316        case IPN3KE_TM_NODE_LEVEL_PORT:
 317                if (node_index >= hw->port_num)
 318                        return NULL;
 319                n = &hw->port_nodes[node_index];
 320
 321                break;
 322        case IPN3KE_TM_NODE_LEVEL_VT:
 323                if (node_index >= IPN3KE_TM_VT_NODE_NUM)
 324                        return NULL;
 325                n = &hw->vt_nodes[node_index];
 326
 327                break;
 328        case IPN3KE_TM_NODE_LEVEL_COS:
 329                if (node_index >= IPN3KE_TM_COS_NODE_NUM)
 330                        return NULL;
 331                n = &hw->cos_nodes[node_index];
 332
 333                break;
 334        default:
 335                return NULL;
 336        }
 337
 338        /* Check tm node status */
 339        if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE) {
 340                if (n->tm_id != RTE_TM_NODE_ID_NULL ||
 341                n->parent_node_id != RTE_TM_NODE_ID_NULL ||
 342                n->parent_node != NULL ||
 343                n->n_children > 0) {
 344                        IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
 345                }
 346        } else if (n->node_state < IPN3KE_TM_NODE_STATE_MAX) {
 347                if (n->tm_id == RTE_TM_NODE_ID_NULL ||
 348                (level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
 349                        n->parent_node_id == RTE_TM_NODE_ID_NULL) ||
 350                (level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
 351                        n->parent_node == NULL)) {
 352                        IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
 353                }
 354        } else {
 355                IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
 356        }
 357
 358        if (IPN3KE_BIT_ISSET(state_mask, n->node_state)) {
 359                if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE)
 360                        return n;
 361                else if (n->tm_id == tm_id)
 362                        return n;
 363                else
 364                        return NULL;
 365        } else {
 366                return NULL;
 367        }
 368}
 369
 370/* Traffic manager node type get */
 371static int
 372ipn3ke_pmd_tm_node_type_get(struct rte_eth_dev *dev,
 373        uint32_t node_id, int *is_leaf, struct rte_tm_error *error)
 374{
 375        struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
 376        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
 377        uint32_t tm_id;
 378        struct ipn3ke_tm_node *node;
 379        uint32_t state_mask;
 380
 381        if (is_leaf == NULL)
 382                return -rte_tm_error_set(error,
 383                                        EINVAL,
 384                                        RTE_TM_ERROR_TYPE_UNSPECIFIED,
 385                                        NULL,
 386                                        rte_strerror(EINVAL));
 387
 388        tm_id = tm->tm_id;
 389
 390        state_mask = 0;
 391        IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
 392        node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
 393        if (node_id == RTE_TM_NODE_ID_NULL ||
 394                node == NULL)
 395                return -rte_tm_error_set(error,
 396                                        EINVAL,
 397                                        RTE_TM_ERROR_TYPE_NODE_ID,
 398                                        NULL,
 399                                        rte_strerror(EINVAL));
 400
 401        *is_leaf = (node->level == IPN3KE_TM_NODE_LEVEL_COS) ? 1 : 0;
 402
 403        return 0;
 404}
 405
 406#define WRED_SUPPORTED    0
 407
 408#define STATS_MASK_DEFAULT \
 409        (RTE_TM_STATS_N_PKTS | \
 410        RTE_TM_STATS_N_BYTES | \
 411        RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
 412        RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
 413
 414#define STATS_MASK_QUEUE \
 415        (STATS_MASK_DEFAULT | RTE_TM_STATS_N_PKTS_QUEUED)
 416
 417/* Traffic manager capabilities get */
 418static int
 419ipn3ke_tm_capabilities_get(__rte_unused struct rte_eth_dev *dev,
 420        struct rte_tm_capabilities *cap, struct rte_tm_error *error)
 421{
 422        if (cap == NULL)
 423                return -rte_tm_error_set(error,
 424                                        EINVAL,
 425                                        RTE_TM_ERROR_TYPE_CAPABILITIES,
 426                                        NULL,
 427                                        rte_strerror(EINVAL));
 428
 429        /* set all the parameters to 0 first. */
 430        memset(cap, 0, sizeof(*cap));
 431
 432        cap->n_nodes_max = 1 + IPN3KE_TM_COS_NODE_NUM + IPN3KE_TM_VT_NODE_NUM;
 433        cap->n_levels_max = IPN3KE_TM_NODE_LEVEL_MAX;
 434
 435        cap->non_leaf_nodes_identical = 0;
 436        cap->leaf_nodes_identical = 1;
 437
 438        cap->shaper_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
 439        cap->shaper_private_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
 440        cap->shaper_private_dual_rate_n_max = 0;
 441        cap->shaper_private_rate_min = 1;
 442        cap->shaper_private_rate_max = 1 + IPN3KE_TM_VT_NODE_NUM;
 443        cap->shaper_private_packet_mode_supported = 0;
 444        cap->shaper_private_byte_mode_supported = 1;
 445
 446        cap->shaper_shared_n_max = 0;
 447        cap->shaper_shared_n_nodes_per_shaper_max = 0;
 448        cap->shaper_shared_n_shapers_per_node_max = 0;
 449        cap->shaper_shared_dual_rate_n_max = 0;
 450        cap->shaper_shared_rate_min = 0;
 451        cap->shaper_shared_rate_max = 0;
 452        cap->shaper_shared_packet_mode_supported = 0;
 453        cap->shaper_shared_byte_mode_supported = 0;
 454
 455        cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
 456        cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
 457
 458        cap->sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
 459        cap->sched_sp_n_priorities_max = 3;
 460        cap->sched_wfq_n_children_per_group_max = UINT32_MAX;
 461        cap->sched_wfq_n_groups_max = 1;
 462        cap->sched_wfq_weight_max = UINT32_MAX;
 463        cap->sched_wfq_packet_mode_supported = 0;
 464        cap->sched_wfq_byte_mode_supported = 1;
 465
 466        cap->cman_wred_packet_mode_supported = 0;
 467        cap->cman_wred_byte_mode_supported = 0;
 468        cap->cman_head_drop_supported = 0;
 469        cap->cman_wred_context_n_max = 0;
 470        cap->cman_wred_context_private_n_max = 0;
 471        cap->cman_wred_context_shared_n_max = 0;
 472        cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
 473        cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
 474
 475        /**
 476         * cap->mark_vlan_dei_supported = {0, 0, 0};
 477         * cap->mark_ip_ecn_tcp_supported = {0, 0, 0};
 478         * cap->mark_ip_ecn_sctp_supported = {0, 0, 0};
 479         * cap->mark_ip_dscp_supported = {0, 0, 0};
 480         */
 481
 482        cap->dynamic_update_mask = 0;
 483
 484        cap->stats_mask = 0;
 485
 486        return 0;
 487}
 488
 489/* Traffic manager level capabilities get */
 490static int
 491ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
 492        uint32_t level_id, struct rte_tm_level_capabilities *cap,
 493        struct rte_tm_error *error)
 494{
 495        struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
 496
 497        if (cap == NULL)
 498                return -rte_tm_error_set(error,
 499                                        EINVAL,
 500                                        RTE_TM_ERROR_TYPE_CAPABILITIES,
 501                                        NULL,
 502                                        rte_strerror(EINVAL));
 503
 504        if (level_id >= IPN3KE_TM_NODE_LEVEL_MAX)
 505                return -rte_tm_error_set(error,
 506                                        EINVAL,
 507                                        RTE_TM_ERROR_TYPE_LEVEL_ID,
 508                                        NULL,
 509                                        rte_strerror(EINVAL));
 510
 511        /* set all the parameters to 0 first. */
 512        memset(cap, 0, sizeof(*cap));
 513
 514        switch (level_id) {
 515        case IPN3KE_TM_NODE_LEVEL_PORT:
 516                cap->n_nodes_max = hw->port_num;
 517                cap->n_nodes_nonleaf_max = IPN3KE_TM_VT_NODE_NUM;
 518                cap->n_nodes_leaf_max = 0;
 519                cap->non_leaf_nodes_identical = 0;
 520                cap->leaf_nodes_identical = 0;
 521
 522                cap->nonleaf.shaper_private_supported = 0;
 523                cap->nonleaf.shaper_private_dual_rate_supported = 0;
 524                cap->nonleaf.shaper_private_rate_min = 1;
 525                cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
 526                cap->nonleaf.shaper_private_packet_mode_supported = 0;
 527                cap->nonleaf.shaper_private_byte_mode_supported = 1;
 528                cap->nonleaf.shaper_shared_n_max = 0;
 529                cap->nonleaf.shaper_shared_packet_mode_supported = 0;
 530                cap->nonleaf.shaper_shared_byte_mode_supported = 0;
 531
 532                cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
 533                cap->nonleaf.sched_sp_n_priorities_max = 1;
 534                cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
 535                cap->nonleaf.sched_wfq_n_groups_max = 0;
 536                cap->nonleaf.sched_wfq_weight_max = 0;
 537                cap->nonleaf.sched_wfq_packet_mode_supported = 0;
 538                cap->nonleaf.sched_wfq_byte_mode_supported = 0;
 539
 540                cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
 541                break;
 542
 543        case IPN3KE_TM_NODE_LEVEL_VT:
 544                cap->n_nodes_max = IPN3KE_TM_VT_NODE_NUM;
 545                cap->n_nodes_nonleaf_max = IPN3KE_TM_COS_NODE_NUM;
 546                cap->n_nodes_leaf_max = 0;
 547                cap->non_leaf_nodes_identical = 0;
 548                cap->leaf_nodes_identical = 0;
 549
 550                cap->nonleaf.shaper_private_supported = 0;
 551                cap->nonleaf.shaper_private_dual_rate_supported = 0;
 552                cap->nonleaf.shaper_private_rate_min = 1;
 553                cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
 554                cap->nonleaf.shaper_private_packet_mode_supported = 0;
 555                cap->nonleaf.shaper_private_byte_mode_supported = 1;
 556                cap->nonleaf.shaper_shared_n_max = 0;
 557                cap->nonleaf.shaper_shared_packet_mode_supported = 0;
 558                cap->nonleaf.shaper_shared_byte_mode_supported = 0;
 559
 560                cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
 561                cap->nonleaf.sched_sp_n_priorities_max = 1;
 562                cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
 563                cap->nonleaf.sched_wfq_n_groups_max = 0;
 564                cap->nonleaf.sched_wfq_weight_max = 0;
 565                cap->nonleaf.sched_wfq_packet_mode_supported = 0;
 566                cap->nonleaf.sched_wfq_byte_mode_supported = 0;
 567
 568                cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
 569                break;
 570
 571        case IPN3KE_TM_NODE_LEVEL_COS:
 572                cap->n_nodes_max = IPN3KE_TM_COS_NODE_NUM;
 573                cap->n_nodes_nonleaf_max = 0;
 574                cap->n_nodes_leaf_max = IPN3KE_TM_COS_NODE_NUM;
 575                cap->non_leaf_nodes_identical = 0;
 576                cap->leaf_nodes_identical = 0;
 577
 578                cap->leaf.shaper_private_supported = 0;
 579                cap->leaf.shaper_private_dual_rate_supported = 0;
 580                cap->leaf.shaper_private_rate_min = 0;
 581                cap->leaf.shaper_private_rate_max = 0;
 582                cap->leaf.shaper_private_packet_mode_supported = 0;
 583                cap->leaf.shaper_private_byte_mode_supported = 1;
 584                cap->leaf.shaper_shared_n_max = 0;
 585                cap->leaf.shaper_shared_packet_mode_supported = 0;
 586                cap->leaf.shaper_shared_byte_mode_supported = 0;
 587
 588                cap->leaf.cman_head_drop_supported = 0;
 589                cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
 590                cap->leaf.cman_wred_byte_mode_supported = 0;
 591                cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
 592                cap->leaf.cman_wred_context_shared_n_max = 0;
 593
 594                cap->leaf.stats_mask = STATS_MASK_QUEUE;
 595                break;
 596
 597        default:
 598                return -rte_tm_error_set(error,
 599                                        EINVAL,
 600                                        RTE_TM_ERROR_TYPE_LEVEL_ID,
 601                                        NULL,
 602                                        rte_strerror(EINVAL));
 603                break;
 604        }
 605
 606        return 0;
 607}
 608
 609/* Traffic manager node capabilities get */
 610static int
 611ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
 612        uint32_t node_id, struct rte_tm_node_capabilities *cap,
 613        struct rte_tm_error *error)
 614{
 615        struct ipn3ke_rpst *representor = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
 616        struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
 617        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
 618        uint32_t tm_id;
 619        struct ipn3ke_tm_node *tm_node;
 620        uint32_t state_mask;
 621
 622        if (cap == NULL)
 623                return -rte_tm_error_set(error,
 624                                        EINVAL,
 625                                        RTE_TM_ERROR_TYPE_CAPABILITIES,
 626                                        NULL,
 627                                        rte_strerror(EINVAL));
 628
 629        tm_id = tm->tm_id;
 630
 631        state_mask = 0;
 632        IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
 633        tm_node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
 634        if (tm_node == NULL)
 635                return -rte_tm_error_set(error,
 636                                        EINVAL,
 637                                        RTE_TM_ERROR_TYPE_NODE_ID,
 638                                        NULL,
 639                                        rte_strerror(EINVAL));
 640
 641        if (tm_node->tm_id != representor->port_id)
 642                return -rte_tm_error_set(error,
 643                                        EINVAL,
 644                                        RTE_TM_ERROR_TYPE_NODE_ID,
 645                                        NULL,
 646                                        rte_strerror(EINVAL));
 647
 648        /* set all the parameters to 0 first. */
 649        memset(cap, 0, sizeof(*cap));
 650
 651        switch (tm_node->level) {
 652        case IPN3KE_TM_NODE_LEVEL_PORT:
 653                cap->shaper_private_supported = 1;
 654                cap->shaper_private_dual_rate_supported = 0;
 655                cap->shaper_private_rate_min = 1;
 656                cap->shaper_private_rate_max = UINT32_MAX;
 657                cap->shaper_private_packet_mode_supported = 0;
 658                cap->shaper_private_byte_mode_supported = 1;
 659                cap->shaper_shared_n_max = 0;
 660                cap->shaper_shared_packet_mode_supported = 0;
 661                cap->shaper_shared_byte_mode_supported = 0;
 662
 663                cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
 664                cap->nonleaf.sched_sp_n_priorities_max = 1;
 665                cap->nonleaf.sched_wfq_n_children_per_group_max =
 666                        IPN3KE_TM_VT_NODE_NUM;
 667                cap->nonleaf.sched_wfq_n_groups_max = 1;
 668                cap->nonleaf.sched_wfq_weight_max = 1;
 669                cap->nonleaf.sched_wfq_packet_mode_supported = 0;
 670                cap->nonleaf.sched_wfq_byte_mode_supported = 0;
 671
 672                cap->stats_mask = STATS_MASK_DEFAULT;
 673                break;
 674
 675        case IPN3KE_TM_NODE_LEVEL_VT:
 676                cap->shaper_private_supported = 1;
 677                cap->shaper_private_dual_rate_supported = 0;
 678                cap->shaper_private_rate_min = 1;
 679                cap->shaper_private_rate_max = UINT32_MAX;
 680                cap->shaper_private_packet_mode_supported = 0;
 681                cap->shaper_private_byte_mode_supported = 1;
 682                cap->shaper_shared_n_max = 0;
 683                cap->shaper_shared_packet_mode_supported = 0;
 684                cap->shaper_shared_byte_mode_supported = 0;
 685
 686                cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
 687                cap->nonleaf.sched_sp_n_priorities_max = 1;
 688                cap->nonleaf.sched_wfq_n_children_per_group_max =
 689                        IPN3KE_TM_COS_NODE_NUM;
 690                cap->nonleaf.sched_wfq_n_groups_max = 1;
 691                cap->nonleaf.sched_wfq_weight_max = 1;
 692                cap->nonleaf.sched_wfq_packet_mode_supported = 0;
 693                cap->nonleaf.sched_wfq_byte_mode_supported = 0;
 694
 695                cap->stats_mask = STATS_MASK_DEFAULT;
 696                break;
 697
 698        case IPN3KE_TM_NODE_LEVEL_COS:
 699                cap->shaper_private_supported = 0;
 700                cap->shaper_private_dual_rate_supported = 0;
 701                cap->shaper_private_rate_min = 0;
 702                cap->shaper_private_rate_max = 0;
 703                cap->shaper_private_packet_mode_supported = 0;
 704                cap->shaper_private_byte_mode_supported = 0;
 705                cap->shaper_shared_n_max = 0;
 706                cap->shaper_shared_packet_mode_supported = 0;
 707                cap->shaper_shared_byte_mode_supported = 0;
 708
 709                cap->leaf.cman_head_drop_supported = 0;
 710                cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
 711                cap->leaf.cman_wred_byte_mode_supported = 0;
 712                cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
 713                cap->leaf.cman_wred_context_shared_n_max = 0;
 714
 715                cap->stats_mask = STATS_MASK_QUEUE;
 716                break;
 717        default:
 718                break;
 719        }
 720
 721        return 0;
 722}
 723
 724static int
 725ipn3ke_tm_shaper_parame_trans(struct rte_tm_shaper_params *profile,
 726        struct ipn3ke_tm_shaper_profile *local_profile,
 727        const struct ipn3ke_tm_shaper_params_range_type *ref_data)
 728{
 729        uint32_t i;
 730        const struct ipn3ke_tm_shaper_params_range_type *r;
 731        uint64_t rate;
 732
 733        rate = profile->peak.rate;
 734        for (i = 0, r = ref_data; i < IPN3KE_TM_SHAPER_RANGE_NUM; i++, r++) {
 735                if (rate >= r->low &&
 736                rate <= r->high) {
 737                        local_profile->m = (rate / 4) / r->exp2;
 738                        local_profile->e = r->exp;
 739                        local_profile->rate = rate;
 740
 741                        return 0;
 742                }
 743        }
 744
 745        return -1;
 746}
 747
 748static int
 749ipn3ke_tm_shaper_profile_add(struct rte_eth_dev *dev,
 750        uint32_t shaper_profile_id, struct rte_tm_shaper_params *profile,
 751        struct rte_tm_error *error)
 752{
 753        struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
 754        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
 755        struct ipn3ke_tm_shaper_profile *sp;
 756
 757        /* Shaper profile must not exist. */
 758        sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
 759        if (!sp || (sp && sp->valid))
 760                return -rte_tm_error_set(error,
 761                                        EEXIST,
 762                                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
 763                                        NULL,
 764                                        rte_strerror(EEXIST));
 765
 766        /* Profile must not be NULL. */
 767        if (profile == NULL)
 768                return -rte_tm_error_set(error,
 769                                        EINVAL,
 770                                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
 771                                        NULL,
 772                                        rte_strerror(EINVAL));
 773
 774        /* Peak rate: non-zero, 32-bit */
 775        if (profile->peak.rate == 0 ||
 776                profile->peak.rate > IPN3KE_TM_SHAPER_PEAK_RATE_MAX)
 777                return -rte_tm_error_set(error,
 778                                EINVAL,
 779                                RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
 780                                NULL,
 781                                rte_strerror(EINVAL));
 782
 783        /* Peak size: non-zero, 32-bit */
 784        if (profile->peak.size != 0)
 785                return -rte_tm_error_set(error,
 786                                EINVAL,
 787                                RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
 788                                NULL,
 789                                rte_strerror(EINVAL));
 790
 791        /* Dual-rate profiles are not supported. */
 792        if (profile->committed.rate > IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX)
 793                return -rte_tm_error_set(error,
 794                                EINVAL,
 795                                RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
 796                                NULL,
 797                                rte_strerror(EINVAL));
 798
 799        /* Packet length adjust: 24 bytes */
 800        if (profile->pkt_length_adjust != 0)
 801                return -rte_tm_error_set(error,
 802                                EINVAL,
 803                                RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
 804                                NULL,
 805                                rte_strerror(EINVAL));
 806
 807        if (ipn3ke_tm_shaper_parame_trans(profile,
 808                                        sp,
 809                                        ipn3ke_tm_shaper_params_rang)) {
 810                return -rte_tm_error_set(error,
 811                                EINVAL,
 812                                RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
 813                                NULL,
 814                                rte_strerror(EINVAL));
 815        } else {
 816                sp->valid = 1;
 817                rte_memcpy(&sp->params, profile, sizeof(sp->params));
 818        }
 819
 820        tm->h.n_shaper_profiles++;
 821
 822        return 0;
 823}
 824
 825/* Traffic manager shaper profile delete */
 826static int
 827ipn3ke_tm_shaper_profile_delete(struct rte_eth_dev *dev,
 828        uint32_t shaper_profile_id, struct rte_tm_error *error)
 829{
 830        struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
 831        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
 832        struct ipn3ke_tm_shaper_profile *sp;
 833
 834        /* Check existing */
 835        sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
 836        if (!sp || (sp && !sp->valid))
 837                return -rte_tm_error_set(error,
 838                                        EINVAL,
 839                                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
 840                                        NULL,
 841                                        rte_strerror(EINVAL));
 842
 843        sp->valid = 0;
 844        tm->h.n_shaper_profiles--;
 845
 846        return 0;
 847}
 848
 849static int
 850ipn3ke_tm_tdrop_profile_check(__rte_unused struct rte_eth_dev *dev,
 851        uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
 852        struct rte_tm_error *error)
 853{
 854        enum rte_color color;
 855
 856        /* TDROP profile ID must not be NONE. */
 857        if (tdrop_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
 858                return -rte_tm_error_set(error,
 859                                        EINVAL,
 860                                        RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
 861                                        NULL,
 862                                        rte_strerror(EINVAL));
 863
 864        /* Profile must not be NULL. */
 865        if (profile == NULL)
 866                return -rte_tm_error_set(error,
 867                                        EINVAL,
 868                                        RTE_TM_ERROR_TYPE_WRED_PROFILE,
 869                                        NULL,
 870                                        rte_strerror(EINVAL));
 871
 872        /* TDROP profile should be in packet mode */
 873        if (profile->packet_mode != 0)
 874                return -rte_tm_error_set(error,
 875                                        ENOTSUP,
 876                                        RTE_TM_ERROR_TYPE_WRED_PROFILE,
 877                                        NULL,
 878                                        rte_strerror(ENOTSUP));
 879
 880        /* min_th <= max_th, max_th > 0  */
 881        for (color = RTE_COLOR_GREEN; color <= RTE_COLOR_GREEN; color++) {
 882                uint64_t min_th = profile->red_params[color].min_th;
 883                uint64_t max_th = profile->red_params[color].max_th;
 884
 885                if (((min_th >> IPN3KE_TDROP_TH1_SHIFT) >>
 886                                IPN3KE_TDROP_TH1_SHIFT) ||
 887                        max_th != 0)
 888                        return -rte_tm_error_set(error,
 889                                                EINVAL,
 890                                                RTE_TM_ERROR_TYPE_WRED_PROFILE,
 891                                                NULL,
 892                                                rte_strerror(EINVAL));
 893        }
 894
 895        return 0;
 896}
 897
 898static int
 899ipn3ke_hw_tm_tdrop_wr(struct ipn3ke_hw *hw,
 900                                struct ipn3ke_tm_tdrop_profile *tp)
 901{
 902        if (tp->valid) {
 903                IPN3KE_MASK_WRITE_REG(hw,
 904                                IPN3KE_CCB_PROFILE_MS,
 905                                0,
 906                                tp->th2,
 907                                IPN3KE_CCB_PROFILE_MS_MASK);
 908
 909                IPN3KE_MASK_WRITE_REG(hw,
 910                                IPN3KE_CCB_PROFILE_P,
 911                                tp->tdrop_profile_id,
 912                                tp->th1,
 913                                IPN3KE_CCB_PROFILE_MASK);
 914        } else {
 915                IPN3KE_MASK_WRITE_REG(hw,
 916                                IPN3KE_CCB_PROFILE_MS,
 917                                0,
 918                                0,
 919                                IPN3KE_CCB_PROFILE_MS_MASK);
 920
 921                IPN3KE_MASK_WRITE_REG(hw,
 922                                IPN3KE_CCB_PROFILE_P,
 923                                tp->tdrop_profile_id,
 924                                0,
 925                                IPN3KE_CCB_PROFILE_MASK);
 926        }
 927
 928        return 0;
 929}
 930
 931/* Traffic manager TDROP profile add */
 932static int
 933ipn3ke_tm_tdrop_profile_add(struct rte_eth_dev *dev,
 934        uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
 935        struct rte_tm_error *error)
 936{
 937        struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
 938        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
 939        struct ipn3ke_tm_tdrop_profile *tp;
 940        int status;
 941        uint64_t min_th;
 942        uint32_t th1, th2;
 943
 944        /* Check input params */
 945        status = ipn3ke_tm_tdrop_profile_check(dev,
 946                                        tdrop_profile_id,
 947                                        profile,
 948                                        error);
 949        if (status)
 950                return status;
 951
 952        /* Memory allocation */
 953        tp = &hw->tdrop_profile[tdrop_profile_id];
 954
 955        /* Fill in */
 956        tp->valid = 1;
 957        min_th = profile->red_params[RTE_COLOR_GREEN].min_th;
 958        th1 = (uint32_t)(min_th & IPN3KE_TDROP_TH1_MASK);
 959        th2 = (uint32_t)((min_th >> IPN3KE_TDROP_TH1_SHIFT) &
 960                        IPN3KE_TDROP_TH2_MASK);
 961        tp->th1 = th1;
 962        tp->th2 = th2;
 963        rte_memcpy(&tp->params, profile, sizeof(tp->params));
 964
 965        /* Add to list */
 966        tm->h.n_tdrop_profiles++;
 967
 968        /* Write FPGA */
 969        ipn3ke_hw_tm_tdrop_wr(hw, tp);
 970
 971        return 0;
 972}
 973
 974/* Traffic manager TDROP profile delete */
 975static int
 976ipn3ke_tm_tdrop_profile_delete(struct rte_eth_dev *dev,
 977        uint32_t tdrop_profile_id, struct rte_tm_error *error)
 978{
 979        struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
 980        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
 981        struct ipn3ke_tm_tdrop_profile *tp;
 982
 983        /* Check existing */
 984        tp = ipn3ke_hw_tm_tdrop_profile_search(hw, tdrop_profile_id);
 985        if (tp == NULL)
 986                return -rte_tm_error_set(error,
 987                                        EINVAL,
 988                                        RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
 989                                        NULL,
 990                                        rte_strerror(EINVAL));
 991
 992        /* Check unused */
 993        if (tp->n_users)
 994                return -rte_tm_error_set(error,
 995                                        EBUSY,
 996                                        RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
 997                                        NULL,
 998                                        rte_strerror(EBUSY));
 999
1000        /* Set free */
1001        tp->valid = 0;
1002        tm->h.n_tdrop_profiles--;
1003
1004        /* Write FPGA */
1005        ipn3ke_hw_tm_tdrop_wr(hw, tp);
1006
1007        return 0;
1008}
1009
1010static int
1011ipn3ke_tm_node_add_check_parameter(uint32_t tm_id,
1012        uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
1013        uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
1014        struct rte_tm_error *error)
1015{
1016        uint32_t level_of_node_id;
1017        uint32_t node_index;
1018        uint32_t parent_level_id;
1019
1020        if (node_id == RTE_TM_NODE_ID_NULL)
1021                return -rte_tm_error_set(error,
1022                                        EINVAL,
1023                                        RTE_TM_ERROR_TYPE_NODE_ID,
1024                                        NULL,
1025                                        rte_strerror(EINVAL));
1026
1027        /* priority: must be 0, 1, 2, 3 */
1028        if (priority > IPN3KE_TM_NODE_PRIORITY_HIGHEST)
1029                return -rte_tm_error_set(error,
1030                                        EINVAL,
1031                                        RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1032                                        NULL,
1033                                        rte_strerror(EINVAL));
1034
1035        /* weight: must be 1 .. 255 */
1036        if (weight > IPN3KE_TM_NODE_WEIGHT_MAX)
1037                return -rte_tm_error_set(error,
1038                                        EINVAL,
1039                                        RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1040                                        NULL,
1041                                        rte_strerror(EINVAL));
1042
1043        /* check node id and parent id*/
1044        level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
1045        if (level_of_node_id != level_id)
1046                return -rte_tm_error_set(error,
1047                                        EINVAL,
1048                                        RTE_TM_ERROR_TYPE_NODE_ID,
1049                                        NULL,
1050                                        rte_strerror(EINVAL));
1051        node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1052        parent_level_id = parent_node_id / IPN3KE_TM_NODE_LEVEL_MOD;
1053        switch (level_id) {
1054        case IPN3KE_TM_NODE_LEVEL_PORT:
1055                if (node_index != tm_id)
1056                        return -rte_tm_error_set(error,
1057                                                EINVAL,
1058                                                RTE_TM_ERROR_TYPE_NODE_ID,
1059                                                NULL,
1060                                                rte_strerror(EINVAL));
1061                if (parent_node_id != RTE_TM_NODE_ID_NULL)
1062                        return -rte_tm_error_set(error,
1063                                        EINVAL,
1064                                        RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1065                                        NULL,
1066                                        rte_strerror(EINVAL));
1067                break;
1068
1069        case IPN3KE_TM_NODE_LEVEL_VT:
1070                if (node_index >= IPN3KE_TM_VT_NODE_NUM)
1071                        return -rte_tm_error_set(error,
1072                                                EINVAL,
1073                                                RTE_TM_ERROR_TYPE_NODE_ID,
1074                                                NULL,
1075                                                rte_strerror(EINVAL));
1076                if (parent_level_id != IPN3KE_TM_NODE_LEVEL_PORT)
1077                        return -rte_tm_error_set(error,
1078                                        EINVAL,
1079                                        RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1080                                        NULL,
1081                                        rte_strerror(EINVAL));
1082                break;
1083
1084        case IPN3KE_TM_NODE_LEVEL_COS:
1085                if (node_index >= IPN3KE_TM_COS_NODE_NUM)
1086                        return -rte_tm_error_set(error,
1087                                                EINVAL,
1088                                                RTE_TM_ERROR_TYPE_NODE_ID,
1089                                                NULL,
1090                                                rte_strerror(EINVAL));
1091                if (parent_level_id != IPN3KE_TM_NODE_LEVEL_VT)
1092                        return -rte_tm_error_set(error,
1093                                        EINVAL,
1094                                        RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1095                                        NULL,
1096                                        rte_strerror(EINVAL));
1097                break;
1098        default:
1099                return -rte_tm_error_set(error,
1100                                        EINVAL,
1101                                        RTE_TM_ERROR_TYPE_LEVEL_ID,
1102                                        NULL,
1103                                        rte_strerror(EINVAL));
1104        }
1105
1106        /* params: must not be NULL */
1107        if (params == NULL)
1108                return -rte_tm_error_set(error,
1109                                        EINVAL,
1110                                        RTE_TM_ERROR_TYPE_NODE_PARAMS,
1111                                        NULL,
1112                                        rte_strerror(EINVAL));
1113        /* No shared shapers */
1114        if (params->n_shared_shapers != 0)
1115                return -rte_tm_error_set(error,
1116                                EINVAL,
1117                                RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1118                                NULL,
1119                                rte_strerror(EINVAL));
1120        return 0;
1121}
1122
1123static int
1124ipn3ke_tm_node_add_check_mount(uint32_t tm_id,
1125        uint32_t node_id, uint32_t parent_node_id, uint32_t level_id,
1126        struct rte_tm_error *error)
1127{
1128        uint32_t node_index;
1129        uint32_t parent_index;
1130        uint32_t parent_index1;
1131
1132        node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1133        parent_index = parent_node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1134        parent_index1 = node_index / IPN3KE_TM_NODE_MOUNT_MAX;
1135        switch (level_id) {
1136        case IPN3KE_TM_NODE_LEVEL_PORT:
1137                break;
1138
1139        case IPN3KE_TM_NODE_LEVEL_VT:
1140                if (parent_index != tm_id)
1141                        return -rte_tm_error_set(error,
1142                                        EINVAL,
1143                                        RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1144                                        NULL,
1145                                        rte_strerror(EINVAL));
1146                break;
1147
1148        case IPN3KE_TM_NODE_LEVEL_COS:
1149                if (parent_index != parent_index1)
1150                        return -rte_tm_error_set(error,
1151                                        EINVAL,
1152                                        RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1153                                        NULL,
1154                                        rte_strerror(EINVAL));
1155                break;
1156        default:
1157                return -rte_tm_error_set(error,
1158                                        EINVAL,
1159                                        RTE_TM_ERROR_TYPE_LEVEL_ID,
1160                                        NULL,
1161                                        rte_strerror(EINVAL));
1162        }
1163
1164        return 0;
1165}
1166
1167/* Traffic manager node add */
1168static int
1169ipn3ke_tm_node_add(struct rte_eth_dev *dev,
1170        uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
1171        uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
1172        struct rte_tm_error *error)
1173{
1174        struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1175        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1176        uint32_t tm_id;
1177        struct ipn3ke_tm_node *n, *parent_node;
1178        uint32_t node_state, state_mask;
1179        int status;
1180
1181        /* Checks */
1182        if (tm->hierarchy_frozen)
1183                return -rte_tm_error_set(error,
1184                                        EBUSY,
1185                                        RTE_TM_ERROR_TYPE_UNSPECIFIED,
1186                                        NULL,
1187                                        rte_strerror(EBUSY));
1188
1189        tm_id = tm->tm_id;
1190
1191        status = ipn3ke_tm_node_add_check_parameter(tm_id,
1192                                                node_id,
1193                                                parent_node_id,
1194                                                priority,
1195                                                weight,
1196                                                level_id,
1197                                                params,
1198                                                error);
1199        if (status)
1200                return status;
1201
1202        status = ipn3ke_tm_node_add_check_mount(tm_id,
1203                                                node_id,
1204                                                parent_node_id,
1205                                                level_id,
1206                                                error);
1207        if (status)
1208                return status;
1209
1210        /* Shaper profile ID must not be NONE. */
1211        if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
1212                params->shaper_profile_id != node_id)
1213                return -rte_tm_error_set(error,
1214                                        EINVAL,
1215                                        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1216                                        NULL,
1217                                        rte_strerror(EINVAL));
1218
1219        /* Memory allocation */
1220        state_mask = 0;
1221        IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_IDLE);
1222        IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_DEL);
1223        n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
1224        if (!n)
1225                return -rte_tm_error_set(error,
1226                                        EINVAL,
1227                                        RTE_TM_ERROR_TYPE_UNSPECIFIED,
1228                                        NULL,
1229                                        rte_strerror(EINVAL));
1230        node_state = n->node_state;
1231
1232        /* Check parent node */
1233        state_mask = 0;
1234        IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
1235        IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
1236        if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1237                parent_node = ipn3ke_hw_tm_node_search(hw,
1238                                                        tm_id,
1239                                                        parent_node_id,
1240                                                        state_mask);
1241                if (!parent_node)
1242                        return -rte_tm_error_set(error,
1243                                        EINVAL,
1244                                        RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1245                                        NULL,
1246                                        rte_strerror(EINVAL));
1247        } else {
1248                parent_node = NULL;
1249        }
1250
1251        switch (level_id) {
1252        case IPN3KE_TM_NODE_LEVEL_PORT:
1253                n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
1254                n->tm_id = tm_id;
1255                tm->h.port_commit_node = n;
1256                break;
1257
1258        case IPN3KE_TM_NODE_LEVEL_VT:
1259                if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
1260                        TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
1261                        if (parent_node)
1262                                parent_node->n_children++;
1263                        tm->h.n_vt_nodes++;
1264                } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1265                        if (parent_node)
1266                                parent_node->n_children++;
1267                        tm->h.n_vt_nodes++;
1268                }
1269                n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
1270                n->parent_node_id = parent_node_id;
1271                n->tm_id = tm_id;
1272                n->parent_node = parent_node;
1273
1274                break;
1275
1276        case IPN3KE_TM_NODE_LEVEL_COS:
1277                if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
1278                        TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
1279                                n, node);
1280                        if (parent_node)
1281                                parent_node->n_children++;
1282                        tm->h.n_cos_nodes++;
1283                } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1284                        if (parent_node)
1285                                parent_node->n_children++;
1286                        tm->h.n_cos_nodes++;
1287                }
1288                n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
1289                n->parent_node_id = parent_node_id;
1290                n->tm_id = tm_id;
1291                n->parent_node = parent_node;
1292
1293                break;
1294        default:
1295                return -rte_tm_error_set(error,
1296                                        EINVAL,
1297                                        RTE_TM_ERROR_TYPE_LEVEL_ID,
1298                                        NULL,
1299                                        rte_strerror(EINVAL));
1300        }
1301
1302        /* Fill in the remaining node attributes */
1303        n->priority = priority;
1304        n->weight = weight;
1305
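        /* Tail-drop profiles are registered through the WRED profile ops,
         * so the leaf's WRED profile id selects its tail-drop profile.
         */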
1306        if (n->level == IPN3KE_TM_NODE_LEVEL_COS &&
1307                params->leaf.cman == RTE_TM_CMAN_TAIL_DROP)
1308                n->tdrop_profile = ipn3ke_hw_tm_tdrop_profile_search(hw,
1309                        params->leaf.wred.wred_profile_id);
1310
1311        rte_memcpy(&n->params, params, sizeof(n->params));
1312
1313        return 0;
1314}
1315
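/* Check the parameters of a node delete request */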
1316static int
1317ipn3ke_tm_node_del_check_parameter(uint32_t tm_id,
1318        uint32_t node_id, struct rte_tm_error *error)
1319{
1320        uint32_t level_of_node_id;
1321        uint32_t node_index;
1322
1323        if (node_id == RTE_TM_NODE_ID_NULL)
1324                return -rte_tm_error_set(error,
1325                                        EINVAL,
1326                                        RTE_TM_ERROR_TYPE_NODE_ID,
1327                                        NULL,
1328                                        rte_strerror(EINVAL));
1329
1330        /* Check that the node id is valid for its level */
1331        level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
1332        node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
1333        switch (level_of_node_id) {
1334        case IPN3KE_TM_NODE_LEVEL_PORT:
1335                if (node_index != tm_id)
1336                        return -rte_tm_error_set(error,
1337                                                EINVAL,
1338                                                RTE_TM_ERROR_TYPE_NODE_ID,
1339                                                NULL,
1340                                                rte_strerror(EINVAL));
1341                break;
1342
1343        case IPN3KE_TM_NODE_LEVEL_VT:
1344                if (node_index >= IPN3KE_TM_VT_NODE_NUM)
1345                        return -rte_tm_error_set(error,
1346                                                EINVAL,
1347                                                RTE_TM_ERROR_TYPE_NODE_ID,
1348                                                NULL,
1349                                                rte_strerror(EINVAL));
1350                break;
1351
1352        case IPN3KE_TM_NODE_LEVEL_COS:
1353                if (node_index >= IPN3KE_TM_COS_NODE_NUM)
1354                        return -rte_tm_error_set(error,
1355                                                EINVAL,
1356                                                RTE_TM_ERROR_TYPE_NODE_ID,
1357                                                NULL,
1358                                                rte_strerror(EINVAL));
1359                break;
1360        default:
1361                return -rte_tm_error_set(error,
1362                                        EINVAL,
1363                                        RTE_TM_ERROR_TYPE_LEVEL_ID,
1364                                        NULL,
1365                                        rte_strerror(EINVAL));
1366        }
1367
1368        return 0;
1369}
1370
1371/* Traffic manager node delete */
1372static int
1373ipn3ke_pmd_tm_node_delete(struct rte_eth_dev *dev,
1374        uint32_t node_id, struct rte_tm_error *error)
1375{
1376        struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1377        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1378        struct ipn3ke_tm_node *n, *parent_node;
1379        uint32_t tm_id;
1380        int status;
1381        uint32_t level_of_node_id;
1382        uint32_t node_state;
1383        uint32_t state_mask;
1384
1385        /* Check hierarchy changes are currently allowed */
1386        if (tm->hierarchy_frozen)
1387                return -rte_tm_error_set(error,
1388                                        EBUSY,
1389                                        RTE_TM_ERROR_TYPE_UNSPECIFIED,
1390                                        NULL,
1391                                        rte_strerror(EBUSY));
1392
1393        tm_id = tm->tm_id;
1394
1395        status = ipn3ke_tm_node_del_check_parameter(tm_id,
1396                                                node_id,
1397                                                error);
1398        if (status)
1399                return status;
1400
1401        /* Check existing */
1402        state_mask = 0;
1403        IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
1404        IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
1405        n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
1406        if (n == NULL)
1407                return -rte_tm_error_set(error,
1408                                        EINVAL,
1409                                        RTE_TM_ERROR_TYPE_NODE_ID,
1410                                        NULL,
1411                                        rte_strerror(EINVAL));
1412
1413        if (n->n_children > 0)
1414                return -rte_tm_error_set(error,
1415                                        EINVAL,
1416                                        RTE_TM_ERROR_TYPE_NODE_ID,
1417                                        NULL,
1418                                        rte_strerror(EINVAL));
1419
1420        node_state = n->node_state;
1421
1422        level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
1423
1424        /* Check parent node */
1425        if (n->parent_node_id != RTE_TM_NODE_ID_NULL) {
1426                state_mask = 0;
1427                IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
1428                IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
1429                parent_node = ipn3ke_hw_tm_node_search(hw,
1430                                                tm_id,
1431                                                n->parent_node_id,
1432                                                state_mask);
1433                if (!parent_node)
1434                        return -rte_tm_error_set(error,
1435                                        EINVAL,
1436                                        RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1437                                        NULL,
1438                                        rte_strerror(EINVAL));
1439                if (n->parent_node != parent_node)
1440                        return -rte_tm_error_set(error,
1441                                                EINVAL,
1442                                                RTE_TM_ERROR_TYPE_NODE_ID,
1443                                                NULL,
1444                                                rte_strerror(EINVAL));
1445        } else {
1446                parent_node = NULL;
1447        }
1448
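        /* Stage the node for deletion and update hierarchy counters */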
1449        switch (level_of_node_id) {
1450        case IPN3KE_TM_NODE_LEVEL_PORT:
1451                if (tm->h.port_node != n)
1452                        return -rte_tm_error_set(error,
1453                                                EINVAL,
1454                                                RTE_TM_ERROR_TYPE_NODE_ID,
1455                                                NULL,
1456                                                rte_strerror(EINVAL));
1457                n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
1458                tm->h.port_commit_node = n;
1459
1460                break;
1461
1462        case IPN3KE_TM_NODE_LEVEL_VT:
1463                if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
1464                        if (parent_node)
1465                                TAILQ_REMOVE(&parent_node->children_node_list,
1466                                        n, node);
1467                        TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
1468                        if (parent_node)
1469                                parent_node->n_children--;
1470                        tm->h.n_vt_nodes--;
1471                } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1472                        if (parent_node)
1473                                parent_node->n_children--;
1474                        tm->h.n_vt_nodes--;
1475                }
1476                n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
1477
1478                break;
1479
1480        case IPN3KE_TM_NODE_LEVEL_COS:
1481                if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
1482                        if (parent_node)
1483                                TAILQ_REMOVE(&parent_node->children_node_list,
1484                                        n, node);
1485                        TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
1486                                n, node);
1487                        if (parent_node)
1488                                parent_node->n_children--;
1489                        tm->h.n_cos_nodes--;
1490                } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1491                        if (parent_node)
1492                                parent_node->n_children--;
1493                        tm->h.n_cos_nodes--;
1494                }
1495                n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
1496
1497                break;
1498        default:
1499                return -rte_tm_error_set(error,
1500                                        EINVAL,
1501                                        RTE_TM_ERROR_TYPE_LEVEL_ID,
1502                                        NULL,
1503                                        rte_strerror(EINVAL));
1504        }
1505
1506        return 0;
1507}
1508
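/* Validate all nodes on the commit lists before writing to hardware */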
1509static int
1510ipn3ke_tm_hierarchy_commit_check(struct rte_eth_dev *dev,
1511                                                struct rte_tm_error *error)
1512{
1513        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1514        uint32_t tm_id;
1515        struct ipn3ke_tm_node_list *nl;
1516        struct ipn3ke_tm_node *n, *parent_node;
1517
1518        tm_id = tm->tm_id;
1519
1520        nl = &tm->h.cos_commit_node_list;
1521        TAILQ_FOREACH(n, nl, node) {
1522                parent_node = n->parent_node;
1523                if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1524                        if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
1525                                n->level != IPN3KE_TM_NODE_LEVEL_COS ||
1526                                n->tm_id != tm_id ||
1527                                parent_node == NULL ||
1528                                (parent_node &&
1529                                        parent_node->node_state ==
1530                                        IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
1531                                (parent_node &&
1532                                        parent_node->node_state ==
1533                                                IPN3KE_TM_NODE_STATE_IDLE) ||
1534                                n->shaper_profile.valid == 0) {
1535                                return -rte_tm_error_set(error,
1536                                                EINVAL,
1537                                                RTE_TM_ERROR_TYPE_UNSPECIFIED,
1538                                                NULL,
1539                                                rte_strerror(EINVAL));
1540                        }
1541                } else if (n->node_state ==
1542                                IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1543                        if (n->level != IPN3KE_TM_NODE_LEVEL_COS ||
1544                                n->n_children != 0) {
1545                                return -rte_tm_error_set(error,
1546                                                EINVAL,
1547                                                RTE_TM_ERROR_TYPE_UNSPECIFIED,
1548                                                NULL,
1549                                                rte_strerror(EINVAL));
1550                        }
1551                }
1552        }
1553
1554        nl = &tm->h.vt_commit_node_list;
1555        TAILQ_FOREACH(n, nl, node) {
1556                parent_node = n->parent_node;
1557                if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1558                        if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
1559                                n->level != IPN3KE_TM_NODE_LEVEL_VT ||
1560                                n->tm_id != tm_id ||
1561                                parent_node == NULL ||
1562                                (parent_node &&
1563                                        parent_node->node_state ==
1564                                        IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
1565                                (parent_node &&
1566                                        parent_node->node_state ==
1567                                                IPN3KE_TM_NODE_STATE_IDLE) ||
1568                                n->shaper_profile.valid == 0) {
1569                                return -rte_tm_error_set(error,
1570                                                EINVAL,
1571                                                RTE_TM_ERROR_TYPE_UNSPECIFIED,
1572                                                NULL,
1573                                                rte_strerror(EINVAL));
1574                        }
1575                } else if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL)
1576                        return -rte_tm_error_set(error,
1577                                                EINVAL,
1578                                                RTE_TM_ERROR_TYPE_UNSPECIFIED,
1579                                                NULL,
1580                                                rte_strerror(EINVAL));
1581        }
1582
1583        n = tm->h.port_commit_node;
1584        if (n &&
1585                (n->parent_node_id != RTE_TM_NODE_ID_NULL ||
1586                n->level != IPN3KE_TM_NODE_LEVEL_PORT ||
1587                n->tm_id != tm_id ||
1588                n->parent_node != NULL ||
1589                n->shaper_profile.valid == 0)) {
1590                return -rte_tm_error_set(error,
1591                                        EINVAL,
1592                                        RTE_TM_ERROR_TYPE_UNSPECIFIED,
1593                                        NULL,
1594                                        rte_strerror(EINVAL));
1595        }
1596
1597        return 0;
1598}
1599
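/* Write one node's scheduling, shaping and mapping config to hardware */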
1600static int
1601ipn3ke_hw_tm_node_wr(struct ipn3ke_hw *hw,
1602        struct ipn3ke_tm_node *n,
1603        struct ipn3ke_tm_node *parent_node)
1604{
1605        uint32_t level;
1606
1607        level = n->level;
1608
1609        switch (level) {
1610        case IPN3KE_TM_NODE_LEVEL_PORT:
1611                /**
1612                 * Configure Type
1613                 */
1614                IPN3KE_MASK_WRITE_REG(hw,
1615                                IPN3KE_QOS_TYPE_L3_X,
1616                                n->node_index,
1617                                n->priority,
1618                                IPN3KE_QOS_TYPE_MASK);
1619
1620                /**
1621                 * Configure Sch_wt
1622                 */
1623                IPN3KE_MASK_WRITE_REG(hw,
1624                                IPN3KE_QOS_SCH_WT_L3_X,
1625                                n->node_index,
1626                                n->weight,
1627                                IPN3KE_QOS_SCH_WT_MASK);
1628
1629                /**
1630                 * Configure Shap_wt
1631                 */
1632                if (n->shaper_profile.valid)
1633                        IPN3KE_MASK_WRITE_REG(hw,
1634                                        IPN3KE_QOS_SHAP_WT_L3_X,
1635                                        n->node_index,
1636                                        ((n->shaper_profile.e << 10) |
1637                                                n->shaper_profile.m),
1638                                        IPN3KE_QOS_SHAP_WT_MASK);
1639
1640                break;
1641        case IPN3KE_TM_NODE_LEVEL_VT:
1642                /**
1643                 * Configure Type
1644                 */
1645                IPN3KE_MASK_WRITE_REG(hw,
1646                                IPN3KE_QOS_TYPE_L2_X,
1647                                n->node_index,
1648                                n->priority,
1649                                IPN3KE_QOS_TYPE_MASK);
1650
1651                /**
1652                 * Configure Sch_wt
1653                 */
1654                IPN3KE_MASK_WRITE_REG(hw,
1655                                IPN3KE_QOS_SCH_WT_L2_X,
1656                                n->node_index,
1657                                n->weight,
1658                                IPN3KE_QOS_SCH_WT_MASK);
1659
1660                /**
1661                 * Configure Shap_wt
1662                 */
1663                if (n->shaper_profile.valid)
1664                        IPN3KE_MASK_WRITE_REG(hw,
1665                                        IPN3KE_QOS_SHAP_WT_L2_X,
1666                                        n->node_index,
1667                                        ((n->shaper_profile.e << 10) |
1668                                                n->shaper_profile.m),
1669                                        IPN3KE_QOS_SHAP_WT_MASK);
1670
1671                /**
1672                 * Configure Map
1673                 */
1674                if (parent_node)
1675                        IPN3KE_MASK_WRITE_REG(hw,
1676                                        IPN3KE_QOS_MAP_L2_X,
1677                                        n->node_index,
1678                                        parent_node->node_index,
1679                                        IPN3KE_QOS_MAP_L2_MASK);
1680
1681                break;
1682        case IPN3KE_TM_NODE_LEVEL_COS:
1683                /**
1684                 * Configure Tail Drop mapping
1685                 */
1686                if (n->tdrop_profile && n->tdrop_profile->valid) {
1687                        IPN3KE_MASK_WRITE_REG(hw,
1688                                        IPN3KE_CCB_QPROFILE_Q,
1689                                        n->node_index,
1690                                        n->tdrop_profile->tdrop_profile_id,
1691                                        IPN3KE_CCB_QPROFILE_MASK);
1692                }
1693
1694                /**
1695                 * Configure Type
1696                 */
1697                IPN3KE_MASK_WRITE_REG(hw,
1698                                IPN3KE_QOS_TYPE_L1_X,
1699                                n->node_index,
1700                                n->priority,
1701                                IPN3KE_QOS_TYPE_MASK);
1702
1703                /**
1704                 * Configure Sch_wt
1705                 */
1706                IPN3KE_MASK_WRITE_REG(hw,
1707                                IPN3KE_QOS_SCH_WT_L1_X,
1708                                n->node_index,
1709                                n->weight,
1710                                IPN3KE_QOS_SCH_WT_MASK);
1711
1712                /**
1713                 * Configure Shap_wt
1714                 */
1715                if (n->shaper_profile.valid)
1716                        IPN3KE_MASK_WRITE_REG(hw,
1717                                        IPN3KE_QOS_SHAP_WT_L1_X,
1718                                        n->node_index,
1719                                        ((n->shaper_profile.e << 10) |
1720                                                n->shaper_profile.m),
1721                                        IPN3KE_QOS_SHAP_WT_MASK);
1722
1723                /**
1724                 * Configure COS queue to port
1725                 */
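                /**
                 * Poll until bit 31 of the QM UID config control
                 * register clears before issuing a new request
                 */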
1726                while (IPN3KE_MASK_READ_REG(hw,
1727                                        IPN3KE_QM_UID_CONFIG_CTRL,
1728                                        0,
1729                                        0x80000000))
1730                        ;
1731
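                /**
                 * The grandparent of a COS node is the port node;
                 * program its index into the config data register
                 */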
1732                if (parent_node && parent_node->parent_node)
1733                        IPN3KE_MASK_WRITE_REG(hw,
1734                                IPN3KE_QM_UID_CONFIG_DATA,
1735                                0,
1736                                (1 << 8 | parent_node->parent_node->node_index),
1737                                0x1FF);
1738
1739                IPN3KE_MASK_WRITE_REG(hw,
1740                                IPN3KE_QM_UID_CONFIG_CTRL,
1741                                0,
1742                                n->node_index,
1743                                0xFFFFF);
1744
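                /**
                 * Wait for the config write above to complete
                 */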
1745                while (IPN3KE_MASK_READ_REG(hw,
1746                                        IPN3KE_QM_UID_CONFIG_CTRL,
1747                                        0,
1748                                        0x80000000))
1749                        ;
1750
1751                /**
1752                 * Configure Map
1753                 */
1754                if (parent_node)
1755                        IPN3KE_MASK_WRITE_REG(hw,
1756                                        IPN3KE_QOS_MAP_L1_X,
1757                                        n->node_index,
1758                                        parent_node->node_index,
1759                                        IPN3KE_QOS_MAP_L1_MASK);
1760
1761                break;
1762        default:
1763                return -1;
1764        }
1765
1766        return 0;
1767}
1768
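/* Apply pending hierarchy changes to hardware and update node states */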
1769static int
1770ipn3ke_tm_hierarchy_hw_commit(struct rte_eth_dev *dev,
1771                                        struct rte_tm_error *error)
1772{
1773        struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1774        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1775        struct ipn3ke_tm_node_list *nl;
1776        struct ipn3ke_tm_node *n, *nn, *parent_node;
1777
1778        n = tm->h.port_commit_node;
1779        if (n) {
1780                if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1781                        tm->h.port_commit_node = NULL;
1782
1783                        n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
1784                } else if (n->node_state ==
1785                                        IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1786                        tm->h.port_commit_node = NULL;
1787
1788                        n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1789                        n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1790                        n->weight = 0;
1791                        n->tm_id = RTE_TM_NODE_ID_NULL;
1792                } else {
1793                        return -rte_tm_error_set(error,
1794                                                EINVAL,
1795                                                RTE_TM_ERROR_TYPE_UNSPECIFIED,
1796                                                NULL,
1797                                                rte_strerror(EINVAL));
1798                }
1799                parent_node = n->parent_node;
1800                ipn3ke_hw_tm_node_wr(hw, n, parent_node);
1801        }
1802
1803        nl = &tm->h.vt_commit_node_list;
1804        for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1805                nn = TAILQ_NEXT(n, node);
1806                if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1807                        n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
1808                        parent_node = n->parent_node;
1809                        TAILQ_REMOVE(nl, n, node);
1810                        TAILQ_INSERT_TAIL(&parent_node->children_node_list,
1811                                                n, node);
1812                } else if (n->node_state ==
1813                                        IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1814                        parent_node = n->parent_node;
1815                        TAILQ_REMOVE(nl, n, node);
1816
1817                        n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1818                        n->parent_node_id = RTE_TM_NODE_ID_NULL;
1819                        n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1820                        n->weight = 0;
1821                        n->tm_id = RTE_TM_NODE_ID_NULL;
1822                        n->parent_node = NULL;
1823                } else {
1824                        return -rte_tm_error_set(error,
1825                                                EINVAL,
1826                                                RTE_TM_ERROR_TYPE_UNSPECIFIED,
1827                                                NULL,
1828                                                rte_strerror(EINVAL));
1829                }
1830                ipn3ke_hw_tm_node_wr(hw, n, parent_node);
1831        }
1832
1833        nl = &tm->h.cos_commit_node_list;
1834        for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1835                nn = TAILQ_NEXT(n, node);
1836                if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
1837                        n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
1838                        parent_node = n->parent_node;
1839                        TAILQ_REMOVE(nl, n, node);
1840                        TAILQ_INSERT_TAIL(&parent_node->children_node_list,
1841                                        n, node);
1842                } else if (n->node_state ==
1843                                        IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
1845                        parent_node = n->parent_node;
1846                        TAILQ_REMOVE(nl, n, node);
1847
1848                        n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1849                        n->parent_node_id = RTE_TM_NODE_ID_NULL;
1850                        n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1851                        n->weight = 0;
1852                        n->tm_id = RTE_TM_NODE_ID_NULL;
1853                        n->parent_node = NULL;
1854
1855                        if (n->tdrop_profile)
1856                                n->tdrop_profile->n_users--;
1857                } else {
1858                        return -rte_tm_error_set(error,
1859                                                EINVAL,
1860                                                RTE_TM_ERROR_TYPE_UNSPECIFIED,
1861                                                NULL,
1862                                                rte_strerror(EINVAL));
1863                }
1864                ipn3ke_hw_tm_node_wr(hw, n, parent_node);
1865        }
1866
1867        return 0;
1868}
1869
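/* Discard pending hierarchy changes and return commit-list nodes to idle */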
1870static int
1871ipn3ke_tm_hierarchy_commit_clear(struct rte_eth_dev *dev)
1872{
1873        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1874        struct ipn3ke_tm_node_list *nl;
1875        struct ipn3ke_tm_node *n;
1876        struct ipn3ke_tm_node *nn;
1877
1878        n = tm->h.port_commit_node;
1879        if (n) {
1880                n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1881                n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1882                n->weight = 0;
1883                n->tm_id = RTE_TM_NODE_ID_NULL;
1884                n->n_children = 0;
1885
1886                tm->h.port_commit_node = NULL;
1887        }
1888
1889        nl = &tm->h.vt_commit_node_list;
1890        for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1891                nn = TAILQ_NEXT(n, node);
1892
1893                n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1894                n->parent_node_id = RTE_TM_NODE_ID_NULL;
1895                n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1896                n->weight = 0;
1897                n->tm_id = RTE_TM_NODE_ID_NULL;
1898                n->parent_node = NULL;
1899                n->n_children = 0;
1900                tm->h.n_vt_nodes--;
1901
1902                TAILQ_REMOVE(nl, n, node);
1903        }
1904
1905        nl = &tm->h.cos_commit_node_list;
1906        for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
1907                nn = TAILQ_NEXT(n, node);
1908
1909                n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
1910                n->parent_node_id = RTE_TM_NODE_ID_NULL;
1911                n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
1912                n->weight = 0;
1913                n->tm_id = RTE_TM_NODE_ID_NULL;
1914                n->parent_node = NULL;
1915                tm->h.n_cos_nodes--;
1916
1917                TAILQ_REMOVE(nl, n, node);
1918        }
1919
1920        return 0;
1921}
1922
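/* Dump the committed HQoS tree for debugging */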
1923static void
1924ipn3ke_tm_show(struct rte_eth_dev *dev)
1925{
1926        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1927        uint32_t tm_id;
1928        struct ipn3ke_tm_node_list *vt_nl, *cos_nl;
1929        struct ipn3ke_tm_node *port_n, *vt_n, *cos_n;
1930        const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
1931                                                "CfgAdd",
1932                                                "CfgDel",
1933                                                "Committed"};
1934
1935        tm_id = tm->tm_id;
1936
1937        IPN3KE_AFU_PMD_DEBUG("***HQoS Tree(%d)***\n", tm_id);
1938
1939        port_n = tm->h.port_node;
1940        IPN3KE_AFU_PMD_DEBUG("Port: (%d|%s)\n", port_n->node_index,
1941                                str_state[port_n->node_state]);
1942
1943        vt_nl = &tm->h.port_node->children_node_list;
1944        TAILQ_FOREACH(vt_n, vt_nl, node) {
1945                cos_nl = &vt_n->children_node_list;
1946                IPN3KE_AFU_PMD_DEBUG("    VT%d: ", vt_n->node_index);
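                /* Report child COS nodes whose parent id does not match this VT */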
1947                TAILQ_FOREACH(cos_n, cos_nl, node) {
1948                        if (cos_n->parent_node_id !=
1949                                (vt_n->node_index + IPN3KE_TM_NODE_LEVEL_MOD))
1950                                IPN3KE_AFU_PMD_ERR("(%d|%s), ",
1951                                        cos_n->node_index,
1952                                        str_state[cos_n->node_state]);
1953                }
1954                IPN3KE_AFU_PMD_DEBUG("\n");
1955        }
1956}
1957
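/* Dump the nodes currently queued on the commit lists */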
1958static void
1959ipn3ke_tm_show_commit(struct rte_eth_dev *dev)
1960{
1961        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
1962        uint32_t tm_id;
1963        struct ipn3ke_tm_node_list *nl;
1964        struct ipn3ke_tm_node *n;
1965        const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
1966                                                "CfgAdd",
1967                                                "CfgDel",
1968                                                "Committed"};
1969
1970        tm_id = tm->tm_id;
1971
1972        IPN3KE_AFU_PMD_DEBUG("***Commit Tree(%d)***\n", tm_id);
1973        n = tm->h.port_commit_node;
1974        IPN3KE_AFU_PMD_DEBUG("Port: ");
1975        if (n)
1976                IPN3KE_AFU_PMD_DEBUG("(%d|%s)",
1977                        n->node_index,
1978                        str_state[n->node_state]);
1979        IPN3KE_AFU_PMD_DEBUG("\n");
1980
1981        nl = &tm->h.vt_commit_node_list;
1982        IPN3KE_AFU_PMD_DEBUG("VT  : ");
1983        TAILQ_FOREACH(n, nl, node) {
1984                IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
1985                                n->node_index,
1986                                str_state[n->node_state]);
1987        }
1988        IPN3KE_AFU_PMD_DEBUG("\n");
1989
1990        nl = &tm->h.cos_commit_node_list;
1991        IPN3KE_AFU_PMD_DEBUG("COS : ");
1992        TAILQ_FOREACH(n, nl, node) {
1993                IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
1994                                n->node_index,
1995                                str_state[n->node_state]);
1996        }
1997        IPN3KE_AFU_PMD_DEBUG("\n");
1998}
1999
2000/* Traffic manager hierarchy commit */
2001static int
2002ipn3ke_tm_hierarchy_commit(struct rte_eth_dev *dev,
2003        int clear_on_fail, struct rte_tm_error *error)
2004{
2005        struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
2006        int status;
2007
2008        /* Checks */
2009        if (tm->hierarchy_frozen)
2010                return -rte_tm_error_set(error,
2011                                        EBUSY,
2012                                        RTE_TM_ERROR_TYPE_UNSPECIFIED,
2013                                        NULL,
2014                                        rte_strerror(EBUSY));
2015
2016        ipn3ke_tm_show_commit(dev);
2017
2018        status = ipn3ke_tm_hierarchy_commit_check(dev, error);
2019        if (status) {
2020                if (clear_on_fail)
2021                        ipn3ke_tm_hierarchy_commit_clear(dev);
2022                return status;
2023        }
2024
2025        status = ipn3ke_tm_hierarchy_hw_commit(dev, error);
        if (status) {
                if (clear_on_fail)
                        ipn3ke_tm_hierarchy_commit_clear(dev);
                return status;
        }

2026        ipn3ke_tm_show(dev);
2027
2028        return 0;
2029}
2030
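/* TM callbacks exposed by the IPN3KE PMD; unsupported callbacks are NULL */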
2031const struct rte_tm_ops ipn3ke_tm_ops = {
2032        .node_type_get = ipn3ke_pmd_tm_node_type_get,
2033        .capabilities_get = ipn3ke_tm_capabilities_get,
2034        .level_capabilities_get = ipn3ke_tm_level_capabilities_get,
2035        .node_capabilities_get = ipn3ke_tm_node_capabilities_get,
2036
2037        .wred_profile_add = ipn3ke_tm_tdrop_profile_add,
2038        .wred_profile_delete = ipn3ke_tm_tdrop_profile_delete,
2039        .shared_wred_context_add_update = NULL,
2040        .shared_wred_context_delete = NULL,
2041
2042        .shaper_profile_add = ipn3ke_tm_shaper_profile_add,
2043        .shaper_profile_delete = ipn3ke_tm_shaper_profile_delete,
2044        .shared_shaper_add_update = NULL,
2045        .shared_shaper_delete = NULL,
2046
2047        .node_add = ipn3ke_tm_node_add,
2048        .node_delete = ipn3ke_pmd_tm_node_delete,
2049        .node_suspend = NULL,
2050        .node_resume = NULL,
2051        .hierarchy_commit = ipn3ke_tm_hierarchy_commit,
2052
2053        .node_parent_update = NULL,
2054        .node_shaper_update = NULL,
2055        .node_shared_shaper_update = NULL,
2056        .node_stats_update = NULL,
2057        .node_wfq_weight_mode_update = NULL,
2058        .node_cman_update = NULL,
2059        .node_wred_context_update = NULL,
2060        .node_shared_wred_context_update = NULL,
2061
2062        .node_stats_read = NULL,
2063};
2064
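/* Get TM ops: use the IPN3KE ops when hw->acc_tm is set, otherwise
 * forward to the TM ops of the bound i40e PF ethdev.
 */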
2065int
2066ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev,
2067                void *arg)
2068{
2069        struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
2070        struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
2071        struct rte_eth_dev *i40e_pf_eth;
2072        const struct rte_tm_ops *ops;
2073
2074        if (!arg)
2075                return -EINVAL;
2076
2077        if (hw->acc_tm) {
2078                *(const void **)arg = &ipn3ke_tm_ops;
2079        } else if (rpst->i40e_pf_eth) {
2080                i40e_pf_eth = rpst->i40e_pf_eth;
2081                if (i40e_pf_eth->dev_ops->tm_ops_get == NULL ||
2082                        i40e_pf_eth->dev_ops->tm_ops_get(i40e_pf_eth,
2083                        &ops) != 0 ||
2084                        ops == NULL) {
2085                        return -EINVAL;
2086                }
2087                *(const void **)arg = ops;
2088        } else {
2089                return -EINVAL;
2090        }
2091
2092        return 0;
2093}
2094