/* dpdk/drivers/net/octeontx2/otx2_flow.c */
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(C) 2019 Marvell International Ltd.
   3 */
   4
   5#include "otx2_ethdev.h"
   6#include "otx2_ethdev_sec.h"
   7#include "otx2_flow.h"
   8
   9enum flow_vtag_cfg_dir { VTAG_TX, VTAG_RX };
  10
  11int
  12otx2_flow_free_all_resources(struct otx2_eth_dev *hw)
  13{
  14        struct otx2_npc_flow_info *npc = &hw->npc_flow;
  15        struct otx2_mbox *mbox = hw->mbox;
  16        struct otx2_mcam_ents_info *info;
  17        struct rte_bitmap *bmap;
  18        struct rte_flow *flow;
  19        int entry_count = 0;
  20        int rc, idx;
  21
  22        for (idx = 0; idx < npc->flow_max_priority; idx++) {
  23                info = &npc->flow_entry_info[idx];
  24                entry_count += info->live_ent;
  25        }
  26
  27        if (entry_count == 0)
  28                return 0;
  29
  30        /* Free all MCAM entries allocated */
  31        rc = otx2_flow_mcam_free_all_entries(mbox);
  32
  33        /* Free any MCAM counters and delete flow list */
  34        for (idx = 0; idx < npc->flow_max_priority; idx++) {
  35                while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) {
  36                        if (flow->ctr_id != NPC_COUNTER_NONE)
  37                                rc |= otx2_flow_mcam_free_counter(mbox,
  38                                                             flow->ctr_id);
  39
  40                        TAILQ_REMOVE(&npc->flow_list[idx], flow, next);
  41                        rte_free(flow);
  42                        bmap = npc->live_entries[flow->priority];
  43                        rte_bitmap_clear(bmap, flow->mcam_id);
  44                }
  45                info = &npc->flow_entry_info[idx];
  46                info->free_ent = 0;
  47                info->live_ent = 0;
  48        }
  49        return rc;
  50}
  51
  52
/*
 * Build the non-LDATA (layer-type / layer-flags) portion of the MCAM
 * search key for the flow from the parse state, then allocate and
 * program an MCAM entry for it.  Returns the result of
 * otx2_flow_mcam_alloc_and_write().
 */
static int
flow_program_npc(struct otx2_parse_state *pst, struct otx2_mbox *mbox,
		 struct otx2_npc_flow_info *flow_info)
{
	/* This is non-LDATA part in search key */
	uint64_t key_data[2] = {0ULL, 0ULL};
	uint64_t key_mask[2] = {0ULL, 0ULL};
	int intf = pst->flow->nix_intf;
	int key_len, bit = 0, index;
	int off, idx, data_off = 0;
	uint8_t lid, mask, data;
	uint16_t layer_info;
	uint64_t lt, flags;


	/* Skip till Layer A data start */
	while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
		if (flow_info->keyx_supp_nmask[intf] & (1 << bit))
			data_off++;
		bit++;
	}

	/* Each bit represents 1 nibble */
	data_off *= 4;

	index = 0;
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		/* Offset in key */
		off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
		lt = pst->lt[lid] & 0xf;
		flags = pst->flags[lid] & 0xff;

		/* NPC_LAYER_KEX_S */
		layer_info = ((flow_info->keyx_supp_nmask[intf] >> off) & 0x7);

		if (layer_info) {
			for (idx = 0; idx <= 2 ; idx++) {
				if (layer_info & (1 << idx)) {
					/* idx 2 -> layer-type nibble,
					 * idx 1 -> upper flags nibble,
					 * idx 0 -> lower flags nibble.
					 */
					if (idx == 2)
						data = lt;
					else if (idx == 1)
						data = ((flags >> 4) & 0xf);
					else
						data = (flags & 0xf);

					/* Spill into the second 64-bit
					 * key word once the first fills.
					 */
					if (data_off >= 64) {
						data_off = 0;
						index++;
					}
					key_data[index] |= ((uint64_t)data <<
							    data_off);
					mask = 0xf;
					/* Layer type 0 = layer absent; do
					 * not match on this nibble.
					 */
					if (lt == 0)
						mask = 0;
					key_mask[index] |= ((uint64_t)mask <<
							    data_off);
					data_off += 4;
				}
			}
		}
	}

	otx2_npc_dbg("Npc prog key data0: 0x%" PRIx64 ", data1: 0x%" PRIx64,
		     key_data[0], key_data[1]);

	/* Copy this into mcam string */
	key_len = (pst->npc->keyx_len[intf] + 7) / 8;
	otx2_npc_dbg("Key_len  = %d", key_len);
	memcpy(pst->flow->mcam_data, key_data, key_len);
	memcpy(pst->flow->mcam_mask, key_mask, key_len);

	otx2_npc_dbg("Final flow data");
	for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
		otx2_npc_dbg("data[%d]: 0x%" PRIx64 ", mask[%d]: 0x%" PRIx64,
			     idx, pst->flow->mcam_data[idx],
			     idx, pst->flow->mcam_mask[idx]);
	}

	/*
	 * Now we have mcam data and mask formatted as
	 * [Key_len/4 nibbles][0 or 1 nibble hole][data]
	 * hole is present if key_len is odd number of nibbles.
	 * mcam data must be split into 64 bits + 48 bits segments
	 * for each back W0, W1.
	 */

	return otx2_flow_mcam_alloc_and_write(pst->flow, mbox, pst, flow_info);
}
 141
 142static int
 143flow_parse_attr(struct rte_eth_dev *eth_dev,
 144                const struct rte_flow_attr *attr,
 145                struct rte_flow_error *error,
 146                struct rte_flow *flow)
 147{
 148        struct otx2_eth_dev *dev = eth_dev->data->dev_private;
 149        const char *errmsg = NULL;
 150
 151        if (attr == NULL)
 152                errmsg = "Attribute can't be empty";
 153        else if (attr->group)
 154                errmsg = "Groups are not supported";
 155        else if (attr->priority >= dev->npc_flow.flow_max_priority)
 156                errmsg = "Priority should be with in specified range";
 157        else if ((!attr->egress && !attr->ingress) ||
 158                 (attr->egress && attr->ingress))
 159                errmsg = "Exactly one of ingress or egress must be set";
 160
 161        if (errmsg != NULL) {
 162                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
 163                                   attr, errmsg);
 164                return -ENOTSUP;
 165        }
 166
 167        if (attr->ingress)
 168                flow->nix_intf = OTX2_INTF_RX;
 169        else
 170                flow->nix_intf = OTX2_INTF_TX;
 171
 172        flow->priority = attr->priority;
 173        return 0;
 174}
 175
 176static inline int
 177flow_get_free_rss_grp(struct rte_bitmap *bmap,
 178                      uint32_t size, uint32_t *pos)
 179{
 180        for (*pos = 0; *pos < size; ++*pos) {
 181                if (!rte_bitmap_get(bmap, *pos))
 182                        break;
 183        }
 184
 185        return *pos < size ? 0 : -1;
 186}
 187
/*
 * Reserve a free RSS group, program the RSS key, indirection table and
 * hash function for it, and return the selected algorithm index and
 * group via alg_idx / rss_grp.
 *
 * Returns 0 on success, -ENOSPC when no usable group is free, or the
 * error from table/hash programming.
 */
static int
flow_configure_rss_action(struct otx2_eth_dev *dev,
			  const struct rte_flow_action_rss *rss,
			  uint8_t *alg_idx, uint32_t *rss_grp,
			  int mcam_index)
{
	struct otx2_npc_flow_info *flow_info = &dev->npc_flow;
	uint16_t reta[NIX_RSS_RETA_SIZE_MAX];
	uint32_t flowkey_cfg, grp_aval, i;
	uint16_t *ind_tbl = NULL;
	uint8_t flowkey_algx;
	int rc;

	rc = flow_get_free_rss_grp(flow_info->rss_grp_entries,
				   flow_info->rss_grps, &grp_aval);
	/* RSS group :0 is not usable for flow rss action */
	if (rc < 0 || grp_aval == 0)
		return -ENOSPC;

	*rss_grp = grp_aval;

	otx2_nix_rss_set_key(dev, (uint8_t *)(uintptr_t)rss->key,
			     rss->key_len);

	/* If queue count passed in the rss action is less than
	 * HW configured reta size, replicate rss action reta
	 * across HW reta table.
	 */
	if (dev->rss_info.rss_size > rss->queue_num) {
		ind_tbl = reta;

		/* Whole multiples of the action's queue list */
		for (i = 0; i < (dev->rss_info.rss_size / rss->queue_num); i++)
			memcpy(reta + i * rss->queue_num, rss->queue,
			       sizeof(uint16_t) * rss->queue_num);

		/* Remainder: fill the last i slots from the head of
		 * the queue list (&reta[rss_size] - i is the start of
		 * the tail region).
		 */
		i = dev->rss_info.rss_size % rss->queue_num;
		if (i)
			memcpy(&reta[dev->rss_info.rss_size] - i,
			       rss->queue, i * sizeof(uint16_t));
	} else {
		ind_tbl = (uint16_t *)(uintptr_t)rss->queue;
	}

	rc = otx2_nix_rss_tbl_init(dev, *rss_grp, ind_tbl);
	if (rc) {
		otx2_err("Failed to init rss table rc = %d", rc);
		return rc;
	}

	flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss->types, rss->level);

	rc = otx2_rss_set_hf(dev, flowkey_cfg, &flowkey_algx,
			     *rss_grp, mcam_index);
	if (rc) {
		otx2_err("Failed to set rss hash function rc = %d", rc);
		return rc;
	}

	*alg_idx = flowkey_algx;

	/* Mark the group busy only after all programming succeeded */
	rte_bitmap_set(flow_info->rss_grp_entries, *rss_grp);

	return 0;
}
 252
 253
 254static int
 255flow_program_rss_action(struct rte_eth_dev *eth_dev,
 256                        const struct rte_flow_action actions[],
 257                        struct rte_flow *flow)
 258{
 259        struct otx2_eth_dev *dev = eth_dev->data->dev_private;
 260        const struct rte_flow_action_rss *rss;
 261        uint32_t rss_grp;
 262        uint8_t alg_idx;
 263        int rc;
 264
 265        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 266                if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
 267                        rss = (const struct rte_flow_action_rss *)actions->conf;
 268
 269                        rc = flow_configure_rss_action(dev,
 270                                                       rss, &alg_idx, &rss_grp,
 271                                                       flow->mcam_id);
 272                        if (rc)
 273                                return rc;
 274
 275                        flow->npc_action &= (~(0xfULL));
 276                        flow->npc_action |= NIX_RX_ACTIONOP_RSS;
 277                        flow->npc_action |=
 278                                ((uint64_t)(alg_idx & NIX_RSS_ACT_ALG_MASK) <<
 279                                 NIX_RSS_ACT_ALG_OFFSET) |
 280                                ((uint64_t)(rss_grp & NIX_RSS_ACT_GRP_MASK) <<
 281                                 NIX_RSS_ACT_GRP_OFFSET);
 282                }
 283        }
 284        return 0;
 285}
 286
 287static int
 288flow_free_rss_action(struct rte_eth_dev *eth_dev,
 289                     struct rte_flow *flow)
 290{
 291        struct otx2_eth_dev *dev = eth_dev->data->dev_private;
 292        struct otx2_npc_flow_info *npc = &dev->npc_flow;
 293        uint32_t rss_grp;
 294
 295        if (flow->npc_action & NIX_RX_ACTIONOP_RSS) {
 296                rss_grp = (flow->npc_action >> NIX_RSS_ACT_GRP_OFFSET) &
 297                        NIX_RSS_ACT_GRP_MASK;
 298                if (rss_grp == 0 || rss_grp >= npc->rss_grps)
 299                        return -EINVAL;
 300
 301                rte_bitmap_clear(npc->rss_grp_entries, rss_grp);
 302        }
 303
 304        return 0;
 305}
 306
 307static int
 308flow_update_sec_tt(struct rte_eth_dev *eth_dev,
 309                   const struct rte_flow_action actions[])
 310{
 311        int rc = 0;
 312
 313        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 314                if (actions->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
 315                        rc = otx2_eth_sec_update_tag_type(eth_dev);
 316                        break;
 317                }
 318        }
 319
 320        return rc;
 321}
 322
/*
 * Placeholder parse stage: meta items contribute nothing to the MCAM
 * key, so only a debug trace is emitted.
 */
static int
flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst)
{
	otx2_npc_dbg("Meta Item");
	return 0;
}
 329
 330/*
 331 * Parse function of each layer:
 332 *  - Consume one or more patterns that are relevant.
 333 *  - Update parse_state
 334 *  - Set parse_state.pattern = last item consumed
 335 *  - Set appropriate error code/message when returning error.
 336 */
 337typedef int (*flow_parse_stage_func_t)(struct otx2_parse_state *pst);
 338
/*
 * Walk the pattern list through the per-layer parse stages in order
 * (meta, HiGig2, LA..LH).  Each stage consumes the items it recognizes
 * and advances pst->pattern.  Parsing stops at RTE_FLOW_ITEM_TYPE_END,
 * when a stage sets pst->terminate, or after the last stage; anything
 * left over is rejected as unsupported.
 */
static int
flow_parse_pattern(struct rte_eth_dev *dev,
		   const struct rte_flow_item pattern[],
		   struct rte_flow_error *error,
		   struct rte_flow *flow,
		   struct otx2_parse_state *pst)
{
	flow_parse_stage_func_t parse_stage_funcs[] = {
		flow_parse_meta_items,
		otx2_flow_parse_higig2_hdr,
		otx2_flow_parse_la,
		otx2_flow_parse_lb,
		otx2_flow_parse_lc,
		otx2_flow_parse_ld,
		otx2_flow_parse_le,
		otx2_flow_parse_lf,
		otx2_flow_parse_lg,
		otx2_flow_parse_lh,
	};
	struct otx2_eth_dev *hw = dev->data->dev_private;
	uint8_t layer = 0;
	int key_offset;
	int rc;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "pattern is NULL");
		return -EINVAL;
	}

	memset(pst, 0, sizeof(*pst));
	pst->npc = &hw->npc_flow;
	pst->error = error;
	pst->flow = flow;

	/* Use integral byte offset */
	key_offset = pst->npc->keyx_len[flow->nix_intf];
	key_offset = (key_offset + 7) / 8;
	/* NOTE(review): key_offset is computed but never read below --
	 * confirm whether it is dead code.
	 */

	/* Location where LDATA would begin */
	pst->mcam_data = (uint8_t *)flow->mcam_data;
	pst->mcam_mask = (uint8_t *)flow->mcam_mask;

	while (pattern->type != RTE_FLOW_ITEM_TYPE_END &&
	       layer < RTE_DIM(parse_stage_funcs)) {
		otx2_npc_dbg("Pattern type = %d", pattern->type);

		/* Skip place-holders */
		pattern = otx2_flow_skip_void_and_any_items(pattern);

		pst->pattern = pattern;
		otx2_npc_dbg("Is tunnel = %d, layer = %d", pst->tunnel, layer);
		rc = parse_stage_funcs[layer](pst);
		if (rc != 0)
			return -rte_errno;

		layer++;

		/*
		 * Parse stage function sets pst->pattern to
		 * 1 past the last item it consumed.
		 */
		pattern = pst->pattern;

		if (pst->terminate)
			break;
	}

	/* Skip trailing place-holders */
	pattern = otx2_flow_skip_void_and_any_items(pattern);

	/* Are there more items than what we can handle? */
	if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
				   "unsupported item in the sequence");
		return -ENOTSUP;
	}

	return 0;
}
 421
 422static int
 423flow_parse_rule(struct rte_eth_dev *dev,
 424                const struct rte_flow_attr *attr,
 425                const struct rte_flow_item pattern[],
 426                const struct rte_flow_action actions[],
 427                struct rte_flow_error *error,
 428                struct rte_flow *flow,
 429                struct otx2_parse_state *pst)
 430{
 431        int err;
 432
 433        /* Check attributes */
 434        err = flow_parse_attr(dev, attr, error, flow);
 435        if (err)
 436                return err;
 437
 438        /* Check actions */
 439        err = otx2_flow_parse_actions(dev, attr, actions, error, flow);
 440        if (err)
 441                return err;
 442
 443        /* Check pattern */
 444        err = flow_parse_pattern(dev, pattern, error, flow, pst);
 445        if (err)
 446                return err;
 447
 448        /* Check for overlaps? */
 449        return 0;
 450}
 451
 452static int
 453otx2_flow_validate(struct rte_eth_dev *dev,
 454                   const struct rte_flow_attr *attr,
 455                   const struct rte_flow_item pattern[],
 456                   const struct rte_flow_action actions[],
 457                   struct rte_flow_error *error)
 458{
 459        struct otx2_parse_state parse_state;
 460        struct rte_flow flow;
 461
 462        memset(&flow, 0, sizeof(flow));
 463        return flow_parse_rule(dev, attr, pattern, actions, error, &flow,
 464                               &parse_state);
 465}
 466
/*
 * Translate VLAN-related flow actions (OF_POP_VLAN, OF_PUSH_VLAN,
 * OF_SET_VLAN_VID, OF_SET_VLAN_PCP) into NIX RX/TX VTAG configuration
 * via mbox and store the resulting vtag action word in
 * flow->vtag_action.  Returns 0 on success or a negative errno.
 */
static int
flow_program_vtag_action(struct rte_eth_dev *eth_dev,
			 const struct rte_flow_action actions[],
			 struct rte_flow *flow)
{
	uint16_t vlan_id = 0, vlan_ethtype = RTE_ETHER_TYPE_VLAN;
	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
	union {
		uint64_t reg;
		struct nix_tx_vtag_action_s act;
	} tx_vtag_action;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_vtag_config *vtag_cfg;
	struct nix_vtag_config_rsp *rsp;
	bool vlan_insert_action = false;
	uint64_t rx_vtag_action = 0;
	uint8_t vlan_pcp = 0;
	int rc;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_OF_POP_VLAN) {
			/* Program the RX strip/capture config only when
			 * this is the first vtag action on the port.
			 */
			if (dev->npc_flow.vtag_actions == 1) {
				vtag_cfg =
					otx2_mbox_alloc_msg_nix_vtag_cfg(mbox);
				/* NOTE(review): alloc result is used
				 * without a NULL check -- confirm the
				 * mbox allocator cannot fail here.
				 */
				vtag_cfg->cfg_type = VTAG_RX;
				vtag_cfg->rx.strip_vtag = 1;
				/* Always capture */
				vtag_cfg->rx.capture_vtag = 1;
				vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
				vtag_cfg->rx.vtag_type = 0;

				rc = otx2_mbox_process(mbox);
				if (rc)
					return rc;
			}

			/* RX vtag action: valid bit, LB layer id and
			 * relative pointer to the tag to strip.
			 */
			rx_vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 15);
			rx_vtag_action |= (NPC_LID_LB << 8);
			rx_vtag_action |= NIX_RX_VTAGACTION_VTAG0_RELPTR;
			flow->vtag_action = rx_vtag_action;
		} else if (actions->type ==
			   RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
			const struct rte_flow_action_of_set_vlan_vid *vtag =
				(const struct rte_flow_action_of_set_vlan_vid *)
					actions->conf;
			vlan_id = rte_be_to_cpu_16(vtag->vlan_vid);
			/* VLAN id is a 12-bit field */
			if (vlan_id > 0xfff) {
				otx2_err("Invalid vlan_id for set vlan action");
				return -EINVAL;
			}
			vlan_insert_action = true;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) {
			const struct rte_flow_action_of_push_vlan *ethtype =
				(const struct rte_flow_action_of_push_vlan *)
					actions->conf;
			vlan_ethtype = rte_be_to_cpu_16(ethtype->ethertype);
			/* Only 802.1Q / 802.1ad tags can be pushed */
			if (vlan_ethtype != RTE_ETHER_TYPE_VLAN &&
			    vlan_ethtype != RTE_ETHER_TYPE_QINQ) {
				otx2_err("Invalid ethtype specified for push"
					 " vlan action");
				return -EINVAL;
			}
			vlan_insert_action = true;
		} else if (actions->type ==
			   RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
			const struct rte_flow_action_of_set_vlan_pcp *pcp =
				(const struct rte_flow_action_of_set_vlan_pcp *)
					actions->conf;
			vlan_pcp = pcp->vlan_pcp;
			/* PCP is a 3-bit field */
			if (vlan_pcp > 0x7) {
				otx2_err("Invalid PCP value for pcp action");
				return -EINVAL;
			}
			vlan_insert_action = true;
		}
	}

	if (vlan_insert_action) {
		vtag_cfg = otx2_mbox_alloc_msg_nix_vtag_cfg(mbox);
		vtag_cfg->cfg_type = VTAG_TX;
		vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
		/* Compose TPID | PCP | VID into the vtag0 word */
		vtag_cfg->tx.vtag0 =
			((vlan_ethtype << 16) | (vlan_pcp << 13) | vlan_id);
		vtag_cfg->tx.cfg_vtag0 = 1;
		rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			return rc;

		tx_vtag_action.reg = 0;
		tx_vtag_action.act.vtag0_def = rsp->vtag0_idx;
		if (tx_vtag_action.act.vtag0_def < 0) {
			otx2_err("Failed to config TX VTAG action");
			return -EINVAL;
		}
		tx_vtag_action.act.vtag0_lid = NPC_LID_LA;
		tx_vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
		tx_vtag_action.act.vtag0_relptr =
			NIX_TX_VTAGACTION_VTAG0_RELPTR;
		flow->vtag_action = tx_vtag_action.reg;
	}
	return 0;
}
 569
 570static struct rte_flow *
 571otx2_flow_create(struct rte_eth_dev *dev,
 572                 const struct rte_flow_attr *attr,
 573                 const struct rte_flow_item pattern[],
 574                 const struct rte_flow_action actions[],
 575                 struct rte_flow_error *error)
 576{
 577        struct otx2_eth_dev *hw = dev->data->dev_private;
 578        struct otx2_parse_state parse_state;
 579        struct otx2_mbox *mbox = hw->mbox;
 580        struct rte_flow *flow, *flow_iter;
 581        struct otx2_flow_list *list;
 582        int rc;
 583
 584        flow = rte_zmalloc("otx2_rte_flow", sizeof(*flow), 0);
 585        if (flow == NULL) {
 586                rte_flow_error_set(error, ENOMEM,
 587                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 588                                   NULL,
 589                                   "Memory allocation failed");
 590                return NULL;
 591        }
 592        memset(flow, 0, sizeof(*flow));
 593
 594        rc = flow_parse_rule(dev, attr, pattern, actions, error, flow,
 595                             &parse_state);
 596        if (rc != 0)
 597                goto err_exit;
 598
 599        rc = flow_program_vtag_action(dev, actions, flow);
 600        if (rc != 0) {
 601                rte_flow_error_set(error, EIO,
 602                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 603                                   NULL,
 604                                   "Failed to program vlan action");
 605                goto err_exit;
 606        }
 607
 608        parse_state.is_vf = otx2_dev_is_vf(hw);
 609
 610        rc = flow_program_npc(&parse_state, mbox, &hw->npc_flow);
 611        if (rc != 0) {
 612                rte_flow_error_set(error, EIO,
 613                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 614                                   NULL,
 615                                   "Failed to insert filter");
 616                goto err_exit;
 617        }
 618
 619        rc = flow_program_rss_action(dev, actions, flow);
 620        if (rc != 0) {
 621                rte_flow_error_set(error, EIO,
 622                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 623                                   NULL,
 624                                   "Failed to program rss action");
 625                goto err_exit;
 626        }
 627
 628        if (hw->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 629                rc = flow_update_sec_tt(dev, actions);
 630                if (rc != 0) {
 631                        rte_flow_error_set(error, EIO,
 632                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 633                                           NULL,
 634                                           "Failed to update tt with sec act");
 635                        goto err_exit;
 636                }
 637        }
 638
 639        list = &hw->npc_flow.flow_list[flow->priority];
 640        /* List in ascending order of mcam entries */
 641        TAILQ_FOREACH(flow_iter, list, next) {
 642                if (flow_iter->mcam_id > flow->mcam_id) {
 643                        TAILQ_INSERT_BEFORE(flow_iter, flow, next);
 644                        return flow;
 645                }
 646        }
 647
 648        TAILQ_INSERT_TAIL(list, flow, next);
 649        return flow;
 650
 651err_exit:
 652        rte_free(flow);
 653        return NULL;
 654}
 655
/*
 * rte_flow destroy handler: unwind mark/vtag fast-path accounting,
 * release the RSS group and MCAM entry, unlink the flow from its
 * priority list and free it.
 */
static int
otx2_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	struct otx2_mbox *mbox = hw->mbox;
	struct rte_bitmap *bmap;
	uint16_t match_id;
	int rc;

	/* Extract the mark/flag match id encoded in the RX action */
	match_id = (flow->npc_action >> NIX_RX_ACT_MATCH_OFFSET) &
		NIX_RX_ACT_MATCH_MASK;

	if (match_id && match_id < OTX2_FLOW_ACTION_FLAG_DEFAULT) {
		if (rte_atomic32_read(&npc->mark_actions) == 0)
			return -EINVAL;

		/* Clear mark offload flag if there are no more mark actions */
		if (rte_atomic32_sub_return(&npc->mark_actions, 1) == 0) {
			hw->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
			otx2_eth_set_rx_function(dev);
		}
	}

	/* Drop the RX VLAN-strip fast-path flag once the last vtag
	 * action is gone and strip offload is not otherwise enabled.
	 */
	if (flow->nix_intf == OTX2_INTF_RX && flow->vtag_action) {
		npc->vtag_actions--;
		if (npc->vtag_actions == 0) {
			if (hw->vlan_info.strip_on == 0) {
				hw->rx_offload_flags &=
					~NIX_RX_OFFLOAD_VLAN_STRIP_F;
				otx2_eth_set_rx_function(dev);
			}
		}
	}

	/* Failures below are reported via *error but teardown continues
	 * so the flow object and bookkeeping are always released.
	 */
	rc = flow_free_rss_action(dev, flow);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to free rss action");
	}

	rc = otx2_flow_mcam_free_entry(mbox, flow->mcam_id);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to destroy filter");
	}

	TAILQ_REMOVE(&npc->flow_list[flow->priority], flow, next);

	bmap = npc->live_entries[flow->priority];
	rte_bitmap_clear(bmap, flow->mcam_id);

	rte_free(flow);
	return 0;
}
 717
 718static int
 719otx2_flow_flush(struct rte_eth_dev *dev,
 720                struct rte_flow_error *error)
 721{
 722        struct otx2_eth_dev *hw = dev->data->dev_private;
 723        int rc;
 724
 725        rc = otx2_flow_free_all_resources(hw);
 726        if (rc) {
 727                otx2_err("Error when deleting NPC MCAM entries "
 728                                ", counters");
 729                rte_flow_error_set(error, EIO,
 730                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 731                                   NULL,
 732                                   "Failed to flush filter");
 733                return -rte_errno;
 734        }
 735
 736        return 0;
 737}
 738
/*
 * rte_flow isolate handler: not implemented for this driver; always
 * reports ENOTSUP through *error.
 */
static int
otx2_flow_isolate(struct rte_eth_dev *dev __rte_unused,
		  int enable __rte_unused,
		  struct rte_flow_error *error)
{
	/*
	 * If we support, we need to un-install the default mcam
	 * entry for this port.
	 */

	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL,
			   "Flow isolation not supported");

	return -rte_errno;
}
 756
 757static int
 758otx2_flow_query(struct rte_eth_dev *dev,
 759                struct rte_flow *flow,
 760                const struct rte_flow_action *action,
 761                void *data,
 762                struct rte_flow_error *error)
 763{
 764        struct otx2_eth_dev *hw = dev->data->dev_private;
 765        struct rte_flow_query_count *query = data;
 766        struct otx2_mbox *mbox = hw->mbox;
 767        const char *errmsg = NULL;
 768        int errcode = ENOTSUP;
 769        int rc;
 770
 771        if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
 772                errmsg = "Only COUNT is supported in query";
 773                goto err_exit;
 774        }
 775
 776        if (flow->ctr_id == NPC_COUNTER_NONE) {
 777                errmsg = "Counter is not available";
 778                goto err_exit;
 779        }
 780
 781        rc = otx2_flow_mcam_read_counter(mbox, flow->ctr_id, &query->hits);
 782        if (rc != 0) {
 783                errcode = EIO;
 784                errmsg = "Error reading flow counter";
 785                goto err_exit;
 786        }
 787        query->hits_set = 1;
 788        query->bytes_set = 0;
 789
 790        if (query->reset)
 791                rc = otx2_flow_mcam_clear_counter(mbox, flow->ctr_id);
 792        if (rc != 0) {
 793                errcode = EIO;
 794                errmsg = "Error clearing flow counter";
 795                goto err_exit;
 796        }
 797
 798        return 0;
 799
 800err_exit:
 801        rte_flow_error_set(error, errcode,
 802                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 803                           NULL,
 804                           errmsg);
 805        return -rte_errno;
 806}
 807
 808static int
 809otx2_flow_dev_dump(struct rte_eth_dev *dev,
 810                  struct rte_flow *flow, FILE *file,
 811                  struct rte_flow_error *error)
 812{
 813        struct otx2_eth_dev *hw = dev->data->dev_private;
 814        struct otx2_flow_list *list;
 815        struct rte_flow *flow_iter;
 816        uint32_t max_prio, i;
 817
 818        if (file == NULL) {
 819                rte_flow_error_set(error, EINVAL,
 820                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 821                                   NULL,
 822                                   "Invalid file");
 823                return -EINVAL;
 824        }
 825        if (flow != NULL) {
 826                rte_flow_error_set(error, EINVAL,
 827                                   RTE_FLOW_ERROR_TYPE_HANDLE,
 828                                   NULL,
 829                                   "Invalid argument");
 830                return -EINVAL;
 831        }
 832
 833        max_prio = hw->npc_flow.flow_max_priority;
 834
 835        for (i = 0; i < max_prio; i++) {
 836                list = &hw->npc_flow.flow_list[i];
 837
 838                /* List in ascending order of mcam entries */
 839                TAILQ_FOREACH(flow_iter, list, next) {
 840                        otx2_flow_dump(file, hw, flow_iter);
 841                }
 842        }
 843
 844        return 0;
 845}
 846
/* rte_flow driver callbacks implemented by this PMD; returned to the
 * generic rte_flow layer so applications can manage NPC MCAM flows.
 */
const struct rte_flow_ops otx2_flow_ops = {
	.validate = otx2_flow_validate,
	.create = otx2_flow_create,
	.destroy = otx2_flow_destroy,
	.flush = otx2_flow_flush,
	.query = otx2_flow_query,
	.isolate = otx2_flow_isolate,	/* stub: always returns ENOTSUP */
	.dev_dump = otx2_flow_dev_dump,
};
 856
 857static int
 858flow_supp_key_len(uint32_t supp_mask)
 859{
 860        int nib_count = 0;
 861        while (supp_mask) {
 862                nib_count++;
 863                supp_mask &= (supp_mask - 1);
 864        }
 865        return nib_count * 4;
 866}
 867
 868/* Refer HRM register:
 869 * NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG
 870 * and
 871 * NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG
 872 **/
 873#define BYTESM1_SHIFT   16
 874#define HDR_OFF_SHIFT   8
 875static void
 876flow_update_kex_info(struct npc_xtract_info *xtract_info,
 877                     uint64_t val)
 878{
 879        xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1;
 880        xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff;
 881        xtract_info->key_off = val & 0x3f;
 882        xtract_info->enable = ((val >> 7) & 0x1);
 883        xtract_info->flags_enable = ((val >> 6) & 0x1);
 884}
 885
/* Cache the AF-provided MKEX (MCAM key extraction) profile into the
 * per-device npc flow state so the parser can consult it locally.
 */
static void
flow_process_mkex_cfg(struct otx2_npc_flow_info *npc,
		      struct npc_get_kex_cfg_rsp *kex_rsp)
{
	volatile uint64_t (*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT]
		[NPC_MAX_LD];
	struct npc_xtract_info *x_info = NULL;
	int lid, lt, ld, fl, ix;
	otx2_dxcfg_t *p;
	uint64_t keyw;
	uint64_t val;

	/* Lower 31 bits of the keyx cfg carry the per-nibble "supported"
	 * mask for each direction; key length follows from its bit count.
	 */
	npc->keyx_supp_nmask[NPC_MCAM_RX] =
		kex_rsp->rx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_supp_nmask[NPC_MCAM_TX] =
		kex_rsp->tx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_len[NPC_MCAM_RX] =
		flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]);
	npc->keyx_len[NPC_MCAM_TX] =
		flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]);

	/* Bits 34:32 select the key width mode for each direction. */
	keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_RX] = keyw;
	keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_TX] = keyw;

	/* Update KEX_LD_FLAG: per-interface, per-LD, per-flag extract cfg */
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (ld = 0; ld < NPC_MAX_LD; ld++) {
			for (fl = 0; fl < NPC_MAX_LFL; fl++) {
				x_info =
				    &npc->prx_fxcfg[ix][ld][fl].xtract[0];
				val = kex_rsp->intf_ld_flags[ix][ld][fl];
				flow_update_kex_info(x_info, val);
			}
		}
	}

	/* Update LID, LT and LDATA cfg */
	p = &npc->prx_dxcfg;
	/* View the flat response buffer as the 4-D register array so the
	 * nested loops below can index it directly.
	 */
	q = (volatile uint64_t (*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])
			(&kex_rsp->intf_lid_lt_ld);
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (lid = 0; lid < NPC_MAX_LID; lid++) {
			for (lt = 0; lt < NPC_MAX_LT; lt++) {
				for (ld = 0; ld < NPC_MAX_LD; ld++) {
					x_info = &(*p)[ix][lid][lt].xtract[ld];
					val = (*q)[ix][lid][lt][ld];
					flow_update_kex_info(x_info, val);
				}
			}
		}
	}
	/* Update LDATA Flags cfg */
	npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0];
	npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1];
}
 943
 944static struct otx2_idev_kex_cfg *
 945flow_intra_dev_kex_cfg(void)
 946{
 947        static const char name[] = "octeontx2_intra_device_kex_conf";
 948        struct otx2_idev_kex_cfg *idev;
 949        const struct rte_memzone *mz;
 950
 951        mz = rte_memzone_lookup(name);
 952        if (mz)
 953                return mz->addr;
 954
 955        /* Request for the first time */
 956        mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_kex_cfg),
 957                                         SOCKET_ID_ANY, 0, OTX2_ALIGN);
 958        if (mz) {
 959                idev = mz->addr;
 960                rte_atomic16_set(&idev->kex_refcnt, 0);
 961                return idev;
 962        }
 963        return NULL;
 964}
 965
 966static int
 967flow_fetch_kex_cfg(struct otx2_eth_dev *dev)
 968{
 969        struct otx2_npc_flow_info *npc = &dev->npc_flow;
 970        struct npc_get_kex_cfg_rsp *kex_rsp;
 971        struct otx2_mbox *mbox = dev->mbox;
 972        char mkex_pfl_name[MKEX_NAME_LEN];
 973        struct otx2_idev_kex_cfg *idev;
 974        int rc = 0;
 975
 976        idev = flow_intra_dev_kex_cfg();
 977        if (!idev)
 978                return -ENOMEM;
 979
 980        /* Is kex_cfg read by any another driver? */
 981        if (rte_atomic16_add_return(&idev->kex_refcnt, 1) == 1) {
 982                /* Call mailbox to get key & data size */
 983                (void)otx2_mbox_alloc_msg_npc_get_kex_cfg(mbox);
 984                otx2_mbox_msg_send(mbox, 0);
 985                rc = otx2_mbox_get_rsp(mbox, 0, (void *)&kex_rsp);
 986                if (rc) {
 987                        otx2_err("Failed to fetch NPC keyx config");
 988                        goto done;
 989                }
 990                memcpy(&idev->kex_cfg, kex_rsp,
 991                       sizeof(struct npc_get_kex_cfg_rsp));
 992        }
 993
 994        otx2_mbox_memcpy(mkex_pfl_name,
 995                         idev->kex_cfg.mkex_pfl_name, MKEX_NAME_LEN);
 996
 997        strlcpy((char *)dev->mkex_pfl_name,
 998                mkex_pfl_name, sizeof(dev->mkex_pfl_name));
 999
1000        flow_process_mkex_cfg(npc, &idev->kex_cfg);
1001
1002done:
1003        return rc;
1004}
1005
1006#define OTX2_MCAM_TOT_ENTRIES_96XX (4096)
1007#define OTX2_MCAM_TOT_ENTRIES_98XX (16384)
1008
1009static int otx2_mcam_tot_entries(struct otx2_eth_dev *dev)
1010{
1011        if (otx2_dev_is_98xx(dev))
1012                return OTX2_MCAM_TOT_ENTRIES_98XX;
1013        else
1014                return OTX2_MCAM_TOT_ENTRIES_96XX;
1015}
1016
1017int
1018otx2_flow_init(struct otx2_eth_dev *hw)
1019{
1020        uint8_t *mem = NULL, *nix_mem = NULL, *npc_mem = NULL;
1021        struct otx2_npc_flow_info *npc = &hw->npc_flow;
1022        uint32_t bmap_sz, tot_mcam_entries = 0;
1023        int rc = 0, idx;
1024
1025        rc = flow_fetch_kex_cfg(hw);
1026        if (rc) {
1027                otx2_err("Failed to fetch NPC keyx config from idev");
1028                return rc;
1029        }
1030
1031        rte_atomic32_init(&npc->mark_actions);
1032        npc->vtag_actions = 0;
1033
1034        tot_mcam_entries = otx2_mcam_tot_entries(hw);
1035        npc->mcam_entries = tot_mcam_entries >> npc->keyw[NPC_MCAM_RX];
1036        /* Free, free_rev, live and live_rev entries */
1037        bmap_sz = rte_bitmap_get_memory_footprint(npc->mcam_entries);
1038        mem = rte_zmalloc(NULL, 4 * bmap_sz * npc->flow_max_priority,
1039                          RTE_CACHE_LINE_SIZE);
1040        if (mem == NULL) {
1041                otx2_err("Bmap alloc failed");
1042                rc = -ENOMEM;
1043                return rc;
1044        }
1045
1046        npc->flow_entry_info = rte_zmalloc(NULL, npc->flow_max_priority
1047                                           * sizeof(struct otx2_mcam_ents_info),
1048                                           0);
1049        if (npc->flow_entry_info == NULL) {
1050                otx2_err("flow_entry_info alloc failed");
1051                rc = -ENOMEM;
1052                goto err;
1053        }
1054
1055        npc->free_entries = rte_zmalloc(NULL, npc->flow_max_priority
1056                                        * sizeof(struct rte_bitmap *),
1057                                        0);
1058        if (npc->free_entries == NULL) {
1059                otx2_err("free_entries alloc failed");
1060                rc = -ENOMEM;
1061                goto err;
1062        }
1063
1064        npc->free_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
1065                                        * sizeof(struct rte_bitmap *),
1066                                        0);
1067        if (npc->free_entries_rev == NULL) {
1068                otx2_err("free_entries_rev alloc failed");
1069                rc = -ENOMEM;
1070                goto err;
1071        }
1072
1073        npc->live_entries = rte_zmalloc(NULL, npc->flow_max_priority
1074                                        * sizeof(struct rte_bitmap *),
1075                                        0);
1076        if (npc->live_entries == NULL) {
1077                otx2_err("live_entries alloc failed");
1078                rc = -ENOMEM;
1079                goto err;
1080        }
1081
1082        npc->live_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
1083                                        * sizeof(struct rte_bitmap *),
1084                                        0);
1085        if (npc->live_entries_rev == NULL) {
1086                otx2_err("live_entries_rev alloc failed");
1087                rc = -ENOMEM;
1088                goto err;
1089        }
1090
1091        npc->flow_list = rte_zmalloc(NULL, npc->flow_max_priority
1092                                        * sizeof(struct otx2_flow_list),
1093                                        0);
1094        if (npc->flow_list == NULL) {
1095                otx2_err("flow_list alloc failed");
1096                rc = -ENOMEM;
1097                goto err;
1098        }
1099
1100        npc_mem = mem;
1101        for (idx = 0; idx < npc->flow_max_priority; idx++) {
1102                TAILQ_INIT(&npc->flow_list[idx]);
1103
1104                npc->free_entries[idx] =
1105                        rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
1106                mem += bmap_sz;
1107
1108                npc->free_entries_rev[idx] =
1109                        rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
1110                mem += bmap_sz;
1111
1112                npc->live_entries[idx] =
1113                        rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
1114                mem += bmap_sz;
1115
1116                npc->live_entries_rev[idx] =
1117                        rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
1118                mem += bmap_sz;
1119
1120                npc->flow_entry_info[idx].free_ent = 0;
1121                npc->flow_entry_info[idx].live_ent = 0;
1122                npc->flow_entry_info[idx].max_id = 0;
1123                npc->flow_entry_info[idx].min_id = ~(0);
1124        }
1125
1126        npc->rss_grps = NIX_RSS_GRPS;
1127
1128        bmap_sz = rte_bitmap_get_memory_footprint(npc->rss_grps);
1129        nix_mem = rte_zmalloc(NULL, bmap_sz,  RTE_CACHE_LINE_SIZE);
1130        if (nix_mem == NULL) {
1131                otx2_err("Bmap alloc failed");
1132                rc = -ENOMEM;
1133                goto err;
1134        }
1135
1136        npc->rss_grp_entries = rte_bitmap_init(npc->rss_grps, nix_mem, bmap_sz);
1137
1138        /* Group 0 will be used for RSS,
1139         * 1 -7 will be used for rte_flow RSS action
1140         */
1141        rte_bitmap_set(npc->rss_grp_entries, 0);
1142
1143        return 0;
1144
1145err:
1146        if (npc->flow_list)
1147                rte_free(npc->flow_list);
1148        if (npc->live_entries_rev)
1149                rte_free(npc->live_entries_rev);
1150        if (npc->live_entries)
1151                rte_free(npc->live_entries);
1152        if (npc->free_entries_rev)
1153                rte_free(npc->free_entries_rev);
1154        if (npc->free_entries)
1155                rte_free(npc->free_entries);
1156        if (npc->flow_entry_info)
1157                rte_free(npc->flow_entry_info);
1158        if (npc_mem)
1159                rte_free(npc_mem);
1160        return rc;
1161}
1162
1163int
1164otx2_flow_fini(struct otx2_eth_dev *hw)
1165{
1166        struct otx2_npc_flow_info *npc = &hw->npc_flow;
1167        int rc;
1168
1169        rc = otx2_flow_free_all_resources(hw);
1170        if (rc) {
1171                otx2_err("Error when deleting NPC MCAM entries, counters");
1172                return rc;
1173        }
1174
1175        if (npc->flow_list)
1176                rte_free(npc->flow_list);
1177        if (npc->live_entries_rev)
1178                rte_free(npc->live_entries_rev);
1179        if (npc->live_entries)
1180                rte_free(npc->live_entries);
1181        if (npc->free_entries_rev)
1182                rte_free(npc->free_entries_rev);
1183        if (npc->free_entries)
1184                rte_free(npc->free_entries);
1185        if (npc->flow_entry_info)
1186                rte_free(npc->flow_entry_info);
1187
1188        return 0;
1189}
1190