dpdk/drivers/net/hns3/hns3_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_flow_driver.h>
#include <rte_io.h>
#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"

/* Default hash keys */
static uint8_t hns3_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };

/* Special filter ID for non-specific packet flagging. Don't change the value. */
#define HNS3_MAX_FILTER_ID      0x0FFF

#define ETHER_TYPE_MASK         0xFFFF
#define IPPROTO_MASK            0xFF
#define TUNNEL_TYPE_MASK        0xFFFF

#define HNS3_TUNNEL_TYPE_VXLAN          0x12B5
#define HNS3_TUNNEL_TYPE_VXLAN_GPE      0x12B6
#define HNS3_TUNNEL_TYPE_GENEVE         0x17C1
#define HNS3_TUNNEL_TYPE_NVGRE          0x6558

static enum rte_flow_item_type first_items[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
        RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};

static enum rte_flow_item_type L2_next_items[] = {
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_IPV6
};

static enum rte_flow_item_type L3_next_items[] = {
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ICMP
};

static enum rte_flow_item_type L4_next_items[] = {
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
        RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};

static enum rte_flow_item_type tunnel_next_items[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN
};

struct items_step_mngr {
        enum rte_flow_item_type *items;
        int count;
};

static inline void
net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                dst[i] = rte_be_to_cpu_32(src[i]);
}

/*
 * This function is used to find the RSS general action.
 * 1. RSS is used to spread packets among several queues; the flow API
 *    provides struct rte_flow_action_rss, and the user can configure its
 *    fields, such as func/level/types/key/queue, to control the RSS function.
 * 2. The flow API also supports queue region configuration for hns3. It is
 *    implemented by FDIR + RSS in hns3 hardware: the user can create one FDIR
 *    rule whose action is an RSS queue region.
 * 3. When the action is RSS, the following rule distinguishes the two cases:
 *    Case 1: the pattern has ETH and the action's queue_num > 0, indicating a
 *            queue region configuration.
 *    Other cases: a general RSS action.
 */
static const struct rte_flow_action *
hns3_find_rss_general_action(const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[])
{
        const struct rte_flow_action *act = NULL;
        const struct hns3_rss_conf *rss;
        bool have_eth = false;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
                        act = actions;
                        break;
                }
        }
        if (!act)
                return NULL;

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
                        have_eth = true;
                        break;
                }
        }

        rss = act->conf;
        if (have_eth && rss->conf.queue_num) {
                /*
                 * The pattern has ETH and the action's queue_num > 0,
                 * indicating this is a queue region configuration.
                 * Because queue region is implemented by FDIR + RSS in hns3
                 * hardware, it needs to enter the FDIR process, so return
                 * NULL here to avoid entering the RSS process.
                 */
                return NULL;
        }

        return act;
}
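/*
 * Example of how an application selects each path above. This is a minimal
 * hypothetical sketch, not part of this driver; it assumes a configured port
 * and elides error handling. Because the pattern contains ETH and
 * queue_num > 0, hns3_find_rss_general_action() returns NULL and the rule is
 * treated as a queue region (FDIR) rule; with no ETH item or queue_num == 0,
 * the same action would be handled as general RSS.
 *
 *      uint16_t queues[4] = { 0, 1, 2, 3 };
 *      struct rte_flow_action_rss rss = {
 *              .queue_num = 4,
 *              .queue = queues,
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 */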

static inline struct hns3_flow_counter *
hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_flow_counter *cnt;

        LIST_FOREACH(cnt, &pf->flow_counters, next) {
                if (cnt->id == id)
                        return cnt;
        }
        return NULL;
}

static int
hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
                 struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_flow_counter *cnt;
        uint64_t value;
        int ret;

        cnt = hns3_counter_lookup(dev, id);
        if (cnt) {
                if (!cnt->shared || cnt->shared != shared)
                        return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                cnt,
                                "Counter id is used, shared flag not match");
                cnt->ref_cnt++;
                return 0;
        }

        /* Clear the counter by read ops because the counter is read-clear */
        ret = hns3_get_count(hw, id, &value);
        if (ret)
                return rte_flow_error_set(error, EIO,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Clear counter failed!");

        cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
        if (cnt == NULL)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
                                          "Alloc mem for counter failed");
        cnt->id = id;
        cnt->shared = shared;
        cnt->ref_cnt = 1;
        cnt->hits = 0;
        LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
        return 0;
}

static int
hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                   struct rte_flow_query_count *qc,
                   struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_flow_counter *cnt;
        uint64_t value;
        int ret;

        /* FDIR is available only in PF driver */
        if (hns->is_vf)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Fdir is not supported in VF");
        cnt = hns3_counter_lookup(dev, flow->counter_id);
        if (cnt == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Can't find counter id");

        ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
        if (ret) {
                rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Read counter failed.");
                return ret;
        }
        qc->hits_set = 1;
        qc->hits = value;
        qc->bytes_set = 0;
        qc->bytes = 0;

        return 0;
}
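/*
 * Example of querying the counter above from an application. A minimal
 * hypothetical sketch, assuming "flow" was created on a PF port with a COUNT
 * action and headers/error handling are elided; only hits are reported,
 * bytes stay zero as set above.
 *
 *      struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *      struct rte_flow_query_count qc = { .reset = 0 };
 *      struct rte_flow_error err;
 *
 *      if (rte_flow_query(port_id, flow, &count, &qc, &err) == 0 &&
 *          qc.hits_set)
 *              printf("flow hits: %" PRIu64 "\n", qc.hits);
 */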

static int
hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_flow_counter *cnt;

        cnt = hns3_counter_lookup(dev, id);
        if (cnt == NULL) {
                hns3_err(hw, "Can't find available counter to release");
                return -EINVAL;
        }
        cnt->ref_cnt--;
        if (cnt->ref_cnt == 0) {
                LIST_REMOVE(cnt, next);
                rte_free(cnt);
        }
        return 0;
}

static void
hns3_counter_flush(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_flow_counter *cnt_ptr;

        cnt_ptr = LIST_FIRST(&pf->flow_counters);
        while (cnt_ptr) {
                LIST_REMOVE(cnt_ptr, next);
                rte_free(cnt_ptr);
                cnt_ptr = LIST_FIRST(&pf->flow_counters);
        }
}

static int
hns3_handle_action_queue(struct rte_eth_dev *dev,
                         const struct rte_flow_action *action,
                         struct hns3_fdir_rule *rule,
                         struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_queue *queue;
        struct hns3_hw *hw = &hns->hw;

        queue = (const struct rte_flow_action_queue *)action->conf;
        if (queue->index >= hw->used_rx_queues) {
                hns3_err(hw, "queue ID (%u) is greater than the number of "
                          "available queues (%u) in the driver.",
                          queue->index, hw->used_rx_queues);
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          action, "Invalid queue ID in PF");
        }

        rule->queue_id = queue->index;
        rule->nb_queues = 1;
        rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
        return 0;
}

static int
hns3_handle_action_queue_region(struct rte_eth_dev *dev,
                                const struct rte_flow_action *action,
                                struct hns3_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_rss *conf = action->conf;
        struct hns3_hw *hw = &hns->hw;
        uint16_t idx;

        if (!hns3_dev_fd_queue_region_supported(hw))
                return rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_ACTION, action,
                        "Queue region configuration is not supported!");

        if ((!rte_is_power_of_2(conf->queue_num)) ||
                conf->queue_num > hw->rss_size_max ||
                conf->queue[0] >= hw->used_rx_queues ||
                conf->queue[0] + conf->queue_num > hw->used_rx_queues) {
                return rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
                        "Invalid start queue ID or queue num! The start queue "
                        "ID must be valid, and the queue num must be a power "
                        "of 2 and <= rss_size_max.");
        }

        for (idx = 1; idx < conf->queue_num; idx++) {
                if (conf->queue[idx] != conf->queue[idx - 1] + 1)
                        return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
                                "Invalid queue ID sequence! The queue IDs "
                                "must increase continuously.");
        }

        rule->queue_id = conf->queue[0];
        rule->nb_queues = conf->queue_num;
        rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
        return 0;
}
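/*
 * Example of an RSS action that passes the queue region checks above: the
 * queue list is contiguous, its length (4) is a power of 2 and must not
 * exceed rss_size_max, and the whole range must lie inside the used Rx
 * queues. A minimal hypothetical sketch; a list such as { 4, 6, 7, 8 } would
 * be rejected because the IDs must increase continuously.
 *
 *      uint16_t region[4] = { 4, 5, 6, 7 };
 *      struct rte_flow_action_rss rss = {
 *              .queue_num = RTE_DIM(region),
 *              .queue = region,
 *      };
 */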

/*
 * Parse the actions and fill the rule.
 * The actions are validated as they are parsed and copied.
 *
 * @param actions[in]
 * @param rule[out]
 *   NIC-specific actions derived from the actions.
 * @param error[out]
 */
static int
hns3_handle_actions(struct rte_eth_dev *dev,
                    const struct rte_flow_action actions[],
                    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_count *act_count;
        const struct rte_flow_action_mark *mark;
        struct hns3_pf *pf = &hns->pf;
        uint32_t counter_num;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = hns3_handle_action_queue(dev, actions, rule,
                                                       error);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        rule->action = HNS3_FD_ACTION_DROP_PACKET;
                        break;
                /*
                 * Here the RSS action's real effect is a queue region.
                 * Queue region is implemented by FDIR + RSS in hns3 hardware:
                 * the FDIR action selects one queue region (start_queue_id
                 * and queue_num), then RSS spreads packets over that region
                 * by the RSS algorithm.
                 */
                case RTE_FLOW_ACTION_TYPE_RSS:
                        ret = hns3_handle_action_queue_region(dev, actions,
                                                              rule, error);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark =
                            (const struct rte_flow_action_mark *)actions->conf;
                        if (mark->id >= HNS3_MAX_FILTER_ID)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                                actions,
                                                "Invalid Mark ID");
                        rule->fd_id = mark->id;
                        rule->flags |= HNS3_RULE_FLAG_FDID;
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        rule->fd_id = HNS3_MAX_FILTER_ID;
                        rule->flags |= HNS3_RULE_FLAG_FDID;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        act_count =
                            (const struct rte_flow_action_count *)actions->conf;
                        counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
                        if (act_count->id >= counter_num)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                                actions,
                                                "Invalid counter id");
                        rule->act_cnt = *act_count;
                        rule->flags |= HNS3_RULE_FLAG_COUNTER;
                        break;
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "Unsupported action");
                }
        }

        return 0;
}
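/*
 * Example of an action list accepted by the loop above. A minimal
 * hypothetical sketch, assuming the mark ID is below HNS3_MAX_FILTER_ID and
 * the counter ID is below the stage-1 counter number reported by the
 * firmware.
 *
 *      struct rte_flow_action_mark mark = { .id = 0x123 };
 *      struct rte_flow_action_count count = { .id = 0 };
 *      struct rte_flow_action_queue queue = { .index = 2 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *              { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 */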

static int
hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
        if (!attr->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          attr, "Ingress can't be zero");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          attr, "Egress is not supported");
        if (attr->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          attr, "Transfer is not supported");
        if (attr->priority)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          attr, "Priority is not supported");
        if (attr->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          attr, "Group is not supported");
        return 0;
}
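/*
 * Example of the only attribute combination hns3_check_attr() accepts, as a
 * minimal sketch: ingress must be set, and group, priority, egress and
 * transfer must all stay zero.
 *
 *      struct rte_flow_attr attr = {
 *              .group = 0,
 *              .priority = 0,
 *              .ingress = 1,
 *              .egress = 0,
 *              .transfer = 0,
 *      };
 */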

static int
hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error __rte_unused)
{
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                eth_mask = item->mask;
                if (eth_mask->type) {
                        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
                        rule->key_conf.mask.ether_type =
                            rte_be_to_cpu_16(eth_mask->type);
                }
                if (!rte_is_zero_ether_addr(&eth_mask->src)) {
                        hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
                        memcpy(rule->key_conf.mask.src_mac,
                               eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
                }
                if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
                        hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
                        memcpy(rule->key_conf.mask.dst_mac,
                               eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
                }
        }

        eth_spec = item->spec;
        rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
        memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        return 0;
}

static int
hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_vlan *vlan_spec;
        const struct rte_flow_item_vlan *vlan_mask;

        rule->key_conf.vlan_num++;
        if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Vlan_num is more than 2");

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                vlan_mask = item->mask;
                if (vlan_mask->tci) {
                        if (rule->key_conf.vlan_num == 1) {
                                hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
                                             1);
                                rule->key_conf.mask.vlan_tag1 =
                                    rte_be_to_cpu_16(vlan_mask->tci);
                        } else {
                                hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
                                             1);
                                rule->key_conf.mask.vlan_tag2 =
                                    rte_be_to_cpu_16(vlan_mask->tci);
                        }
                }
        }

        vlan_spec = item->spec;
        if (rule->key_conf.vlan_num == 1)
                rule->key_conf.spec.vlan_tag1 =
                    rte_be_to_cpu_16(vlan_spec->tci);
        else
                rule->key_conf.spec.vlan_tag2 =
                    rte_be_to_cpu_16(vlan_spec->tci);
        return 0;
}

static bool
hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
{
        if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
            ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
            ipv4_mask->hdr.hdr_checksum)
                return false;

        return true;
}

static int
hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;

        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
        rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
        rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                ipv4_mask = item->mask;
                if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst ip, tos, proto in IPV4");
                }

                if (ipv4_mask->hdr.src_addr) {
                        hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
                        rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
                            rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
                }

                if (ipv4_mask->hdr.dst_addr) {
                        hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
                        rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
                            rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
                }

                if (ipv4_mask->hdr.type_of_service) {
                        hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
                        rule->key_conf.mask.ip_tos =
                            ipv4_mask->hdr.type_of_service;
                }

                if (ipv4_mask->hdr.next_proto_id) {
                        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
                        rule->key_conf.mask.ip_proto =
                            ipv4_mask->hdr.next_proto_id;
                }
        }

        ipv4_spec = item->spec;
        rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
            rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
        rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
            rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
        rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
        rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
        return 0;
}
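/*
 * Example of an IPv4 item that passes hns3_check_ipv4_mask_supported(): a
 * minimal hypothetical sketch matching dst network 192.168.1.0/24 and
 * protocol UDP. Masking any other IPv4 header field (e.g. TTL or checksum)
 * would be rejected above.
 *
 *      struct rte_flow_item_ipv4 ipv4_spec = {
 *              .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 0)),
 *              .hdr.next_proto_id = IPPROTO_UDP,
 *      };
 *      struct rte_flow_item_ipv4 ipv4_mask = {
 *              .hdr.dst_addr = RTE_BE32(0xFFFFFF00),
 *              .hdr.next_proto_id = IPPROTO_MASK,
 *      };
 *      struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *              .spec = &ipv4_spec,
 *              .mask = &ipv4_mask,
 *      };
 */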

static int
hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv6 *ipv6_spec;
        const struct rte_flow_item_ipv6 *ipv6_mask;

        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
        rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
        rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                ipv6_mask = item->mask;
                if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
                    ipv6_mask->hdr.hop_limits) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst ip, proto in IPV6");
                }
                net_addr_to_host(rule->key_conf.mask.src_ip,
                                 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
                                 IP_ADDR_LEN);
                net_addr_to_host(rule->key_conf.mask.dst_ip,
                                 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
                                 IP_ADDR_LEN);
                rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
                if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
                        hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
                if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
                        hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
                if (ipv6_mask->hdr.proto)
                        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        }

        ipv6_spec = item->spec;
        net_addr_to_host(rule->key_conf.spec.src_ip,
                         (const rte_be32_t *)ipv6_spec->hdr.src_addr,
                         IP_ADDR_LEN);
        net_addr_to_host(rule->key_conf.spec.dst_ip,
                         (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
                         IP_ADDR_LEN);
        rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;

        return 0;
}

static bool
hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
{
        if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
            tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp)
                return false;

        return true;
}

static int
hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_TCP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                tcp_mask = item->mask;
                if (!hns3_check_tcp_mask_supported(tcp_mask)) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in TCP");
                }

                if (tcp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(tcp_mask->hdr.src_port);
                }
                if (tcp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
                }
        }

        tcp_spec = item->spec;
        rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);

        return 0;
}

static int
hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error)
{
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_UDP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                udp_mask = item->mask;
                if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in UDP");
                }
                if (udp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(udp_mask->hdr.src_port);
                }
                if (udp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(udp_mask->hdr.dst_port);
                }
        }

        udp_spec = item->spec;
        rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);

        return 0;
}

static int
hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                sctp_mask = item->mask;
                if (sctp_mask->hdr.cksum)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in SCTP");
                if (sctp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(sctp_mask->hdr.src_port);
                }
                if (sctp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
                }
                if (sctp_mask->hdr.tag) {
                        hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
                        rule->key_conf.mask.sctp_tag =
                            rte_be_to_cpu_32(sctp_mask->hdr.tag);
                }
        }

        sctp_spec = item->spec;
        rule->key_conf.spec.src_port =
            rte_be_to_cpu_16(sctp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port =
            rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
        rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);

        return 0;
}

/*
 * Check items before the tunnel item, save the inner configs to the outer
 * configs, and clear the inner configs.
 * The key consists of two parts: meta_data and tuple keys.
 * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and
 * tunnel packet(1bit).
 * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
 * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
 * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
 * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
 * vlantag2(16bit) and sctp-tag(32bit).
 */
static int
hns3_handle_tunnel(const struct rte_flow_item *item,
                   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
        /* check eth config */
        if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Outer eth mac is unsupported");
        if (rule->input_set & BIT(INNER_ETH_TYPE)) {
                hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
                rule->key_conf.spec.outer_ether_type =
                    rule->key_conf.spec.ether_type;
                rule->key_conf.mask.outer_ether_type =
                    rule->key_conf.mask.ether_type;
                hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
                rule->key_conf.spec.ether_type = 0;
                rule->key_conf.mask.ether_type = 0;
        }

        /* check vlan config */
        if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "Outer vlan tags are unsupported");

        /* clear vlan_num for inner vlan select */
        rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
        rule->key_conf.vlan_num = 0;

        /* check L3 config */
        if (rule->input_set &
            (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Outer ip is unsupported");
        if (rule->input_set & BIT(INNER_IP_PROTO)) {
                hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
                rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
                rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
                hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
                rule->key_conf.spec.ip_proto = 0;
                rule->key_conf.mask.ip_proto = 0;
        }

        /* check L4 config */
        if (rule->input_set & BIT(INNER_SCTP_TAG))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Outer sctp tag is unsupported");

        if (rule->input_set & BIT(INNER_SRC_PORT)) {
                hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
                rule->key_conf.spec.outer_src_port =
                    rule->key_conf.spec.src_port;
                rule->key_conf.mask.outer_src_port =
                    rule->key_conf.mask.src_port;
                hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
                rule->key_conf.spec.src_port = 0;
                rule->key_conf.mask.src_port = 0;
        }
        if (rule->input_set & BIT(INNER_DST_PORT)) {
                hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
                rule->key_conf.spec.dst_port = 0;
                rule->key_conf.mask.dst_port = 0;
        }
        return 0;
}

static int
hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                 struct rte_flow_error *error)
{
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
        if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
                rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
        else
                rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        vxlan_mask = item->mask;
        vxlan_spec = item->spec;

        if (vxlan_mask->flags)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Flags are not supported in VxLAN");

        /* VNI must be totally masked or not. */
        if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "VNI must be totally masked or not in VxLAN");
        if (vxlan_mask->vni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
                       VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
               VNI_OR_TNI_LEN);
        return 0;
}

static int
hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                 struct rte_flow_error *error)
{
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;

        hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
        rule->key_conf.spec.outer_proto = IPPROTO_GRE;
        rule->key_conf.mask.outer_proto = IPPROTO_MASK;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
        rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        nvgre_mask = item->mask;
        nvgre_spec = item->spec;

        if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Ver/protocol is not supported in NVGRE");

        /* TNI must be totally masked or not. */
        if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "TNI must be totally masked or not in NVGRE");

        if (nvgre_mask->tni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
                       VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
               VNI_OR_TNI_LEN);

        if (nvgre_mask->flow_id) {
                hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
                rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
        }
        rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
        return 0;
}

static int
hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct rte_flow_error *error)
{
        const struct rte_flow_item_geneve *geneve_spec;
        const struct rte_flow_item_geneve *geneve_mask;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
        rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        geneve_mask = item->mask;
        geneve_spec = item->spec;

        if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Ver/protocol is not supported in GENEVE");
        /* VNI must be totally masked or not. */
        if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "VNI must be totally masked or not in GENEVE");
        if (geneve_mask->vni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
                       VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
               VNI_OR_TNI_LEN);
        return 0;
}

static int
hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct rte_flow_error *error)
{
        int ret;

        if (item->spec == NULL && item->mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Can't configure FDIR with mask "
                                          "but without spec");
        else if (item->spec && (item->mask == NULL))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Tunnel packets must configure "
                                          "with mask");

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                ret = hns3_parse_vxlan(item, rule, error);
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                ret = hns3_parse_nvgre(item, rule, error);
                break;
        case RTE_FLOW_ITEM_TYPE_GENEVE:
                ret = hns3_parse_geneve(item, rule, error);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "Unsupported tunnel type!");
        }
        if (ret)
                return ret;
        return hns3_handle_tunnel(item, rule, error);
}
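/*
 * Example of a VXLAN item accepted by the tunnel parsers above: a minimal
 * hypothetical sketch matching VNI 100. Tunnel items must carry both spec
 * and mask, and the VNI mask must be all-ones or all-zeros.
 *
 *      struct rte_flow_item_vxlan vxlan_spec = {
 *              .vni = { 0x00, 0x00, 0x64 },
 *      };
 *      struct rte_flow_item_vxlan vxlan_mask = {
 *              .vni = { 0xFF, 0xFF, 0xFF },
 *      };
 *      struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *              .spec = &vxlan_spec,
 *              .mask = &vxlan_mask,
 *      };
 */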

static int
hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct items_step_mngr *step_mngr,
                  struct rte_flow_error *error)
{
        int ret;

        if (item->spec == NULL && item->mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Can't configure FDIR with mask "
                                          "but without spec");

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                ret = hns3_parse_eth(item, rule, error);
                step_mngr->items = L2_next_items;
                step_mngr->count = RTE_DIM(L2_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                ret = hns3_parse_vlan(item, rule, error);
                step_mngr->items = L2_next_items;
                step_mngr->count = RTE_DIM(L2_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                ret = hns3_parse_ipv4(item, rule, error);
                step_mngr->items = L3_next_items;
                step_mngr->count = RTE_DIM(L3_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                ret = hns3_parse_ipv6(item, rule, error);
                step_mngr->items = L3_next_items;
                step_mngr->count = RTE_DIM(L3_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                ret = hns3_parse_tcp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = RTE_DIM(L4_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                ret = hns3_parse_udp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = RTE_DIM(L4_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_SCTP:
                ret = hns3_parse_sctp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = RTE_DIM(L4_next_items);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "Unsupported normal type!");
        }

        return ret;
}

static int
hns3_validate_item(const struct rte_flow_item *item,
                   struct items_step_mngr step_mngr,
                   struct rte_flow_error *error)
{
        int i;

        if (item->last)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
                                          "Not supported last point for range");

        for (i = 0; i < step_mngr.count; i++) {
                if (item->type == step_mngr.items[i])
                        break;
        }

        if (i == step_mngr.count) {
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Invalid or missing item");
        }
        return 0;
}

static inline bool
is_tunnel_packet(enum rte_flow_item_type type)
{
        if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
            type == RTE_FLOW_ITEM_TYPE_VXLAN ||
            type == RTE_FLOW_ITEM_TYPE_NVGRE ||
            type == RTE_FLOW_ITEM_TYPE_GENEVE)
                return true;
        return false;
}

/*
 * Parse the flow director rule.
 * The supported PATTERN:
 *   case: non-tunnel packet:
 *     ETH : src-mac, dst-mac, ethertype
 *     VLAN: tag1, tag2
 *     IPv4: src-ip, dst-ip, tos, proto
 *     IPv6: src-ip(last 32 bits of the address), dst-ip(last 32 bits of the
 *           address), proto
 *     UDP : src-port, dst-port
 *     TCP : src-port, dst-port
 *     SCTP: src-port, dst-port, tag
 *   case: tunnel packet:
 *     OUTER-ETH: ethertype
 *     OUTER-L3 : proto
 *     OUTER-L4 : src-port, dst-port
 *     TUNNEL   : vni, flow-id(only valid for NVGRE)
 *     INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
 * The supported ACTION:
 *    QUEUE
 *    DROP
 *    COUNT
 *    MARK: the id range [0, 4094]
 *    FLAG
 *    RSS: only valid if the firmware supports FD_QUEUE_REGION.
 */
static int
hns3_parse_fdir_filter(struct rte_eth_dev *dev,
                       const struct rte_flow_item pattern[],
                       const struct rte_flow_action actions[],
                       struct hns3_fdir_rule *rule,
                       struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_item *item;
        struct items_step_mngr step_mngr;
        int ret;

        /* FDIR is available only in PF driver */
        if (hns->is_vf)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Fdir not supported in VF");

        step_mngr.items = first_items;
        step_mngr.count = RTE_DIM(first_items);
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                ret = hns3_validate_item(item, step_mngr, error);
                if (ret)
                        return ret;

                if (is_tunnel_packet(item->type)) {
                        ret = hns3_parse_tunnel(item, rule, error);
                        if (ret)
                                return ret;
                        step_mngr.items = tunnel_next_items;
                        step_mngr.count = RTE_DIM(tunnel_next_items);
                } else {
                        ret = hns3_parse_normal(item, rule, &step_mngr, error);
                        if (ret)
                                return ret;
                }
        }

        return hns3_handle_actions(dev, actions, rule, error);
}
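/*
 * Example of a complete rule handled by the parser above. A minimal
 * hypothetical application-side sketch (assumes port_id is a PF port, error
 * handling elided): steer traffic with UDP dst port 4789 to Rx queue 1.
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item_udp udp_spec = {
 *              .hdr.dst_port = RTE_BE16(4789),
 *      };
 *      struct rte_flow_item_udp udp_mask = {
 *              .hdr.dst_port = RTE_BE16(0xFFFF),
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                .spec = &udp_spec, .mask = &udp_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action_queue queue = { .index = 1 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_error err;
 *      struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                              actions, &err);
 */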
1204
1205static void
1206hns3_filterlist_flush(struct rte_eth_dev *dev)
1207{
1208        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1209        struct hns3_fdir_rule_ele *fdir_rule_ptr;
1210        struct hns3_rss_conf_ele *rss_filter_ptr;
1211        struct hns3_flow_mem *flow_node;
1212
1213        fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1214        while (fdir_rule_ptr) {
1215                TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1216                rte_free(fdir_rule_ptr);
1217                fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1218        }
1219
1220        rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1221        while (rss_filter_ptr) {
1222                TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1223                rte_free(rss_filter_ptr);
1224                rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1225        }
1226
1227        flow_node = TAILQ_FIRST(&hw->flow_list);
1228        while (flow_node) {
1229                TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1230                rte_free(flow_node->flow);
1231                rte_free(flow_node);
1232                flow_node = TAILQ_FIRST(&hw->flow_list);
1233        }
1234}
1235
1236static bool
1237hns3_action_rss_same(const struct rte_flow_action_rss *comp,
1238                     const struct rte_flow_action_rss *with)
1239{
1240        bool func_is_same;
1241
1242        /*
1243         * When user flush all RSS rule, RSS func is set invalid with
1244         * RTE_ETH_HASH_FUNCTION_MAX. Then the user create a flow after
1245         * flushed, any validate RSS func is different with it before
1246         * flushed. Others, when user create an action RSS with RSS func
1247         * specified RTE_ETH_HASH_FUNCTION_DEFAULT, the func is the same
1248         * between continuous RSS flow.
1249         */
1250        if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
1251                func_is_same = false;
1252        else
1253                func_is_same = with->func ? (comp->func == with->func) : true;
1254
1255        return (func_is_same &&
1256                comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
1257                comp->level == with->level && comp->key_len == with->key_len &&
1258                comp->queue_num == with->queue_num &&
1259                !memcmp(comp->key, with->key, with->key_len) &&
1260                !memcmp(comp->queue, with->queue,
1261                        sizeof(*with->queue) * with->queue_num));
1262}
1263
1264static int
1265hns3_rss_conf_copy(struct hns3_rss_conf *out,
1266                   const struct rte_flow_action_rss *in)
1267{
1268        if (in->key_len > RTE_DIM(out->key) ||
1269            in->queue_num > RTE_DIM(out->queue))
1270                return -EINVAL;
1271        if (in->key == NULL && in->key_len)
1272                return -EINVAL;
1273        out->conf = (struct rte_flow_action_rss) {
1274                .func = in->func,
1275                .level = in->level,
1276                .types = in->types,
1277                .key_len = in->key_len,
1278                .queue_num = in->queue_num,
1279        };
1280        out->conf.queue = memcpy(out->queue, in->queue,
1281                                sizeof(*in->queue) * in->queue_num);
1282        if (in->key)
1283                out->conf.key = memcpy(out->key, in->key, in->key_len);
1284
1285        return 0;
1286}
1287
1288static bool
1289hns3_rss_input_tuple_supported(struct hns3_hw *hw,
1290                               const struct rte_flow_action_rss *rss)
1291{
1292        /*
1293         * Using the src/dst port fields as input to the RSS hash is not
1294         * supported for the following IP packet types:
1295         * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
1296         * Besides, on Kunpeng920 the NIC HW does not support using the
1297         * src/dst port fields in the RSS hash for the IPV6 SCTP packet
1298         * type, whereas Kunpeng930 and later Kunpeng series do support
1299         * hashing on src/dst port fields for IPv6 SCTP packets.
1300         */
1301        if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
1302            (rss->types & ETH_RSS_IP ||
1303            (!hw->rss_info.ipv6_sctp_offload_supported &&
1304            rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
1305                return false;
1306
1307        return true;
1308}
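
    /*
     * Examples (illustrative only) of how the check above behaves:
     *
     *   types = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY;
     *     -> supported: L4 fields are only requested for a TCP tuple.
     *   types = ETH_RSS_IPV4 | ETH_RSS_L4_SRC_ONLY;
     *     -> rejected: ETH_RSS_IPV4 is covered by ETH_RSS_IP.
     *   types = ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY;
     *     -> rejected on Kunpeng920, supported on Kunpeng930 and later.
     */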
1309
1310/*
1311 * This function is used to validate the RSS action configuration.
1312 */
1313static int
1314hns3_parse_rss_filter(struct rte_eth_dev *dev,
1315                      const struct rte_flow_action *actions,
1316                      struct rte_flow_error *error)
1317{
1318        struct hns3_adapter *hns = dev->data->dev_private;
1319        struct hns3_hw *hw = &hns->hw;
1320        struct hns3_rss_conf *rss_conf = &hw->rss_info;
1321        const struct rte_flow_action_rss *rss;
1322        const struct rte_flow_action *act;
1323        uint32_t act_index = 0;
1324        uint16_t n;
1325
1326        NEXT_ITEM_OF_ACTION(act, actions, act_index);
1327        rss = act->conf;
1328
1329        if (rss == NULL) {
1330                return rte_flow_error_set(error, EINVAL,
1331                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1332                                          act, "missing RSS action configuration");
1333        }
1334
1335        if (rss->queue_num > RTE_DIM(rss_conf->queue))
1336                return rte_flow_error_set(error, ENOTSUP,
1337                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1338                                          "configured queue number exceeds "
1339                                          "the queue buffer size the driver supports");
1340
1341        for (n = 0; n < rss->queue_num; n++) {
1342                if (rss->queue[n] < hw->alloc_rss_size)
1343                        continue;
1344                return rte_flow_error_set(error, EINVAL,
1345                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1346                                          "queue id must be less than queue number allocated to a TC");
1347        }
1348
1349        if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
1350                return rte_flow_error_set(error, EINVAL,
1351                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1352                                          act,
1353                                          "Flow types are unsupported by "
1354                                          "hns3's RSS");
1355        if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
1356                return rte_flow_error_set(error, ENOTSUP,
1357                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1358                                          "RSS hash func is not supported");
1359        if (rss->level)
1360                return rte_flow_error_set(error, ENOTSUP,
1361                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1362                                          "a nonzero RSS encapsulation level is not supported");
1363        if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1364                return rte_flow_error_set(error, ENOTSUP,
1365                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1366                                          "RSS hash key must be exactly 40 bytes");
1367
1368        if (!hns3_rss_input_tuple_supported(hw, rss))
1369                return rte_flow_error_set(error, EINVAL,
1370                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1371                                          &rss->types,
1372                                          "input RSS types are not supported");
1373
1374        act_index++;
1375
1376        /* Check that the next non-void action is END */
1377        NEXT_ITEM_OF_ACTION(act, actions, act_index);
1378        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1379                memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
1380                return rte_flow_error_set(error, EINVAL,
1381                                          RTE_FLOW_ERROR_TYPE_ACTION,
1382                                          act, "Not supported action.");
1383        }
1384
1385        return 0;
1386}
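
    /*
     * A minimal sketch (assumed application code, not part of the driver)
     * of an RSS action that passes the validation above: a 40-byte key,
     * encapsulation level 0, queue ids below the per-TC allocation and
     * only types within HNS3_ETH_RSS_SUPPORT. "hash_key" stands for a
     * hypothetical 40-byte array.
     *
     *   static const uint16_t queues[] = { 0, 1, 2, 3 };
     *   struct rte_flow_action_rss rss = {
     *           .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
     *           .level = 0,
     *           .types = ETH_RSS_IP | ETH_RSS_TCP,
     *           .key_len = 40,
     *           .key = hash_key,
     *           .queue_num = RTE_DIM(queues),
     *           .queue = queues,
     *   };
     *   struct rte_flow_action actions[] = {
     *           { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
     *           { .type = RTE_FLOW_ACTION_TYPE_END },
     *   };
     */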
1387
1388static int
1389hns3_disable_rss(struct hns3_hw *hw)
1390{
1391        int ret;
1392
1393        /* Reset the redirection table to queue 0 */
1394        ret = hns3_rss_reset_indir_table(hw);
1395        if (ret)
1396                return ret;
1397
1398        /* Disable RSS */
1399        hw->rss_info.conf.types = 0;
1400        hw->rss_dis_flag = true;
1401
1402        return 0;
1403}
1404
1405static void
1406hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
1407{
1408        if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
1409                hns3_warn(hw, "Default RSS hash key will be used");
1410                rss_conf->key = hns3_hash_key;
1411                rss_conf->key_len = HNS3_RSS_KEY_SIZE;
1412        }
1413}
1414
1415static int
1416hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
1417                         uint8_t *hash_algo)
1418{
1419        enum rte_eth_hash_function algo_func = *func;
1420        switch (algo_func) {
1421        case RTE_ETH_HASH_FUNCTION_DEFAULT:
1422                /* Keep *hash_algo as what it used to be */
1423                algo_func = hw->rss_info.conf.func;
1424                break;
1425        case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1426                *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
1427                break;
1428        case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1429                *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
1430                break;
1431        case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1432                *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
1433                break;
1434        default:
1435                hns3_err(hw, "Invalid RSS algorithm configuration (%d)",
1436                         algo_func);
1437                return -EINVAL;
1438        }
1439        *func = algo_func;
1440
1441        return 0;
1442}
1443
1444static int
1445hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
1446{
1447        struct hns3_rss_tuple_cfg *tuple;
1448        int ret;
1449
1450        hns3_parse_rss_key(hw, rss_config);
1451
1452        ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
1453                                       &hw->rss_info.hash_algo);
1454        if (ret)
1455                return ret;
1456
1457        ret = hns3_rss_set_algo_key(hw, rss_config->key);
1458        if (ret)
1459                return ret;
1460
1461        hw->rss_info.conf.func = rss_config->func;
1462
1463        tuple = &hw->rss_info.rss_tuple_sets;
1464        ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
1465        if (ret)
1466                hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
1467
1468        return ret;
1469}
1470
1471static int
1472hns3_update_indir_table(struct rte_eth_dev *dev,
1473                        const struct rte_flow_action_rss *conf, uint16_t num)
1474{
1475        struct hns3_adapter *hns = dev->data->dev_private;
1476        struct hns3_hw *hw = &hns->hw;
1477        uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
1478        uint16_t j;
1479        uint32_t i;
1480
1481        /* Fill in redirection table */
1482        memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
1483               sizeof(hw->rss_info.rss_indirection_tbl));
1484        for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
1485                j %= num;
1486                if (conf->queue[j] >= hw->alloc_rss_size) {
1487                        hns3_err(hw, "queue id(%u) set to redirection table "
1488                                 "exceeds queue number(%u) allocated to a TC.",
1489                                 conf->queue[j], hw->alloc_rss_size);
1490                        return -EINVAL;
1491                }
1492                indir_tbl[i] = conf->queue[j];
1493        }
1494
1495        return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
1496}
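
    /*
     * Illustrative example (assumed values): with conf->queue = {4, 5, 6},
     * num = 3 and an 8-entry indirection table, the loop above produces
     *
     *   indir_tbl = { 4, 5, 6, 4, 5, 6, 4, 5 }
     *
     * i.e. the configured queues are repeated round-robin across the whole
     * table; any queue id >= hw->alloc_rss_size is rejected beforehand.
     */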
1497
1498static int
1499hns3_config_rss_filter(struct rte_eth_dev *dev,
1500                       const struct hns3_rss_conf *conf, bool add)
1501{
1502        struct hns3_adapter *hns = dev->data->dev_private;
1503        struct hns3_rss_conf_ele *rss_filter_ptr;
1504        struct hns3_hw *hw = &hns->hw;
1505        struct hns3_rss_conf *rss_info;
1506        uint64_t flow_types;
1507        uint16_t num;
1508        int ret;
1509
1510        struct rte_flow_action_rss rss_flow_conf = {
1511                .func = conf->conf.func,
1512                .level = conf->conf.level,
1513                .types = conf->conf.types,
1514                .key_len = conf->conf.key_len,
1515                .queue_num = conf->conf.queue_num,
1516                .key = conf->conf.key_len ?
1517                    (void *)(uintptr_t)conf->conf.key : NULL,
1518                .queue = conf->conf.queue,
1519        };
1520
1521        /* Filter the unsupported flow types */
1522        flow_types = conf->conf.types ?
1523                     rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
1524                     hw->rss_info.conf.types;
1525        if (flow_types != rss_flow_conf.types)
1526                hns3_warn(hw, "modified RSS types based on hardware support, "
1527                              "requested:0x%" PRIx64 " configured:0x%" PRIx64,
1528                          rss_flow_conf.types, flow_types);
1529        /* Update the useful flow types */
1530        rss_flow_conf.types = flow_types;
1531
1532        rss_info = &hw->rss_info;
1533        if (!add) {
1534                if (!conf->valid)
1535                        return 0;
1536
1537                ret = hns3_disable_rss(hw);
1538                if (ret) {
1539                        hns3_err(hw, "RSS disable failed(%d)", ret);
1540                        return ret;
1541                }
1542
1543                if (rss_flow_conf.queue_num) {
1544                        /*
1545                         * Because the queue buffer contents have been reset
1546                         * to 0, rss_info->conf.queue should be set to NULL.
1547                         */
1548                        rss_info->conf.queue = NULL;
1549                        rss_info->conf.queue_num = 0;
1550                }
1551
1552                /* Set the RSS func to invalid after the flush */
1553                rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
1554                return 0;
1555        }
1556
1557        /* Set rx queues to use */
1558        num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
1559        if (rss_flow_conf.queue_num > num)
1560                hns3_warn(hw, "configured queue number %u exceeds Rx queue count, truncated to %u",
1561                          rss_flow_conf.queue_num, num);
1562        hns3_info(hw, "At most %u contiguous PF queues are configured", num);
1563
1564        rte_spinlock_lock(&hw->lock);
1565        if (num) {
1566                ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
1567                if (ret)
1568                        goto rss_config_err;
1569        }
1570
1571        /* Set hash algorithm and flow types by the user's config */
1572        ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1573        if (ret)
1574                goto rss_config_err;
1575
1576        ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
1577        if (ret) {
1578                hns3_err(hw, "RSS config init failed (%d)", ret);
1579                goto rss_config_err;
1580        }
1581
1582        /*
1583         * When a new RSS rule is created, the old rules are overridden and
1584         * marked invalid.
1585         */
1586        TAILQ_FOREACH(rss_filter_ptr, &hw->flow_rss_list, entries)
1587                rss_filter_ptr->filter_info.valid = false;
1588
1589rss_config_err:
1590        rte_spinlock_unlock(&hw->lock);
1591
1592        return ret;
1593}
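
    /*
     * Behaviour summary (informative): with add == true the rule in "conf"
     * is programmed (indirection table, hash algorithm/key, tuple fields)
     * and all previously queued RSS rules are marked invalid; with
     * add == false a still-valid rule is torn down by resetting the
     * indirection table and clearing the enabled flow types.
     */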
1594
1595static int
1596hns3_clear_rss_filter(struct rte_eth_dev *dev)
1597{
1598        struct hns3_adapter *hns = dev->data->dev_private;
1599        struct hns3_rss_conf_ele *rss_filter_ptr;
1600        struct hns3_hw *hw = &hns->hw;
1601        int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
1602        int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
1603        int ret = 0;
1604
1605        rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1606        while (rss_filter_ptr) {
1607                TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1608                ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1609                                             false);
1610                if (ret)
1611                        rss_rule_fail_cnt++;
1612                else
1613                        rss_rule_succ_cnt++;
1614                rte_free(rss_filter_ptr);
1615                rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1616        }
1617
1618        if (rss_rule_fail_cnt) {
1619                hns3_err(hw, "failed to delete all RSS filters, success num = %d, "
1620                             "fail num = %d", rss_rule_succ_cnt,
1621                             rss_rule_fail_cnt);
1622                ret = -EIO;
1623        }
1624
1625        return ret;
1626}
1627
1628int
1629hns3_restore_rss_filter(struct rte_eth_dev *dev)
1630{
1631        struct hns3_adapter *hns = dev->data->dev_private;
1632        struct hns3_hw *hw = &hns->hw;
1633
1634        /* When the user flushes all rules, the RSS rule need not be restored */
1635        if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
1636                return 0;
1637
1638        return hns3_config_rss_filter(dev, &hw->rss_info, true);
1639}
1640
1641static int
1642hns3_flow_parse_rss(struct rte_eth_dev *dev,
1643                    const struct hns3_rss_conf *conf, bool add)
1644{
1645        struct hns3_adapter *hns = dev->data->dev_private;
1646        struct hns3_hw *hw = &hns->hw;
1647        bool ret;
1648
1649        ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
1650        if (ret) {
1651                hns3_err(hw, "duplicate RSS configuration is not allowed");
1652                return -EINVAL;
1653        }
1654
1655        return hns3_config_rss_filter(dev, conf, add);
1656}
1657
1658static int
1659hns3_flow_args_check(const struct rte_flow_attr *attr,
1660                     const struct rte_flow_item pattern[],
1661                     const struct rte_flow_action actions[],
1662                     struct rte_flow_error *error)
1663{
1664        if (pattern == NULL)
1665                return rte_flow_error_set(error, EINVAL,
1666                                          RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1667                                          NULL, "NULL pattern.");
1668
1669        if (actions == NULL)
1670                return rte_flow_error_set(error, EINVAL,
1671                                          RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1672                                          NULL, "NULL action.");
1673
1674        if (attr == NULL)
1675                return rte_flow_error_set(error, EINVAL,
1676                                          RTE_FLOW_ERROR_TYPE_ATTR,
1677                                          NULL, "NULL attribute.");
1678
1679        return hns3_check_attr(attr, error);
1680}
1681
1682/*
1683 * Check if the flow rule is supported by hns3.
1684 * It only checks the format; it does not guarantee that the rule can be
1685 * programmed into the HW, because there may not be enough room for it.
1686 */
1687static int
1688hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1689                   const struct rte_flow_item pattern[],
1690                   const struct rte_flow_action actions[],
1691                   struct rte_flow_error *error)
1692{
1693        struct hns3_fdir_rule fdir_rule;
1694        int ret;
1695
1696        ret = hns3_flow_args_check(attr, pattern, actions, error);
1697        if (ret)
1698                return ret;
1699
1700        if (hns3_find_rss_general_action(pattern, actions))
1701                return hns3_parse_rss_filter(dev, actions, error);
1702
1703        memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1704        return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1705}
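
    /*
     * Application-side sketch of the call that lands here (illustrative;
     * attr, pattern and actions are hypothetical):
     *
     *   struct rte_flow_error err;
     *   struct rte_flow *flow = NULL;
     *
     *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
     *           flow = rte_flow_create(port_id, &attr, pattern, actions,
     *                                  &err);
     *
     * Note that creation may still fail after a successful validation,
     * e.g. when the FDIR table has no free entry left.
     */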
1706
1707/*
1708 * Create a flow rule.
1709 * Theoretically one rule can match more than one filter.
1710 * We let it use the filter it hits first,
1711 * so the sequence matters.
1712 */
1713static struct rte_flow *
1714hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1715                 const struct rte_flow_item pattern[],
1716                 const struct rte_flow_action actions[],
1717                 struct rte_flow_error *error)
1718{
1719        struct hns3_adapter *hns = dev->data->dev_private;
1720        struct hns3_hw *hw = &hns->hw;
1721        const struct hns3_rss_conf *rss_conf;
1722        struct hns3_fdir_rule_ele *fdir_rule_ptr;
1723        struct hns3_rss_conf_ele *rss_filter_ptr;
1724        struct hns3_flow_mem *flow_node;
1725        const struct rte_flow_action *act;
1726        struct rte_flow *flow;
1727        struct hns3_fdir_rule fdir_rule;
1728        int ret;
1729
1730        ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1731        if (ret)
1732                return NULL;
1733
1734        flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1735        if (flow == NULL) {
1736                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1737                                   NULL, "Failed to allocate flow memory");
1738                return NULL;
1739        }
1740        flow_node = rte_zmalloc("hns3 flow node",
1741                                sizeof(struct hns3_flow_mem), 0);
1742        if (flow_node == NULL) {
1743                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1744                                   NULL, "Failed to allocate flow list memory");
1745                rte_free(flow);
1746                return NULL;
1747        }
1748
1749        flow_node->flow = flow;
1750        TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);
1751
1752        act = hns3_find_rss_general_action(pattern, actions);
1753        if (act) {
1754                rss_conf = act->conf;
1755
1756                ret = hns3_flow_parse_rss(dev, rss_conf, true);
1757                if (ret)
1758                        goto err;
1759
1760                rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1761                                             sizeof(struct hns3_rss_conf_ele),
1762                                             0);
1763                if (rss_filter_ptr == NULL) {
1764                        hns3_err(hw,
1765                                 "Failed to allocate hns3_rss_filter memory");
1766                        ret = -ENOMEM;
1767                        goto err;
1768                }
1769                hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
1770                                   &rss_conf->conf);
1771                rss_filter_ptr->filter_info.valid = true;
1772                TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
1773
1774                flow->rule = rss_filter_ptr;
1775                flow->filter_type = RTE_ETH_FILTER_HASH;
1776                return flow;
1777        }
1778
1779        memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1780        ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1781        if (ret)
1782                goto out;
1783
1784        if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1785                ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared,
1786                                       fdir_rule.act_cnt.id, error);
1787                if (ret)
1788                        goto out;
1789
1790                flow->counter_id = fdir_rule.act_cnt.id;
1791        }
1792
1793        fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1794                                    sizeof(struct hns3_fdir_rule_ele),
1795                                    0);
1796        if (fdir_rule_ptr == NULL) {
1797                hns3_err(hw, "failed to allocate fdir_rule memory.");
1798                ret = -ENOMEM;
1799                goto err_fdir;
1800        }
1801
1802        ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1803        if (!ret) {
1804                memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1805                        sizeof(struct hns3_fdir_rule));
1806                TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1807                flow->rule = fdir_rule_ptr;
1808                flow->filter_type = RTE_ETH_FILTER_FDIR;
1809
1810                return flow;
1811        }
1812
1813        rte_free(fdir_rule_ptr);
1814err_fdir:
1815        if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1816                hns3_counter_release(dev, fdir_rule.act_cnt.id);
1817err:
1818        rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1819                           "Failed to create flow");
1820out:
1821        TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1822        rte_free(flow_node);
1823        rte_free(flow);
1824        return NULL;
1825}
1826
1827/* Destroy a flow rule on hns3. */
1828static int
1829hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1830                  struct rte_flow_error *error)
1831{
1832        struct hns3_adapter *hns = dev->data->dev_private;
1833        struct hns3_fdir_rule_ele *fdir_rule_ptr;
1834        struct hns3_rss_conf_ele *rss_filter_ptr;
1835        struct hns3_flow_mem *flow_node;
1836        enum rte_filter_type filter_type;
1837        struct hns3_fdir_rule fdir_rule;
1838        struct hns3_hw *hw = &hns->hw;
1839        int ret;
1840
1841        if (flow == NULL)
1842                return rte_flow_error_set(error, EINVAL,
1843                                          RTE_FLOW_ERROR_TYPE_HANDLE,
1844                                          flow, "Flow is NULL");
1845
1846        filter_type = flow->filter_type;
1847        switch (filter_type) {
1848        case RTE_ETH_FILTER_FDIR:
1849                fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1850                memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1851                           sizeof(struct hns3_fdir_rule));
1852
1853                ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1854                if (ret)
1855                        return rte_flow_error_set(error, EIO,
1856                                                  RTE_FLOW_ERROR_TYPE_HANDLE,
1857                                                  flow,
1858                                                  "Destroy FDIR fail. Try again");
1859                if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1860                        hns3_counter_release(dev, fdir_rule.act_cnt.id);
1861                TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1862                rte_free(fdir_rule_ptr);
1863                fdir_rule_ptr = NULL;
1864                break;
1865        case RTE_ETH_FILTER_HASH:
1866                rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1867                ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1868                                             false);
1869                if (ret)
1870                        return rte_flow_error_set(error, EIO,
1871                                                  RTE_FLOW_ERROR_TYPE_HANDLE,
1872                                                  flow,
1873                                                  "Destroy RSS fail. Try again");
1874                TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1875                rte_free(rss_filter_ptr);
1876                rss_filter_ptr = NULL;
1877                break;
1878        default:
1879                return rte_flow_error_set(error, EINVAL,
1880                                          RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1881                                          "Unsupported filter type");
1882        }
1883
1884        TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
1885                if (flow_node->flow == flow) {
1886                        TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1887                        rte_free(flow_node);
1888                        flow_node = NULL;
1889                        break;
1890                }
1891        }
1892        rte_free(flow);
1893        flow = NULL;
1894
1895        return 0;
1896}
1897
1898/* Destroy all flow rules associated with a port on hns3. */
1899static int
1900hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1901{
1902        struct hns3_adapter *hns = dev->data->dev_private;
1903        int ret;
1904
1905        /* FDIR is available only in PF driver */
1906        if (!hns->is_vf) {
1907                ret = hns3_clear_all_fdir_filter(hns);
1908                if (ret) {
1909                        rte_flow_error_set(error, ret,
1910                                           RTE_FLOW_ERROR_TYPE_HANDLE,
1911                                           NULL, "Failed to flush rule");
1912                        return ret;
1913                }
1914                hns3_counter_flush(dev);
1915        }
1916
1917        ret = hns3_clear_rss_filter(dev);
1918        if (ret) {
1919                rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1920                                   NULL, "Failed to flush rss filter");
1921                return ret;
1922        }
1923
1924        hns3_filterlist_flush(dev);
1925
1926        return 0;
1927}
1928
1929/* Query an existing flow rule. */
1930static int
1931hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1932                const struct rte_flow_action *actions, void *data,
1933                struct rte_flow_error *error)
1934{
1935        struct rte_flow_action_rss *rss_conf;
1936        struct hns3_rss_conf_ele *rss_rule;
1937        struct rte_flow_query_count *qc;
1938        int ret;
1939
1940        if (!flow->rule)
1941                return rte_flow_error_set(error, EINVAL,
1942                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
1943
1944        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1945                switch (actions->type) {
1946                case RTE_FLOW_ACTION_TYPE_VOID:
1947                        break;
1948                case RTE_FLOW_ACTION_TYPE_COUNT:
1949                        qc = (struct rte_flow_query_count *)data;
1950                        ret = hns3_counter_query(dev, flow, qc, error);
1951                        if (ret)
1952                                return ret;
1953                        break;
1954                case RTE_FLOW_ACTION_TYPE_RSS:
1955                        if (flow->filter_type != RTE_ETH_FILTER_HASH) {
1956                                return rte_flow_error_set(error, ENOTSUP,
1957                                        RTE_FLOW_ERROR_TYPE_ACTION,
1958                                        actions, "action is not supported");
1959                        }
1960                        rss_conf = (struct rte_flow_action_rss *)data;
1961                        rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
1962                        rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
1963                                   sizeof(struct rte_flow_action_rss));
1964                        break;
1965                default:
1966                        return rte_flow_error_set(error, ENOTSUP,
1967                                RTE_FLOW_ERROR_TYPE_ACTION,
1968                                actions, "action is not supported");
1969                }
1970        }
1971
1972        return 0;
1973}
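
    /*
     * Sketch of an application querying a flow counter through this path
     * (illustrative; "flow" must have been created with a COUNT action):
     *
     *   struct rte_flow_query_count cnt = { .reset = 0 };
     *   struct rte_flow_action query[] = {
     *           { .type = RTE_FLOW_ACTION_TYPE_COUNT },
     *           { .type = RTE_FLOW_ACTION_TYPE_END },
     *   };
     *
     *   if (rte_flow_query(port_id, flow, query, &cnt, &err) == 0)
     *           printf("hits: %" PRIu64 "\n", cnt.hits);
     */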
1974
1975static int
1976hns3_flow_validate_wrap(struct rte_eth_dev *dev,
1977                        const struct rte_flow_attr *attr,
1978                        const struct rte_flow_item pattern[],
1979                        const struct rte_flow_action actions[],
1980                        struct rte_flow_error *error)
1981{
1982        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1983        int ret;
1984
1985        pthread_mutex_lock(&hw->flows_lock);
1986        ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1987        pthread_mutex_unlock(&hw->flows_lock);
1988
1989        return ret;
1990}
1991
1992static struct rte_flow *
1993hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1994                      const struct rte_flow_item pattern[],
1995                      const struct rte_flow_action actions[],
1996                      struct rte_flow_error *error)
1997{
1998        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1999        struct rte_flow *flow;
2000
2001        pthread_mutex_lock(&hw->flows_lock);
2002        flow = hns3_flow_create(dev, attr, pattern, actions, error);
2003        pthread_mutex_unlock(&hw->flows_lock);
2004
2005        return flow;
2006}
2007
2008static int
2009hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2010                       struct rte_flow_error *error)
2011{
2012        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2013        int ret;
2014
2015        pthread_mutex_lock(&hw->flows_lock);
2016        ret = hns3_flow_destroy(dev, flow, error);
2017        pthread_mutex_unlock(&hw->flows_lock);
2018
2019        return ret;
2020}
2021
2022static int
2023hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
2024{
2025        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2026        int ret;
2027
2028        pthread_mutex_lock(&hw->flows_lock);
2029        ret = hns3_flow_flush(dev, error);
2030        pthread_mutex_unlock(&hw->flows_lock);
2031
2032        return ret;
2033}
2034
2035static int
2036hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2037                     const struct rte_flow_action *actions, void *data,
2038                     struct rte_flow_error *error)
2039{
2040        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2041        int ret;
2042
2043        pthread_mutex_lock(&hw->flows_lock);
2044        ret = hns3_flow_query(dev, flow, actions, data, error);
2045        pthread_mutex_unlock(&hw->flows_lock);
2046
2047        return ret;
2048}
2049
2050static const struct rte_flow_ops hns3_flow_ops = {
2051        .validate = hns3_flow_validate_wrap,
2052        .create = hns3_flow_create_wrap,
2053        .destroy = hns3_flow_destroy_wrap,
2054        .flush = hns3_flow_flush_wrap,
2055        .query = hns3_flow_query_wrap,
2056        .isolate = NULL,
2057};
2058
2059int
2060hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
2061                      const struct rte_flow_ops **ops)
2062{
2063        struct hns3_hw *hw;
2064
2065        hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2066        if (hw->adapter_state >= HNS3_NIC_CLOSED)
2067                return -ENODEV;
2068
2069        *ops = &hns3_flow_ops;
2070        return 0;
2071}
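
    /*
     * The callbacks in hns3_flow_ops are not invoked directly by
     * applications; the generic rte_flow layer retrieves the table through
     * this callback and dispatches to it, roughly (ethdev-layer sketch,
     * not hns3 code):
     *
     *   const struct rte_flow_ops *ops;
     *
     *   dev->dev_ops->flow_ops_get(dev, &ops);
     *   ops->create(dev, attr, pattern, actions, error);
     */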
2072
2073void
2074hns3_flow_init(struct rte_eth_dev *dev)
2075{
2076        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2077        pthread_mutexattr_t attr;
2078
2079        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2080                return;
2081
2082        pthread_mutexattr_init(&attr);
2083        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
2084        pthread_mutex_init(&hw->flows_lock, &attr);
2085        dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
2086
2087        TAILQ_INIT(&hw->flow_fdir_list);
2088        TAILQ_INIT(&hw->flow_rss_list);
2089        TAILQ_INIT(&hw->flow_list);
2090}
2091
2092void
2093hns3_flow_uninit(struct rte_eth_dev *dev)
2094{
2095        struct rte_flow_error error;
2096        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2097                hns3_flow_flush_wrap(dev, &error);
2098}
2099