/* linux/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c */
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
  32
  33#include <linux/list.h>
  34#include <linux/ip.h>
  35#include <linux/ipv6.h>
  36#include <linux/tcp.h>
  37#include <linux/mlx5/fs.h>
  38#include <linux/mlx5/mpfs.h>
  39#include "en.h"
  40#include "en_rep.h"
  41#include "lib/mpfs.h"
  42
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);

/* L2 rule match type: exact MAC match vs. catch-all multicast match */
enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
};

/* Sub-classes of L2 traffic used when steering */
enum {
	MLX5E_UC        = 0,
	MLX5E_MC_IPV4   = 1,
	MLX5E_MC_IPV6   = 2,
	MLX5E_MC_OTHER  = 3,
};

/* Pending action on a hashed L2 address; consumed by
 * mlx5e_execute_l2_action().
 */
enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};

/* One MAC address tracked in the netdev_uc/netdev_mc hash tables */
struct mlx5e_l2_hash_node {
	struct hlist_node          hlist;
	u8                         action;	/* MLX5E_ACTION_* */
	struct mlx5e_l2_rule ai;
	bool   mpfs;	/* set when the MAC was added to MPFS (see mlx5_mpfs_add_mac) */
};
  72
  73static inline int mlx5e_hash_l2(u8 *addr)
  74{
  75        return addr[5];
  76}
  77
  78static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
  79{
  80        struct mlx5e_l2_hash_node *hn;
  81        int ix = mlx5e_hash_l2(addr);
  82        int found = 0;
  83
  84        hlist_for_each_entry(hn, &hash[ix], hlist)
  85                if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
  86                        found = 1;
  87                        break;
  88                }
  89
  90        if (found) {
  91                hn->action = MLX5E_ACTION_NONE;
  92                return;
  93        }
  94
  95        hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
  96        if (!hn)
  97                return;
  98
  99        ether_addr_copy(hn->ai.addr, addr);
 100        hn->action = MLX5E_ACTION_ADD;
 101
 102        hlist_add_head(&hn->hlist, &hash[ix]);
 103}
 104
/* Unlink a hash node and free it; the caller must already have removed
 * its flow rule.
 */
static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
 110
 111static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
 112{
 113        struct net_device *ndev = priv->netdev;
 114        int max_list_size;
 115        int list_size;
 116        u16 *vlans;
 117        int vlan;
 118        int err;
 119        int i;
 120
 121        list_size = 0;
 122        for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
 123                list_size++;
 124
 125        max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
 126
 127        if (list_size > max_list_size) {
 128                netdev_warn(ndev,
 129                            "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
 130                            list_size, max_list_size);
 131                list_size = max_list_size;
 132        }
 133
 134        vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
 135        if (!vlans)
 136                return -ENOMEM;
 137
 138        i = 0;
 139        for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
 140                if (i >= list_size)
 141                        break;
 142                vlans[i++] = vlan;
 143        }
 144
 145        err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
 146        if (err)
 147                netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
 148                           err);
 149
 150        kfree(vlans);
 151        return err;
 152}
 153
/* Kinds of VLAN steering rules held in the VLAN flow table */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,		/* neither C-tag nor S-tag present */
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,	/* any C-tagged frame */
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,	/* any S-tagged frame */
	MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,	/* C-tag with a specific VID */
	MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,	/* S-tag with a specific VID */
};
 161
/* Fill @spec according to @rule_type (and @vid for the MATCH_* types) and
 * install the rule in the VLAN flow table, forwarding matches to the L2
 * table. The rule handle is stored in the per-type slot in priv->fs.vlan.
 * Returns 0 on success (including when the rule already exists) or a
 * negative errno.
 */
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	/* All VLAN rules forward to the L2 (MAC) table */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		rule_p = &priv->fs.vlan.untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		/* match any frame carrying a C-tag */
		rule_p = &priv->fs.vlan.any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		/* match any frame carrying an S-tag */
		rule_p = &priv->fs.vlan.any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		/* match S-tag presence plus the exact VID */
		rule_p = &priv->fs.vlan.active_svlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
		/* match C-tag presence plus the exact VID */
		rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	/* Idempotent: a duplicate add is a warning, not an error */
	if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type))
		return 0;

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}
 234
 235static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 236                               enum mlx5e_vlan_rule_type rule_type, u16 vid)
 237{
 238        struct mlx5_flow_spec *spec;
 239        int err = 0;
 240
 241        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 242        if (!spec)
 243                return -ENOMEM;
 244
 245        if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
 246                mlx5e_vport_context_update_vlans(priv);
 247
 248        err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
 249
 250        kvfree(spec);
 251
 252        return err;
 253}
 254
/* Remove the VLAN rule of @rule_type (and @vid for MATCH_* types), if
 * present, and clear its slot. Deleting a C-VID match rule also re-syncs
 * the active VLAN list to the vport context.
 */
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->fs.vlan.any_cvlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
			priv->fs.vlan.any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->fs.vlan.any_svlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
			priv->fs.vlan.any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		if (priv->fs.vlan.active_svlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
			priv->fs.vlan.active_svlans_rule[vid] = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
		if (priv->fs.vlan.active_cvlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
			priv->fs.vlan.active_cvlans_rule[vid] = NULL;
		}
		/* keep the vport VLAN list in sync after removal */
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}
 292
/* Remove the catch-all "any C-tag" and "any S-tag" rules. */
static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
 298
 299static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
 300{
 301        int err;
 302
 303        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
 304        if (err)
 305                return err;
 306
 307        return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
 308}
 309
 310static struct mlx5_flow_handle *
 311mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
 312{
 313        struct mlx5_flow_destination dest = {};
 314        MLX5_DECLARE_FLOW_ACT(flow_act);
 315        struct mlx5_flow_handle *rule;
 316        struct mlx5_flow_spec *spec;
 317
 318        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 319        if (!spec)
 320                return ERR_PTR(-ENOMEM);
 321        spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
 322        spec->flow_context.flow_tag = trap_id;
 323        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 324        dest.tir_num = tir_num;
 325
 326        rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 327        kvfree(spec);
 328        return rule;
 329}
 330
 331int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
 332{
 333        struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
 334        struct mlx5_flow_handle *rule;
 335        int err;
 336
 337        rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
 338        if (IS_ERR(rule)) {
 339                err = PTR_ERR(rule);
 340                priv->fs.vlan.trap_rule = NULL;
 341                netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
 342                           __func__, err);
 343                return err;
 344        }
 345        priv->fs.vlan.trap_rule = rule;
 346        return 0;
 347}
 348
 349void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
 350{
 351        if (priv->fs.vlan.trap_rule) {
 352                mlx5_del_flow_rules(priv->fs.vlan.trap_rule);
 353                priv->fs.vlan.trap_rule = NULL;
 354        }
 355}
 356
 357int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
 358{
 359        struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
 360        struct mlx5_flow_handle *rule;
 361        int err;
 362
 363        rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
 364        if (IS_ERR(rule)) {
 365                err = PTR_ERR(rule);
 366                priv->fs.l2.trap_rule = NULL;
 367                netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
 368                           __func__, err);
 369                return err;
 370        }
 371        priv->fs.l2.trap_rule = rule;
 372        return 0;
 373}
 374
 375void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
 376{
 377        if (priv->fs.l2.trap_rule) {
 378                mlx5_del_flow_rules(priv->fs.l2.trap_rule);
 379                priv->fs.l2.trap_rule = NULL;
 380        }
 381}
 382
/* Re-enable C-VLAN filtering by removing the "any C-tag" catch-all rule.
 * Skipped in promiscuous mode, where that rule must stay in place.
 */
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.cvlan_filter_disabled)
		return;

	priv->fs.vlan.cvlan_filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
 393
/* Disable C-VLAN filtering by installing the "any C-tag" catch-all rule.
 * Skipped in promiscuous mode, where the rule already exists.
 */
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.cvlan_filter_disabled)
		return;

	priv->fs.vlan.cvlan_filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
 404
 405static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
 406{
 407        int err;
 408
 409        set_bit(vid, priv->fs.vlan.active_cvlans);
 410
 411        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
 412        if (err)
 413                clear_bit(vid, priv->fs.vlan.active_cvlans);
 414
 415        return err;
 416}
 417
 418static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
 419{
 420        struct net_device *netdev = priv->netdev;
 421        int err;
 422
 423        set_bit(vid, priv->fs.vlan.active_svlans);
 424
 425        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
 426        if (err) {
 427                clear_bit(vid, priv->fs.vlan.active_svlans);
 428                return err;
 429        }
 430
 431        /* Need to fix some features.. */
 432        netdev_update_features(netdev);
 433        return err;
 434}
 435
 436int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 437{
 438        struct mlx5e_priv *priv = netdev_priv(dev);
 439
 440        if (mlx5e_is_uplink_rep(priv))
 441                return 0; /* no vlan table for uplink rep */
 442
 443        if (be16_to_cpu(proto) == ETH_P_8021Q)
 444                return mlx5e_vlan_rx_add_cvid(priv, vid);
 445        else if (be16_to_cpu(proto) == ETH_P_8021AD)
 446                return mlx5e_vlan_rx_add_svid(priv, vid);
 447
 448        return -EOPNOTSUPP;
 449}
 450
 451int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 452{
 453        struct mlx5e_priv *priv = netdev_priv(dev);
 454
 455        if (mlx5e_is_uplink_rep(priv))
 456                return 0; /* no vlan table for uplink rep */
 457
 458        if (be16_to_cpu(proto) == ETH_P_8021Q) {
 459                clear_bit(vid, priv->fs.vlan.active_cvlans);
 460                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
 461        } else if (be16_to_cpu(proto) == ETH_P_8021AD) {
 462                clear_bit(vid, priv->fs.vlan.active_svlans);
 463                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
 464                netdev_update_features(dev);
 465        }
 466
 467        return 0;
 468}
 469
 470static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
 471{
 472        int i;
 473
 474        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 475
 476        for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
 477                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
 478        }
 479
 480        for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
 481                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
 482
 483        if (priv->fs.vlan.cvlan_filter_disabled)
 484                mlx5e_add_any_vid_rules(priv);
 485}
 486
/* Tear down all VLAN rules. Only valid while the interface is being
 * destroyed (see the WARN below); the catch-all rules go last.
 */
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));

	mlx5e_remove_vlan_trap(priv);

	/* must be called after DESTROY bit is set and
	 * set_rx_mode is called and flushed
	 */
	if (priv->fs.vlan.cvlan_filter_disabled)
		mlx5e_del_any_vid_rules(priv);
}
 510
/* Iterate over every node in an MLX5E_L2_ADDR_HASH_SIZE-bucket hash
 * table; safe against removal of the current entry (uses the _safe
 * hlist iterator with @tmp as scratch).
 */
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
 514
/* Apply the pending ADD/DEL action of one hash node: install or remove
 * its L2 flow rule and keep the MPFS (multi-PF MAC) table in sync for
 * unicast addresses.
 */
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	/* Copy the MAC out of the node: the DEL path frees @hn before the
	 * error log below needs the address.
	 */
	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
			hn->mpfs = !l2_err;	/* remember for the DEL path */
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);	/* frees @hn */
		break;
	}

	if (l2_err)
		netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
			    action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
 546
/* Snapshot the netdev's own, UC and MC addresses into the driver's hash
 * tables under the netdev address lock; new entries are queued as ADD,
 * existing ones have their pending DEL cancelled.
 */
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	/* the device's own address belongs in the UC table too */
	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}
 565
/* Flatten the UC or MC hash table into @addr_array (at most @size
 * entries). For UC the device's own address is placed first; for MC the
 * broadcast address leads when broadcast is enabled. The own address is
 * skipped while walking the hash since it was already pushed.
 */
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}
 592
/* Push the UC or MC address list into the NIC vport context. The list is
 * sized from the hash table (plus broadcast for MC when enabled),
 * truncated to the device capability with a warning, then written via
 * mlx5_modify_nic_vport_mac_list().
 */
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	/* MC lists implicitly include the broadcast address when enabled */
	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	/* size == 0 with addr_array == NULL clears the vport list */
	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}
 639
 640static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
 641{
 642        struct mlx5e_l2_table *ea = &priv->fs.l2;
 643
 644        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
 645        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
 646        mlx5_modify_nic_vport_promisc(priv->mdev, 0,
 647                                      ea->allmulti_enabled,
 648                                      ea->promisc_enabled);
 649}
 650
/* Execute the pending ADD/DEL action of every node in both L2 hash
 * tables. Uses the removal-safe iterator since DEL frees nodes.
 */
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}
 663
/* Reconcile the driver's L2 tables with the netdev: mark every tracked
 * address for deletion, re-sync from the netdev (which flips still-present
 * entries back to NONE and queues new ones as ADD — skipped when the
 * device is being destroyed), then apply the resulting actions.
 */
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}
 680
/* The promisc table holds a single catch-all rule */
#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE

/* Install the single promiscuous rule: an empty match spec (match all)
 * forwarding every packet to the TTC table. Returns 0 or a negative errno.
 */
static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	rule_p = &priv->fs.promisc.rule;
	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__);
	}
	kvfree(spec);
	return err;
}
 709
 710static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
 711{
 712        struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
 713        struct mlx5_flow_table_attr ft_attr = {};
 714        int err;
 715
 716        ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
 717        ft_attr.autogroup.max_num_groups = 1;
 718        ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
 719        ft_attr.prio = MLX5E_NIC_PRIO;
 720
 721        ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
 722        if (IS_ERR(ft->t)) {
 723                err = PTR_ERR(ft->t);
 724                netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
 725                return err;
 726        }
 727
 728        err = mlx5e_add_promisc_rule(priv);
 729        if (err)
 730                goto err_destroy_promisc_table;
 731
 732        return 0;
 733
 734err_destroy_promisc_table:
 735        mlx5_destroy_flow_table(ft->t);
 736        ft->t = NULL;
 737
 738        return err;
 739}
 740
 741static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
 742{
 743        if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule"))
 744                return;
 745        mlx5_del_flow_rules(priv->fs.promisc.rule);
 746        priv->fs.promisc.rule = NULL;
 747}
 748
 749static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
 750{
 751        if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table"))
 752                return;
 753        mlx5e_del_promisc_rule(priv);
 754        mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
 755        priv->fs.promisc.ft.t = NULL;
 756}
 757
/* Work handler that reconciles hardware steering with the netdev's RX
 * mode flags (promisc/allmulti/broadcast) and address lists. During
 * destroy (MLX5E_STATE_DESTROYING) everything is computed as disabled,
 * so this same path also tears the rules down. Note the ordering:
 * enables happen before the address sync, disables after.
 */
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	/* transitions: compare desired state against the cached one */
	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
	int err;

	if (enable_promisc) {
		err = mlx5e_create_promisc_table(priv);
		if (err)
			enable_promisc = false;
		if (!priv->channels.params.vlan_strip_disable && !err)
			netdev_warn_once(ndev,
					 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	/* sync and apply per-address ADD/DEL actions */
	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc)
		mlx5e_destroy_promisc_table(priv);

	/* cache the state we just programmed */
	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}
 807
 808static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
 809{
 810        int i;
 811
 812        for (i = ft->num_groups - 1; i >= 0; i--) {
 813                if (!IS_ERR_OR_NULL(ft->g[i]))
 814                        mlx5_destroy_flow_group(ft->g[i]);
 815                ft->g[i] = NULL;
 816        }
 817        ft->num_groups = 0;
 818}
 819
/* Seed the L2 table's broadcast rule address from the netdev. */
void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}
 824
/* Destroy a flow table: groups first (they reference ft->g), then the
 * group array, then the table itself.
 */
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}
 832
 833static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
 834{
 835        int i;
 836
 837        for (i = 0; i < MLX5E_NUM_TT; i++) {
 838                if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
 839                        mlx5_del_flow_rules(ttc->rules[i].rule);
 840                        ttc->rules[i].rule = NULL;
 841                }
 842        }
 843
 844        for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
 845                if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
 846                        mlx5_del_flow_rules(ttc->tunnel_rules[i]);
 847                        ttc->tunnel_rules[i] = NULL;
 848                }
 849        }
 850}
 851
/* Match key for one TTC entry: an Ethertype plus an optional IP
 * protocol (0 means "match on ethertype/ip_version only").
 */
struct mlx5e_etype_proto {
	u16 etype;
	u8 proto;
};
 856
/* Match keys for the outer TTC table, indexed by the MLX5E_TT_* traffic
 * types.  proto == 0 entries match the L3 protocol only; MLX5E_TT_ANY
 * (all-zero key) is the catch-all.
 */
static struct mlx5e_etype_proto ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5E_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5E_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};
 903
/* Match keys for tunneled traffic (GRE, IP-in-IP), indexed by the
 * MLX5E_TT_*_GRE/IPIP/IPV6 tunnel traffic types.  Matching traffic is
 * redirected to the inner TTC table for classification on the inner
 * headers.
 */
static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
	[MLX5E_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5E_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
	[MLX5E_TT_IPV4_IPIP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPIP,
	},
	[MLX5E_TT_IPV6_IPIP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPIP,
	},
	[MLX5E_TT_IPV4_IPV6] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPV6,
	},
	[MLX5E_TT_IPV6_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPV6,
	},

};
 931
/* Map a tunnel traffic type to the outer IP protocol it matches on.
 * NOTE(review): @tt is not range-checked here — callers must pass a
 * valid enum mlx5e_tunnel_types value.
 */
u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt)
{
	return ttc_tunnel_rules[tt].proto;
}
 936
 937static bool mlx5e_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, u8 proto_type)
 938{
 939        switch (proto_type) {
 940        case IPPROTO_GRE:
 941                return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
 942        case IPPROTO_IPIP:
 943        case IPPROTO_IPV6:
 944                return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
 945                        MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
 946        default:
 947                return false;
 948        }
 949}
 950
 951static bool mlx5e_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
 952{
 953        int tt;
 954
 955        for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
 956                if (mlx5e_tunnel_proto_supported_rx(mdev, ttc_tunnel_rules[tt].proto))
 957                        return true;
 958        }
 959        return false;
 960}
 961
 962bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
 963{
 964        return (mlx5e_tunnel_any_rx_proto_supported(mdev) &&
 965                MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
 966}
 967
 968static u8 mlx5e_etype_to_ipv(u16 ethertype)
 969{
 970        if (ethertype == ETH_P_IP)
 971                return 4;
 972
 973        if (ethertype == ETH_P_IPV6)
 974                return 6;
 975
 976        return 0;
 977}
 978
/* Install one outer-TTC steering rule in @ft that sends traffic of the
 * given ethertype/ip-protocol pair to @dest.
 *
 * When the device can match on the outer ip_version field it is used
 * instead of an ethertype match.  Returns the rule handle or an
 * ERR_PTR() on failure.
 */
static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
			struct mlx5_flow_table *ft,
			struct mlx5_flow_destination *dest,
			u16 etype,
			u8 proto)
{
	int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	/* Optional L4 criterion: exact IP protocol match. */
	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
	}

	/* L3 criterion: prefer ip_version when the HW supports it. */
	ipv = mlx5e_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}
1023
/* Populate the outer TTC table: one rule per traffic type pointing at
 * its TIR, plus (when supported and requested) tunnel rules that
 * redirect encapsulated traffic to the inner TTC flow table.
 * On any failure all rules installed so far are removed.
 * Returns 0 or a negative errno.
 */
static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
					  struct ttc_params *params,
					  struct mlx5e_ttc_table *ttc)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **trules;
	struct mlx5e_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ft = ttc->ft.t;
	rules = ttc->rules;

	/* Per traffic-type rules, each steering to a TIR. */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		struct mlx5e_ttc_rule *rule = &rules[tt];

		if (tt == MLX5E_TT_ANY)
			dest.tir_num = params->any_tt_tirn;
		else
			dest.tir_num = params->indir_tirn[tt];

		rule->rule = mlx5e_generate_ttc_rule(priv, ft, &dest,
						     ttc_rules[tt].etype,
						     ttc_rules[tt].proto);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		/* Remembered so the rule can be restored after a redirect. */
		rule->default_dest = dest;
	}

	if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	/* Tunnel rules forward matching traffic to the inner TTC table. */
	trules    = ttc->tunnel_rules;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft   = params->inner_ttc->ft.t;
	for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
		if (!mlx5e_tunnel_proto_supported_rx(priv->mdev,
						     ttc_tunnel_rules[tt].proto))
			continue;
		trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						     ttc_tunnel_rules[tt].etype,
						     ttc_tunnel_rules[tt].proto);
		if (IS_ERR(trules[tt])) {
			err = PTR_ERR(trules[tt]);
			trules[tt] = NULL;
			goto del_rules;
		}
	}

	return 0;

del_rules:
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}
1084
/* Create the outer TTC table's three flow groups, in index order:
 *   1. L4 group  - ip_protocol + (ip_version or ethertype),
 *   2. L3 group  - ip_version/ethertype only,
 *   3. Any group - wildcard catch-all.
 * @use_ipv selects ip_version vs ethertype matching, mirroring the rule
 * generation in mlx5e_generate_ttc_rule().
 * Returns 0 or a negative errno; partially created groups are left for
 * the caller's mlx5e_destroy_flow_table() to clean up.
 */
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
					 bool use_ipv)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		ft->g = NULL;
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group: same criteria minus the ip_protocol bit. */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group: zeroed criteria -> matches everything. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}
1152
/* Install one inner-TTC steering rule in @ft matching on the INNER
 * headers (ip_version and/or ip_protocol) and steering to @dest.
 * Only called when inner FT support was verified, so ip_version
 * matching is always available (no ethertype fallback here).
 * Returns the rule handle or an ERR_PTR() on failure.
 */
static struct mlx5_flow_handle *
mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_destination *dest,
			      u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5e_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}
1191
/* Populate the inner TTC table: one rule per traffic type steering to
 * the corresponding (inner) TIR.  All installed rules are removed on
 * failure.  Returns 0 or a negative errno.
 */
static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
						struct ttc_params *params,
						struct mlx5e_ttc_table *ttc)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5e_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ft = ttc->ft.t;
	rules = ttc->rules;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		struct mlx5e_ttc_rule *rule = &rules[tt];

		if (tt == MLX5E_TT_ANY)
			dest.tir_num = params->any_tt_tirn;
		else
			dest.tir_num = params->indir_tirn[tt];

		rule->rule = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
							   ttc_rules[tt].etype,
							   ttc_rules[tt].proto);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		/* Saved so the default destination can be restored later. */
		rule->default_dest = dest;
	}

	return 0;

del_rules:

	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}
1232
/* Create the inner TTC table's three flow groups (inner-header
 * counterpart of mlx5e_create_ttc_table_groups()):
 *   1. L4 group  - inner ip_protocol + ip_version,
 *   2. L3 group  - inner ip_version only,
 *   3. Any group - wildcard catch-all.
 * Returns 0 or a negative errno.
 */
static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		ft->g = NULL;
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group: drop the ip_protocol criterion, keep ip_version. */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group: zeroed criteria -> matches everything. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}
1295
/* Fill the TTC parameters shared by all callers: the catch-all TIR and
 * the inner TTC table reference.
 */
void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
				struct ttc_params *ttc_params)
{
	ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
	ttc_params->inner_ttc = &priv->fs.inner_ttc;
}
1302
/* Fill in size/level/prio attributes for the inner TTC flow table. */
void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;

	ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;
}
1311
1312void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
1313
1314{
1315        struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
1316
1317        ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
1318        ft_attr->level = MLX5E_TTC_FT_LEVEL;
1319        ft_attr->prio = MLX5E_NIC_PRIO;
1320}
1321
/* Create the inner TTC flow table, its groups and its rules.
 * Silently succeeds (creating nothing) when the device lacks inner FT
 * support; the destroy path performs the mirrored check.
 * Returns 0 or a negative errno.
 */
int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
				 struct mlx5e_ttc_table *ttc)
{
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_inner_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}
1352
/* Tear down the inner TTC table; a no-op when inner FT is unsupported
 * (in which case mlx5e_create_inner_ttc_table() created nothing).
 */
void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
				   struct mlx5e_ttc_table *ttc)
{
	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}
1362
/* Tear down the outer TTC table: delete its rules, then the groups and
 * the table itself.
 */
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
			     struct mlx5e_ttc_table *ttc)
{
	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}
1369
/* Create the outer TTC flow table, its groups and its rules.  Group
 * criteria depend on whether the device can match on outer ip_version.
 * Returns 0 or a negative errno; the table is fully torn down on error.
 */
int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
			   struct mlx5e_ttc_table *ttc)
{
	bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}
1397
/* Redirect the TTC rule of traffic type @type to @new_dest.
 * Returns the result of the firmware modify command.
 */
int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
		       struct mlx5_flow_destination *new_dest)
{
	return mlx5_modify_rule_destination(priv->fs.ttc.rules[type].rule, new_dest, NULL);
}
1403
/* Return (by value) the default destination recorded for traffic type
 * @type when its rule was created.  Warns once if the destination was
 * never set up (type should be TIR after rule creation).
 */
struct mlx5_flow_destination
mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
{
	struct mlx5_flow_destination *dest = &priv->fs.ttc.rules[type].default_dest;

	WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
		  "TTC[%d] default dest is not setup yet", type);

	return *dest;
}
1414
/* Restore the TTC rule of @type to its original (default) destination,
 * undoing a previous mlx5e_ttc_fwd_dest() redirect.
 */
int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
{
	struct mlx5_flow_destination dest = mlx5e_ttc_get_default_dest(priv, type);

	return mlx5e_ttc_fwd_dest(priv, type, &dest);
}
1421
1422static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
1423                                   struct mlx5e_l2_rule *ai)
1424{
1425        if (!IS_ERR_OR_NULL(ai->rule)) {
1426                mlx5_del_flow_rules(ai->rule);
1427                ai->rule = NULL;
1428        }
1429}
1430
/* Install an L2 (DMAC) steering rule forwarding to the TTC table.
 * @type selects the match:
 *   MLX5E_FULLMATCH - exact match on ai->addr,
 *   MLX5E_ALLMULTI  - match only the multicast bit of the DMAC.
 * On success the handle is stored in ai->rule; on failure ai->rule is
 * cleared and a negative errno is returned.
 */
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Mask and value pointers into the DMAC field of the spec. */
	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		/* Match only the multicast bit in the first DMAC octet. */
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}
1480
/* L2 table layout: a large full-DMAC-match group, a single-entry
 * allmulti group, and a single-entry trap group at the end.
 */
#define MLX5E_NUM_L2_GROUPS	   3
#define MLX5E_L2_GROUP1_SIZE	   BIT(15)
#define MLX5E_L2_GROUP2_SIZE	   BIT(0)
#define MLX5E_L2_GROUP_TRAP_SIZE   BIT(0) /* must be last */
#define MLX5E_L2_TABLE_SIZE	   (MLX5E_L2_GROUP1_SIZE +\
				    MLX5E_L2_GROUP2_SIZE +\
				    MLX5E_L2_GROUP_TRAP_SIZE)
1488static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
1489{
1490        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1491        struct mlx5e_flow_table *ft = &l2_table->ft;
1492        int ix = 0;
1493        u8 *mc_dmac;
1494        u32 *in;
1495        int err;
1496        u8 *mc;
1497
1498        ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1499        if (!ft->g)
1500                return -ENOMEM;
1501        in = kvzalloc(inlen, GFP_KERNEL);
1502        if (!in) {
1503                kfree(ft->g);
1504                return -ENOMEM;
1505        }
1506
1507        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1508        mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
1509                               outer_headers.dmac_47_16);
1510        /* Flow Group for full match */
1511        eth_broadcast_addr(mc_dmac);
1512        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1513        MLX5_SET_CFG(in, start_flow_index, ix);
1514        ix += MLX5E_L2_GROUP1_SIZE;
1515        MLX5_SET_CFG(in, end_flow_index, ix - 1);
1516        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1517        if (IS_ERR(ft->g[ft->num_groups]))
1518                goto err_destroy_groups;
1519        ft->num_groups++;
1520
1521        /* Flow Group for allmulti */
1522        eth_zero_addr(mc_dmac);
1523        mc_dmac[0] = 0x01;
1524        MLX5_SET_CFG(in, start_flow_index, ix);
1525        ix += MLX5E_L2_GROUP2_SIZE;
1526        MLX5_SET_CFG(in, end_flow_index, ix - 1);
1527        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1528        if (IS_ERR(ft->g[ft->num_groups]))
1529                goto err_destroy_groups;
1530        ft->num_groups++;
1531
1532        /* Flow Group for l2 traps */
1533        memset(in, 0, inlen);
1534        MLX5_SET_CFG(in, start_flow_index, ix);
1535        ix += MLX5E_L2_GROUP_TRAP_SIZE;
1536        MLX5_SET_CFG(in, end_flow_index, ix - 1);
1537        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1538        if (IS_ERR(ft->g[ft->num_groups]))
1539                goto err_destroy_groups;
1540        ft->num_groups++;
1541
1542        kvfree(in);
1543        return 0;
1544
1545err_destroy_groups:
1546        err = PTR_ERR(ft->g[ft->num_groups]);
1547        ft->g[ft->num_groups] = NULL;
1548        mlx5e_destroy_groups(ft);
1549        kvfree(in);
1550        kfree(ft->g);
1551
1552        return err;
1553}
1554
/* Tear down the L2 flow table (groups, group array and HW table). */
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}
1559
/* Create the L2 flow table and its groups.  Rules are installed later
 * (e.g. via mlx5e_add_l2_flow_rule()).  Returns 0 or a negative errno;
 * the HW table is destroyed if group creation fails.
 */
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
1592
/* VLAN table layout: per-VID C-tag and S-tag groups, an any-C-tag
 * group, an any-S-tag group, and a single-entry trap group at the end.
 */
#define MLX5E_NUM_VLAN_GROUPS	5
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE	BIT(0)
#define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 MLX5E_VLAN_GROUP3_SIZE +\
				 MLX5E_VLAN_GROUP_TRAP_SIZE)
1604
/* Create the five VLAN table groups in index order:
 *   0. cvlan_tag + first_vid (per C-VID rules),
 *   1. svlan_tag + first_vid (per S-VID rules),
 *   2. cvlan_tag only (any C-tag),
 *   3. svlan_tag only (any S-tag),
 *   4. wildcard trap group.
 * @in is a scratch create_flow_group_in buffer of @inlen bytes, zeroed
 * and refilled before each group.  Partially created groups are
 * destroyed on failure.  Returns 0 or a negative errno.
 */
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Trap group: zeroed criteria -> matches everything. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP_TRAP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
1676
1677static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
1678{
1679        u32 *in;
1680        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1681        int err;
1682
1683        in = kvzalloc(inlen, GFP_KERNEL);
1684        if (!in)
1685                return -ENOMEM;
1686
1687        err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
1688
1689        kvfree(in);
1690        return err;
1691}
1692
/* Create the VLAN flow table, its groups and the initial VLAN rules.
 * NOTE(review): the mlx5e_add_vlan_rules() result is not checked here —
 * confirm whether failures there are intentionally tolerated.
 * Returns 0 or a negative errno; partial state is unwound on error.
 */
static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
1734
/* Tear down the VLAN steering state: remove the installed VLAN rules
 * first, then destroy the flow table (groups and table object) they
 * lived in.  The order mirrors the reverse of mlx5e_create_vlan_table().
 */
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
1740
/* Build the NIC RX flow-steering pipeline for this netdev.
 *
 * Tables are created in dependency order: aRFS -> inner TTC -> TTC ->
 * L2 -> VLAN, then ethtool steering is initialized.  An aRFS failure is
 * non-fatal: the NTUPLE feature bit is cleared and setup continues.  Any
 * later failure unwinds the already-created tables via the layered error
 * labels below.
 *
 * Returns 0 on success, -EOPNOTSUPP when the kernel flow namespace is
 * unavailable, or the negative errno of the failing step.
 */
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EOPNOTSUPP;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		/* Best effort: drop aRFS support instead of failing setup */
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	/* Inner TTC table steers inner (tunneled) headers to the inner
	 * indirection TIRs.
	 */
	mlx5e_set_ttc_basic_params(priv, &ttc_params);
	mlx5e_set_inner_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;

	err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	/* Outer TTC table: same ttc_params reused, repointed at the outer
	 * indirection TIRs.
	 */
	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_inner_ttc_table;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

	/* Unwind in reverse creation order; each label falls through to the
	 * teardown of every earlier step.
	 */
err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}
1811
/* Tear down the whole NIC RX steering pipeline in reverse creation
 * order (VLAN -> L2 -> TTC -> inner TTC -> aRFS), then clean up the
 * ethtool steering state.
 */
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}
1821