/* linux/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c */
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
  32
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "lib/mpfs.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);
  45
/* Match level used when installing an L2 flow rule. */
enum {
	MLX5E_FULLMATCH = 0,	/* match a specific destination MAC */
	MLX5E_ALLMULTI  = 1,	/* match all multicast traffic */
	MLX5E_PROMISC   = 2,	/* match all traffic (promiscuous) */
};
  51
/* L2 address classes — presumably used to pick a destination per traffic
 * kind; the users of these constants are not visible in this chunk.
 */
enum {
	MLX5E_UC        = 0,	/* unicast */
	MLX5E_MC_IPV4   = 1,	/* IPv4 multicast */
	MLX5E_MC_IPV6   = 2,	/* IPv6 multicast */
	MLX5E_MC_OTHER  = 3,	/* other multicast */
};
  58
/* Pending action recorded on an L2 hash node (see mlx5e_execute_l2_action). */
enum {
	MLX5E_ACTION_NONE = 0,	/* entry is in sync with HW, nothing to do */
	MLX5E_ACTION_ADD  = 1,	/* HW flow rule must be installed */
	MLX5E_ACTION_DEL  = 2,	/* HW flow rule must be removed, node freed */
};
  64
/* One tracked MAC address in the netdev_uc/netdev_mc hash tables. */
struct mlx5e_l2_hash_node {
	struct hlist_node          hlist;
	u8                         action;	/* MLX5E_ACTION_* pending for this entry */
	struct mlx5e_l2_rule ai;		/* MAC address and its flow-rule state */
	bool   mpfs;				/* MAC was successfully added to MPFS */
};
  71
  72static inline int mlx5e_hash_l2(u8 *addr)
  73{
  74        return addr[5];
  75}
  76
  77static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
  78{
  79        struct mlx5e_l2_hash_node *hn;
  80        int ix = mlx5e_hash_l2(addr);
  81        int found = 0;
  82
  83        hlist_for_each_entry(hn, &hash[ix], hlist)
  84                if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
  85                        found = 1;
  86                        break;
  87                }
  88
  89        if (found) {
  90                hn->action = MLX5E_ACTION_NONE;
  91                return;
  92        }
  93
  94        hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
  95        if (!hn)
  96                return;
  97
  98        ether_addr_copy(hn->ai.addr, addr);
  99        hn->action = MLX5E_ACTION_ADD;
 100
 101        hlist_add_head(&hn->hlist, &hash[ix]);
 102}
 103
/* Unlink @hn from its hash bucket and free it.  The caller must have
 * already removed the HW flow rule (see mlx5e_execute_l2_action).
 */
static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
 109
/* Push the set of active C-tag VLANs to the device's NIC vport context.
 * If more VLANs are active than the device supports (log_max_vlan_list),
 * the list is truncated and a warning is logged.
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	/* Count the active C-tag VLANs. */
	list_size = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		if (i >= list_size)	/* stop once the (possibly truncated) list is full */
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}
 152
/* Kinds of steering rules kept in the VLAN flow table. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,		/* neither C-tag nor S-tag present */
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,	/* any C-tagged packet */
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,	/* any S-tagged packet */
	MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,	/* C-tag with a specific VID */
	MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,	/* S-tag with a specific VID */
};
 160
/* Fill @spec with the match criteria for @rule_type/@vid and install the
 * rule in the VLAN flow table, forwarding matches to the L2 flow table.
 * The resulting handle is stored in the per-type slot in priv->fs.vlan
 * (indexed by @vid for the MATCH_* types); the slot is NULLed on failure.
 * Returns 0 on success or a negative errno.
 */
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	/* All VLAN rules steer to the L2 table. */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		rule_p = &priv->fs.vlan.untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->fs.vlan.any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->fs.vlan.any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		/* Match both the S-tag presence bit and the exact VID. */
		rule_p = &priv->fs.vlan.active_svlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
		rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}
 230
 231static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 232                               enum mlx5e_vlan_rule_type rule_type, u16 vid)
 233{
 234        struct mlx5_flow_spec *spec;
 235        int err = 0;
 236
 237        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 238        if (!spec)
 239                return -ENOMEM;
 240
 241        if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
 242                mlx5e_vport_context_update_vlans(priv);
 243
 244        err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
 245
 246        kvfree(spec);
 247
 248        return err;
 249}
 250
 251static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
 252                                enum mlx5e_vlan_rule_type rule_type, u16 vid)
 253{
 254        switch (rule_type) {
 255        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
 256                if (priv->fs.vlan.untagged_rule) {
 257                        mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
 258                        priv->fs.vlan.untagged_rule = NULL;
 259                }
 260                break;
 261        case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
 262                if (priv->fs.vlan.any_cvlan_rule) {
 263                        mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
 264                        priv->fs.vlan.any_cvlan_rule = NULL;
 265                }
 266                break;
 267        case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
 268                if (priv->fs.vlan.any_svlan_rule) {
 269                        mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
 270                        priv->fs.vlan.any_svlan_rule = NULL;
 271                }
 272                break;
 273        case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
 274                if (priv->fs.vlan.active_svlans_rule[vid]) {
 275                        mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
 276                        priv->fs.vlan.active_svlans_rule[vid] = NULL;
 277                }
 278                break;
 279        case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
 280                if (priv->fs.vlan.active_cvlans_rule[vid]) {
 281                        mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
 282                        priv->fs.vlan.active_cvlans_rule[vid] = NULL;
 283                }
 284                mlx5e_vport_context_update_vlans(priv);
 285                break;
 286        }
 287}
 288
/* Remove the catch-all "any C-tag" and "any S-tag" VLAN rules. */
static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
 294
 295static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
 296{
 297        int err;
 298
 299        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
 300        if (err)
 301                return err;
 302
 303        return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
 304}
 305
/* Re-enable C-tag VLAN filtering: remove the catch-all "any C-tag" rule
 * so only explicitly added VIDs pass.  In promiscuous mode only the flag
 * is updated — the catch-all rule is owned by the rx_mode work there.
 */
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.cvlan_filter_disabled)
		return;

	priv->fs.vlan.cvlan_filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
 316
/* Disable C-tag VLAN filtering: install a catch-all "any C-tag" rule so
 * every C-tagged packet passes.  In promiscuous mode only the flag is
 * updated — the catch-all rule already exists (added by the rx_mode work).
 */
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.cvlan_filter_disabled)
		return;

	priv->fs.vlan.cvlan_filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
 327
/* Mark @vid active in the C-tag bitmap and install its steering rule.
 * The bit is rolled back if rule installation fails.
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
	int err;

	set_bit(vid, priv->fs.vlan.active_cvlans);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	if (err)
		clear_bit(vid, priv->fs.vlan.active_cvlans);

	return err;
}
 340
/* Mark @vid active in the S-tag bitmap and install its steering rule.
 * On success, netdev features are re-evaluated (S-tag state affects
 * which offloads can stay enabled).  The bit is rolled back on failure.
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
{
	struct net_device *netdev = priv->netdev;
	int err;

	set_bit(vid, priv->fs.vlan.active_svlans);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
	if (err) {
		clear_bit(vid, priv->fs.vlan.active_svlans);
		return err;
	}

	/* Need to fix some features.. */
	netdev_update_features(netdev);
	return err;
}
 358
 359int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 360{
 361        struct mlx5e_priv *priv = netdev_priv(dev);
 362
 363        if (be16_to_cpu(proto) == ETH_P_8021Q)
 364                return mlx5e_vlan_rx_add_cvid(priv, vid);
 365        else if (be16_to_cpu(proto) == ETH_P_8021AD)
 366                return mlx5e_vlan_rx_add_svid(priv, vid);
 367
 368        return -EOPNOTSUPP;
 369}
 370
 371int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 372{
 373        struct mlx5e_priv *priv = netdev_priv(dev);
 374
 375        if (be16_to_cpu(proto) == ETH_P_8021Q) {
 376                clear_bit(vid, priv->fs.vlan.active_cvlans);
 377                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
 378        } else if (be16_to_cpu(proto) == ETH_P_8021AD) {
 379                clear_bit(vid, priv->fs.vlan.active_svlans);
 380                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
 381                netdev_update_features(dev);
 382        }
 383
 384        return 0;
 385}
 386
 387static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
 388{
 389        int i;
 390
 391        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 392
 393        for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
 394                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
 395        }
 396
 397        for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
 398                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
 399
 400        if (priv->fs.vlan.cvlan_filter_disabled &&
 401            !(priv->netdev->flags & IFF_PROMISC))
 402                mlx5e_add_any_vid_rules(priv);
 403}
 404
 405static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
 406{
 407        int i;
 408
 409        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 410
 411        for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
 412                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
 413        }
 414
 415        for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
 416                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
 417
 418        if (priv->fs.vlan.cvlan_filter_disabled &&
 419            !(priv->netdev->flags & IFF_PROMISC))
 420                mlx5e_del_any_vid_rules(priv);
 421}
 422
/* Iterate over every node of an L2 address hash table.  Uses the _safe
 * hlist variant, so the current node may be deleted while iterating.
 */
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
 426
/* Apply the pending action on @hn: install or remove its L2 flow rule,
 * mirroring unicast MACs into the MPFS table.  On the DEL path the node
 * is freed, so the MAC is copied out up front for the warning message.
 */
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	/* Copy before mlx5e_del_l2_from_hash() can free hn below. */
	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
			hn->mpfs = !l2_err;	/* remembered for the DEL path */
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		/* Only remove from MPFS what was successfully added. */
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}

	if (l2_err)
		netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
			    action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
 458
 459static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
 460{
 461        struct net_device *netdev = priv->netdev;
 462        struct netdev_hw_addr *ha;
 463
 464        netif_addr_lock_bh(netdev);
 465
 466        mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
 467                             priv->netdev->dev_addr);
 468
 469        netdev_for_each_uc_addr(ha, netdev)
 470                mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
 471
 472        netdev_for_each_mc_addr(ha, netdev)
 473                mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
 474
 475        netif_addr_unlock_bh(netdev);
 476}
 477
/* Flatten the tracked UC or MC hash table into @addr_array (at most
 * @size entries) for a vport-context update.  For UC the device's own
 * address is placed first; for MC the broadcast address leads when
 * broadcast is enabled.  Extra addresses beyond @size are dropped.
 */
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		/* Skip the own address: it was already placed at index 0. */
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}
 504
/* Push the tracked UC or MC address list to the device's NIC vport
 * context.  The list is truncated (with a warning) if it exceeds the
 * device's log_max_current_{uc,mc}_list capability.  Errors are logged
 * but not propagated.
 */
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	/* MC reserves one extra slot for the broadcast address. */
	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	/* size == 0 clears the device list (addr_array stays NULL). */
	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}
 551
/* Sync the device's NIC vport context with the driver state: UC and MC
 * address lists plus the allmulti/promisc flags.
 */
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}
 562
/* Execute the pending ADD/DEL action on every node of the UC and MC
 * hash tables (safe iteration: DEL frees the node).
 */
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}
 575
/* Mark-and-sweep sync of the netdev address lists: tentatively mark
 * every tracked address for deletion, re-mark the ones still present
 * (skipped while tearing down), then apply the resulting actions.
 */
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	/* When destroying, leave everything marked DEL so all rules go away. */
	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}
 592
/* Deferred rx-mode handler: reconcile promisc/allmulti/broadcast rules
 * and the L2 address tables with the current netdev flags.  Enables are
 * done before the address sweep and disables after it, so matching
 * traffic is never dropped during the transition.  When the netdev is
 * being destroyed, everything is torn down.
 */
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	/* Desired state: everything off while destroying. */
	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	/* Transitions relative to the currently programmed state. */
	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		if (!priv->channels.params.vlan_strip_disable)
			netdev_warn_once(ndev,
					 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		/* Promisc must also pass all VLANs; add the any-VID rules
		 * unless cvlan filtering already installed them.
		 */
		if (!priv->fs.vlan.cvlan_filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.cvlan_filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	/* Record the newly programmed state. */
	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}
 644
 645static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
 646{
 647        int i;
 648
 649        for (i = ft->num_groups - 1; i >= 0; i--) {
 650                if (!IS_ERR_OR_NULL(ft->g[i]))
 651                        mlx5_destroy_flow_group(ft->g[i]);
 652                ft->g[i] = NULL;
 653        }
 654        ft->num_groups = 0;
 655}
 656
/* Cache the netdev broadcast address in the L2 table state. */
void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}
 661
/* Tear down @ft: groups first, then the group array and the table itself. */
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}
 669
 670static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
 671{
 672        int i;
 673
 674        for (i = 0; i < MLX5E_NUM_TT; i++) {
 675                if (!IS_ERR_OR_NULL(ttc->rules[i])) {
 676                        mlx5_del_flow_rules(ttc->rules[i]);
 677                        ttc->rules[i] = NULL;
 678                }
 679        }
 680
 681        for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
 682                if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
 683                        mlx5_del_flow_rules(ttc->tunnel_rules[i]);
 684                        ttc->tunnel_rules[i] = NULL;
 685                }
 686        }
 687}
 688
/* Ethertype / IP-protocol pair describing one traffic-type match. */
struct mlx5e_etype_proto {
	u16 etype;	/* outer ethertype; 0 = wildcard */
	u8 proto;	/* IP protocol; 0 = wildcard */
};
 693
/* Match definitions for each outer traffic type.  proto == 0 means the
 * rule matches on ethertype only; the MLX5E_TT_ANY entry (0/0) matches
 * everything.
 */
static struct mlx5e_etype_proto ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5E_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5E_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};
 740
/* Match definitions for tunneled traffic types: GRE over IPv4/IPv6. */
static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
	[MLX5E_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5E_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
};
 751
 752static u8 mlx5e_etype_to_ipv(u16 ethertype)
 753{
 754        if (ethertype == ETH_P_IP)
 755                return 4;
 756
 757        if (ethertype == ETH_P_IPV6)
 758                return 6;
 759
 760        return 0;
 761}
 762
/* Install one TTC steering rule in @ft that directs traffic matching
 * @etype/@proto to @dest.  When the device can match the parsed outer IP
 * version (ft_field_support.outer_ip_version), that match is used
 * instead of a raw ethertype match.  Zero @etype/@proto act as
 * wildcards.  Returns the rule handle or an ERR_PTR on failure.
 */
static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
			struct mlx5_flow_table *ft,
			struct mlx5_flow_destination *dest,
			u16 etype,
			u8 proto)
{
	int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
	}

	/* Prefer the parsed IP-version field over the raw ethertype. */
	ipv = mlx5e_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}
 807
/* Populate @ttc with its steering rules: one rule per outer traffic
 * type (steering to the matching TIR), plus — when inner TTC is enabled
 * and the device supports it — one rule per tunnel traffic type
 * steering to the inner TTC table.  On failure, all rules created so
 * far are removed.  Returns 0 on success or a negative errno.
 */
static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
					  struct ttc_params *params,
					  struct mlx5e_ttc_table *ttc)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = params->any_tt_tirn;
		else
			dest.tir_num = params->indir_tirn[tt];
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	/* Tunnel rules steer to the inner TTC flow table instead of a TIR. */
	rules     = ttc->tunnel_rules;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft   = params->inner_ttc->ft.t;
	for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_tunnel_rules[tt].etype,
						    ttc_tunnel_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	/* Grab the error before cleanup NULLs/removes the failed slot. */
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}
 856
/* Outer TTC table layout: group 1 holds the L4 rules (the 8 base traffic
 * types plus one entry per tunnel type), group 2 holds the L3-only rules,
 * group 3 holds the single catch-all "any" rule.
 */
#define MLX5E_TTC_NUM_GROUPS    3
#define MLX5E_TTC_GROUP1_SIZE   (BIT(3) + MLX5E_NUM_TUNNEL_TT)
#define MLX5E_TTC_GROUP2_SIZE    BIT(1)
#define MLX5E_TTC_GROUP3_SIZE    BIT(0)
#define MLX5E_TTC_TABLE_SIZE    (MLX5E_TTC_GROUP1_SIZE +\
                                 MLX5E_TTC_GROUP2_SIZE +\
                                 MLX5E_TTC_GROUP3_SIZE)

/* Inner TTC table layout mirrors the outer one, but has no tunnel entries. */
#define MLX5E_INNER_TTC_NUM_GROUPS      3
#define MLX5E_INNER_TTC_GROUP1_SIZE     BIT(3)
#define MLX5E_INNER_TTC_GROUP2_SIZE     BIT(1)
#define MLX5E_INNER_TTC_GROUP3_SIZE     BIT(0)
#define MLX5E_INNER_TTC_TABLE_SIZE      (MLX5E_INNER_TTC_GROUP1_SIZE +\
                                         MLX5E_INNER_TTC_GROUP2_SIZE +\
                                         MLX5E_INNER_TTC_GROUP3_SIZE)
 872
/* Create the three flow groups of the outer TTC table.  A flow group fixes
 * the set of match criteria its entries may use, so the groups are built
 * from most to least specific: L4 (ip_protocol + L3 discriminator), L3
 * only, then a criteria-less "any" group.  When @use_ipv is set the L3
 * discriminator is outer ip_version; otherwise it falls back to ethertype.
 *
 * Returns 0 on success or a negative errno.  On failure, groups created so
 * far stay attached to ft; NOTE(review): assumes the caller releases them
 * (and ft->g) via mlx5e_destroy_flow_table() — confirm.
 */
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
					 bool use_ipv)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group: reuse the criteria above with ip_protocol cleared */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group: no match criteria at all */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}
 939
/* Build one inner-header classification rule: match the inner ip_version
 * derived from @etype (when the ethertype maps to an IP version) and/or the
 * inner L4 @proto.  If both end up unused the spec stays empty and the rule
 * matches all traffic (the "any" entry).
 *
 * Returns the new flow handle, or an ERR_PTR on allocation/steering failure.
 */
static struct mlx5_flow_handle *
mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_destination *dest,
			      u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	/* Inner tables match on ip_version rather than ethertype. */
	ipv = mlx5e_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}
 978
/* Populate the inner TTC table: one rule per traffic type, classified on
 * inner headers and steered to the corresponding inner indirection TIR
 * (or the "any" TIR for MLX5E_TT_ANY).
 *
 * Returns 0 on success or a negative errno; on failure all rules created
 * so far are removed.
 */
static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
						struct ttc_params *params,
						struct mlx5e_ttc_table *ttc)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rules;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = params->any_tt_tirn;
		else
			dest.tir_num = params->indir_tirn[tt];

		rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
							  ttc_rules[tt].etype,
							  ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	/* NULL the failed slot before cleanup so the ERR_PTR is not freed. */
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}
1014
/* Create the three flow groups of the inner TTC table, most to least
 * specific: L4 (inner ip_protocol + ip_version), L3 (ip_version only),
 * then the criteria-less "any" group.
 *
 * Returns 0 on success or a negative errno.  On failure, groups created so
 * far stay attached to ft; NOTE(review): assumes the caller releases them
 * (and ft->g) via mlx5e_destroy_flow_table() — confirm.
 */
static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group: reuse the criteria above with ip_protocol cleared */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group: no match criteria at all */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}
1076
1077void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
1078                                struct ttc_params *ttc_params)
1079{
1080        ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
1081        ttc_params->inner_ttc = &priv->fs.inner_ttc;
1082}
1083
1084void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
1085{
1086        struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
1087
1088        ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
1089        ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
1090        ft_attr->prio = MLX5E_NIC_PRIO;
1091}
1092
1093void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
1094
1095{
1096        struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
1097
1098        ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
1099        ft_attr->level = MLX5E_TTC_FT_LEVEL;
1100        ft_attr->prio = MLX5E_NIC_PRIO;
1101}
1102
/* Create the inner TTC flow table, its groups, and its steering rules.
 * A no-op (returning 0) when the device cannot match on inner headers —
 * mlx5e_destroy_inner_ttc_table() mirrors this check on teardown.
 *
 * Returns 0 on success or a negative errno; on failure the table and
 * anything created in it are destroyed.
 */
int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
				 struct mlx5e_ttc_table *ttc)
{
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
	if (IS_ERR(ft->t)) {
		/* Clear ft->t so later teardown does not free an ERR_PTR. */
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_inner_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}
1133
/* Tear down the inner TTC table: rules first, then the table itself.
 * Mirrors the capability check in mlx5e_create_inner_ttc_table(), which
 * creates nothing on devices without inner-header matching.
 */
void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
				   struct mlx5e_ttc_table *ttc)
{
	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}
1143
/* Tear down the outer TTC table: remove its rules before destroying the
 * table they live in.
 */
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
			     struct mlx5e_ttc_table *ttc)
{
	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}
1150
/* Create the outer TTC flow table, its groups, and its steering rules.
 * The group layout depends on whether the device can match on outer
 * ip_version (otherwise ethertype is used as the L3 discriminator).
 *
 * Returns 0 on success or a negative errno; on failure the table and
 * anything created in it are destroyed.
 */
int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
			   struct mlx5e_ttc_table *ttc)
{
	bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
	if (IS_ERR(ft->t)) {
		/* Clear ft->t so later teardown does not free an ERR_PTR. */
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}
1178
1179static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
1180                                   struct mlx5e_l2_rule *ai)
1181{
1182        if (!IS_ERR_OR_NULL(ai->rule)) {
1183                mlx5_del_flow_rules(ai->rule);
1184                ai->rule = NULL;
1185        }
1186}
1187
/* Install one rule in the L2 (DMAC) table forwarding matching packets to
 * the TTC table.  @type selects the match:
 *   MLX5E_FULLMATCH - exact DMAC from ai->addr,
 *   MLX5E_ALLMULTI  - multicast bit of the DMAC only,
 *   MLX5E_PROMISC   - no match criteria (catch-all).
 * The resulting handle is stored in ai->rule.
 *
 * Returns 0 on success or a negative errno (ai->rule is NULLed on failure).
 */
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Pointers into the spec's DMAC mask and value fields. */
	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	/* All L2 rules forward to the TTC table for L3/L4 classification. */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		/* Match only the multicast bit of the DMAC. */
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		/* Leave the spec empty: match everything. */
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}
1240
/* L2 (DMAC) table layout: one promiscuous catch-all entry, up to 2^15
 * exact-match MAC entries, and one all-multicast entry.
 */
#define MLX5E_NUM_L2_GROUPS        3
#define MLX5E_L2_GROUP1_SIZE       BIT(0)
#define MLX5E_L2_GROUP2_SIZE       BIT(15)
#define MLX5E_L2_GROUP3_SIZE       BIT(0)
#define MLX5E_L2_TABLE_SIZE        (MLX5E_L2_GROUP1_SIZE +\
                                    MLX5E_L2_GROUP2_SIZE +\
                                    MLX5E_L2_GROUP3_SIZE)
1248static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
1249{
1250        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1251        struct mlx5e_flow_table *ft = &l2_table->ft;
1252        int ix = 0;
1253        u8 *mc_dmac;
1254        u32 *in;
1255        int err;
1256        u8 *mc;
1257
1258        ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1259        if (!ft->g)
1260                return -ENOMEM;
1261        in = kvzalloc(inlen, GFP_KERNEL);
1262        if (!in) {
1263                kfree(ft->g);
1264                return -ENOMEM;
1265        }
1266
1267        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1268        mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
1269                               outer_headers.dmac_47_16);
1270        /* Flow Group for promiscuous */
1271        MLX5_SET_CFG(in, start_flow_index, ix);
1272        ix += MLX5E_L2_GROUP1_SIZE;
1273        MLX5_SET_CFG(in, end_flow_index, ix - 1);
1274        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1275        if (IS_ERR(ft->g[ft->num_groups]))
1276                goto err_destroy_groups;
1277        ft->num_groups++;
1278
1279        /* Flow Group for full match */
1280        eth_broadcast_addr(mc_dmac);
1281        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1282        MLX5_SET_CFG(in, start_flow_index, ix);
1283        ix += MLX5E_L2_GROUP2_SIZE;
1284        MLX5_SET_CFG(in, end_flow_index, ix - 1);
1285        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1286        if (IS_ERR(ft->g[ft->num_groups]))
1287                goto err_destroy_groups;
1288        ft->num_groups++;
1289
1290        /* Flow Group for allmulti */
1291        eth_zero_addr(mc_dmac);
1292        mc_dmac[0] = 0x01;
1293        MLX5_SET_CFG(in, start_flow_index, ix);
1294        ix += MLX5E_L2_GROUP3_SIZE;
1295        MLX5_SET_CFG(in, end_flow_index, ix - 1);
1296        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1297        if (IS_ERR(ft->g[ft->num_groups]))
1298                goto err_destroy_groups;
1299        ft->num_groups++;
1300
1301        kvfree(in);
1302        return 0;
1303
1304err_destroy_groups:
1305        err = PTR_ERR(ft->g[ft->num_groups]);
1306        ft->g[ft->num_groups] = NULL;
1307        mlx5e_destroy_groups(ft);
1308        kvfree(in);
1309
1310        return err;
1311}
1312
/* Tear down the L2 (DMAC) flow table, including its groups. */
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}
1317
/* Create the L2 (DMAC) flow table and its groups.  Unlike the TTC tables,
 * no rules are added here; they are installed later per MAC address via
 * mlx5e_add_l2_flow_rule().
 *
 * Returns 0 on success or a negative errno.
 * NOTE(review): the error path destroys only the flow table and does not
 * free ft->g; verify the groups helper releases it on its own failure path.
 */
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		/* Clear ft->t so later teardown does not free an ERR_PTR. */
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
1350
/* VLAN table layout: 4K C-tag VID entries, 4K S-tag VID entries, one
 * "any C-tag" entry, one "any S-tag" entry.
 */
#define MLX5E_NUM_VLAN_GROUPS   4
#define MLX5E_VLAN_GROUP0_SIZE  BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE  BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE  BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE  BIT(0)
#define MLX5E_VLAN_TABLE_SIZE   (MLX5E_VLAN_GROUP0_SIZE +\
                                 MLX5E_VLAN_GROUP1_SIZE +\
                                 MLX5E_VLAN_GROUP2_SIZE +\
                                 MLX5E_VLAN_GROUP3_SIZE)
1360
/* Create the four flow groups of the VLAN table using @in (of size @inlen)
 * as scratch for the create_flow_group_in mailbox, which is rebuilt from
 * scratch per group: C-tag + VID, S-tag + VID, C-tag only, S-tag only.
 *
 * Returns 0 on success or a negative errno.  On failure, groups created so
 * far are destroyed here; the caller still owns (and frees) ft->g.
 */
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Group 0: C-tag present + specific VID */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 1: S-tag present + specific VID */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 2: C-tag present, any VID */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 3: S-tag present, any VID */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
1423
1424static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
1425{
1426        u32 *in;
1427        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1428        int err;
1429
1430        in = kvzalloc(inlen, GFP_KERNEL);
1431        if (!in)
1432                return -ENOMEM;
1433
1434        err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
1435
1436        kvfree(in);
1437        return err;
1438}
1439
/* Create the VLAN flow table, its groups, and install the current set of
 * VLAN rules.
 *
 * Returns 0 on success or a negative errno; on failure the group array and
 * the table are released.
 * NOTE(review): the mlx5e_add_vlan_rules() result is not checked here —
 * confirm whether it can fail and should unwind.
 */
static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);

	if (IS_ERR(ft->t)) {
		/* Clear ft->t so later teardown does not free an ERR_PTR. */
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
1481
/* Tear down the VLAN table: remove its rules first, then the table. */
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
1487
/* Build the full NIC RX flow-steering pipeline in packet-traversal reverse
 * order (deepest table first): aRFS, inner TTC, outer TTC, L2 (DMAC), and
 * VLAN, then the ethtool steering hooks.  An aRFS failure is non-fatal:
 * the NTUPLE feature is simply disabled.
 *
 * Returns 0 on success or a negative errno; on failure every table created
 * so far is destroyed.
 */
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EOPNOTSUPP;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		/* Non-fatal: continue without accelerated RFS. */
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	/* Inner TTC first, using the inner indirection TIRs. */
	mlx5e_set_ttc_basic_params(priv, &ttc_params);
	mlx5e_set_inner_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;

	err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	/* Outer TTC reuses ttc_params, switched to the outer TIRs. */
	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_inner_ttc_table;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}
1558
/* Tear down the NIC RX flow-steering pipeline in the reverse order of
 * mlx5e_create_flow_steering(): VLAN, L2, outer TTC, inner TTC, aRFS,
 * then the ethtool steering state.
 */
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}
1568