linux/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "fs_core.h"

#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
        max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
        max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
                sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct counter_desc sw_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
        u64 vport_rx_packets;
        u64 vport_tx_packets;
        u64 vport_rx_bytes;
        u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

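/* ethtool strings/stats are laid out as the per-ring SW counters followed
 * by the HW vport counters, in the same order used by
 * mlx5e_rep_get_ethtool_stats() below.
 */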
static void mlx5e_rep_get_strings(struct net_device *dev,
                                  u32 stringset, uint8_t *data)
{
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               sw_rep_stats_desc[i].format);
                for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               vport_rep_stats_desc[j].format);
                break;
        }
}

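/* Read the HW counters of the e-switch vport this netdev represents and
 * cache them in priv->stats.vf_vport.
 */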
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct rtnl_link_stats64 *vport_stats;
        struct ifla_vf_stats vf_stats;
        int err;

        err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
        if (err) {
                pr_warn("vport %d error %d reading stats\n", rep->vport, err);
                return;
        }

        vport_stats = &priv->stats.vf_vport;
        /* flip tx/rx as we are reporting the counters for the switch vport */
        vport_stats->rx_packets = vf_stats.tx_packets;
        vport_stats->rx_bytes   = vf_stats.tx_bytes;
        vport_stats->tx_packets = vf_stats.rx_packets;
        vport_stats->tx_bytes   = vf_stats.rx_bytes;
}

static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        int i, j;

        memset(s, 0, sizeof(*s));
        for (i = 0; i < priv->channels.num; i++) {
                struct mlx5e_channel *c = priv->channels.c[i];

                rq_stats = c->rq.stats;

                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;

                for (j = 0; j < priv->channels.params.num_tc; j++) {
                        sq_stats = c->sq[j].stats;

                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
                }
        }
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats, u64 *data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int i, j;

        if (!data)
                return;

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_rep_update_sw_counters(priv);
        mlx5e_rep_update_hw_counters(priv);
        mutex_unlock(&priv->state_lock);

        for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
                                               sw_rep_stats_desc, i);

        for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
                                               vport_rep_stats_desc, j);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
        default:
                return -EOPNOTSUPP;
        }
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
        .get_drvinfo       = mlx5e_rep_get_drvinfo,
        .get_link          = ethtool_op_get_link,
        .get_strings       = mlx5e_rep_get_strings,
        .get_sset_count    = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
};

int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
                attr->u.ppid.id_len = ETH_ALEN;
                ether_addr_copy(attr->u.ppid.id, rep->hw_id);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

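/* Tear down the e-switch rules that re-inject traffic sent on the
 * representor's SQs back to the represented vport, and free the per-SQ
 * bookkeeping entries.
 */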
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_sq *rep_sq, *tmp;
        struct mlx5e_rep_priv *rpriv;

        if (esw->mode != SRIOV_OFFLOADS)
                return;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
                mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
                list_del(&rep_sq->list);
                kfree(rep_sq);
        }
}

static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep,
                                 u32 *sqns_array, int sqns_num)
{
        struct mlx5_flow_handle *flow_rule;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_rep_sq *rep_sq;
        int err;
        int i;

        if (esw->mode != SRIOV_OFFLOADS)
                return 0;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        for (i = 0; i < sqns_num; i++) {
                rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
                if (!rep_sq) {
                        err = -ENOMEM;
                        goto out_err;
                }

                /* Add re-inject rule to the PF/representor sqs */
                flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
                                                                rep->vport,
                                                                sqns_array[i]);
                if (IS_ERR(flow_rule)) {
                        err = PTR_ERR(flow_rule);
                        kfree(rep_sq);
                        goto out_err;
                }
                rep_sq->send_to_vport_rule = flow_rule;
                list_add(&rep_sq->list, &rpriv->vport_sqs_list);
        }
        return 0;

out_err:
        mlx5e_sqs2vport_stop(esw, rep);
        return err;
}

int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5e_channel *c;
        int n, tc, num_sqs = 0;
        int err = -ENOMEM;
        u32 *sqs;

        sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
        if (!sqs)
                goto out;

        for (n = 0; n < priv->channels.num; n++) {
                c = priv->channels.c[n];
                for (tc = 0; tc < c->num_tc; tc++)
                        sqs[num_sqs++] = c->sq[tc].sqn;
        }

        err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
        kfree(sqs);

out:
        if (err)
                netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
        return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        mlx5e_sqs2vport_stop(esw, rep);
}

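/* Derive the initial neigh stats sampling interval from the smaller of the
 * IPv4 (ARP) and IPv6 (ND) DELAY_PROBE_TIME table defaults, and propagate
 * it to the flow counters sampling logic.
 */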
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
        unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
                                                DELAY_PROBE_TIME);
#else
        unsigned long ipv6_interval = ~0UL;
#endif
        unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
                                                DELAY_PROBE_TIME);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);

        rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
        mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        mlx5_fc_queue_stats_work(priv->mdev,
                                 &neigh_update->neigh_stats_work,
                                 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
        struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
                                                    neigh_update.neigh_stats_work.work);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe;

        rtnl_lock();
        if (!list_empty(&rpriv->neigh_update.neigh_list))
                mlx5e_rep_queue_neigh_stats_work(priv);

        list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
                mlx5e_tc_update_neigh_used_value(nhe);

        rtnl_unlock();
}

static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
        refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
        if (refcount_dec_and_test(&nhe->refcnt))
                kfree(nhe);
}

static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
                                   struct mlx5e_encap_entry *e,
                                   bool neigh_connected,
                                   unsigned char ha[ETH_ALEN])
{
        struct ethhdr *eth = (struct ethhdr *)e->encap_header;

        ASSERT_RTNL();

        if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
            !ether_addr_equal(e->h_dest, ha))
                mlx5e_tc_encap_flows_del(priv, e);

        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
                ether_addr_copy(e->h_dest, ha);
                ether_addr_copy(eth->h_dest, ha);

                mlx5e_tc_encap_flows_add(priv, e);
        }
}

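/* Work item scheduled from the netevent notifier: snapshot the neighbour
 * state under its lock, then add or remove the offloaded encap flows of
 * every encap entry whose cached state disagrees with it.
 */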
static void mlx5e_rep_neigh_update(struct work_struct *work)
{
        struct mlx5e_neigh_hash_entry *nhe =
                container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
        struct neighbour *n = nhe->n;
        struct mlx5e_encap_entry *e;
        unsigned char ha[ETH_ALEN];
        struct mlx5e_priv *priv;
        bool neigh_connected;
        bool encap_connected;
        u8 nud_state, dead;

        rtnl_lock();

        /* If these parameters are changed after we release the lock,
         * we'll receive another event letting us know about it.
         * We use this lock to avoid inconsistency between the neigh
         * validity and its HW address.
         */
        read_lock_bh(&n->lock);
        memcpy(ha, n->ha, ETH_ALEN);
        nud_state = n->nud_state;
        dead = n->dead;
        read_unlock_bh(&n->lock);

        neigh_connected = (nud_state & NUD_VALID) && !dead;

        list_for_each_entry(e, &nhe->encap_list, encap_list) {
                encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
                priv = netdev_priv(e->out_dev);

                if (encap_connected != neigh_connected ||
                    !ether_addr_equal(e->h_dest, ha))
                        mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
        }
        mlx5e_rep_neigh_entry_release(nhe);
        rtnl_unlock();
        neigh_release(n);
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh);

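/* Netevent notifier: on NETEVENT_NEIGH_UPDATE, queue update work for the
 * matching cached neigh entry, if any; on NETEVENT_DELAY_PROBE_TIME_UPDATE,
 * shrink the stats sampling interval if a tracked device now probes more
 * frequently.
 */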
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    neigh_update.netevent_nb);
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe = NULL;
        struct mlx5e_neigh m_neigh = {};
        struct neigh_parms *p;
        struct neighbour *n;
        bool found = false;

        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
                if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
                if (n->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                m_neigh.dev = n->dev;
                m_neigh.family = n->ops->family;
                memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

                /* We are in atomic context and can't take RTNL mutex, so use
                 * spin_lock_bh to lookup the neigh table. bh is used since
                 * netevent can be called from a softirq context.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
                if (!nhe) {
                        spin_unlock_bh(&neigh_update->encap_lock);
                        return NOTIFY_DONE;
                }

                /* This assignment is valid as long as the neigh reference
                 * is taken
                 */
                nhe->n = n;

                /* Take a reference to ensure the neighbour and mlx5 encap
                 * entry won't be destructed until we drop the reference in
                 * delayed work.
                 */
                neigh_hold(n);
                mlx5e_rep_neigh_entry_hold(nhe);

                if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
                        mlx5e_rep_neigh_entry_release(nhe);
                        neigh_release(n);
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                break;

        case NETEVENT_DELAY_PROBE_TIME_UPDATE:
                p = ptr;

                /* We check that the device is present since we don't care
                 * about changes in the default table, we only care about
                 * changes done to the per-device delay probe time parameter.
                 */
#if IS_ENABLED(CONFIG_IPV6)
                if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
                if (!p->dev || p->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                /* We are in atomic context and can't take RTNL mutex,
                 * so use spin_lock_bh to walk the neigh list and look for
                 * the relevant device. bh is used since netevent can be
                 * called from a softirq context.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
                        if (p->dev == nhe->m_neigh.dev) {
                                found = true;
                                break;
                        }
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                if (!found)
                        return NOTIFY_DONE;

                neigh_update->min_interval = min_t(unsigned long,
                                                   NEIGH_VAR(p, DELAY_PROBE_TIME),
                                                   neigh_update->min_interval);
                mlx5_fc_update_sampling_interval(priv->mdev,
                                                 neigh_update->min_interval);
                break;
        }
        return NOTIFY_DONE;
}


static const struct rhashtable_params mlx5e_neigh_ht_params = {
        .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
        .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
        .key_len = sizeof(struct mlx5e_neigh),
        .automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        int err;

        err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
        if (err)
                return err;

        INIT_LIST_HEAD(&neigh_update->neigh_list);
        spin_lock_init(&neigh_update->encap_lock);
        INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
                          mlx5e_rep_neigh_stats_work);
        mlx5e_rep_neigh_update_init_interval(rpriv);

        rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
        err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
        if (err)
                goto out_err;
        return 0;

out_err:
        rhashtable_destroy(&neigh_update->neigh_ht);
        return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

        unregister_netevent_notifier(&neigh_update->netevent_nb);

        flush_workqueue(priv->wq); /* flush neigh update works */

        cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

        rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
                                        struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int err;

        err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
                                     &nhe->rhash_node,
                                     mlx5e_neigh_ht_params);
        if (err)
                return err;

        list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

        return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
                                         struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        spin_lock_bh(&rpriv->neigh_update.encap_lock);

        list_del(&nhe->neigh_list);

        rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
                               &nhe->rhash_node,
                               mlx5e_neigh_ht_params);
        spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
                                      mlx5e_neigh_ht_params);
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
                                        struct mlx5e_encap_entry *e,
                                        struct mlx5e_neigh_hash_entry **nhe)
{
        int err;

        *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
        if (!*nhe)
                return -ENOMEM;

        memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
        INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
        INIT_LIST_HEAD(&(*nhe)->encap_list);
        refcount_set(&(*nhe)->refcnt, 1);

        err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
        if (err)
                goto out_free;
        return 0;

out_free:
        kfree(*nhe);
        return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
                                          struct mlx5e_neigh_hash_entry *nhe)
{
        /* The neigh hash entry must be removed from the hash table regardless
         * of the reference count value, so it won't be found by the next
         * neigh notification call. The neigh hash entry reference count is
         * incremented only during creation and neigh notification calls and
         * protects from freeing the nhe struct.
         */
        mlx5e_rep_neigh_entry_remove(priv, nhe);
        mlx5e_rep_neigh_entry_release(nhe);
}

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e)
{
        struct mlx5e_neigh_hash_entry *nhe;
        int err;

        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
        if (!nhe) {
                err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
                if (err)
                        return err;
        }
        list_add(&e->encap_list, &nhe->encap_list);
        return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
                                  struct mlx5e_encap_entry *e)
{
        struct mlx5e_neigh_hash_entry *nhe;

        list_del(&e->encap_list);
        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

        if (list_empty(&nhe->encap_list))
                mlx5e_rep_neigh_entry_destroy(priv, nhe);
}

static int mlx5e_rep_open(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(dev);
        if (err)
                goto unlock;

        if (!mlx5_modify_vport_admin_state(priv->mdev,
                                           MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
                                           rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
                netif_carrier_on(dev);

unlock:
        mutex_unlock(&priv->state_lock);
        return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        mutex_lock(&priv->state_lock);
        mlx5_modify_vport_admin_state(priv->mdev,
                                      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
                                      rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
        ret = mlx5e_close_locked(dev);
        mutex_unlock(&priv->state_lock);
        return ret;
}

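/* VF n is attached to e-switch vport n + 1, so a VF representor reports
 * its VF index as the phys port name.
 */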
static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
                                        char *buf, size_t len)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        ret = snprintf(buf, len, "%d", rep->vport - 1);
        if (ret >= len)
                return -EOPNOTSUPP;

        return 0;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
                              struct tc_cls_flower_offload *cls_flower, int flags)
{
        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
                return mlx5e_configure_flower(priv, cls_flower, flags);
        case TC_CLSFLOWER_DESTROY:
                return mlx5e_delete_flower(priv, cls_flower, flags);
        case TC_CLSFLOWER_STATS:
                return mlx5e_stats_flower(priv, cls_flower, flags);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
                                       void *cb_priv)
{
        struct mlx5e_priv *priv = cb_priv;

        if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct mlx5e_priv *priv = cb_priv;

        if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
        default:
                return -EOPNOTSUPP;
        }
}

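/* Bind/unbind the representor's flower offload callback on an ingress tc
 * block; egress rules reach the driver through the egdev callback above.
 */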
static int mlx5e_rep_setup_tc_block(struct net_device *dev,
                                    struct tc_block_offload *f)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
                                             priv, priv);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                              void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_setup_tc_block(dev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;

        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;

        rep = rpriv->rep;
        if (esw->mode == SRIOV_OFFLOADS &&
            rep && rep->vport == FDB_UPLINK_VPORT)
                return true;

        return false;
}

static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;

        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;

        rep = rpriv->rep;
        if (rep && rep->vport != FDB_UPLINK_VPORT)
                return true;

        return false;
}

bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
                        return true;
        }

        return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
                     struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_sw_stats *sstats = &priv->stats.sw;

        mlx5e_rep_update_sw_counters(priv);

        stats->rx_packets = sstats->rx_packets;
        stats->rx_bytes   = sstats->rx_bytes;
        stats->tx_packets = sstats->tx_packets;
        stats->tx_bytes   = sstats->tx_bytes;

        stats->tx_dropped = sstats->tx_queue_dropped;

        return 0;
}

int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
                            void *sp)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return mlx5e_get_sw_stats64(dev, sp);
        }

        return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
        .switchdev_port_attr_get        = mlx5e_attr_get,
};

static int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu)
{
        return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
        .ndo_open                = mlx5e_rep_open,
        .ndo_stop                = mlx5e_rep_close,
        .ndo_start_xmit          = mlx5e_xmit,
        .ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
        .ndo_setup_tc            = mlx5e_rep_setup_tc,
        .ndo_get_stats64         = mlx5e_rep_get_stats,
        .ndo_has_offload_stats   = mlx5e_has_offload_stats,
        .ndo_get_offload_stats   = mlx5e_get_offload_stats,
        .ndo_change_mtu          = mlx5e_change_rep_mtu,
};

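/* Representor netdevs carry the slow path (e-switch miss) traffic only, so
 * they are built with minimal SQ/RQ sizes and a single traffic class.
 */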
static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params, u16 mtu)
{
        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                         MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

        params->hard_mtu    = MLX5E_ETH_HARD_MTU;
        params->sw_mtu      = mtu;
        params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
        params->rq_wq_type  = MLX5_WQ_TYPE_CYCLIC;
        params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;

        params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

        params->num_tc                = 1;
        params->lro_wqe_sz            = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        u16 max_mtu;

        netdev->netdev_ops = &mlx5e_netdev_ops_rep;

        netdev->watchdog_timeo    = 15 * HZ;

        netdev->ethtool_ops       = &mlx5e_rep_ethtool_ops;

        netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;

        netdev->features         |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
        netdev->hw_features      |= NETIF_F_HW_TC;

        eth_hw_addr_random(netdev);

        netdev->min_mtu = ETH_MIN_MTU;
        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
        netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
}

static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
                           struct net_device *netdev,
                           const struct mlx5e_profile *profile,
                           void *ppriv)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        priv->mdev                         = mdev;
        priv->netdev                       = netdev;
        priv->profile                      = profile;
        priv->ppriv                        = ppriv;

        mutex_init(&priv->state_lock);

        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

        priv->channels.params.num_channels = profile->max_nch(mdev);

        mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
        mlx5e_build_rep_netdev(netdev);

        mlx5e_timestamp_init(priv);
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;
        int err;

        mlx5e_init_l2_addr(priv);

        err = mlx5e_create_direct_rqts(priv);
        if (err)
                return err;

        err = mlx5e_create_direct_tirs(priv);
        if (err)
                goto err_destroy_direct_rqts;

        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      priv->direct_tir[0].tirn);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                goto err_destroy_direct_tirs;
        }
        rpriv->vport_rx_rule = flow_rule;

        return 0;

err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv);
        return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_direct_rqts(priv);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
        int err;

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
                return err;
        }
        return 0;
}

static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
{
#define MLX5E_PORT_REPRESENTOR_NCH 1
        return MLX5E_PORT_REPRESENTOR_NCH;
}

static const struct mlx5e_profile mlx5e_rep_profile = {
        .init                   = mlx5e_init_rep,
        .init_rx                = mlx5e_init_rep_rx,
        .cleanup_rx             = mlx5e_cleanup_rep_rx,
        .init_tx                = mlx5e_init_rep_tx,
        .cleanup_tx             = mlx5e_cleanup_nic_tx,
        .update_stats           = mlx5e_rep_update_hw_counters,
        .max_nch                = mlx5e_get_rep_max_num_channels,
        .update_carrier         = NULL,
        .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
        .max_tc                 = 1,
};

/* e-Switch vport representors */

static int
mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        int err;

        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                err = mlx5e_add_sqs_fwd_rules(priv);
                if (err)
                        return err;
        }

        err = mlx5e_rep_neigh_init(rpriv);
        if (err)
                goto err_remove_sqs;

        /* init shared tc flow table */
        err = mlx5e_tc_esw_init(&rpriv->tc_ht);
        if (err)
                goto err_neigh_cleanup;

        return 0;

err_neigh_cleanup:
        mlx5e_rep_neigh_cleanup(rpriv);
err_remove_sqs:
        mlx5e_remove_sqs_fwd_rules(priv);
        return err;
}

static void
mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_remove_sqs_fwd_rules(priv);

        /* clean uplink offloaded TC rules, delete shared tc flow table */
        mlx5e_tc_esw_cleanup(&rpriv->tc_ht);

        mlx5e_rep_neigh_cleanup(rpriv);
}

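/* Create, attach and register a representor netdev for a VF vport, wiring
 * up neigh tracking and the egress tc offload callback; unwind in reverse
 * order on failure.
 */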
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *uplink_rpriv;
        struct mlx5e_rep_priv *rpriv;
        struct net_device *netdev;
        struct mlx5e_priv *upriv;
        int err;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return -ENOMEM;

        netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
        if (!netdev) {
                pr_warn("Failed to create representor netdev for vport %d\n",
                        rep->vport);
                kfree(rpriv);
                return -EINVAL;
        }

        rpriv->netdev = netdev;
        rpriv->rep = rep;
        rep->rep_if[REP_ETH].priv = rpriv;
        INIT_LIST_HEAD(&rpriv->vport_sqs_list);

        err = mlx5e_attach_netdev(netdev_priv(netdev));
        if (err) {
                pr_warn("Failed to attach representor netdev for vport %d\n",
                        rep->vport);
                goto err_destroy_netdev;
        }

        err = mlx5e_rep_neigh_init(rpriv);
        if (err) {
                pr_warn("Failed to initialize neighbours handling for vport %d\n",
                        rep->vport);
                goto err_detach_netdev;
        }

        uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
        upriv = netdev_priv(uplink_rpriv->netdev);
        err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev,
                                         upriv);
        if (err)
                goto err_neigh_cleanup;

        err = register_netdev(netdev);
        if (err) {
                pr_warn("Failed to register representor netdev for vport %d\n",
                        rep->vport);
                goto err_egdev_cleanup;
        }

        return 0;

err_egdev_cleanup:
        tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
                                     upriv);

err_neigh_cleanup:
        mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
        mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_netdev:
        mlx5e_destroy_netdev(netdev_priv(netdev));
        kfree(rpriv);
        return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *uplink_rpriv;
        void *ppriv = priv->ppriv;
        struct mlx5e_priv *upriv;

        unregister_netdev(netdev);
        uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
                                                    REP_ETH);
        upriv = netdev_priv(uplink_rpriv->netdev);
        tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
                                     upriv);
        mlx5e_rep_neigh_cleanup(rpriv);
        mlx5e_detach_netdev(priv);
        mlx5e_destroy_netdev(priv);
        kfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv;

        rpriv = mlx5e_rep_to_rep_priv(rep);

        return rpriv->netdev;
}

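/* Vport 0 is the uplink/PF representor, registered in
 * mlx5e_register_vport_reps(); VF representors occupy vports
 * 1..MLX5_TOTAL_VPORTS(mdev) - 1.
 */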
static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw   = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;

        for (vport = 1; vport < total_vfs; vport++) {
                struct mlx5_eswitch_rep_if rep_if = {};

                rep_if.load = mlx5e_vport_rep_load;
                rep_if.unload = mlx5e_vport_rep_unload;
                rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
                mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
        }
}

static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;

        for (vport = 1; vport < total_vfs; vport++)
                mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
}

void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw   = mdev->priv.eswitch;
        struct mlx5_eswitch_rep_if rep_if;
        struct mlx5e_rep_priv *rpriv;

        rpriv = priv->ppriv;
        rpriv->netdev = priv->netdev;

        rep_if.load = mlx5e_nic_rep_load;
        rep_if.unload = mlx5e_nic_rep_unload;
        rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
        rep_if.priv = rpriv;
        INIT_LIST_HEAD(&rpriv->vport_sqs_list);
        mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport */

        mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}

void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw   = mdev->priv.eswitch;

        mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
        mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF */
}

void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return NULL;

        rpriv->rep = &esw->offloads.vport_reps[0];
        return rpriv;
}