linux/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

#define UPLINK_REP_INDEX 0

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}

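/* Editorial note: set the source vport match on a rule's flow spec. With
 * vport metadata matching enabled, the match is on the per-vport tag in
 * metadata_reg_c_0 (see mlx5_eswitch_enable_passing_vport_metadata()
 * below); otherwise it is on the misc source_port field, plus the owner
 * vhca_id on merged e-switches, where rules may originate from the peer
 * device.
 */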
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.reformat_id = attr->dests[j].encap_id;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.reformat_id =
						attr->dests[j].encap_id;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

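/* Editorial note: this adds the level 0 half of a "split" rule. It mirrors
 * the packet to the first split_count vport destinations and then forwards
 * to the level 1 table of the same chain/prio, where the offloaded rule
 * created with a nonzero split_count (mlx5_eswitch_add_offloaded_rule()
 * above) applies the remaining actions.
 */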
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.reformat_id = attr->dests[i].encap_id;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	esw->offloads.num_flows++;

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	/* unref the term table */
	for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
		if (attr->dests[i].termtbl)
			mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
	}

	esw->offloads.num_flows--;

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep  = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep  = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) together with
	 *     rules that push vlans (attr->vlan != 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

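/* Editorial note: when the firmware cannot push/pop vlans as a steering
 * action, the driver emulates it below with per-vport vlan insertion and
 * stripping (__mlx5_eswitch_set_vport_vlan); the refcounts keep concurrent
 * rules consistent, at the cost of allowing only one vlan id per vport.
 */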
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* no-op if vlan push/pop is supported natively and not emulated */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		   !attr->dest_chain);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* no-op if vlan push/pop is supported natively and not emulated */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

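/* Editorial note: a send-to-vport rule matches traffic that the e-switch
 * manager sends from a given send queue (source_sqn) and forwards it
 * straight to the given vport, typically the one the transmitting
 * representor stands for, bypassing the rest of the FDB.
 */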
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

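/* Editorial note: enable (and below, disable) copying of metadata_reg_c_0
 * from the FDB to the vport via a read-modify-write of the esw vport
 * context, so the per-vport tag matched on in
 * mlx5_eswitch_set_rule_source_port() also survives into the receiving
 * side's steering tables.
 */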
static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
						     in, sizeof(in));
}

static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
						     in, sizeof(in));
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

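/* Editorial note: peer miss rules are used when two e-switches are merged
 * (e.g. dual-port RoCE). One rule per peer vport matches traffic sourced
 * from that vport on the peer device and forwards it to the peer's
 * e-switch manager vport, as set up in peer_miss_rules_setup() above.
 */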
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently supports 4 pools of 4 sizes (ESW_POOLS) and a
 * virtual memory region of 16M (ESW_SIZE); this region is duplicated
 * for each flow table pool. We can allocate up to 16M of each pool,
 * and we keep track of how much we have used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };
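
/* Editorial note, worked example: with a firmware max flow table size of
 * at least 4M entries, esw_create_offloads_fdb_tables() below seeds
 * fdb_left[] with 16M/4M = 4, 16M/1M = 16, 16M/64K = 256 and
 * 16M/4K = 4096 tables per pool; get_sz_from_pool() then always hands out
 * the largest size that still has tables left in its pool.
 */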

static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}

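/* Editorial note: look up, or lazily create, the table for a given
 * chain/prio/level and take a reference on it and on every lower level of
 * the same chain/prio, so fs_core can connect each level to the one below
 * it; esw_put_prio_table() drops the references and destroys tables whose
 * refcount hits zero.
 */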
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}

static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters_2.metadata_reg_c_0);
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

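/* Editorial note: the slow path FDB created below is carved into three
 * flow groups, in this order: send-to-vport rules
 * (nvports * MAX_SQ_NVPORTS + MAX_PF_SQ entries), peer e-switch miss
 * rules (one per vport), and the two catch-all unicast/multicast miss
 * rules (MLX5_ESW_MISS_FLOWS).
 */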
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

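/* Editorial note: steer traffic that arrived from the given vport
 * (matched by metadata or source_port, as elsewhere in this file) to the
 * caller-supplied destination in the offloads RX table, typically the
 * representor's receive side.
 */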
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable(esw);
	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}
1396
1397void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
1398{
1399        kfree(esw->offloads.vport_reps);
1400}
1401
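/* Allocate the per-vport representor array and put every rep type in
 * the REP_UNREGISTERED state. The device MAC address is cached in each
 * rep's hw_id.
 */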
1402int esw_offloads_init_reps(struct mlx5_eswitch *esw)
1403{
1404        int total_vports = esw->total_vports;
1405        struct mlx5_core_dev *dev = esw->dev;
1406        struct mlx5_eswitch_rep *rep;
1407        u8 hw_id[ETH_ALEN], rep_type;
1408        int vport_index;
1409
1410        esw->offloads.vport_reps = kcalloc(total_vports,
1411                                           sizeof(struct mlx5_eswitch_rep),
1412                                           GFP_KERNEL);
1413        if (!esw->offloads.vport_reps)
1414                return -ENOMEM;
1415
1416        mlx5_query_mac_address(dev, hw_id);
1417
1418        mlx5_esw_for_all_reps(esw, vport_index, rep) {
1419                rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
1420                rep->vport_index = vport_index;
1421                ether_addr_copy(rep->hw_id, hw_id);
1422
1423                for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
1424                        atomic_set(&rep->rep_data[rep_type].state,
1425                                   REP_UNREGISTERED);
1426        }
1427
1428        return 0;
1429}
1430
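/* Rep load/unload is driven by an atomic state machine so the calls are
 * idempotent: unload only fires for reps currently in REP_LOADED and
 * drops them back to REP_REGISTERED, while load (further below) only
 * fires for reps in REP_REGISTERED.
 */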
1431static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
1432                                      struct mlx5_eswitch_rep *rep, u8 rep_type)
1433{
1434        if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
1435                           REP_LOADED, REP_REGISTERED) == REP_LOADED)
1436                esw->offloads.rep_ops[rep_type]->unload(rep);
1437}
1438
1439static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
1440{
1441        struct mlx5_eswitch_rep *rep;
1442
1443        if (mlx5_ecpf_vport_exists(esw->dev)) {
1444                rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
1445                __esw_offloads_unload_rep(esw, rep, rep_type);
1446        }
1447
1448        if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1449                rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1450                __esw_offloads_unload_rep(esw, rep, rep_type);
1451        }
1452
1453        rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
1454        __esw_offloads_unload_rep(esw, rep, rep_type);
1455}
1456
1457static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
1458                                   u8 rep_type)
1459{
1460        struct mlx5_eswitch_rep *rep;
1461        int i;
1462
1463        mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
1464                __esw_offloads_unload_rep(esw, rep, rep_type);
1465}
1466
1467static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
1468{
1469        u8 rep_type = NUM_REP_TYPES;
1470
1471        while (rep_type-- > 0)
1472                __unload_reps_vf_vport(esw, nvports, rep_type);
1473}
1474
1475static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
1476{
1477        __unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
1478
1479        /* Special vports must be the last to unload. */
1480        __unload_reps_special_vport(esw, rep_type);
1481}
1482
1483static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
1484{
1485        u8 rep_type = NUM_REP_TYPES;
1486
1487        while (rep_type-- > 0)
1488                __unload_reps_all_vport(esw, rep_type);
1489}
1490
1491static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
1492                                   struct mlx5_eswitch_rep *rep, u8 rep_type)
1493{
1494        int err = 0;
1495
1496        if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
1497                           REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
1498                err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
1499                if (err)
1500                        atomic_set(&rep->rep_data[rep_type].state,
1501                                   REP_REGISTERED);
1502        }
1503
1504        return err;
1505}
1506
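/* Load the special vport reps: uplink first (it creates the mdev
 * resources the other reps depend on), then the PF and ECPF reps where
 * they exist. The error path unwinds in reverse order.
 */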
1507static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
1508{
1509        struct mlx5_eswitch_rep *rep;
1510        int err;
1511
1512        rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
1513        err = __esw_offloads_load_rep(esw, rep, rep_type);
1514        if (err)
1515                return err;
1516
1517        if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1518                rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1519                err = __esw_offloads_load_rep(esw, rep, rep_type);
1520                if (err)
1521                        goto err_pf;
1522        }
1523
1524        if (mlx5_ecpf_vport_exists(esw->dev)) {
1525                rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
1526                err = __esw_offloads_load_rep(esw, rep, rep_type);
1527                if (err)
1528                        goto err_ecpf;
1529        }
1530
1531        return 0;
1532
1533err_ecpf:
1534        if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1535                rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1536                __esw_offloads_unload_rep(esw, rep, rep_type);
1537        }
1538
1539err_pf:
1540        rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
1541        __esw_offloads_unload_rep(esw, rep, rep_type);
1542        return err;
1543}
1544
1545static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
1546                                u8 rep_type)
1547{
1548        struct mlx5_eswitch_rep *rep;
1549        int err, i;
1550
1551        mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
1552                err = __esw_offloads_load_rep(esw, rep, rep_type);
1553                if (err)
1554                        goto err_vf;
1555        }
1556
1557        return 0;
1558
1559err_vf:
1560        __unload_reps_vf_vport(esw, --i, rep_type);
1561        return err;
1562}
1563
1564static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
1565{
1566        int err;
1567
1568        /* Special vports must be loaded first, as the uplink rep creates mdev resources. */
1569        err = __load_reps_special_vport(esw, rep_type);
1570        if (err)
1571                return err;
1572
1573        err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
1574        if (err)
1575                goto err_vfs;
1576
1577        return 0;
1578
1579err_vfs:
1580        __unload_reps_special_vport(esw, rep_type);
1581        return err;
1582}
1583
1584static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
1585{
1586        u8 rep_type = 0;
1587        int err;
1588
1589        for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
1590                err = __load_reps_vf_vport(esw, nvports, rep_type);
1591                if (err)
1592                        goto err_reps;
1593        }
1594
1595        return 0;
1596
1597err_reps:
1598        while (rep_type-- > 0)
1599                __unload_reps_vf_vport(esw, nvports, rep_type);
1600        return err;
1601}
1602
1603static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
1604{
1605        u8 rep_type = 0;
1606        int err;
1607
1608        for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
1609                err = __load_reps_all_vport(esw, rep_type);
1610                if (err)
1611                        goto err_reps;
1612        }
1613
1614        return 0;
1615
1616err_reps:
1617        while (rep_type-- > 0)
1618                __unload_reps_all_vport(esw, rep_type);
1619        return err;
1620}
1621
1622#define ESW_OFFLOADS_DEVCOM_PAIR        (0)
1623#define ESW_OFFLOADS_DEVCOM_UNPAIR      (1)
1624
1625static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1626                                  struct mlx5_eswitch *peer_esw)
1627{
1628        int err;
1629
1630        err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1631        if (err)
1632                return err;
1633
1634        return 0;
1635}
1636
1637static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
1638{
1639        mlx5e_tc_clean_fdb_peer_flows(esw);
1640        esw_del_fdb_peer_miss_rules(esw);
1641}
1642
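/* Devcom event handler used for merged-eswitch pairing. On PAIR, both
 * eswitches must agree on vport metadata matching; peer miss rules are
 * then installed in both directions before the pair is marked as such.
 * On UNPAIR the rules are removed again.
 */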
1643static int mlx5_esw_offloads_devcom_event(int event,
1644                                          void *my_data,
1645                                          void *event_data)
1646{
1647        struct mlx5_eswitch *esw = my_data;
1648        struct mlx5_eswitch *peer_esw = event_data;
1649        struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1650        int err;
1651
1652        switch (event) {
1653        case ESW_OFFLOADS_DEVCOM_PAIR:
1654                if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
1655                    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
1656                        break;
1657
1658                err = mlx5_esw_offloads_pair(esw, peer_esw);
1659                if (err)
1660                        goto err_out;
1661
1662                err = mlx5_esw_offloads_pair(peer_esw, esw);
1663                if (err)
1664                        goto err_pair;
1665
1666                mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1667                break;
1668
1669        case ESW_OFFLOADS_DEVCOM_UNPAIR:
1670                if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1671                        break;
1672
1673                mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1674                mlx5_esw_offloads_unpair(peer_esw);
1675                mlx5_esw_offloads_unpair(esw);
1676                break;
1677        }
1678
1679        return 0;
1680
1681err_pair:
1682        mlx5_esw_offloads_unpair(esw);
1683
1684err_out:
1685        mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d\n",
1686                      event, err);
1687        return err;
1688}
1689
1690static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1691{
1692        struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1693
1694        INIT_LIST_HEAD(&esw->offloads.peer_flows);
1695        mutex_init(&esw->offloads.peer_mutex);
1696
1697        if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1698                return;
1699
1700        mlx5_devcom_register_component(devcom,
1701                                       MLX5_DEVCOM_ESW_OFFLOADS,
1702                                       mlx5_esw_offloads_devcom_event,
1703                                       esw);
1704
1705        mlx5_devcom_send_event(devcom,
1706                               MLX5_DEVCOM_ESW_OFFLOADS,
1707                               ESW_OFFLOADS_DEVCOM_PAIR, esw);
1708}
1709
1710static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1711{
1712        struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1713
1714        if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1715                return;
1716
1717        mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1718                               ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1719
1720        mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1721}
1722
1723static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1724                                             struct mlx5_vport *vport)
1725{
1726        struct mlx5_flow_act flow_act = {0};
1727        struct mlx5_flow_spec *spec;
1728        int err = 0;
1729
1730        /* In prio tag mode there is only one FTE:
1731         * 1) Untagged packets - push the prio tag VLAN and modify
1732         *    metadata if required, then allow.
1733         * Unmatched traffic is allowed by default.
1734         */
1735
1736        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1737        if (!spec) {
1738                err = -ENOMEM;
1739                goto out_no_mem;
1740        }
1741
1742        /* Untagged packets - push prio tag VLAN, allow */
1743        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1744        MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
1745        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1746        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1747                          MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1748        flow_act.vlan[0].ethtype = ETH_P_8021Q;
1749        flow_act.vlan[0].vid = 0;
1750        flow_act.vlan[0].prio = 0;
1751
1752        if (vport->ingress.modify_metadata_rule) {
1753                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1754                flow_act.modify_id = vport->ingress.modify_metadata_id;
1755        }
1756
1757        vport->ingress.allow_rule =
1758                mlx5_add_flow_rules(vport->ingress.acl, spec,
1759                                    &flow_act, NULL, 0);
1760        if (IS_ERR(vport->ingress.allow_rule)) {
1761                err = PTR_ERR(vport->ingress.allow_rule);
1762                esw_warn(esw->dev,
1763                 "vport[%d] failed to configure ingress untagged allow rule, err(%d)\n",
1764                         vport->vport, err);
1765                vport->ingress.allow_rule = NULL;
1766                goto out;
1767        }
1768
1769out:
1770        kvfree(spec);
1771out_no_mem:
1772        if (err)
1773                esw_vport_cleanup_ingress_rules(esw, vport);
1774        return err;
1775}
1776
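/* Install an ingress ACL rule that writes this vport's metadata value
 * (see mlx5_eswitch_get_vport_metadata_for_match()) into reg_c_0 on
 * every packet, allowing the FDB to match on the source vport via
 * metadata instead of the source port.
 */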
1777static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1778                                                     struct mlx5_vport *vport)
1779{
1780        u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
1781        static const struct mlx5_flow_spec spec = {};
1782        struct mlx5_flow_act flow_act = {};
1783        int err = 0;
1784
1785        MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
1786        MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
1787        MLX5_SET(set_action_in, action, data,
1788                 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
1789
1790        err = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1791                                       1, action, &vport->ingress.modify_metadata_id);
1792        if (err) {
1793                esw_warn(esw->dev,
1794                         "failed to alloc modify header for vport %d ingress acl (%d)\n",
1795                         vport->vport, err);
1796                return err;
1797        }
1798
1799        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1800        flow_act.modify_id = vport->ingress.modify_metadata_id;
1801        vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
1802                                                                  &spec, &flow_act, NULL, 0);
1803        if (IS_ERR(vport->ingress.modify_metadata_rule)) {
1804                err = PTR_ERR(vport->ingress.modify_metadata_rule);
1805                esw_warn(esw->dev,
1806                         "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
1807                         vport->vport, err);
1808                vport->ingress.modify_metadata_rule = NULL;
1809                goto out;
1810        }
1811
1812out:
1813        if (err)
1814                mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);
1815        return err;
1816}
1817
1818void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1819                                               struct mlx5_vport *vport)
1820{
1821        if (vport->ingress.modify_metadata_rule) {
1822                mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
1823                mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);
1824
1825                vport->ingress.modify_metadata_rule = NULL;
1826        }
1827}
1828
1829static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
1830                                            struct mlx5_vport *vport)
1831{
1832        struct mlx5_flow_act flow_act = {0};
1833        struct mlx5_flow_spec *spec;
1834        int err = 0;
1835
1836        if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
1837                return 0;
1838
1839        /* In prio tag mode there is only one FTE:
1840         * 1) Prio tag packets - pop the prio tag VLAN, then allow.
1841         * Unmatched traffic is allowed by default.
1842         */
1843
1844        esw_vport_cleanup_egress_rules(esw, vport);
1845
1846        err = esw_vport_enable_egress_acl(esw, vport);
1847        if (err) {
1848                mlx5_core_warn(esw->dev,
1849                               "failed to enable egress acl (%d) on vport[%d]\n",
1850                               err, vport->vport);
1851                return err;
1852        }
1853
1854        esw_debug(esw->dev,
1855                  "vport[%d] configure prio tag egress rules\n", vport->vport);
1856
1857        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1858        if (!spec) {
1859                err = -ENOMEM;
1860                goto out_no_mem;
1861        }
1862
1863        /* prio tag vlan rule - pop it so VF receives untagged packets */
1864        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1865        MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
1866        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1867        MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
1868
1869        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1870        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1871                          MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1872        vport->egress.allowed_vlan =
1873                mlx5_add_flow_rules(vport->egress.acl, spec,
1874                                    &flow_act, NULL, 0);
1875        if (IS_ERR(vport->egress.allowed_vlan)) {
1876                err = PTR_ERR(vport->egress.allowed_vlan);
1877                esw_warn(esw->dev,
1878                         "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
1879                         vport->vport, err);
1880                vport->egress.allowed_vlan = NULL;
1881                goto out;
1882        }
1883
1884out:
1885        kvfree(spec);
1886out_no_mem:
1887        if (err)
1888                esw_vport_cleanup_egress_rules(esw, vport);
1889        return err;
1890}
1891
1892static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
1893                                           struct mlx5_vport *vport)
1894{
1895        int err;
1896
1897        if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
1898            !MLX5_CAP_GEN(esw->dev, prio_tag_required))
1899                return 0;
1900
1901        esw_vport_cleanup_ingress_rules(esw, vport);
1902
1903        err = esw_vport_enable_ingress_acl(esw, vport);
1904        if (err) {
1905                esw_warn(esw->dev,
1906                         "failed to enable ingress acl (%d) on vport[%d]\n",
1907                         err, vport->vport);
1908                return err;
1909        }
1910
1911        esw_debug(esw->dev,
1912                  "vport[%d] configure ingress rules\n", vport->vport);
1913
1914        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1915                err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
1916                if (err)
1917                        goto out;
1918        }
1919
1920        if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
1921            mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
1922                err = esw_vport_ingress_prio_tag_config(esw, vport);
1923                if (err)
1924                        goto out;
1925        }
1926
1927out:
1928        if (err)
1929                esw_vport_disable_ingress_acl(esw, vport);
1930        return err;
1931}
1932
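/* Matching on vport metadata requires an ingress ACL on the uplink,
 * support for passing reg_c_0 from the FDB to the vport and the
 * flow_source capability; it is not used on ECPF based setups.
 */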
1933static bool
1934esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
1935{
1936        if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
1937                return false;
1938
1939        if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
1940              MLX5_FDB_TO_VPORT_REG_C_0))
1941                return false;
1942
1943        if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
1944                return false;
1945
1946        if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
1947            mlx5_ecpf_vport_exists(esw->dev))
1948                return false;
1949
1950        return true;
1951}
1952
1953static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
1954{
1955        struct mlx5_vport *vport;
1956        int i, j;
1957        int err;
1958
1959        if (esw_check_vport_match_metadata_supported(esw))
1960                esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
1961
1962        mlx5_esw_for_all_vports(esw, i, vport) {
1963                err = esw_vport_ingress_common_config(esw, vport);
1964                if (err)
1965                        goto err_ingress;
1966
1967                if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
1968                        err = esw_vport_egress_prio_tag_config(esw, vport);
1969                        if (err)
1970                                goto err_egress;
1971                }
1972        }
1973
1974        if (mlx5_eswitch_vport_match_metadata_enabled(esw))
1975                esw_info(esw->dev, "Use metadata reg_c as source vport to match\n");
1976
1977        return 0;
1978
1979err_egress:
1980        esw_vport_disable_ingress_acl(esw, vport);
1981err_ingress:
1982        for (j = MLX5_VPORT_PF; j < i; j++) {
1983                vport = &esw->vports[j];
1984                esw_vport_disable_egress_acl(esw, vport);
1985                esw_vport_disable_ingress_acl(esw, vport);
1986        }
1987
1988        return err;
1989}
1990
1991static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
1992{
1993        struct mlx5_vport *vport;
1994        int i;
1995
1996        mlx5_esw_for_all_vports(esw, i, vport) {
1997                esw_vport_disable_egress_acl(esw, vport);
1998                esw_vport_disable_ingress_acl(esw, vport);
1999        }
2000
2001        esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2002}
2003
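/* Create the offloads steering objects: the per-vport ACL tables first,
 * then the FDB tables, the offloads RX table and the vport RX group.
 * esw_offloads_steering_cleanup() destroys them in reverse order.
 */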
2004static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
2005{
2006        int num_vfs = esw->esw_funcs.num_vfs;
2007        int total_vports;
2008        int err;
2009
2010        if (mlx5_core_is_ecpf_esw_manager(esw->dev))
2011                total_vports = esw->total_vports;
2012        else
2013                total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
2014
2015        memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
2016        mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
2017
2018        err = esw_create_offloads_acl_tables(esw);
2019        if (err)
2020                return err;
2021
2022        err = esw_create_offloads_fdb_tables(esw, total_vports);
2023        if (err)
2024                goto create_fdb_err;
2025
2026        err = esw_create_offloads_table(esw, total_vports);
2027        if (err)
2028                goto create_ft_err;
2029
2030        err = esw_create_vport_rx_group(esw, total_vports);
2031        if (err)
2032                goto create_fg_err;
2033
2034        return 0;
2035
2036create_fg_err:
2037        esw_destroy_offloads_table(esw);
2038
2039create_ft_err:
2040        esw_destroy_offloads_fdb_tables(esw);
2041
2042create_fdb_err:
2043        esw_destroy_offloads_acl_tables(esw);
2044
2045        return err;
2046}
2047
2048static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
2049{
2050        esw_destroy_vport_rx_group(esw);
2051        esw_destroy_offloads_table(esw);
2052        esw_destroy_offloads_fdb_tables(esw);
2053        esw_destroy_offloads_acl_tables(esw);
2054}
2055
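/* React to a firmware-reported change in the number of host VFs by
 * loading or unloading VF reps. The event is ignored when the host PF
 * is disabled or when the count did not actually change.
 */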
2056static void
2057esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
2058{
2059        bool host_pf_disabled;
2060        u16 new_num_vfs;
2061
2062        new_num_vfs = MLX5_GET(query_esw_functions_out, out,
2063                               host_params_context.host_num_of_vfs);
2064        host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
2065                                    host_params_context.host_pf_disabled);
2066
2067        if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
2068                return;
2069
2070        /* Number of VFs can only change from "0 to x" or "x to 0". */
2071        if (esw->esw_funcs.num_vfs > 0) {
2072                esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
2073        } else {
2074                int err;
2075
2076                err = esw_offloads_load_vf_reps(esw, new_num_vfs);
2077                if (err)
2078                        return;
2079        }
2080        esw->esw_funcs.num_vfs = new_num_vfs;
2081}
2082
2083static void esw_functions_changed_event_handler(struct work_struct *work)
2084{
2085        struct mlx5_host_work *host_work;
2086        struct mlx5_eswitch *esw;
2087        const u32 *out;
2088
2089        host_work = container_of(work, struct mlx5_host_work, work);
2090        esw = host_work->esw;
2091
2092        out = mlx5_esw_query_functions(esw->dev);
2093        if (IS_ERR(out))
2094                goto out;
2095
2096        esw_vfs_changed_event_handler(esw, out);
2097        kvfree(out);
2098out:
2099        kfree(host_work);
2100}
2101
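/* ESW_FUNCTIONS_CHANGED notifier. It may be called in atomic context
 * (hence the GFP_ATOMIC allocation), so the firmware query and the rep
 * reload are deferred to the eswitch work queue.
 */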
2102int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
2103{
2104        struct mlx5_esw_functions *esw_funcs;
2105        struct mlx5_host_work *host_work;
2106        struct mlx5_eswitch *esw;
2107
2108        host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
2109        if (!host_work)
2110                return NOTIFY_DONE;
2111
2112        esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
2113        esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
2114
2115        host_work->esw = esw;
2116
2117        INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
2118        queue_work(esw->work_queue, &host_work->work);
2119
2120        return NOTIFY_OK;
2121}
2122
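/* Bring up offloads mode: pick the default encap mode from the FDB
 * caps, create the steering objects, enable vport metadata passing when
 * in use, load all reps and join the devcom pairing domain.
 */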
2123int esw_offloads_init(struct mlx5_eswitch *esw)
2124{
2125        int err;
2126
2127        if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
2128            MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
2129                esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
2130        else
2131                esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2132
2133        err = esw_offloads_steering_init(esw);
2134        if (err)
2135                return err;
2136
2137        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
2138                err = mlx5_eswitch_enable_passing_vport_metadata(esw);
2139                if (err)
2140                        goto err_vport_metadata;
2141        }
2142
2143        err = esw_offloads_load_all_reps(esw);
2144        if (err)
2145                goto err_reps;
2146
2147        esw_offloads_devcom_init(esw);
2148        mutex_init(&esw->offloads.termtbl_mutex);
2149
2150        mlx5_rdma_enable_roce(esw->dev);
2151
2152        return 0;
2153
2154err_reps:
2155        if (mlx5_eswitch_vport_match_metadata_enabled(esw))
2156                mlx5_eswitch_disable_passing_vport_metadata(esw);
2157err_vport_metadata:
2158        esw_offloads_steering_cleanup(esw);
2159        return err;
2160}
2161
2162static int esw_offloads_stop(struct mlx5_eswitch *esw,
2163                             struct netlink_ext_ack *extack)
2164{
2165        int err, err1;
2166
2167        mlx5_eswitch_disable(esw);
2168        err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
2169        if (err) {
2170                NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
2171                err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
2172                if (err1) {
2173                        NL_SET_ERR_MSG_MOD(extack,
2174                                           "Failed setting eswitch back to offloads");
2175                }
2176        }
2177
2178        return err;
2179}
2180
2181void esw_offloads_cleanup(struct mlx5_eswitch *esw)
2182{
2183        mlx5_rdma_disable_roce(esw->dev);
2184        esw_offloads_devcom_cleanup(esw);
2185        esw_offloads_unload_all_reps(esw);
2186        if (mlx5_eswitch_vport_match_metadata_enabled(esw))
2187                mlx5_eswitch_disable_passing_vport_metadata(esw);
2188        esw_offloads_steering_cleanup(esw);
2189        esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2190}
2191
2192static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
2193{
2194        switch (mode) {
2195        case DEVLINK_ESWITCH_MODE_LEGACY:
2196                *mlx5_mode = MLX5_ESWITCH_LEGACY;
2197                break;
2198        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
2199                *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
2200                break;
2201        default:
2202                return -EINVAL;
2203        }
2204
2205        return 0;
2206}
2207
2208static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
2209{
2210        switch (mlx5_mode) {
2211        case MLX5_ESWITCH_LEGACY:
2212                *mode = DEVLINK_ESWITCH_MODE_LEGACY;
2213                break;
2214        case MLX5_ESWITCH_OFFLOADS:
2215                *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
2216                break;
2217        default:
2218                return -EINVAL;
2219        }
2220
2221        return 0;
2222}
2223
2224static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
2225{
2226        switch (mode) {
2227        case DEVLINK_ESWITCH_INLINE_MODE_NONE:
2228                *mlx5_mode = MLX5_INLINE_MODE_NONE;
2229                break;
2230        case DEVLINK_ESWITCH_INLINE_MODE_LINK:
2231                *mlx5_mode = MLX5_INLINE_MODE_L2;
2232                break;
2233        case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
2234                *mlx5_mode = MLX5_INLINE_MODE_IP;
2235                break;
2236        case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
2237                *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
2238                break;
2239        default:
2240                return -EINVAL;
2241        }
2242
2243        return 0;
2244}
2245
2246static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
2247{
2248        switch (mlx5_mode) {
2249        case MLX5_INLINE_MODE_NONE:
2250                *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
2251                break;
2252        case MLX5_INLINE_MODE_L2:
2253                *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
2254                break;
2255        case MLX5_INLINE_MODE_IP:
2256                *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
2257                break;
2258        case MLX5_INLINE_MODE_TCP_UDP:
2259                *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
2260                break;
2261        default:
2262                return -EINVAL;
2263        }
2264
2265        return 0;
2266}
2267
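/* Common checks for the devlink eswitch callbacks: the port must be
 * Ethernet, the device an eswitch manager, and the eswitch enabled
 * (or the device an ECPF eswitch manager).
 */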
2268static int mlx5_devlink_eswitch_check(struct devlink *devlink)
2269{
2270        struct mlx5_core_dev *dev = devlink_priv(devlink);
2271
2272        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2273                return -EOPNOTSUPP;
2274
2275        if (!MLX5_ESWITCH_MANAGER(dev))
2276                return -EPERM;
2277
2278        if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
2279            !mlx5_core_is_ecpf_esw_manager(dev))
2280                return -EOPNOTSUPP;
2281
2282        return 0;
2283}
2284
2285int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
2286                                  struct netlink_ext_ack *extack)
2287{
2288        struct mlx5_core_dev *dev = devlink_priv(devlink);
2289        u16 cur_mlx5_mode, mlx5_mode = 0;
2290        int err;
2291
2292        err = mlx5_devlink_eswitch_check(devlink);
2293        if (err)
2294                return err;
2295
2296        cur_mlx5_mode = dev->priv.eswitch->mode;
2297
2298        if (esw_mode_from_devlink(mode, &mlx5_mode))
2299                return -EINVAL;
2300
2301        if (cur_mlx5_mode == mlx5_mode)
2302                return 0;
2303
2304        if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
2305                return esw_offloads_start(dev->priv.eswitch, extack);
2306        else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
2307                return esw_offloads_stop(dev->priv.eswitch, extack);
2308        else
2309                return -EINVAL;
2310}
2311
2312int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
2313{
2314        struct mlx5_core_dev *dev = devlink_priv(devlink);
2315        int err;
2316
2317        err = mlx5_devlink_eswitch_check(devlink);
2318        if (err)
2319                return err;
2320
2321        return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
2322}
2323
2324int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
2325                                         struct netlink_ext_ack *extack)
2326{
2327        struct mlx5_core_dev *dev = devlink_priv(devlink);
2328        struct mlx5_eswitch *esw = dev->priv.eswitch;
2329        int err, vport, num_vport;
2330        u8 mlx5_mode;
2331
2332        err = mlx5_devlink_eswitch_check(devlink);
2333        if (err)
2334                return err;
2335
2336        switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2337        case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2338                if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
2339                        return 0;
2340                /* fall through */
2341        case MLX5_CAP_INLINE_MODE_L2:
2342                NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
2343                return -EOPNOTSUPP;
2344        case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2345                break;
2346        }
2347
2348        if (esw->offloads.num_flows > 0) {
2349                NL_SET_ERR_MSG_MOD(extack,
2350                                   "Can't set inline mode when flows are configured");
2351                return -EOPNOTSUPP;
2352        }
2353
2354        err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
2355        if (err)
2356                goto out;
2357
2358        mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
2359                err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
2360                if (err) {
2361                        NL_SET_ERR_MSG_MOD(extack,
2362                                           "Failed to set min inline on vport");
2363                        goto revert_inline_mode;
2364                }
2365        }
2366
2367        esw->offloads.inline_mode = mlx5_mode;
2368        return 0;
2369
2370revert_inline_mode:
2371        num_vport = --vport;
2372        mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
2373                mlx5_modify_nic_vport_min_inline(dev,
2374                                                 vport,
2375                                                 esw->offloads.inline_mode);
2376out:
2377        return err;
2378}
2379
2380int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
2381{
2382        struct mlx5_core_dev *dev = devlink_priv(devlink);
2383        struct mlx5_eswitch *esw = dev->priv.eswitch;
2384        int err;
2385
2386        err = mlx5_devlink_eswitch_check(devlink);
2387        if (err)
2388                return err;
2389
2390        return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
2391}
2392
2393int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
2394{
2395        u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
2396        struct mlx5_core_dev *dev = esw->dev;
2397        int vport;
2398
2399        if (!MLX5_CAP_GEN(dev, vport_group_manager))
2400                return -EOPNOTSUPP;
2401
2402        if (esw->mode == MLX5_ESWITCH_NONE)
2403                return -EOPNOTSUPP;
2404
2405        switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2406        case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2407                mlx5_mode = MLX5_INLINE_MODE_NONE;
2408                goto out;
2409        case MLX5_CAP_INLINE_MODE_L2:
2410                mlx5_mode = MLX5_INLINE_MODE_L2;
2411                goto out;
2412        case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2413                goto query_vports;
2414        }
2415
2416query_vports:
2417        mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
2418        mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
2419                mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
2420                if (prev_mlx5_mode != mlx5_mode)
2421                        return -EINVAL;
2422                prev_mlx5_mode = mlx5_mode;
2423        }
2424
2425out:
2426        *mode = mlx5_mode;
2427        return 0;
2428}
2429
2430int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
2431                                        enum devlink_eswitch_encap_mode encap,
2432                                        struct netlink_ext_ack *extack)
2433{
2434        struct mlx5_core_dev *dev = devlink_priv(devlink);
2435        struct mlx5_eswitch *esw = dev->priv.eswitch;
2436        int err;
2437
2438        err = mlx5_devlink_eswitch_check(devlink);
2439        if (err)
2440                return err;
2441
2442        if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
2443            (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
2444             !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
2445                return -EOPNOTSUPP;
2446
2447        if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
2448                return -EOPNOTSUPP;
2449
2450        if (esw->mode == MLX5_ESWITCH_LEGACY) {
2451                esw->offloads.encap = encap;
2452                return 0;
2453        }
2454
2455        if (esw->offloads.encap == encap)
2456                return 0;
2457
2458        if (esw->offloads.num_flows > 0) {
2459                NL_SET_ERR_MSG_MOD(extack,
2460                                   "Can't set encapsulation when flows are configured");
2461                return -EOPNOTSUPP;
2462        }
2463
2464        esw_destroy_offloads_fdb_tables(esw);
2465
2466        esw->offloads.encap = encap;
2467
2468        err = esw_create_offloads_fdb_tables(esw, esw->nvports);
2469
2470        if (err) {
2471                NL_SET_ERR_MSG_MOD(extack,
2472                                   "Failed re-creating fast FDB table");
2473                esw->offloads.encap = !encap;
2474                (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
2475        }
2476
2477        return err;
2478}
2479
2480int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
2481                                        enum devlink_eswitch_encap_mode *encap)
2482{
2483        struct mlx5_core_dev *dev = devlink_priv(devlink);
2484        struct mlx5_eswitch *esw = dev->priv.eswitch;
2485        int err;
2486
2487        err = mlx5_devlink_eswitch_check(devlink);
2488        if (err)
2489                return err;
2490
2491        *encap = esw->offloads.encap;
2492        return 0;
2493}
2494
2495void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
2496                                      const struct mlx5_eswitch_rep_ops *ops,
2497                                      u8 rep_type)
2498{
2499        struct mlx5_eswitch_rep_data *rep_data;
2500        struct mlx5_eswitch_rep *rep;
2501        int i;
2502
2503        esw->offloads.rep_ops[rep_type] = ops;
2504        mlx5_esw_for_all_reps(esw, i, rep) {
2505                rep_data = &rep->rep_data[rep_type];
2506                atomic_set(&rep_data->state, REP_REGISTERED);
2507        }
2508}
2509EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
2510
2511void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
2512{
2513        struct mlx5_eswitch_rep *rep;
2514        int i;
2515
2516        if (esw->mode == MLX5_ESWITCH_OFFLOADS)
2517                __unload_reps_all_vport(esw, rep_type);
2518
2519        mlx5_esw_for_all_reps(esw, i, rep)
2520                atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
2521}
2522EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
2523
2524void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
2525{
2526        struct mlx5_eswitch_rep *rep;
2527
2528        rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
2529        return rep->rep_data[rep_type].priv;
2530}
2531
2532void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
2533                                 u16 vport,
2534                                 u8 rep_type)
2535{
2536        struct mlx5_eswitch_rep *rep;
2537
2538        rep = mlx5_eswitch_get_rep(esw, vport);
2539
2540        if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2541            esw->offloads.rep_ops[rep_type]->get_proto_dev)
2542                return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
2543        return NULL;
2544}
2545EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
2546
2547void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2548{
2549        return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
2550}
2551EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2552
2553struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
2554                                                u16 vport)
2555{
2556        return mlx5_eswitch_get_rep(esw, vport);
2557}
2558EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
2559
2560bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
2561{
2562        return vport_num >= MLX5_VPORT_FIRST_VF &&
2563               vport_num <= esw->dev->priv.sriov.max_vfs;
2564}
2565
2566bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
2567{
2568        return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
2569}
2570EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
2571
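/* The metadata value written to reg_c_0 for a vport: the lower 16 bits
 * of the vhca_id in the high half and the vport number in the low half,
 * which presumably keeps values distinct between paired eswitches.
 */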
2572u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
2573                                              u16 vport_num)
2574{
2575        return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
2576}
2577EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
2578