linux/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */

#include "mlx5_core.h"
#include "eswitch.h"
#include "helper.h"
#include "lgcy.h"

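/* Remove the legacy ingress ACL rules owned by this vport: the per-vport
 * drop rule and the allow rule, which is released through the common ACL
 * helper.
 */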
static void esw_acl_ingress_lgcy_rules_destroy(struct mlx5_vport *vport)
{
	if (vport->ingress.legacy.drop_rule) {
		mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
		vport->ingress.legacy.drop_rule = NULL;
	}
	esw_acl_ingress_allow_rule_destroy(vport);
}

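/* Create the four single-entry flow groups backing the legacy ingress ACL
 * table, one per flow index:
 *   0 - match cvlan_tag + smac (allow untagged traffic from the original mac)
 *   1 - match cvlan_tag only   (allow any untagged traffic)
 *   2 - match smac only        (allow any traffic from the original mac)
 *   3 - no match criteria      (catch-all drop rule)
 */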
static int esw_acl_ingress_lgcy_groups_create(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	int err;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto spoof_err;
	}
	vport->ingress.legacy.allow_untagged_spoofchk_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto untagged_err;
	}
	vport->ingress.legacy.allow_untagged_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto allow_spoof_err;
	}
	vport->ingress.legacy.allow_spoofchk_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
			 vport->vport, err);
		goto drop_err;
	}
	vport->ingress.legacy.drop_grp = g;
	kvfree(flow_group_in);
	return 0;

drop_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
		vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
	}
allow_spoof_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
		vport->ingress.legacy.allow_untagged_only_grp = NULL;
	}
untagged_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
		vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
	}
spoof_err:
	kvfree(flow_group_in);
	return err;
}

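/* Release whichever of the four ACL flow groups created above still exist. */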
static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
{
	if (vport->ingress.legacy.allow_spoofchk_only_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
		vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
	}
	if (vport->ingress.legacy.allow_untagged_only_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
		vport->ingress.legacy.allow_untagged_only_grp = NULL;
	}
	if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
		vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
	}
	if (vport->ingress.legacy.drop_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
		vport->ingress.legacy.drop_grp = NULL;
	}
}

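/* (Re)program the legacy ingress ACL for the vport based on its current
 * vlan/qos and spoofchk settings. Existing rules are always removed first;
 * if none of these features is enabled, the whole ACL is torn down instead.
 */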
int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	struct mlx5_flow_destination drop_ctr_dst = {};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec = NULL;
	struct mlx5_fc *counter = NULL;
	/* The ingress acl table contains 4 groups
	 * (2 active rules at the same time -
	 *      1 allow rule from one of the first 3 groups.
	 *      1 drop rule from the last group):
	 * 1)Allow untagged traffic with smac=original mac.
	 * 2)Allow untagged traffic.
	 * 3)Allow traffic with smac=original mac.
	 * 4)Drop all other traffic.
	 */
	int table_size = 4;
	int dest_num = 0;
	int err = 0;
	u8 *smac_v;

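	/* Drop the rules from any previous configuration; they are rebuilt
	 * below to reflect the current vport settings.
	 */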
	esw_acl_ingress_lgcy_rules_destroy(vport);

	if (vport->ingress.legacy.drop_counter) {
		counter = vport->ingress.legacy.drop_counter;
	} else if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
		counter = mlx5_fc_create(esw->dev, false);
		if (IS_ERR(counter)) {
			esw_warn(esw->dev,
				 "vport[%d] configure ingress drop rule counter failed\n",
				 vport->vport);
			counter = NULL;
		}
		vport->ingress.legacy.drop_counter = counter;
	}

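	/* Without vlan/qos or spoofchk there is nothing to enforce on ingress,
	 * so release any previously created ACL resources and return.
	 */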
	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
		esw_acl_ingress_lgcy_cleanup(esw, vport);
		return 0;
	}

	if (!vport->ingress.acl) {
		vport->ingress.acl = esw_acl_table_create(esw, vport,
							  MLX5_FLOW_NAMESPACE_ESW_INGRESS,
							  table_size);
		if (IS_ERR(vport->ingress.acl)) {
			err = PTR_ERR(vport->ingress.acl);
			vport->ingress.acl = NULL;
			return err;
		}

		err = esw_acl_ingress_lgcy_groups_create(esw, vport);
		if (err)
			goto out;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	if (vport->info.vlan || vport->info.qos)
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);

	if (vport->info.spoofchk) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.smac_15_0);
		smac_v = MLX5_ADDR_OF(fte_match_param,
				      spec->match_value,
				      outer_headers.smac_47_16);
		ether_addr_copy(smac_v, vport->info.mac);
	}

	/* Create ingress allow rule */
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
							&flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

	memset(&flow_act, 0, sizeof(flow_act));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	/* Attach drop flow counter */
	if (counter) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_ctr_dst.counter_id = mlx5_fc_id(counter);
		dst = &drop_ctr_dst;
		dest_num++;
	}
	vport->ingress.legacy.drop_rule =
		mlx5_add_flow_rules(vport->ingress.acl, NULL,
				    &flow_act, dst, dest_num);
	if (IS_ERR(vport->ingress.legacy.drop_rule)) {
		err = PTR_ERR(vport->ingress.legacy.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress drop rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.legacy.drop_rule = NULL;
		goto out;
	}
	kvfree(spec);
	return 0;

out:
	esw_acl_ingress_lgcy_cleanup(esw, vport);
	kvfree(spec);
	return err;
}

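/* Tear down the vport's legacy ingress ACL: rules, flow groups, the ACL
 * table itself and, finally, the drop counter if one was allocated.
 */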
void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		goto clean_drop_counter;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_acl_ingress_lgcy_rules_destroy(vport);
	esw_acl_ingress_lgcy_groups_destroy(vport);
	esw_acl_ingress_table_destroy(vport);

clean_drop_counter:
	if (vport->ingress.legacy.drop_counter) {
		mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
		vport->ingress.legacy.drop_counter = NULL;
	}
}