linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */

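/* Offload of TC matchall classifier rules to Spectrum hardware. Mirror
 * actions are implemented with SPAN agents and sample actions with
 * MPSC register based packet sampling.
 */
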
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>

#include "spectrum.h"
#include "spectrum_span.h"
#include "reg.h"

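/* Look up a previously installed matchall entry by the rule cookie
 * assigned by TC. Returns NULL if the cookie is not on the block's list.
 */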
static struct mlxsw_sp_mall_entry *
mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
{
        struct mlxsw_sp_mall_entry *mall_entry;

        list_for_each_entry(mall_entry, &block->mall.list, list)
                if (mall_entry->cookie == cookie)
                        return mall_entry;

        return NULL;
}

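/* Install a mirror rule on a port: resolve a SPAN agent towards the
 * destination netdev, mark the port as analyzed and bind the agent to
 * the ingress or egress trigger, unwinding in reverse order on error.
 */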
static int
mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_mall_entry *mall_entry)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_span_agent_parms agent_parms = {};
        struct mlxsw_sp_span_trigger_parms parms;
        enum mlxsw_sp_span_trigger trigger;
        int err;

        if (!mall_entry->mirror.to_dev) {
                netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
                return -EINVAL;
        }

        agent_parms.to_dev = mall_entry->mirror.to_dev;
        err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id,
                                      &agent_parms);
        if (err)
                return err;

        err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
                                              mall_entry->ingress);
        if (err)
                goto err_analyzed_port_get;

        trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
                                        MLXSW_SP_SPAN_TRIGGER_EGRESS;
        parms.span_id = mall_entry->mirror.span_id;
        err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
                                       &parms);
        if (err)
                goto err_agent_bind;

        return 0;

err_agent_bind:
        mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
err_analyzed_port_get:
        mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
        return err;
}

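/* Tear down a mirror rule in the reverse order of
 * mlxsw_sp_mall_port_mirror_add().
 */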
static void
mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_mall_entry *mall_entry)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_span_trigger_parms parms;
        enum mlxsw_sp_span_trigger trigger;

        trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
                                        MLXSW_SP_SPAN_TRIGGER_EGRESS;
        parms.span_id = mall_entry->mirror.span_id;
        mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
        mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
        mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
}

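/* Enable or disable packet sampling on a port by programming the
 * Monitoring Packet Sampling Configuration (MPSC) register with the
 * requested sampling rate (one in 'rate' packets is sampled).
 */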
static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                         bool enable, u32 rate)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char mpsc_pl[MLXSW_REG_MPSC_LEN];

        mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

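/* Install a sample rule on a port. Only a single sampler per port is
 * supported. The sample parameters are published with RCU before the
 * hardware is enabled, since the sampling trap handler reads them from
 * the RX path without taking RTNL.
 */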
static int
mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_mall_entry *mall_entry)
{
        int err;

        if (rtnl_dereference(mlxsw_sp_port->sample)) {
                netdev_err(mlxsw_sp_port->dev, "sample already active\n");
                return -EEXIST;
        }
        rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);

        err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
                                            mall_entry->sample.rate);
        if (err)
                goto err_port_sample_set;
        return 0;

err_port_sample_set:
        RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
        return err;
}

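/* Disable sampling on the port and drop the reference to the sample
 * parameters; the entry itself is freed by the caller via kfree_rcu().
 */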
static void
mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
        if (!mlxsw_sp_port->sample)
                return;

        mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
        RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
}

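/* Dispatch installation of a single matchall entry on a port according
 * to its action type.
 */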
static int
mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_mall_entry *mall_entry)
{
        switch (mall_entry->type) {
        case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
                return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
        case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
                return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
        default:
                WARN_ON(1);
                return -EINVAL;
        }
}

static void
mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_mall_entry *mall_entry)
{
        switch (mall_entry->type) {
        case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
                mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
                break;
        case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
                mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
                break;
        default:
                WARN_ON(1);
        }
}

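/* Recompute the cached minimum and maximum priority over all matchall
 * entries on the block. The flower code consumes these bounds (through
 * mlxsw_sp_mall_prio_get()) to enforce correct rule ordering between
 * matchall and flower rules.
 */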
static void mlxsw_sp_mall_prio_update(struct mlxsw_sp_flow_block *block)
{
        struct mlxsw_sp_mall_entry *mall_entry;

        if (list_empty(&block->mall.list))
                return;
        block->mall.min_prio = UINT_MAX;
        block->mall.max_prio = 0;
        list_for_each_entry(mall_entry, &block->mall.list, list) {
                if (mall_entry->priority < block->mall.min_prio)
                        block->mall.min_prio = mall_entry->priority;
                if (mall_entry->priority > block->mall.max_prio)
                        block->mall.max_prio = mall_entry->priority;
        }
}

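/* Install a matchall rule on all ports bound to the block. Only a
 * single mirror or sample action is offloaded, and its priority must
 * not conflict with flower rules already installed on the chain: on
 * ingress a matchall rule must precede all flower rules, on egress it
 * must follow them.
 *
 * For example (hypothetical port names), rules such as
 *
 *   tc filter add dev swp1 ingress pref 1 matchall skip_sw \
 *           action mirred egress mirror dev swp2
 *   tc filter add dev swp1 ingress pref 1 matchall skip_sw \
 *           action sample rate 100 group 1
 *
 * are offloaded through this entry point.
 */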
int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_flow_block *block,
                          struct tc_cls_matchall_offload *f)
{
        struct mlxsw_sp_flow_block_binding *binding;
        struct mlxsw_sp_mall_entry *mall_entry;
        __be16 protocol = f->common.protocol;
        struct flow_action_entry *act;
        unsigned int flower_min_prio;
        unsigned int flower_max_prio;
        bool flower_prio_valid;
        int err;

        if (!flow_offload_has_one_action(&f->rule->action)) {
                NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
                return -EOPNOTSUPP;
        }

        if (f->common.chain_index) {
                NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
                return -EOPNOTSUPP;
        }

        if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
                NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
                return -EOPNOTSUPP;
        }

        err = mlxsw_sp_flower_prio_get(mlxsw_sp, block, f->common.chain_index,
                                       &flower_min_prio, &flower_max_prio);
        if (err) {
                if (err != -ENOENT) {
                        NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
                        return err;
                }
                flower_prio_valid = false;
                /* No flower filters are installed in specified chain. */
        } else {
                flower_prio_valid = true;
        }

        mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
        if (!mall_entry)
                return -ENOMEM;
        mall_entry->cookie = f->cookie;
        mall_entry->priority = f->common.prio;
        mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);

        act = &f->rule->action.entries[0];

        if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
                if (flower_prio_valid && mall_entry->ingress &&
                    mall_entry->priority >= flower_min_prio) {
                        NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                if (flower_prio_valid && !mall_entry->ingress &&
                    mall_entry->priority <= flower_max_prio) {
                        NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
                mall_entry->mirror.to_dev = act->dev;
        } else if (act->id == FLOW_ACTION_SAMPLE &&
                   protocol == htons(ETH_P_ALL)) {
                if (!mall_entry->ingress) {
                        NL_SET_ERR_MSG(f->common.extack, "Sample is not supported on egress");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                if (flower_prio_valid &&
                    mall_entry->priority >= flower_min_prio) {
                        NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
                        NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
                mall_entry->sample.psample_group = act->sample.psample_group;
                mall_entry->sample.truncate = act->sample.truncate;
                mall_entry->sample.trunc_size = act->sample.trunc_size;
                mall_entry->sample.rate = act->sample.rate;
        } else {
                err = -EOPNOTSUPP;
                goto errout;
        }

        list_for_each_entry(binding, &block->binding_list, list) {
                err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
                                                  mall_entry);
                if (err)
                        goto rollback;
        }

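        /* A matchall rule is direction-specific, so an ingress rule
         * blocks binding the block to egress and vice versa.
         */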
        block->rule_count++;
        if (mall_entry->ingress)
                block->egress_blocker_rule_count++;
        else
                block->ingress_blocker_rule_count++;
        list_add_tail(&mall_entry->list, &block->mall.list);
        mlxsw_sp_mall_prio_update(block);
        return 0;

rollback:
        list_for_each_entry_continue_reverse(binding, &block->binding_list,
                                             list)
                mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
errout:
        kfree(mall_entry);
        return err;
}

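/* Remove a matchall rule from all ports bound to the block and free the
 * entry. The entry is freed via kfree_rcu() because the sampling RX
 * path may still hold a reference to it.
 */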
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
                           struct tc_cls_matchall_offload *f)
{
        struct mlxsw_sp_flow_block_binding *binding;
        struct mlxsw_sp_mall_entry *mall_entry;

        mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
        if (!mall_entry) {
                NL_SET_ERR_MSG(f->common.extack, "Entry not found");
                return;
        }

        list_del(&mall_entry->list);
        if (mall_entry->ingress)
                block->egress_blocker_rule_count--;
        else
                block->ingress_blocker_rule_count--;
        block->rule_count--;
        list_for_each_entry(binding, &block->binding_list, list)
                mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
        kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
        mlxsw_sp_mall_prio_update(block);
}

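/* Replay all matchall entries of the block on a newly bound port,
 * unwinding already installed entries on failure.
 */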
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
                            struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp_mall_entry *mall_entry;
        int err;

        list_for_each_entry(mall_entry, &block->mall.list, list) {
                err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
                if (err)
                        goto rollback;
        }
        return 0;

rollback:
        list_for_each_entry_continue_reverse(mall_entry, &block->mall.list,
                                             list)
                mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
        return err;
}

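/* Remove all matchall entries of the block from a port that is being
 * unbound.
 */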
void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
                               struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp_mall_entry *mall_entry;

        list_for_each_entry(mall_entry, &block->mall.list, list)
                mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
}

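/* Report the priority range occupied by matchall rules on chain 0. The
 * flower code uses this to validate the ordering of its own rules
 * relative to matchall rules.
 */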
int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
                           unsigned int *p_min_prio, unsigned int *p_max_prio)
{
        if (chain_index || list_empty(&block->mall.list))
                /* In case there are no matchall rules, the caller
                 * receives -ENOENT to indicate there is no need
                 * to check the priorities.
                 */
                return -ENOENT;
        *p_min_prio = block->mall.min_prio;
        *p_max_prio = block->mall.max_prio;
        return 0;
}