linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
   1/*
   2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
   3 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
   4 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
   5 *
   6 * Redistribution and use in source and binary forms, with or without
   7 * modification, are permitted provided that the following conditions are met:
   8 *
   9 * 1. Redistributions of source code must retain the above copyright
  10 *    notice, this list of conditions and the following disclaimer.
  11 * 2. Redistributions in binary form must reproduce the above copyright
  12 *    notice, this list of conditions and the following disclaimer in the
  13 *    documentation and/or other materials provided with the distribution.
  14 * 3. Neither the names of the copyright holders nor the names of its
  15 *    contributors may be used to endorse or promote products derived from
  16 *    this software without specific prior written permission.
  17 *
  18 * Alternatively, this software may be distributed under the terms of the
  19 * GNU General Public License ("GPL") version 2 as published by the Free
  20 * Software Foundation.
  21 *
  22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  32 * POSSIBILITY OF SUCH DAMAGE.
  33 */
  34
  35#include <linux/kernel.h>
  36#include <linux/errno.h>
  37#include <linux/netdevice.h>
  38#include <net/flow_dissector.h>
  39#include <net/pkt_cls.h>
  40#include <net/tc_act/tc_gact.h>
  41#include <net/tc_act/tc_mirred.h>
  42
  43#include "spectrum.h"
  44#include "core_acl_flex_keys.h"
  45
  46static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
  47                                         struct net_device *dev,
  48                                         struct mlxsw_sp_acl_rule_info *rulei,
  49                                         struct tcf_exts *exts)
  50{
  51        const struct tc_action *a;
  52        int err;
  53
  54        if (tc_no_actions(exts))
  55                return 0;
  56
  57        tc_for_each_action(a, exts) {
  58                if (is_tcf_gact_shot(a)) {
  59                        err = mlxsw_sp_acl_rulei_act_drop(rulei);
  60                        if (err)
  61                                return err;
  62                } else if (is_tcf_mirred_egress_redirect(a)) {
  63                        int ifindex = tcf_mirred_ifindex(a);
  64                        struct net_device *out_dev;
  65
  66                        out_dev = __dev_get_by_index(dev_net(dev), ifindex);
  67                        if (out_dev == dev)
  68                                out_dev = NULL;
  69
  70                        err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
  71                                                         out_dev);
  72                        if (err)
  73                                return err;
  74                } else {
  75                        dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
  76                        return -EOPNOTSUPP;
  77                }
  78        }
  79        return 0;
  80}
  81
  82static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
  83                                       struct tc_cls_flower_offload *f)
  84{
  85        struct flow_dissector_key_ipv4_addrs *key =
  86                skb_flow_dissector_target(f->dissector,
  87                                          FLOW_DISSECTOR_KEY_IPV4_ADDRS,
  88                                          f->key);
  89        struct flow_dissector_key_ipv4_addrs *mask =
  90                skb_flow_dissector_target(f->dissector,
  91                                          FLOW_DISSECTOR_KEY_IPV4_ADDRS,
  92                                          f->mask);
  93
  94        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
  95                                       ntohl(key->src), ntohl(mask->src));
  96        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
  97                                       ntohl(key->dst), ntohl(mask->dst));
  98}
  99
 100static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
 101                                       struct tc_cls_flower_offload *f)
 102{
 103        struct flow_dissector_key_ipv6_addrs *key =
 104                skb_flow_dissector_target(f->dissector,
 105                                          FLOW_DISSECTOR_KEY_IPV6_ADDRS,
 106                                          f->key);
 107        struct flow_dissector_key_ipv6_addrs *mask =
 108                skb_flow_dissector_target(f->dissector,
 109                                          FLOW_DISSECTOR_KEY_IPV6_ADDRS,
 110                                          f->mask);
 111        size_t addr_half_size = sizeof(key->src) / 2;
 112
 113        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
 114                                       &key->src.s6_addr[0],
 115                                       &mask->src.s6_addr[0],
 116                                       addr_half_size);
 117        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
 118                                       &key->src.s6_addr[addr_half_size],
 119                                       &mask->src.s6_addr[addr_half_size],
 120                                       addr_half_size);
 121        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
 122                                       &key->dst.s6_addr[0],
 123                                       &mask->dst.s6_addr[0],
 124                                       addr_half_size);
 125        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
 126                                       &key->dst.s6_addr[addr_half_size],
 127                                       &mask->dst.s6_addr[addr_half_size],
 128                                       addr_half_size);
 129}
 130
 131static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
 132                                       struct mlxsw_sp_acl_rule_info *rulei,
 133                                       struct tc_cls_flower_offload *f,
 134                                       u8 ip_proto)
 135{
 136        struct flow_dissector_key_ports *key, *mask;
 137
 138        if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
 139                return 0;
 140
 141        if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
 142                dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
 143                return -EINVAL;
 144        }
 145
 146        key = skb_flow_dissector_target(f->dissector,
 147                                        FLOW_DISSECTOR_KEY_PORTS,
 148                                        f->key);
 149        mask = skb_flow_dissector_target(f->dissector,
 150                                         FLOW_DISSECTOR_KEY_PORTS,
 151                                         f->mask);
 152        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
 153                                       ntohs(key->dst), ntohs(mask->dst));
 154        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
 155                                       ntohs(key->src), ntohs(mask->src));
 156        return 0;
 157}
 158
 159static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
 160                                 struct net_device *dev,
 161                                 struct mlxsw_sp_acl_rule_info *rulei,
 162                                 struct tc_cls_flower_offload *f)
 163{
 164        u16 addr_type = 0;
 165        u8 ip_proto = 0;
 166        int err;
 167
 168        if (f->dissector->used_keys &
 169            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
 170              BIT(FLOW_DISSECTOR_KEY_BASIC) |
 171              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
 172              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
 173              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
 174              BIT(FLOW_DISSECTOR_KEY_PORTS))) {
 175                dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
 176                return -EOPNOTSUPP;
 177        }
 178
 179        mlxsw_sp_acl_rulei_priority(rulei, f->prio);
 180
 181        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
 182                struct flow_dissector_key_control *key =
 183                        skb_flow_dissector_target(f->dissector,
 184                                                  FLOW_DISSECTOR_KEY_CONTROL,
 185                                                  f->key);
 186                addr_type = key->addr_type;
 187        }
 188
 189        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
 190                struct flow_dissector_key_basic *key =
 191                        skb_flow_dissector_target(f->dissector,
 192                                                  FLOW_DISSECTOR_KEY_BASIC,
 193                                                  f->key);
 194                struct flow_dissector_key_basic *mask =
 195                        skb_flow_dissector_target(f->dissector,
 196                                                  FLOW_DISSECTOR_KEY_BASIC,
 197                                                  f->mask);
 198                u16 n_proto_key = ntohs(key->n_proto);
 199                u16 n_proto_mask = ntohs(mask->n_proto);
 200
 201                if (n_proto_key == ETH_P_ALL) {
 202                        n_proto_key = 0;
 203                        n_proto_mask = 0;
 204                }
 205                mlxsw_sp_acl_rulei_keymask_u32(rulei,
 206                                               MLXSW_AFK_ELEMENT_ETHERTYPE,
 207                                               n_proto_key, n_proto_mask);
 208
 209                ip_proto = key->ip_proto;
 210                mlxsw_sp_acl_rulei_keymask_u32(rulei,
 211                                               MLXSW_AFK_ELEMENT_IP_PROTO,
 212                                               key->ip_proto, mask->ip_proto);
 213        }
 214
 215        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
 216                struct flow_dissector_key_eth_addrs *key =
 217                        skb_flow_dissector_target(f->dissector,
 218                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
 219                                                  f->key);
 220                struct flow_dissector_key_eth_addrs *mask =
 221                        skb_flow_dissector_target(f->dissector,
 222                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
 223                                                  f->mask);
 224
 225                mlxsw_sp_acl_rulei_keymask_buf(rulei,
 226                                               MLXSW_AFK_ELEMENT_DMAC,
 227                                               key->dst, mask->dst,
 228                                               sizeof(key->dst));
 229                mlxsw_sp_acl_rulei_keymask_buf(rulei,
 230                                               MLXSW_AFK_ELEMENT_SMAC,
 231                                               key->src, mask->src,
 232                                               sizeof(key->src));
 233        }
 234
 235        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
 236                mlxsw_sp_flower_parse_ipv4(rulei, f);
 237
 238        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
 239                mlxsw_sp_flower_parse_ipv6(rulei, f);
 240
 241        err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
 242        if (err)
 243                return err;
 244
 245        return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts);
 246}
 247
 248int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 249                            __be16 protocol, struct tc_cls_flower_offload *f)
 250{
 251        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 252        struct net_device *dev = mlxsw_sp_port->dev;
 253        struct mlxsw_sp_acl_rule_info *rulei;
 254        struct mlxsw_sp_acl_ruleset *ruleset;
 255        struct mlxsw_sp_acl_rule *rule;
 256        int err;
 257
 258        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
 259                                           MLXSW_SP_ACL_PROFILE_FLOWER);
 260        if (IS_ERR(ruleset))
 261                return PTR_ERR(ruleset);
 262
 263        rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
 264        if (IS_ERR(rule)) {
 265                err = PTR_ERR(rule);
 266                goto err_rule_create;
 267        }
 268
 269        rulei = mlxsw_sp_acl_rule_rulei(rule);
 270        err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f);
 271        if (err)
 272                goto err_flower_parse;
 273
 274        err = mlxsw_sp_acl_rulei_commit(rulei);
 275        if (err)
 276                goto err_rulei_commit;
 277
 278        err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
 279        if (err)
 280                goto err_rule_add;
 281
 282        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 283        return 0;
 284
 285err_rule_add:
 286err_rulei_commit:
 287err_flower_parse:
 288        mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
 289err_rule_create:
 290        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 291        return err;
 292}
 293
 294void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 295                             struct tc_cls_flower_offload *f)
 296{
 297        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 298        struct mlxsw_sp_acl_ruleset *ruleset;
 299        struct mlxsw_sp_acl_rule *rule;
 300
 301        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
 302                                           ingress,
 303                                           MLXSW_SP_ACL_PROFILE_FLOWER);
 304        if (IS_ERR(ruleset))
 305                return;
 306
 307        rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
 308        if (rule) {
 309                mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
 310                mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
 311        }
 312
 313        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 314}
 315